// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_MIPS64

#include "src/code-stubs.h"
#include "src/api-arguments.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/mips64/code-stubs-mips64.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
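  // By this stub's calling convention a0 holds the argument count, a1 the
  // constructor and a2 the allocation site. The constructor is stored into
  // the stack slot a0 slots above sp, a1 and a2 are pushed, and a0 is bumped
  // by 3 so Runtime::kNewArray sees the extra values.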
  __ dsll(t9, a0, kPointerSizeLog2);
  __ Daddu(t9, sp, t9);
  __ sd(a1, MemOperand(t9, 0));
  __ Push(a1);
  __ Push(a2);
  __ Daddu(a0, a0, 3);
  __ TailCallRuntime(Runtime::kNewArray);
}

void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
  descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}

void FastFunctionBindStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
  descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}

static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cc);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* rhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs);


void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                               ExternalReference miss) {
  // Update the static counter each time a new code stub is generated.
  isolate()->counters()->code_stubs()->Increment();

  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
  int param_count = descriptor.GetRegisterParameterCount();
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    DCHECK((param_count == 0) ||
           a0.is(descriptor.GetRegisterParameter(param_count - 1)));
    // Push arguments, adjust sp.
    __ Dsubu(sp, sp, Operand(param_count * kPointerSize));
    for (int i = 0; i < param_count; ++i) {
      // Store argument to stack.
      __ sd(descriptor.GetRegisterParameter(i),
            MemOperand(sp, (param_count - 1 - i) * kPointerSize));
    }
    __ CallExternalReference(miss, param_count);
  }

  __ Ret();
}

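// Converts the double at MemOperand(source(), offset()) into a 32-bit integer
// with ECMAScript ToInt32 truncation semantics. A fast path uses the FPU
// truncate instruction and checks the FCSR exception flags; on overflow,
// underflow or an invalid operation the value is truncated manually from its
// IEEE 754 bit pattern.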
void DoubleToIStub::Generate(MacroAssembler* masm) {
  Label out_of_range, only_low, negate, done;
  Register input_reg = source();
  Register result_reg = destination();

  int double_offset = offset();
  // Account for saved regs if input is sp.
  if (input_reg.is(sp)) double_offset += 3 * kPointerSize;

  Register scratch =
      GetRegisterThatIsNotOneOf(input_reg, result_reg);
  Register scratch2 =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
  Register scratch3 =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
  DoubleRegister double_scratch = kLithiumScratchDouble;

  __ Push(scratch, scratch2, scratch3);
  if (!skip_fastpath()) {
    // Load double input.
    __ ldc1(double_scratch, MemOperand(input_reg, double_offset));

    // Clear cumulative exception flags and save the FCSR.
    __ cfc1(scratch2, FCSR);
    __ ctc1(zero_reg, FCSR);

    // Try a conversion to a signed integer.
    __ Trunc_w_d(double_scratch, double_scratch);
    // Move the converted value into the result register.
    __ mfc1(scratch3, double_scratch);

    // Retrieve and restore the FCSR.
    __ cfc1(scratch, FCSR);
    __ ctc1(scratch2, FCSR);

    // Check for overflow and NaNs.
    __ And(
        scratch, scratch,
        kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
           | kFCSRInvalidOpFlagMask);
    // If we had no exceptions then set result_reg and we are done.
    Label error;
    __ Branch(&error, ne, scratch, Operand(zero_reg));
    __ Move(result_reg, scratch3);
    __ Branch(&done);
    __ bind(&error);
  }

  // Load the double value and perform a manual truncation.
  Register input_high = scratch2;
  Register input_low = scratch3;

  __ lw(input_low,
        MemOperand(input_reg, double_offset + Register::kMantissaOffset));
  __ lw(input_high,
        MemOperand(input_reg, double_offset + Register::kExponentOffset));

  Label normal_exponent, restore_sign;
  // Extract the biased exponent in result.
  __ Ext(result_reg,
         input_high,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // Check for Infinity and NaNs, which should return 0.
  __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
  __ Movz(result_reg, zero_reg, scratch);
  __ Branch(&done, eq, scratch, Operand(zero_reg));

  // Express exponent as delta to (number of mantissa bits + 31).
  __ Subu(result_reg,
          result_reg,
          Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));

  // If the delta is strictly positive, all bits would be shifted away,
  // which means that we can return 0.
  __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
  __ mov(result_reg, zero_reg);
  __ Branch(&done);

  __ bind(&normal_exponent);
  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
  // Calculate shift.
  __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));

  // Save the sign.
  Register sign = result_reg;
  result_reg = no_reg;
  __ And(sign, input_high, Operand(HeapNumber::kSignMask));

  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
  // to check for this specific case.
  Label high_shift_needed, high_shift_done;
  __ Branch(&high_shift_needed, lt, scratch, Operand(32));
  __ mov(input_high, zero_reg);
  __ Branch(&high_shift_done);
  __ bind(&high_shift_needed);

  // Set the implicit 1 before the mantissa part in input_high.
  __ Or(input_high,
        input_high,
        Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  // Shift the mantissa bits to the correct position.
  // We don't need to clear non-mantissa bits as they will be shifted away.
  // If they weren't, it would mean that the answer is in the 32bit range.
  __ sllv(input_high, input_high, scratch);

  __ bind(&high_shift_done);

  // Replace the shifted bits with bits from the lower mantissa word.
  Label pos_shift, shift_done;
  __ li(at, 32);
  __ subu(scratch, at, scratch);
  __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));

  // Negate scratch.
  __ Subu(scratch, zero_reg, scratch);
  __ sllv(input_low, input_low, scratch);
  __ Branch(&shift_done);

  __ bind(&pos_shift);
  __ srlv(input_low, input_low, scratch);

  __ bind(&shift_done);
  __ Or(input_high, input_high, Operand(input_low));
  // Restore sign if necessary.
  __ mov(scratch, sign);
  result_reg = sign;
  sign = no_reg;
  __ Subu(result_reg, zero_reg, input_high);
  __ Movz(result_reg, input_high, scratch);

  __ bind(&done);

  __ Pop(scratch, scratch2, scratch3);
  __ Ret();
}


// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cc) {
  Label not_identical;
  Label heap_number, return_equal;
  Register exp_mask_reg = t1;

  __ Branch(&not_identical, ne, a0, Operand(a1));

  __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));

  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
  // so we do the second best thing - test it ourselves.
  // The operands are identical and not both Smis, so (being the same object)
  // neither of them is a Smi. If the object is not a heap number, then
  // return equal.
  __ GetObjectType(a0, t0, t0);
  if (cc == less || cc == greater) {
    // Call runtime on identical JSObjects.
    __ Branch(slow, greater, t0, Operand(FIRST_JS_RECEIVER_TYPE));
    // Call runtime on identical symbols since we need to throw a TypeError.
    __ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
    // Call runtime on identical SIMD values since we must throw a TypeError.
    __ Branch(slow, eq, t0, Operand(SIMD128_VALUE_TYPE));
  } else {
    __ Branch(&heap_number, eq, t0, Operand(HEAP_NUMBER_TYPE));
    // Comparing JS objects with <=, >= is complicated.
    if (cc != eq) {
      __ Branch(slow, greater, t0, Operand(FIRST_JS_RECEIVER_TYPE));
      // Call runtime on identical symbols since we need to throw a TypeError.
      __ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
      // Call runtime on identical SIMD values since we must throw a TypeError.
      __ Branch(slow, eq, t0, Operand(SIMD128_VALUE_TYPE));
      // Normally here we fall through to return_equal, but undefined is
      // special: (undefined == undefined) == true, but
      // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
      if (cc == less_equal || cc == greater_equal) {
        __ Branch(&return_equal, ne, t0, Operand(ODDBALL_TYPE));
        __ LoadRoot(a6, Heap::kUndefinedValueRootIndex);
        __ Branch(&return_equal, ne, a0, Operand(a6));
        DCHECK(is_int16(GREATER) && is_int16(LESS));
        __ Ret(USE_DELAY_SLOT);
        if (cc == le) {
          // undefined <= undefined should fail.
          __ li(v0, Operand(GREATER));
        } else {
          // undefined >= undefined should fail.
          __ li(v0, Operand(LESS));
        }
      }
    }
  }

  __ bind(&return_equal);
  DCHECK(is_int16(GREATER) && is_int16(LESS));
  __ Ret(USE_DELAY_SLOT);
  if (cc == less) {
    __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cc == greater) {
    __ li(v0, Operand(LESS));     // Things aren't greater than themselves.
  } else {
    __ mov(v0, zero_reg);         // Things are <=, >=, ==, === themselves.
  }
  // For less and greater we don't have to check for NaN since the result of
  // x < x is false regardless.  For the others here is some code to check
  // for NaN.
  if (cc != lt && cc != gt) {
    __ bind(&heap_number);
    // It is a heap number, so return non-equal if it's NaN and equal if it's
    // not NaN.

    // The representation of NaN values has all exponent bits (52..62) set,
    // and not all mantissa bits (0..51) clear.
    // Read top bits of double representation (second word of value).
    __ lwu(a6, FieldMemOperand(a0, HeapNumber::kExponentOffset));
    // Test that exponent bits are all set.
    __ And(a7, a6, Operand(exp_mask_reg));
    // If all bits not set (ne cond), then not a NaN, objects are equal.
    __ Branch(&return_equal, ne, a7, Operand(exp_mask_reg));

    // Shift out flag and all exponent bits, retaining only mantissa.
    __ sll(a6, a6, HeapNumber::kNonMantissaBitsInTopWord);
    // Or with all low-bits of mantissa.
    __ lwu(a7, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
    __ Or(v0, a7, Operand(a6));
    // For equal we already have the right value in v0:  Return zero (equal)
    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
    // not (it's a NaN).  For <= and >= we need to load v0 with the failing
    // value if it's a NaN.
    if (cc != eq) {
      // All-zero means Infinity means equal.
      __ Ret(eq, v0, Operand(zero_reg));
      DCHECK(is_int16(GREATER) && is_int16(LESS));
      __ Ret(USE_DELAY_SLOT);
      if (cc == le) {
        __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
      } else {
        __ li(v0, Operand(LESS));     // NaN >= NaN should fail.
      }
    }
  }
  // No fall through here.

  __ bind(&not_identical);
}


static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* both_loaded_as_doubles,
                                    Label* slow,
                                    bool strict) {
  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  Label lhs_is_smi;
  __ JumpIfSmi(lhs, &lhs_is_smi);
  // Rhs is a Smi.
  // Check whether the non-smi is a heap number.
  __ GetObjectType(lhs, t0, t0);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal (lhs is already not zero).
    __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE));
    __ mov(v0, lhs);
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE));
  }
  // Rhs is a smi, lhs is a number.
  // Convert smi rhs to double.
  __ SmiUntag(at, rhs);
  __ mtc1(at, f14);
  __ cvt_d_w(f14, f14);
  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));

  // We now have both loaded as doubles.
  __ jmp(both_loaded_as_doubles);

  __ bind(&lhs_is_smi);
  // Lhs is a Smi.  Check whether the non-smi is a heap number.
  __ GetObjectType(rhs, t0, t0);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal.
    __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE));
    __ li(v0, Operand(1));
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE));
  }

  // Lhs is a smi, rhs is a number.
  // Convert smi lhs to double.
  __ SmiUntag(at, lhs);
  __ mtc1(at, f12);
  __ cvt_d_w(f12, f12);
  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  // Fall through to both_loaded_as_doubles.
}


static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs) {
    // If either operand is a JS object or an oddball value, then they are
    // not equal since their pointers are different.
    // There is no test for undetectability in strict equality.
    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
    Label first_non_object;
    // Get the type of the first operand into a2 and compare it with
    // FIRST_JS_RECEIVER_TYPE.
    __ GetObjectType(lhs, a2, a2);
    __ Branch(&first_non_object, less, a2, Operand(FIRST_JS_RECEIVER_TYPE));

    // Return non-zero.
    Label return_not_equal;
    __ bind(&return_not_equal);
    __ Ret(USE_DELAY_SLOT);
    __ li(v0, Operand(1));

    __ bind(&first_non_object);
    // Check for oddballs: true, false, null, undefined.
    __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));

    __ GetObjectType(rhs, a3, a3);
    __ Branch(&return_not_equal, greater, a3, Operand(FIRST_JS_RECEIVER_TYPE));

    // Check for oddballs: true, false, null, undefined.
    __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));

    // Now that we have the types we might as well check for
    // internalized-internalized.
    STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
    __ Or(a2, a2, Operand(a3));
    __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
    __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
}


static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
                                       Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers,
                                       Label* slow) {
  __ GetObjectType(lhs, a3, a2);
  __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
  __ ld(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
  // If first was a heap number & second wasn't, go to slow case.
  __ Branch(slow, ne, a3, Operand(a2));

  // Both are heap numbers. Load them up then jump to the code we have
  // for that.
  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));

  __ jmp(both_loaded_as_doubles);
}


// Fast negative check for internalized-to-internalized equality.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                     Register lhs, Register rhs,
                                                     Label* possible_strings,
                                                     Label* runtime_call) {
  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  // a2 is object type of rhs.
  Label object_test, return_equal, return_unequal, undetectable;
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ And(at, a2, Operand(kIsNotStringMask));
  __ Branch(&object_test, ne, at, Operand(zero_reg));
  __ And(at, a2, Operand(kIsNotInternalizedMask));
  __ Branch(possible_strings, ne, at, Operand(zero_reg));
  __ GetObjectType(rhs, a3, a3);
  __ Branch(runtime_call, ge, a3, Operand(FIRST_NONSTRING_TYPE));
  __ And(at, a3, Operand(kIsNotInternalizedMask));
  __ Branch(possible_strings, ne, at, Operand(zero_reg));

  // Both are internalized. We already checked they weren't the same pointer so
  // they are not equal. Return non-equal by returning the non-zero object
  // pointer in v0.
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);  // In delay slot.

  __ bind(&object_test);
  __ ld(a2, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ ld(a3, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ lbu(t0, FieldMemOperand(a2, Map::kBitFieldOffset));
  __ lbu(t1, FieldMemOperand(a3, Map::kBitFieldOffset));
  __ And(at, t0, Operand(1 << Map::kIsUndetectable));
  __ Branch(&undetectable, ne, at, Operand(zero_reg));
  __ And(at, t1, Operand(1 << Map::kIsUndetectable));
  __ Branch(&return_unequal, ne, at, Operand(zero_reg));

  __ GetInstanceType(a2, a2);
  __ Branch(runtime_call, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
  __ GetInstanceType(a3, a3);
  __ Branch(runtime_call, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE));

  __ bind(&return_unequal);
  // Return non-equal by returning the non-zero object pointer in v0.
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);  // In delay slot.

  __ bind(&undetectable);
  __ And(at, t1, Operand(1 << Map::kIsUndetectable));
  __ Branch(&return_unequal, eq, at, Operand(zero_reg));

  // If both sides are JSReceivers, then the result is false according to
  // the HTML specification, which says that only comparisons with null or
  // undefined are affected by special casing for document.all.
  __ GetInstanceType(a2, a2);
  __ Branch(&return_equal, eq, a2, Operand(ODDBALL_TYPE));
  __ GetInstanceType(a3, a3);
  __ Branch(&return_unequal, ne, a3, Operand(ODDBALL_TYPE));

  __ bind(&return_equal);
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(EQUAL));  // In delay slot.
}


static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
                                         Register scratch,
                                         CompareICState::State expected,
                                         Label* fail) {
  Label ok;
  if (expected == CompareICState::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareICState::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
                DONT_DO_SMI_CHECK);
  }
  // We could be strict about internalized/string here, but as long as
  // hydrogen doesn't care, the stub doesn't have to care either.
  __ bind(&ok);
}

// On entry a1 (lhs) and a0 (rhs) are the values to be compared.
// On exit v0 is 0, positive or negative to indicate the result of
// the comparison.
void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
  Register lhs = a1;
  Register rhs = a0;
  Condition cc = GetCondition();

  Label miss;
  CompareICStub_CheckInputType(masm, lhs, a2, left(), &miss);
  CompareICStub_CheckInputType(masm, rhs, a3, right(), &miss);

  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles;

  Label not_two_smis, smi_done;
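  // Fast path: if both operands are Smis, untag them and return their
  // difference, whose sign gives the comparison result directly.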
  __ Or(a2, a1, a0);
  __ JumpIfNotSmi(a2, &not_two_smis);
  __ SmiUntag(a1);
  __ SmiUntag(a0);

  __ Ret(USE_DELAY_SLOT);
  __ dsubu(v0, a1, a0);
  __ bind(&not_two_smis);

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical.  Either returns the answer
  // or goes to slow.  Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, &slow, cc);

  // If either is a Smi (we know that not both are), then they can only
  // be strictly equal if the other is a HeapNumber.
  STATIC_ASSERT(kSmiTag == 0);
  DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
  __ And(a6, lhs, Operand(rhs));
  __ JumpIfNotSmi(a6, &not_smis, a4);
  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
  // 2) Go to slow.
  // 3) Fall through to both_loaded_as_doubles.
  // 4) Jump to rhs_not_nan.
  // In cases 3 and 4 we have found out we were dealing with a number-number
  // comparison and the numbers have been loaded into f12 and f14 as doubles,
  // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
  EmitSmiNonsmiComparison(masm, lhs, rhs,
                          &both_loaded_as_doubles, &slow, strict());

  __ bind(&both_loaded_as_doubles);
  // f12, f14 are the double representations of the left hand side
  // and the right hand side if we have FPU. Otherwise a2, a3 represent
  // left hand side and a0, a1 represent right hand side.

  Label nan;
  __ li(a4, Operand(LESS));
  __ li(a5, Operand(GREATER));
  __ li(a6, Operand(EQUAL));

  // Check if either rhs or lhs is NaN.
  __ BranchF(NULL, &nan, eq, f12, f14);

  // Check if LESS condition is satisfied. If true, move conditionally
  // result to v0.
  if (kArchVariant != kMips64r6) {
    __ c(OLT, D, f12, f14);
    __ Movt(v0, a4);
    // Use the previous check to conditionally store the opposite result
    // (GREATER) to v0. If rhs is equal to lhs, this will be corrected by the
    // next check.
    __ Movf(v0, a5);
    // Check if EQUAL condition is satisfied. If true, move conditionally
    // result to v0.
    __ c(EQ, D, f12, f14);
    __ Movt(v0, a6);
  } else {
    Label skip;
    __ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
    __ mov(v0, a4);  // Return LESS as result.

    __ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
    __ mov(v0, a6);  // Return EQUAL as result.

    __ mov(v0, a5);  // Return GREATER as result.
    __ bind(&skip);
  }
  __ Ret();

  __ bind(&nan);
  // NaN comparisons always fail.
  // Load whatever we need in v0 to make the comparison fail.
  DCHECK(is_int16(GREATER) && is_int16(LESS));
  __ Ret(USE_DELAY_SLOT);
  if (cc == lt || cc == le) {
    __ li(v0, Operand(GREATER));
  } else {
    __ li(v0, Operand(LESS));
  }


  __ bind(&not_smis);
  // At this point we know we are dealing with two different objects,
  // and neither of them is a Smi. The objects are in lhs_ and rhs_.
  if (strict()) {
    // This returns non-equal for some object types, or falls through if it
    // was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
  }

  Label check_for_internalized_strings;
  Label flat_string_check;
  // Check for heap-number-heap-number comparison. Can jump to slow case,
  // or load both doubles and jump to the code that handles
  // that case. If the inputs are not doubles then jumps to
  // check_for_internalized_strings.
  // In this case a2 will contain the type of lhs_.
  EmitCheckForTwoHeapNumbers(masm,
                             lhs,
                             rhs,
                             &both_loaded_as_doubles,
                             &check_for_internalized_strings,
                             &flat_string_check);

  __ bind(&check_for_internalized_strings);
  if (cc == eq && !strict()) {
    // Returns an answer for two internalized strings or two
    // detectable objects.
    // Otherwise jumps to string case or not both strings case.
    // Assumes that a2 is the type of lhs_ on entry.
    EmitCheckForInternalizedStringsOrObjects(
        masm, lhs, rhs, &flat_string_check, &slow);
  }

  // Check for both being sequential one-byte strings,
  // and inline if that is the case.
  __ bind(&flat_string_check);

  __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, a2, a3, &slow);

  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
                      a3);
  if (cc == eq) {
    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, a2, a3, a4);
  } else {
    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, a2, a3, a4,
                                                    a5);
  }
  // Never falls through to here.

  __ bind(&slow);
  if (cc == eq) {
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ Push(lhs, rhs);
      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
    }
    // Turn true into 0 and false into some non-zero value.
    STATIC_ASSERT(EQUAL == 0);
    __ LoadRoot(a0, Heap::kTrueValueRootIndex);
    __ Ret(USE_DELAY_SLOT);
    __ subu(v0, v0, a0);  // In delay slot.
  } else {
    // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
    // a1 (rhs) second.
    __ Push(lhs, rhs);
    int ncr;  // NaN compare result.
    if (cc == lt || cc == le) {
      ncr = GREATER;
    } else {
      DCHECK(cc == gt || cc == ge);  // Remaining cases.
      ncr = LESS;
    }
    __ li(a0, Operand(Smi::FromInt(ncr)));
    __ push(a0);

    // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
    // tagged as a small integer.
    __ TailCallRuntime(Runtime::kCompare);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ mov(t9, ra);
  __ pop(ra);
  __ PushSafepointRegisters();
  __ Jump(t9);
}


void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ mov(t9, ra);
  __ pop(ra);
  __ PopSafepointRegisters();
  __ Jump(t9);
}


void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  __ MultiPush(kJSCallerSaved | ra.bit());
  if (save_doubles()) {
    __ MultiPushFPU(kCallerSavedFPU);
  }
  const int argument_count = 1;
  const int fp_argument_count = 0;
  const Register scratch = a1;

  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
  __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(isolate()),
      argument_count);
  if (save_doubles()) {
    __ MultiPopFPU(kCallerSavedFPU);
  }

  __ MultiPop(kJSCallerSaved | ra.bit());
  __ Ret();
}


void MathPowStub::Generate(MacroAssembler* masm) {
  const Register exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent.is(a2));
  const DoubleRegister double_base = f2;
  const DoubleRegister double_exponent = f4;
  const DoubleRegister double_result = f0;
  const DoubleRegister double_scratch = f6;
  const FPURegister single_scratch = f8;
  const Register scratch = t1;
  const Register scratch2 = a7;

  Label call_runtime, done, int_exponent;
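  // A TAGGED exponent is either untagged here (Smi) or loaded from its
  // HeapNumber; a double exponent that holds an integral value is redirected
  // to the integer path below, while other exponents are evaluated by
  // calling out to power_double_double_function.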
  if (exponent_type() == TAGGED) {
    // Base is already in double_base.
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ ldc1(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type() != INTEGER) {
    Label int_exponent_convert;
    // Detect integer exponents stored as double.
    __ EmitFPUTruncate(kRoundToMinusInf,
                       scratch,
                       double_exponent,
                       at,
                       double_scratch,
                       scratch2,
                       kCheckForInexactConversion);
    // scratch2 == 0 means there was no conversion error.
    __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));

    __ push(ra);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch2);
      __ MovToFloatParameters(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()),
          0, 2);
    }
    __ pop(ra);
    __ MovFromFloatResult(double_result);
    __ jmp(&done);

    __ bind(&int_exponent_convert);
  }

  // Calculate power with integer exponent.
  __ bind(&int_exponent);

  // Get two copies of exponent in the registers scratch and exponent.
  if (exponent_type() == INTEGER) {
    __ mov(scratch, exponent);
  } else {
    // Exponent has previously been stored into scratch as untagged integer.
    __ mov(exponent, scratch);
  }

  __ mov_d(double_scratch, double_base);  // Back up base.
  __ Move(double_result, 1.0);

  // Get absolute value of exponent.
  Label positive_exponent, bail_out;
  __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
  __ Dsubu(scratch, zero_reg, scratch);
  // Check when Dsubu overflows and we get negative result
  // (happens only when input is MIN_INT).
  __ Branch(&bail_out, gt, zero_reg, Operand(scratch));
  __ bind(&positive_exponent);
  __ Assert(ge, kUnexpectedNegativeValue, scratch, Operand(zero_reg));

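  // Compute base^|exponent| by repeated squaring: whenever the low bit of the
  // exponent is set, multiply the result by the current power of the base,
  // then square the base and halve the exponent until it reaches zero.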
  Label while_true, no_carry, loop_end;
  __ bind(&while_true);

  __ And(scratch2, scratch, 1);

  __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
  __ mul_d(double_result, double_result, double_scratch);
  __ bind(&no_carry);

  __ dsra(scratch, scratch, 1);

  __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
  __ mul_d(double_scratch, double_scratch, double_scratch);

  __ Branch(&while_true);

  __ bind(&loop_end);

  __ Branch(&done, ge, exponent, Operand(zero_reg));
  __ Move(double_scratch, 1.0);
  __ div_d(double_result, double_scratch, double_result);
  // Test whether result is zero.  Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);

  // double_exponent may not contain the exponent value if the input was a
  // smi.  We set it with exponent value before bailing out.
  __ bind(&bail_out);
  __ mtc1(exponent, single_scratch);
  __ cvt_d_w(double_exponent, single_scratch);

  // Returning or bailing out.
  __ push(ra);
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ PrepareCallCFunction(0, 2, scratch);
    __ MovToFloatParameters(double_base, double_exponent);
    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
                     0, 2);
  }
  __ pop(ra);
  __ MovFromFloatResult(double_result);

  __ bind(&done);
  __ Ret();
}

bool CEntryStub::NeedsImmovableCode() {
  return true;
}


void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  CEntryStub::GenerateAheadOfTime(isolate);
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
  CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
  CreateWeakCellStub::GenerateAheadOfTime(isolate);
  BinaryOpICStub::GenerateAheadOfTime(isolate);
  StoreRegistersStateStub::GenerateAheadOfTime(isolate);
  RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
  StoreFastElementStub::GenerateAheadOfTime(isolate);
}


void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  StoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  RestoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void CodeStub::GenerateFPStubs(Isolate* isolate) {
  // Generate if not already in cache.
  SaveFPRegsMode mode = kSaveFPRegs;
  CEntryStub(isolate, 1, mode).GetCode();
  StoreBufferOverflowStub(isolate, mode).GetCode();
  isolate->set_fp_stubs_generated(true);
}


void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
  stub.GetCode();
}


void CEntryStub::Generate(MacroAssembler* masm) {
  // Called from JavaScript; parameters are on stack as if calling JS function
  // a0: number of arguments including receiver
  // a1: pointer to builtin function
  // fp: frame pointer    (restored after C call)
  // sp: stack pointer    (restored as callee's sp after C call)
  // cp: current context  (C callee-saved)
  //
  // If argv_in_register():
  // a2: pointer to the first argument

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  if (argv_in_register()) {
    // Move argv into the correct register.
    __ mov(s1, a2);
  } else {
    // Compute the argv pointer in a callee-saved register.
    __ Dlsa(s1, sp, a0, kPointerSizeLog2);
    __ Dsubu(s1, s1, kPointerSize);
  }

  // Enter the exit frame that transitions from JavaScript to C++.
  FrameScope scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(save_doubles(), 0, is_builtin_exit()
                                           ? StackFrame::BUILTIN_EXIT
                                           : StackFrame::EXIT);

  // s0: number of arguments  including receiver (C callee-saved)
  // s1: pointer to first argument (C callee-saved)
  // s2: pointer to builtin function (C callee-saved)

  // Prepare arguments for C routine.
  // a0 = argc
  __ mov(s0, a0);
  __ mov(s2, a1);

  // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
  // also need to reserve the 4 argument slots on the stack.

  __ AssertStackIsAligned();

  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  int result_stack_size;
  if (result_size() <= 2) {
    // a0 = argc, a1 = argv, a2 = isolate
    __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
    __ mov(a1, s1);
    result_stack_size = 0;
  } else {
    DCHECK_EQ(3, result_size());
    // Allocate additional space for the result.
    result_stack_size =
        ((result_size() * kPointerSize) + frame_alignment_mask) &
        ~frame_alignment_mask;
    __ Dsubu(sp, sp, Operand(result_stack_size));

    // a0 = hidden result argument, a1 = argc, a2 = argv, a3 = isolate.
    __ li(a3, Operand(ExternalReference::isolate_address(isolate())));
    __ mov(a2, s1);
    __ mov(a1, a0);
    __ mov(a0, sp);
  }

  // To let the GC traverse the return address of the exit frames, we need to
  // know where the return address is. The CEntryStub is unmovable, so
  // we can store the address on the stack to be able to find it again and
  // we never have to restore it, because it will not change.
  { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
    int kNumInstructionsToJump = 4;
    Label find_ra;
    // Adjust the value in ra to point to the correct return location, 2nd
    // instruction past the real call into C code (the jalr(t9)), and push it.
    // This is the return address of the exit frame.
    if (kArchVariant >= kMips64r6) {
      __ addiupc(ra, kNumInstructionsToJump + 1);
    } else {
      // This branch-and-link sequence is needed to find the current PC on mips
      // before r6, saved to the ra register.
      __ bal(&find_ra);  // bal exposes branch delay slot.
      __ Daddu(ra, ra, kNumInstructionsToJump * Instruction::kInstrSize);
    }
    __ bind(&find_ra);

    // This spot was reserved in EnterExitFrame.
    __ sd(ra, MemOperand(sp, result_stack_size));
    // Stack space reservation moved to the branch delay slot below.
    // Stack is still aligned.

    // Call the C routine.
    __ mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
    __ jalr(t9);
    // Set up sp in the delay slot.
    __ daddiu(sp, sp, -kCArgsSlotsSize);
    // Make sure the stored 'ra' points to this position.
    DCHECK_EQ(kNumInstructionsToJump,
              masm->InstructionsGeneratedSince(&find_ra));
  }
  if (result_size() > 2) {
    DCHECK_EQ(3, result_size());
    // Read result values stored on stack.
    __ ld(a0, MemOperand(v0, 2 * kPointerSize));
    __ ld(v1, MemOperand(v0, 1 * kPointerSize));
    __ ld(v0, MemOperand(v0, 0 * kPointerSize));
  }
  // Result returned in v0, v1:v0 or a0:v1:v0 - do not destroy these registers!

  // Check result for exception sentinel.
  Label exception_returned;
  __ LoadRoot(a4, Heap::kExceptionRootIndex);
  __ Branch(&exception_returned, eq, a4, Operand(v0));

  // Check that there is no pending exception, otherwise we
  // should have returned the exception sentinel.
  if (FLAG_debug_code) {
    Label okay;
    ExternalReference pending_exception_address(
        Isolate::kPendingExceptionAddress, isolate());
    __ li(a2, Operand(pending_exception_address));
    __ ld(a2, MemOperand(a2));
    __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
    // Cannot use check here as it attempts to generate call into runtime.
    __ Branch(&okay, eq, a4, Operand(a2));
    __ stop("Unexpected pending exception");
    __ bind(&okay);
  }

  // Exit C frame and return.
  // v0:v1: result
  // sp: stack pointer
  // fp: frame pointer
  Register argc;
  if (argv_in_register()) {
    // We don't want to pop arguments so set argc to no_reg.
    argc = no_reg;
  } else {
    // s0: still holds argc (callee-saved).
    argc = s0;
  }
  __ LeaveExitFrame(save_doubles(), argc, true, EMIT_RETURN);

  // Handling of exception.
  __ bind(&exception_returned);

  ExternalReference pending_handler_context_address(
      Isolate::kPendingHandlerContextAddress, isolate());
  ExternalReference pending_handler_code_address(
      Isolate::kPendingHandlerCodeAddress, isolate());
  ExternalReference pending_handler_offset_address(
      Isolate::kPendingHandlerOffsetAddress, isolate());
  ExternalReference pending_handler_fp_address(
      Isolate::kPendingHandlerFPAddress, isolate());
  ExternalReference pending_handler_sp_address(
      Isolate::kPendingHandlerSPAddress, isolate());

  // Ask the runtime for help to determine the handler. This will set v0 to
  // contain the current pending exception, don't clobber it.
  ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
                                 isolate());
  {
    FrameScope scope(masm, StackFrame::MANUAL);
    __ PrepareCallCFunction(3, 0, a0);
    __ mov(a0, zero_reg);
    __ mov(a1, zero_reg);
    __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
    __ CallCFunction(find_handler, 3);
  }

  // Retrieve the handler context, SP and FP.
  __ li(cp, Operand(pending_handler_context_address));
  __ ld(cp, MemOperand(cp));
  __ li(sp, Operand(pending_handler_sp_address));
  __ ld(sp, MemOperand(sp));
  __ li(fp, Operand(pending_handler_fp_address));
  __ ld(fp, MemOperand(fp));

  // If the handler is a JS frame, restore the context to the frame. Note that
  // the context will be set to (cp == 0) for non-JS frames.
  Label zero;
  __ Branch(&zero, eq, cp, Operand(zero_reg));
  __ sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ bind(&zero);

  // Compute the handler entry address and jump to it.
  __ li(a1, Operand(pending_handler_code_address));
  __ ld(a1, MemOperand(a1));
  __ li(a2, Operand(pending_handler_offset_address));
  __ ld(a2, MemOperand(a2));
  __ Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Daddu(t9, a1, a2);
  __ Jump(t9);
}


void JSEntryStub::Generate(MacroAssembler* masm) {
  Label invoke, handler_entry, exit;
  Isolate* isolate = masm->isolate();

  // TODO(plind): unify the ABI description here.
  // Registers:
  // a0: entry address
  // a1: function
  // a2: receiver
  // a3: argc
  // a4: argv on mips64

  // Stack:
  // 0 arg slots on mips64 (4 args slots on mips)
  // args -- in a4 on mips64, on stack on mips

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  // Save callee saved registers on the stack.
  __ MultiPush(kCalleeSaved | ra.bit());

  // Save callee-saved FPU registers.
  __ MultiPushFPU(kCalleeSavedFPU);
  // Set up the reserved register for 0.0.
  __ Move(kDoubleRegZero, 0.0);

  // Load argv in s0 register.
  __ mov(s0, a4);  // 5th parameter (argv) is in the a4 register on mips64.

  __ InitializeRootRegister();

  // We build an EntryFrame.
  __ li(a7, Operand(-1));  // Push a bad frame pointer to fail if it is used.
  int marker = type();
  __ li(a6, Operand(Smi::FromInt(marker)));
  __ li(a5, Operand(Smi::FromInt(marker)));
  ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
  __ li(a4, Operand(c_entry_fp));
  __ ld(a4, MemOperand(a4));
  __ Push(a7, a6, a5, a4);
  // Set up frame pointer for the frame to be pushed.
  __ daddiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);

  // Registers:
  // a0: entry_address
  // a1: function
  // a2: receiver_pointer
  // a3: argc
  // s0: argv
  //
  // Stack:
  // caller fp          |
  // function slot      | entry frame
  // context slot       |
  // bad fp (0xff...f)  |
  // callee saved registers + ra
  // [ O32: 4 args slots]
  // args

  // If this is the outermost JS call, set js_entry_sp value.
  Label non_outermost_js;
  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
  __ li(a5, Operand(ExternalReference(js_entry_sp)));
  __ ld(a6, MemOperand(a5));
  __ Branch(&non_outermost_js, ne, a6, Operand(zero_reg));
  __ sd(fp, MemOperand(a5));
  __ li(a4, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
  Label cont;
  __ b(&cont);
  __ nop();   // Branch delay slot nop.
  __ bind(&non_outermost_js);
  __ li(a4, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
  __ bind(&cont);
  __ push(a4);

  // Jump to a faked try block that does the invoke, with a faked catch
  // block that sets the pending exception.
  __ jmp(&invoke);
  __ bind(&handler_entry);
  handler_offset_ = handler_entry.pos();
  // Caught exception: Store result (exception) in the pending exception
  // field in the JSEnv and return a failure sentinel.  Coming in here the
  // fp will be invalid because the PushStackHandler below sets it to 0 to
  // signal the existence of the JSEntry frame.
  __ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                      isolate)));
  __ sd(v0, MemOperand(a4));  // We come back from 'invoke'. result is in v0.
  __ LoadRoot(v0, Heap::kExceptionRootIndex);
  __ b(&exit);  // b exposes branch delay slot.
  __ nop();   // Branch delay slot nop.

  // Invoke: Link this frame into the handler chain.
  __ bind(&invoke);
  __ PushStackHandler();
  // If an exception not caught by another handler occurs, this handler
  // returns control to the code after the bal(&invoke) above, which
  // restores all kCalleeSaved registers (including cp and fp) to their
  // saved values before returning a failure to C.

  // Invoke the function by calling through JS entry trampoline builtin.
  // Notice that we cannot store a reference to the trampoline code directly in
  // this stub, because runtime stubs are not traversed when doing GC.

  // Registers:
  // a0: entry_address
  // a1: function
  // a2: receiver_pointer
  // a3: argc
  // s0: argv
  //
  // Stack:
  // handler frame
  // entry frame
  // callee saved registers + ra
  // [ O32: 4 args slots]
  // args

  if (type() == StackFrame::ENTRY_CONSTRUCT) {
    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
                                      isolate);
    __ li(a4, Operand(construct_entry));
  } else {
    ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
    __ li(a4, Operand(entry));
  }
  __ ld(t9, MemOperand(a4));  // Deref address.
  // Call JSEntryTrampoline.
  __ daddiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
  __ Call(t9);

  // Unlink this frame from the handler chain.
  __ PopStackHandler();

  __ bind(&exit);  // v0 holds result
  // Check if the current stack frame is marked as the outermost JS frame.
  Label non_outermost_js_2;
  __ pop(a5);
  __ Branch(&non_outermost_js_2,
            ne,
            a5,
            Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
  __ li(a5, Operand(ExternalReference(js_entry_sp)));
  __ sd(zero_reg, MemOperand(a5));
  __ bind(&non_outermost_js_2);

  // Restore the top frame descriptors from the stack.
  __ pop(a5);
  __ li(a4, Operand(ExternalReference(Isolate::kCEntryFPAddress,
                                      isolate)));
  __ sd(a5, MemOperand(a4));

  // Reset the stack to the callee saved registers.
  __ daddiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);

  // Restore callee-saved fpu registers.
  __ MultiPopFPU(kCalleeSavedFPU);

  // Restore callee saved registers from the stack.
  __ MultiPop(kCalleeSaved | ra.bit());
  // Return.
  __ Jump(ra);
}


void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
  // Return address is in ra.
  Label miss;

  Register receiver = LoadDescriptor::ReceiverRegister();
  Register index = LoadDescriptor::NameRegister();
  Register scratch = a5;
  Register result = v0;
  DCHECK(!scratch.is(receiver) && !scratch.is(index));
  DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()));

  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          RECEIVER_IS_STRING);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);

  __ bind(&miss);
  PropertyAccessCompiler::TailCallBuiltin(
      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
}


void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
  Label miss;
  Register receiver = LoadDescriptor::ReceiverRegister();
  // Ensure that the vector and slot registers won't be clobbered before
  // calling the miss handler.
  DCHECK(!AreAliased(a4, a5, LoadWithVectorDescriptor::VectorRegister(),
                     LoadWithVectorDescriptor::SlotRegister()));

  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a4,
                                                          a5, &miss);
  __ bind(&miss);
  PropertyAccessCompiler::TailCallBuiltin(
      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
}


void RegExpExecStub::Generate(MacroAssembler* masm) {
  // Just jump directly to the runtime if native RegExp is not selected at
  // compile time, or if the regexp entry in generated code has been disabled
  // (by a runtime switch or at compilation).
1333 #ifdef V8_INTERPRETED_REGEXP
1334   __ TailCallRuntime(Runtime::kRegExpExec);
1335 #else  // V8_INTERPRETED_REGEXP
1336 
1337   // Stack frame on entry.
1338   //  sp[0]: last_match_info (expected JSArray)
1339   //  sp[4]: previous index
1340   //  sp[8]: subject string
1341   //  sp[12]: JSRegExp object
1342 
1343   const int kLastMatchInfoOffset = 0 * kPointerSize;
1344   const int kPreviousIndexOffset = 1 * kPointerSize;
1345   const int kSubjectOffset = 2 * kPointerSize;
1346   const int kJSRegExpOffset = 3 * kPointerSize;
1347 
1348   Label runtime;
1349   // Allocation of registers for this function. These are in callee save
1350   // registers and will be preserved by the call to the native RegExp code, as
1351   // this code is called using the normal C calling convention. When calling
1352   // directly from generated code the native RegExp code will not do a GC and
1353   // therefore the content of these registers are safe to use after the call.
1354   // MIPS - using s0..s2, since we are not using CEntry Stub.
1355   Register subject = s0;
1356   Register regexp_data = s1;
1357   Register last_match_info_elements = s2;
1358 
1359   // Ensure that a RegExp stack is allocated.
1360   ExternalReference address_of_regexp_stack_memory_address =
1361       ExternalReference::address_of_regexp_stack_memory_address(
1362           isolate());
1363   ExternalReference address_of_regexp_stack_memory_size =
1364       ExternalReference::address_of_regexp_stack_memory_size(isolate());
1365   __ li(a0, Operand(address_of_regexp_stack_memory_size));
1366   __ ld(a0, MemOperand(a0, 0));
1367   __ Branch(&runtime, eq, a0, Operand(zero_reg));
1368 
1369   // Check that the first argument is a JSRegExp object.
1370   __ ld(a0, MemOperand(sp, kJSRegExpOffset));
1371   STATIC_ASSERT(kSmiTag == 0);
1372   __ JumpIfSmi(a0, &runtime);
1373   __ GetObjectType(a0, a1, a1);
1374   __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
1375 
1376   // Check that the RegExp has been compiled (data contains a fixed array).
1377   __ ld(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
1378   if (FLAG_debug_code) {
1379     __ SmiTst(regexp_data, a4);
1380     __ Check(nz,
1381              kUnexpectedTypeForRegExpDataFixedArrayExpected,
1382              a4,
1383              Operand(zero_reg));
1384     __ GetObjectType(regexp_data, a0, a0);
1385     __ Check(eq,
1386              kUnexpectedTypeForRegExpDataFixedArrayExpected,
1387              a0,
1388              Operand(FIXED_ARRAY_TYPE));
1389   }
1390 
1391   // regexp_data: RegExp data (FixedArray)
1392   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
1393   __ ld(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
1394   __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
1395 
1396   // regexp_data: RegExp data (FixedArray)
1397   // Check that the number of captures fits in the static offsets vector buffer.
1398   __ ld(a2,
1399          FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
1400   // Check (number_of_captures + 1) * 2 <= offsets vector size
1401   // Or          number_of_captures * 2 <= offsets vector size - 2
1402   // Or          number_of_captures     <= offsets vector size / 2 - 1
1403   // Multiplying by 2 comes for free since a2 is smi-tagged.
1404   STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
1405   int temp = Isolate::kJSRegexpStaticOffsetsVectorSize / 2 - 1;
1406   __ Branch(&runtime, hi, a2, Operand(Smi::FromInt(temp)));
1407 
1408   // Reset offset for possibly sliced string.
1409   __ mov(t0, zero_reg);
1410   __ ld(subject, MemOperand(sp, kSubjectOffset));
1411   __ JumpIfSmi(subject, &runtime);
1412   __ mov(a3, subject);  // Make a copy of the original subject string.
1413 
1414   // subject: subject string
1415   // a3: subject string
1416   // regexp_data: RegExp data (FixedArray)
1417   // Handle subject string according to its encoding and representation:
1418   // (1) Sequential string?  If yes, go to (4).
1419   // (2) Sequential or cons?  If not, go to (5).
1420   // (3) Cons string.  If the string is flat, replace subject with first string
1421   //     and go to (1). Otherwise bail out to runtime.
1422   // (4) Sequential string.  Load regexp code according to encoding.
1423   // (E) Carry on.
1424   /// [...]
1425 
1426   // Deferred code at the end of the stub:
1427   // (5) Long external string?  If not, go to (7).
1428   // (6) External string.  Make it, offset-wise, look like a sequential string.
1429   //     Go to (4).
1430   // (7) Short external string or not a string?  If yes, bail out to runtime.
1431   // (8) Sliced string.  Replace subject with parent.  Go to (1).
1432 
1433   Label check_underlying;   // (1)
1434   Label seq_string;         // (4)
1435   Label not_seq_nor_cons;   // (5)
1436   Label external_string;    // (6)
1437   Label not_long_external;  // (7)
1438 
1439   __ bind(&check_underlying);
1440   __ ld(a2, FieldMemOperand(subject, HeapObject::kMapOffset));
1441   __ lbu(a0, FieldMemOperand(a2, Map::kInstanceTypeOffset));
1442 
1443   // (1) Sequential string?  If yes, go to (4).
1444   __ And(a1,
1445          a0,
1446          Operand(kIsNotStringMask |
1447                  kStringRepresentationMask |
1448                  kShortExternalStringMask));
1449   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
1450   __ Branch(&seq_string, eq, a1, Operand(zero_reg));  // Go to (4).
1451 
1452   // (2) Sequential or cons?  If not, go to (5).
1453   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
1454   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
1455   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
1456   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
1457   // Go to (5).
1458   __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
1459 
1460   // (3) Cons string.  Check that it's flat.
1461   // Replace subject with first string and reload instance type.
1462   __ ld(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
1463   __ LoadRoot(a1, Heap::kempty_stringRootIndex);
1464   __ Branch(&runtime, ne, a0, Operand(a1));
1465   __ ld(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
1466   __ jmp(&check_underlying);
1467 
1468   // (4) Sequential string.  Load regexp code according to encoding.
1469   __ bind(&seq_string);
1470   // subject: sequential subject string (or look-alike, external string)
1471   // a3: original subject string
1472   // Load previous index and check range before a3 is overwritten.  We have to
1473   // use a3 instead of subject here because subject might have only been made
1474   // to look like a sequential string when it is actually an external string.
1475   __ ld(a1, MemOperand(sp, kPreviousIndexOffset));
1476   __ JumpIfNotSmi(a1, &runtime);
1477   __ ld(a3, FieldMemOperand(a3, String::kLengthOffset));
1478   __ Branch(&runtime, ls, a3, Operand(a1));
1479   __ SmiUntag(a1);
1480 
1481   STATIC_ASSERT(kStringEncodingMask == 4);
1482   STATIC_ASSERT(kOneByteStringTag == 4);
1483   STATIC_ASSERT(kTwoByteStringTag == 0);
1484   __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for one_byte.
1485   __ ld(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
1486   __ dsra(a3, a0, 2);  // a3 is 1 for one_byte, 0 for UC16 (used below).
1487   __ ld(a5, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
1488   __ Movz(t9, a5, a0);  // If UC16 (a0 is 0), replace t9 with the UC16 code (a5).
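  // Note: the Movz above is a branchless select. a0 holds the masked encoding
  // bit (non-zero for one-byte), so t9 keeps the one-byte code unless a0 is
  // zero, in which case it is overwritten with the UC16 code loaded into a5.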
1489 
1490   // (E) Carry on.  String handling is done.
1491   // t9: irregexp code
1492   // Check that the irregexp code has been generated for the actual string
1493   // encoding. If it has, the field contains a code object; otherwise it
1494   // contains a smi (code flushing support).
1495   __ JumpIfSmi(t9, &runtime);
1496 
1497   // a1: previous index
1498   // a3: encoding of subject string (1 if one_byte, 0 if two_byte);
1499   // t9: code
1500   // subject: Subject string
1501   // regexp_data: RegExp data (FixedArray)
1502   // All checks done. Now push arguments for native regexp code.
1503   __ IncrementCounter(isolate()->counters()->regexp_entry_native(),
1504                       1, a0, a2);
1505 
1506   // Isolates: note we add an additional parameter here (isolate pointer).
1507   const int kRegExpExecuteArguments = 9;
1508   const int kParameterRegisters = 8;
1509   __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
1510 
1511   // Stack pointer now points to cell where return address is to be written.
1512   // Arguments are before that on the stack or in registers, meaning we
1513   // treat the return address as argument 5. Thus every argument after that
1514   // needs to be shifted back by 1. Since DirectCEntryStub will handle
1515   // allocating space for the C argument slots, we don't need to calculate
1516   // that into the argument positions on the stack. This is how the stack will
1517   // look (sp meaning the value of sp at this moment):
1518   // Abi n64:
1519   //   [sp + 1] - Argument 9
1520   //   [sp + 0] - saved ra
1521   // Abi O32:
1522   //   [sp + 5] - Argument 9
1523   //   [sp + 4] - Argument 8
1524   //   [sp + 3] - Argument 7
1525   //   [sp + 2] - Argument 6
1526   //   [sp + 1] - Argument 5
1527   //   [sp + 0] - saved ra
1528 
1529   // Argument 9: Pass current isolate address.
1530   __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
1531   __ sd(a0, MemOperand(sp, 1 * kPointerSize));
1532 
1533   // Argument 8: Indicate that this is a direct call from JavaScript.
1534   __ li(a7, Operand(1));
1535 
1536   // Argument 7: Start (high end) of backtracking stack memory area.
1537   __ li(a0, Operand(address_of_regexp_stack_memory_address));
1538   __ ld(a0, MemOperand(a0, 0));
1539   __ li(a2, Operand(address_of_regexp_stack_memory_size));
1540   __ ld(a2, MemOperand(a2, 0));
1541   __ daddu(a6, a0, a2);
1542 
1543   // Argument 6: Set the number of capture registers to zero to force global
1544   // regexps to behave as non-global. This does not affect non-global regexps.
1545   __ mov(a5, zero_reg);
1546 
1547   // Argument 5: static offsets vector buffer.
1548   __ li(
1549       a4,
1550       Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
1551 
1552   // For arguments 4 and 3 get string length, calculate start of string data
1553   // and calculate the shift of the index (0 for one_byte and 1 for two byte).
1554   __ Daddu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
1555   __ Xor(a3, a3, Operand(1));  // 1 for 2-byte str, 0 for 1-byte.
1556   // Load the length from the original subject string from the previous stack
1557   // frame. Therefore we have to use fp, which points exactly to two pointer
1558   // sizes below the previous sp. (Because creating a new stack frame pushes
1559   // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
1560   __ ld(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
1561   // If slice offset is not 0, load the length from the original sliced string.
1562   // Argument 4, a3: End of string data
1563   // Argument 3, a2: Start of string data
1564   // Prepare start and end index of the input.
1565   __ dsllv(t1, t0, a3);
1566   __ daddu(t0, t2, t1);
1567   __ dsllv(t1, a1, a3);
1568   __ daddu(a2, t0, t1);
1569 
1570   __ ld(t2, FieldMemOperand(subject, String::kLengthOffset));
1571 
1572   __ SmiUntag(t2);
1573   __ dsllv(t1, t2, a3);
1574   __ daddu(a3, t0, t1);
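  // At this point (all values are byte addresses into the string data):
  //   a2 = data start + ((slice offset + previous index) << char size shift)
  //   a3 = data start + ((slice offset + string length) << char size shift)
  // i.e. argument 3 (start of input) and argument 4 (end of input).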
1575   // Argument 2 (a1): Previous index.
1576   // Already there
1577 
1578   // Argument 1 (a0): Subject string.
1579   __ mov(a0, subject);
1580 
1581   // Locate the code entry and call it.
1582   __ Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
1583   DirectCEntryStub stub(isolate());
1584   stub.GenerateCall(masm, t9);
1585 
1586   __ LeaveExitFrame(false, no_reg, true);
1587 
1588   // v0: result
1589   // subject: subject string (callee saved)
1590   // regexp_data: RegExp data (callee saved)
1591   // last_match_info_elements: Last match info elements (callee saved)
1592   // Check the result.
1593   Label success;
1594   __ Branch(&success, eq, v0, Operand(1));
1595   // We expect exactly one result since we force the called regexp to behave
1596   // as non-global.
1597   Label failure;
1598   __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
1599   // If not an exception it can only be a retry. Handle that in the runtime system.
1600   __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
1601   // Result must now be exception. If there is no pending exception already, a
1602   // stack overflow (on the backtrack stack) was detected in RegExp code but
1603   // the exception has not been created yet. Handle that in the runtime system.
1604   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
1605   __ li(a1, Operand(isolate()->factory()->the_hole_value()));
1606   __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1607                                       isolate())));
1608   __ ld(v0, MemOperand(a2, 0));
1609   __ Branch(&runtime, eq, v0, Operand(a1));
1610 
1611   // For exception, throw the exception again.
1612   __ TailCallRuntime(Runtime::kRegExpExecReThrow);
1613 
1614   __ bind(&failure);
1615   // For failure, return null.
1616   __ li(v0, Operand(isolate()->factory()->null_value()));
1617   __ DropAndRet(4);
1618 
1619   // Process the result from the native regexp code.
1620   __ bind(&success);
1621 
1622   __ lw(a1, UntagSmiFieldMemOperand(
1623       regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
1624   // Calculate number of capture registers (number_of_captures + 1) * 2.
1625   __ Daddu(a1, a1, Operand(1));
1626   __ dsll(a1, a1, 1);  // Multiply by 2.
1627 
1628   // Check that the last match info is a FixedArray.
1629   __ ld(last_match_info_elements, MemOperand(sp, kLastMatchInfoOffset));
1630   __ JumpIfSmi(last_match_info_elements, &runtime);
1631   // Check that the object has fast elements.
1632   __ ld(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
1633   __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
1634   __ Branch(&runtime, ne, a0, Operand(at));
1635   // Check that the last match info has space for the capture registers and the
1636   // additional information.
1637   __ ld(a0,
1638         FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
1639   __ Daddu(a2, a1, Operand(RegExpMatchInfo::kLastMatchOverhead));
1640 
1641   __ SmiUntag(at, a0);
1642   __ Branch(&runtime, gt, a2, Operand(at));
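  // The check above bails out to the runtime if the capture registers plus the
  // fixed overhead would not fit into the last match info array.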
1643 
1644   // a1: number of capture registers
1645   // subject: subject string
1646   // Store the capture count.
1647   __ SmiTag(a2, a1);  // To smi.
1648   __ sd(a2, FieldMemOperand(last_match_info_elements,
1649                             RegExpMatchInfo::kNumberOfCapturesOffset));
1650   // Store last subject and last input.
1651   __ sd(subject, FieldMemOperand(last_match_info_elements,
1652                                  RegExpMatchInfo::kLastSubjectOffset));
1653   __ mov(a2, subject);
1654   __ RecordWriteField(last_match_info_elements,
1655                       RegExpMatchInfo::kLastSubjectOffset, subject, a7,
1656                       kRAHasNotBeenSaved, kDontSaveFPRegs);
1657   __ mov(subject, a2);
1658   __ sd(subject, FieldMemOperand(last_match_info_elements,
1659                                  RegExpMatchInfo::kLastInputOffset));
1660   __ RecordWriteField(last_match_info_elements,
1661                       RegExpMatchInfo::kLastInputOffset, subject, a7,
1662                       kRAHasNotBeenSaved, kDontSaveFPRegs);
1663 
1664   // Get the static offsets vector filled by the native regexp code.
1665   ExternalReference address_of_static_offsets_vector =
1666       ExternalReference::address_of_static_offsets_vector(isolate());
1667   __ li(a2, Operand(address_of_static_offsets_vector));
1668 
1669   // a1: number of capture registers
1670   // a2: offsets vector
1671   Label next_capture, done;
1672   // Capture register counter starts from number of capture registers and
1673   // counts down until it goes below zero.
1674   __ Daddu(a0, last_match_info_elements,
1675            Operand(RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag));
1676   __ bind(&next_capture);
1677   __ Dsubu(a1, a1, Operand(1));
1678   __ Branch(&done, lt, a1, Operand(zero_reg));
1679   // Read the value from the static offsets vector buffer.
1680   __ lw(a3, MemOperand(a2, 0));
1681   __ daddiu(a2, a2, kIntSize);
1682   // Store the smi value in the last match info.
1683   __ SmiTag(a3);
1684   __ sd(a3, MemOperand(a0, 0));
1685   __ Branch(&next_capture, USE_DELAY_SLOT);
1686   __ daddiu(a0, a0, kPointerSize);  // In branch delay slot.
1687 
1688   __ bind(&done);
1689 
1690   // Return last match info.
1691   __ mov(v0, last_match_info_elements);
1692   __ DropAndRet(4);
1693 
1694   // Do the runtime call to execute the regexp.
1695   __ bind(&runtime);
1696   __ TailCallRuntime(Runtime::kRegExpExec);
1697 
1698   // Deferred code for string handling.
1699   // (5) Long external string?  If not, go to (7).
1700   __ bind(&not_seq_nor_cons);
1701   // Go to (7).
1702   __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));
1703 
1704   // (6) External string.  Make it, offset-wise, look like a sequential string.
1705   __ bind(&external_string);
1706   __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
1707   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
1708   if (FLAG_debug_code) {
1709     // Assert that we do not have a cons or slice (indirect strings) here.
1710     // Sequential strings have already been ruled out.
1711     __ And(at, a0, Operand(kIsIndirectStringMask));
1712     __ Assert(eq,
1713               kExternalStringExpectedButNotFound,
1714               at,
1715               Operand(zero_reg));
1716   }
1717   __ ld(subject,
1718         FieldMemOperand(subject, ExternalString::kResourceDataOffset));
1719   // Move the pointer so that offset-wise, it looks like a sequential string.
1720   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
1721   __ Dsubu(subject,
1722           subject,
1723           SeqTwoByteString::kHeaderSize - kHeapObjectTag);
1724   __ jmp(&seq_string);  // Go to (4).
1725 
1726   // (7) Short external string or not a string?  If yes, bail out to runtime.
1727   __ bind(&not_long_external);
1728   STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
1729   __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
1730   __ Branch(&runtime, ne, at, Operand(zero_reg));
1731 
1732   // (8) Sliced string.  Replace subject with parent.  Go to (1).
1733   // Load offset into t0 and replace subject string with parent.
1734   __ ld(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
1735   __ SmiUntag(t0);
1736   __ ld(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
1737   __ jmp(&check_underlying);  // Go to (1).
1738 #endif  // V8_INTERPRETED_REGEXP
1739 }
1740 
1741 
1742 static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
1743   // a0 : number of arguments to the construct function
1744   // a2 : feedback vector
1745   // a3 : slot in feedback vector (Smi)
1746   // a1 : the function to call
1747   FrameScope scope(masm, StackFrame::INTERNAL);
1748   const RegList kSavedRegs = 1 << 4 |  // a0
1749                              1 << 5 |  // a1
1750                              1 << 6 |  // a2
1751                              1 << 7 |  // a3
1752                              1 << cp.code();
1753 
1754   // Number-of-arguments register must be smi-tagged to call out.
1755   __ SmiTag(a0);
1756   __ MultiPush(kSavedRegs);
1757 
1758   __ CallStub(stub);
1759 
1760   __ MultiPop(kSavedRegs);
1761   __ SmiUntag(a0);
1762 }
1763 
1764 
1765 static void GenerateRecordCallTarget(MacroAssembler* masm) {
1766   // Cache the called function in a feedback vector slot.  Cache states
1767   // are uninitialized, monomorphic (indicated by a JSFunction), and
1768   // megamorphic.
1769   // a0 : number of arguments to the construct function
1770   // a1 : the function to call
1771   // a2 : feedback vector
1772   // a3 : slot in feedback vector (Smi)
1773   Label initialize, done, miss, megamorphic, not_array_function;
1774 
1775   DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
1776             masm->isolate()->heap()->megamorphic_symbol());
1777   DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
1778             masm->isolate()->heap()->uninitialized_symbol());
1779 
1780   // Load the cache state into a5.
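  // The slot (a3) is a Smi; on this port the payload sits in the upper 32 bits,
  // so shifting right by (32 - kPointerSizeLog2) yields the slot index scaled
  // to a byte offset into the feedback vector.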
1781   __ dsrl(a5, a3, 32 - kPointerSizeLog2);
1782   __ Daddu(a5, a2, Operand(a5));
1783   __ ld(a5, FieldMemOperand(a5, FixedArray::kHeaderSize));
1784 
1785   // A monomorphic cache hit or an already megamorphic state: invoke the
1786   // function without changing the state.
1787   // We don't know if a5 is a WeakCell or a Symbol, but it's harmless to read at
1788   // this position in a symbol (see static asserts in type-feedback-vector.h).
1789   Label check_allocation_site;
1790   Register feedback_map = a6;
1791   Register weak_value = t0;
1792   __ ld(weak_value, FieldMemOperand(a5, WeakCell::kValueOffset));
1793   __ Branch(&done, eq, a1, Operand(weak_value));
1794   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
1795   __ Branch(&done, eq, a5, Operand(at));
1796   __ ld(feedback_map, FieldMemOperand(a5, HeapObject::kMapOffset));
1797   __ LoadRoot(at, Heap::kWeakCellMapRootIndex);
1798   __ Branch(&check_allocation_site, ne, feedback_map, Operand(at));
1799 
1800   // If the weak cell is cleared, we have a new chance to become monomorphic.
1801   __ JumpIfSmi(weak_value, &initialize);
1802   __ jmp(&megamorphic);
1803 
1804   __ bind(&check_allocation_site);
1805   // If we came here, we need to see if we are the array function.
1806   // If we didn't have a matching function, and we didn't find the megamorphic
1807   // sentinel, then we have in the slot either some other function or an
1808   // AllocationSite.
1809   __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
1810   __ Branch(&miss, ne, feedback_map, Operand(at));
1811 
1812   // Make sure the function is the Array() function
1813   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5);
1814   __ Branch(&megamorphic, ne, a1, Operand(a5));
1815   __ jmp(&done);
1816 
1817   __ bind(&miss);
1818 
1819   // A monomorphic miss (i.e., here the cache is not uninitialized) goes
1820   // megamorphic.
1821   __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
1822   __ Branch(&initialize, eq, a5, Operand(at));
1823   // MegamorphicSentinel is an immortal immovable object (undefined) so no
1824   // write-barrier is needed.
1825   __ bind(&megamorphic);
1826   __ dsrl(a5, a3, 32 - kPointerSizeLog2);
1827   __ Daddu(a5, a2, Operand(a5));
1828   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
1829   __ sd(at, FieldMemOperand(a5, FixedArray::kHeaderSize));
1830   __ jmp(&done);
1831 
1832   // An uninitialized cache is patched with the function.
1833   __ bind(&initialize);
1834   // Make sure the function is the Array() function.
1835   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5);
1836   __ Branch(&not_array_function, ne, a1, Operand(a5));
1837 
1838   // The target function is the Array constructor,
1839   // Create an AllocationSite if we don't already have it, store it in the
1840   // slot.
1841   CreateAllocationSiteStub create_stub(masm->isolate());
1842   CallStubInRecordCallTarget(masm, &create_stub);
1843   __ Branch(&done);
1844 
1845   __ bind(&not_array_function);
1846 
1847   CreateWeakCellStub weak_cell_stub(masm->isolate());
1848   CallStubInRecordCallTarget(masm, &weak_cell_stub);
1849 
1850   __ bind(&done);
1851 
1852   // Increment the call count for all function calls.
1853   __ SmiScale(a4, a3, kPointerSizeLog2);
1854   __ Daddu(a5, a2, Operand(a4));
1855   __ ld(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
1856   __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
1857   __ sd(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
1858 }
1859 
1860 
1861 void CallConstructStub::Generate(MacroAssembler* masm) {
1862   // a0 : number of arguments
1863   // a1 : the function to call
1864   // a2 : feedback vector
1865   // a3 : slot in feedback vector (Smi, for RecordCallTarget)
1866 
1867   Label non_function;
1868   // Check that the function is not a smi.
1869   __ JumpIfSmi(a1, &non_function);
1870   // Check that the function is a JSFunction.
1871   __ GetObjectType(a1, a5, a5);
1872   __ Branch(&non_function, ne, a5, Operand(JS_FUNCTION_TYPE));
1873 
1874   GenerateRecordCallTarget(masm);
1875 
1876   __ dsrl(at, a3, 32 - kPointerSizeLog2);
1877   __ Daddu(a5, a2, at);
1878   Label feedback_register_initialized;
1879   // Put the AllocationSite from the feedback vector into a2, or undefined.
1880   __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize));
1881   __ ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
1882   __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
1883   __ Branch(&feedback_register_initialized, eq, a5, Operand(at));
1884   __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
1885   __ bind(&feedback_register_initialized);
1886 
1887   __ AssertUndefinedOrAllocationSite(a2, a5);
1888 
1889   // Pass function as new target.
1890   __ mov(a3, a1);
1891 
1892   // Tail call to the function-specific construct stub (still in the caller
1893   // context at this point).
1894   __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
1895   __ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
1896   __ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
1897   __ Jump(at);
1898 
1899   __ bind(&non_function);
1900   __ mov(a3, a1);
1901   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
1902 }
1903 
1904 
1905 // StringCharCodeAtGenerator.
1906 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
1907   DCHECK(!a4.is(index_));
1908   DCHECK(!a4.is(result_));
1909   DCHECK(!a4.is(object_));
1910 
1911   // If the receiver is a smi trigger the non-string case.
1912   if (check_mode_ == RECEIVER_IS_UNKNOWN) {
1913     __ JumpIfSmi(object_, receiver_not_string_);
1914 
1915     // Fetch the instance type of the receiver into result register.
1916     __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
1917     __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
1918     // If the receiver is not a string trigger the non-string case.
1919     __ And(a4, result_, Operand(kIsNotStringMask));
1920     __ Branch(receiver_not_string_, ne, a4, Operand(zero_reg));
1921   }
1922 
1923   // If the index is non-smi trigger the non-smi case.
1924   __ JumpIfNotSmi(index_, &index_not_smi_);
1925 
1926   __ bind(&got_smi_index_);
1927 
1928   // Check for index out of range.
1929   __ ld(a4, FieldMemOperand(object_, String::kLengthOffset));
1930   __ Branch(index_out_of_range_, ls, a4, Operand(index_));
1931 
1932   __ SmiUntag(index_);
1933 
1934   StringCharLoadGenerator::Generate(masm,
1935                                     object_,
1936                                     index_,
1937                                     result_,
1938                                     &call_runtime_);
1939 
1940   __ SmiTag(result_);
1941   __ bind(&exit_);
1942 }
1943 
1944 // Note: feedback_vector and slot are clobbered after the call.
1945 static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
1946                                Register slot) {
1947   __ dsrl(t0, slot, 32 - kPointerSizeLog2);
1948   __ Daddu(slot, feedback_vector, Operand(t0));
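  // The call count is kept in the feedback vector entry directly after the
  // slot itself, hence the extra kPointerSize in the offsets below.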
1949   __ ld(t0, FieldMemOperand(slot, FixedArray::kHeaderSize + kPointerSize));
1950   __ Daddu(t0, t0, Operand(Smi::FromInt(1)));
1951   __ sd(t0, FieldMemOperand(slot, FixedArray::kHeaderSize + kPointerSize));
1952 }
1953 
1954 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
1955   // a0 - number of arguments
1956   // a1 - function
1957   // a3 - slot id
1958   // a2 - vector
1959   // a4 - allocation site (loaded from vector[slot])
1960   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, at);
1961   __ Branch(miss, ne, a1, Operand(at));
1962 
1963   // Increment the call count for monomorphic function calls.
1964   IncrementCallCount(masm, a2, a3);
1965 
1966   __ mov(a2, a4);
1967   __ mov(a3, a1);
1968   ArrayConstructorStub stub(masm->isolate());
1969   __ TailCallStub(&stub);
1970 }
1971 
1972 
1973 void CallICStub::Generate(MacroAssembler* masm) {
1974   // a0 - number of arguments
1975   // a1 - function
1976   // a3 - slot id (Smi)
1977   // a2 - vector
1978   Label extra_checks_or_miss, call, call_function, call_count_incremented;
1979 
1980   // The checks. First, does a1 match the recorded monomorphic target?
1981   __ dsrl(a4, a3, 32 - kPointerSizeLog2);
1982   __ Daddu(a4, a2, Operand(a4));
1983   __ ld(a4, FieldMemOperand(a4, FixedArray::kHeaderSize));
1984 
1985   // We don't know that we have a weak cell. We might have a private symbol
1986   // or an AllocationSite, but the memory is safe to examine.
1987   // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
1988   // FixedArray.
1989   // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
1990   // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
1991   // computed, meaning that it can't appear to be a pointer. If the low bit is
1992   // 0, then hash is computed, but the 0 bit prevents the field from appearing
1993   // to be a pointer.
1994   STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
1995   STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
1996                     WeakCell::kValueOffset &&
1997                 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
1998 
1999   __ ld(a5, FieldMemOperand(a4, WeakCell::kValueOffset));
2000   __ Branch(&extra_checks_or_miss, ne, a1, Operand(a5));
2001 
2002   // The compare above could have been a SMI/SMI comparison. Guard against this
2003   // convincing us that we have a monomorphic JSFunction.
2004   __ JumpIfSmi(a1, &extra_checks_or_miss);
2005 
2006   __ bind(&call_function);
2007   // Increment the call count for monomorphic function calls.
2008   IncrementCallCount(masm, a2, a3);
2009 
2010   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
2011                                                     tail_call_mode()),
2012           RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
2013 
2014   __ bind(&extra_checks_or_miss);
2015   Label uninitialized, miss, not_allocation_site;
2016 
2017   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
2018   __ Branch(&call, eq, a4, Operand(at));
2019 
2020   // Verify that a4 contains an AllocationSite
2021   __ ld(a5, FieldMemOperand(a4, HeapObject::kMapOffset));
2022   __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2023   __ Branch(&not_allocation_site, ne, a5, Operand(at));
2024 
2025   HandleArrayCase(masm, &miss);
2026 
2027   __ bind(&not_allocation_site);
2028 
2029   // The following cases attempt to handle MISS cases without going to the
2030   // runtime.
2031   if (FLAG_trace_ic) {
2032     __ Branch(&miss);
2033   }
2034 
2035   __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
2036   __ Branch(&uninitialized, eq, a4, Operand(at));
2037 
2038   // We are going megamorphic. If the feedback is a JSFunction, it is fine
2039   // to handle it here. More complex cases are dealt with in the runtime.
2040   __ AssertNotSmi(a4);
2041   __ GetObjectType(a4, a5, a5);
2042   __ Branch(&miss, ne, a5, Operand(JS_FUNCTION_TYPE));
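  // Transition the slot to megamorphic: recompute its address from the slot id
  // and overwrite it with the megamorphic sentinel.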
2043   __ dsrl(a4, a3, 32 - kPointerSizeLog2);
2044   __ Daddu(a4, a2, Operand(a4));
2045   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
2046   __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
2047 
2048   __ bind(&call);
2049   IncrementCallCount(masm, a2, a3);
2050 
2051   __ bind(&call_count_incremented);
2052 
2053   __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
2054           RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
2055 
2056   __ bind(&uninitialized);
2057 
2058   // We are going monomorphic, provided we actually have a JSFunction.
2059   __ JumpIfSmi(a1, &miss);
2060 
2061   // Goto miss case if we do not have a function.
2062   __ GetObjectType(a1, a4, a4);
2063   __ Branch(&miss, ne, a4, Operand(JS_FUNCTION_TYPE));
2064 
2065   // Make sure the function is not the Array() function, which requires special
2066   // behavior on MISS.
2067   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a4);
2068   __ Branch(&miss, eq, a1, Operand(a4));
2069 
2070   // Make sure the function belongs to the same native context.
2071   __ ld(t0, FieldMemOperand(a1, JSFunction::kContextOffset));
2072   __ ld(t0, ContextMemOperand(t0, Context::NATIVE_CONTEXT_INDEX));
2073   __ ld(t1, NativeContextMemOperand());
2074   __ Branch(&miss, ne, t0, Operand(t1));
2075 
2076   // Store the function. Use a stub since we need a frame for allocation.
2077   // a2 - vector
2078   // a3 - slot
2079   // a1 - function
2080   {
2081     FrameScope scope(masm, StackFrame::INTERNAL);
2082     CreateWeakCellStub create_stub(masm->isolate());
2083     __ SmiTag(a0);
2084     __ Push(a0);
2085     __ Push(a2, a3);
2086     __ Push(cp, a1);
2087     __ CallStub(&create_stub);
2088     __ Pop(cp, a1);
2089     __ Pop(a2, a3);
2090     __ Pop(a0);
2091     __ SmiUntag(a0);
2092   }
2093 
2094   __ Branch(&call_function);
2095 
2096   // We are here because tracing is on or we encountered a MISS case we can't
2097   // handle here.
2098   __ bind(&miss);
2099   GenerateMiss(masm);
2100 
2101   __ Branch(&call_count_incremented);
2102 }
2103 
2104 
2105 void CallICStub::GenerateMiss(MacroAssembler* masm) {
2106   FrameScope scope(masm, StackFrame::INTERNAL);
2107 
2108   // Preserve number of arguments as Smi.
2109   __ SmiTag(a0);
2110   __ Push(a0);
2111 
2112   // Push the receiver and the function and feedback info.
2113   __ Push(a1, a2, a3);
2114 
2115   // Call the entry.
2116   __ CallRuntime(Runtime::kCallIC_Miss);
2117 
2118   // Move result to a1 and exit the internal frame.
2119   __ mov(a1, v0);
2120 
2121   // Restore number of arguments.
2122   __ Pop(a0);
2123   __ SmiUntag(a0);
2124 }
2125 
2126 
2127 void StringCharCodeAtGenerator::GenerateSlow(
2128     MacroAssembler* masm, EmbedMode embed_mode,
2129     const RuntimeCallHelper& call_helper) {
2130   __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
2131 
2132   // Index is not a smi.
2133   __ bind(&index_not_smi_);
2134   // If index is a heap number, try converting it to an integer.
2135   __ CheckMap(index_,
2136               result_,
2137               Heap::kHeapNumberMapRootIndex,
2138               index_not_number_,
2139               DONT_DO_SMI_CHECK);
2140   call_helper.BeforeCall(masm);
2141   // Consumed by runtime conversion function:
2142   if (embed_mode == PART_OF_IC_HANDLER) {
2143     __ Push(LoadWithVectorDescriptor::VectorRegister(),
2144             LoadWithVectorDescriptor::SlotRegister(), object_, index_);
2145   } else {
2146     __ Push(object_, index_);
2147   }
2148   __ CallRuntime(Runtime::kNumberToSmi);
2149 
2150   // Save the conversion result before the pop instructions below
2151   // have a chance to overwrite it.
2152 
2153   __ Move(index_, v0);
2154   if (embed_mode == PART_OF_IC_HANDLER) {
2155     __ Pop(LoadWithVectorDescriptor::VectorRegister(),
2156            LoadWithVectorDescriptor::SlotRegister(), object_);
2157   } else {
2158     __ pop(object_);
2159   }
2160   // Reload the instance type.
2161   __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2162   __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2163   call_helper.AfterCall(masm);
2164   // If index is still not a smi, it must be out of range.
2165   __ JumpIfNotSmi(index_, index_out_of_range_);
2166   // Otherwise, return to the fast path.
2167   __ Branch(&got_smi_index_);
2168 
2169   // Call runtime. We get here when the receiver is a string and the
2170   // index is a number, but the code of getting the actual character
2171   // is too complex (e.g., when the string needs to be flattened).
2172   __ bind(&call_runtime_);
2173   call_helper.BeforeCall(masm);
2174   __ SmiTag(index_);
2175   __ Push(object_, index_);
2176   __ CallRuntime(Runtime::kStringCharCodeAtRT);
2177 
2178   __ Move(result_, v0);
2179 
2180   call_helper.AfterCall(masm);
2181   __ jmp(&exit_);
2182 
2183   __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
2184 }
2185 
2186 
2187 // -------------------------------------------------------------------------
2188 // StringCharFromCodeGenerator
2189 
2190 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
2191   // Fast case of Heap::LookupSingleCharacterStringFromCode.
2192   __ JumpIfNotSmi(code_, &slow_case_);
2193   __ Branch(&slow_case_, hi, code_,
2194             Operand(Smi::FromInt(String::kMaxOneByteCharCode)));
2195 
2196   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
2197   // At this point code register contains smi tagged one_byte char code.
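  // SmiScale turns the smi-tagged code into a byte offset of code * kPointerSize,
  // indexing the single character string cache like a FixedArray element.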
2198   __ SmiScale(at, code_, kPointerSizeLog2);
2199   __ Daddu(result_, result_, at);
2200   __ ld(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
2201   __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
2202   __ Branch(&slow_case_, eq, result_, Operand(at));
2203   __ bind(&exit_);
2204 }
2205 
2206 
2207 void StringCharFromCodeGenerator::GenerateSlow(
2208     MacroAssembler* masm,
2209     const RuntimeCallHelper& call_helper) {
2210   __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
2211 
2212   __ bind(&slow_case_);
2213   call_helper.BeforeCall(masm);
2214   __ push(code_);
2215   __ CallRuntime(Runtime::kStringCharFromCode);
2216   __ Move(result_, v0);
2217 
2218   call_helper.AfterCall(masm);
2219   __ Branch(&exit_);
2220 
2221   __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
2222 }
2223 
2224 
2225 enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
2226 
2227 
2228 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
2229                                           Register dest,
2230                                           Register src,
2231                                           Register count,
2232                                           Register scratch,
2233                                           String::Encoding encoding) {
2234   if (FLAG_debug_code) {
2235     // Check that destination is word aligned.
2236     __ And(scratch, dest, Operand(kPointerAlignmentMask));
2237     __ Check(eq,
2238              kDestinationOfCopyNotAligned,
2239              scratch,
2240              Operand(zero_reg));
2241   }
2242 
2243   // Assumes word reads and writes are little endian.
2244   // Nothing to do for zero characters.
2245   Label done;
2246 
2247   if (encoding == String::TWO_BYTE_ENCODING) {
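    // Two-byte strings use two bytes per character, so double the character
    // count to get the byte count before computing the copy limit.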
2248     __ Daddu(count, count, count);
2249   }
2250 
2251   Register limit = count;  // Read until dest equals this.
2252   __ Daddu(limit, dest, Operand(count));
2253 
2254   Label loop_entry, loop;
2255   // Copy bytes from src to dest until dest hits limit.
2256   __ Branch(&loop_entry);
2257   __ bind(&loop);
2258   __ lbu(scratch, MemOperand(src));
2259   __ daddiu(src, src, 1);
2260   __ sb(scratch, MemOperand(dest));
2261   __ daddiu(dest, dest, 1);
2262   __ bind(&loop_entry);
2263   __ Branch(&loop, lt, dest, Operand(limit));
2264 
2265   __ bind(&done);
2266 }
2267 
2268 
2269 void StringHelper::GenerateFlatOneByteStringEquals(
2270     MacroAssembler* masm, Register left, Register right, Register scratch1,
2271     Register scratch2, Register scratch3) {
2272   Register length = scratch1;
2273 
2274   // Compare lengths.
2275   Label strings_not_equal, check_zero_length;
2276   __ ld(length, FieldMemOperand(left, String::kLengthOffset));
2277   __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
2278   __ Branch(&check_zero_length, eq, length, Operand(scratch2));
2279   __ bind(&strings_not_equal);
2280   // Cannot put li in the delay slot; it may expand to multiple instructions.
2281   __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
2282   __ Ret();
2283 
2284   // Check if the length is zero.
2285   Label compare_chars;
2286   __ bind(&check_zero_length);
2287   STATIC_ASSERT(kSmiTag == 0);
2288   __ Branch(&compare_chars, ne, length, Operand(zero_reg));
2289   DCHECK(is_int16((intptr_t)Smi::FromInt(EQUAL)));
2290   __ Ret(USE_DELAY_SLOT);
2291   __ li(v0, Operand(Smi::FromInt(EQUAL)));
2292 
2293   // Compare characters.
2294   __ bind(&compare_chars);
2295 
2296   GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
2297                                   v0, &strings_not_equal);
2298 
2299   // Characters are equal.
2300   __ Ret(USE_DELAY_SLOT);
2301   __ li(v0, Operand(Smi::FromInt(EQUAL)));
2302 }
2303 
2304 
2305 void StringHelper::GenerateCompareFlatOneByteStrings(
2306     MacroAssembler* masm, Register left, Register right, Register scratch1,
2307     Register scratch2, Register scratch3, Register scratch4) {
2308   Label result_not_equal, compare_lengths;
2309   // Find minimum length and length difference.
2310   __ ld(scratch1, FieldMemOperand(left, String::kLengthOffset));
2311   __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
2312   __ Dsubu(scratch3, scratch1, Operand(scratch2));
2313   Register length_delta = scratch3;
2314   __ slt(scratch4, scratch2, scratch1);
2315   __ Movn(scratch1, scratch2, scratch4);
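  // scratch4 is 1 when the right string is shorter; Movn then selects the
  // smaller of the two lengths into scratch1 without a branch.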
2316   Register min_length = scratch1;
2317   STATIC_ASSERT(kSmiTag == 0);
2318   __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
2319 
2320   // Compare loop.
2321   GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
2322                                   scratch4, v0, &result_not_equal);
2323 
2324   // Compare lengths - strings up to min-length are equal.
2325   __ bind(&compare_lengths);
2326   DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
2327   // Use length_delta as result if it's zero.
2328   __ mov(scratch2, length_delta);
2329   __ mov(scratch4, zero_reg);
2330   __ mov(v0, zero_reg);
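  // Fall through into result_not_equal with scratch2 = length_delta and
  // scratch4 = 0, so the ordering below is decided by the length difference.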
2331 
2332   __ bind(&result_not_equal);
2333   // Conditionally update the result based either on length_delta or
2334   // the last comparison performed in the loop above.
2335   Label ret;
2336   __ Branch(&ret, eq, scratch2, Operand(scratch4));
2337   __ li(v0, Operand(Smi::FromInt(GREATER)));
2338   __ Branch(&ret, gt, scratch2, Operand(scratch4));
2339   __ li(v0, Operand(Smi::FromInt(LESS)));
2340   __ bind(&ret);
2341   __ Ret();
2342 }
2343 
2344 
2345 void StringHelper::GenerateOneByteCharsCompareLoop(
2346     MacroAssembler* masm, Register left, Register right, Register length,
2347     Register scratch1, Register scratch2, Register scratch3,
2348     Label* chars_not_equal) {
2349   // Change index to run from -length to -1 by adding length to string
2350   // start. This means that loop ends when index reaches zero, which
2351   // doesn't need an additional compare.
2352   __ SmiUntag(length);
2353   __ Daddu(scratch1, length,
2354           Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
2355   __ Daddu(left, left, Operand(scratch1));
2356   __ Daddu(right, right, Operand(scratch1));
2357   __ Dsubu(length, zero_reg, length);
2358   Register index = length;  // index = -length;
2359 
2360 
2361   // Compare loop.
2362   Label loop;
2363   __ bind(&loop);
2364   __ Daddu(scratch3, left, index);
2365   __ lbu(scratch1, MemOperand(scratch3));
2366   __ Daddu(scratch3, right, index);
2367   __ lbu(scratch2, MemOperand(scratch3));
2368   __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
2369   __ Daddu(index, index, 1);
2370   __ Branch(&loop, ne, index, Operand(zero_reg));
2371 }
2372 
2373 
2374 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
2375   // ----------- S t a t e -------------
2376   //  -- a1    : left
2377   //  -- a0    : right
2378   //  -- ra    : return address
2379   // -----------------------------------
2380 
2381   // Load a2 with the allocation site. We stick an undefined dummy value here
2382   // and replace it with the real allocation site later when we instantiate this
2383   // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
2384   __ li(a2, isolate()->factory()->undefined_value());
2385 
2386   // Make sure that we actually patched the allocation site.
2387   if (FLAG_debug_code) {
2388     __ And(at, a2, Operand(kSmiTagMask));
2389     __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
2390     __ ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset));
2391     __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2392     __ Assert(eq, kExpectedAllocationSite, a4, Operand(at));
2393   }
2394 
2395   // Tail call into the stub that handles binary operations with allocation
2396   // sites.
2397   BinaryOpWithAllocationSiteStub stub(isolate(), state());
2398   __ TailCallStub(&stub);
2399 }
2400 
2401 
2402 void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
2403   DCHECK_EQ(CompareICState::BOOLEAN, state());
2404   Label miss;
2405 
2406   __ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
2407   __ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
2408   if (!Token::IsEqualityOp(op())) {
2409     __ ld(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
2410     __ AssertSmi(a1);
2411     __ ld(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
2412     __ AssertSmi(a0);
2413   }
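  // For equality the booleans are compared by identity (the difference is zero
  // iff they are the same oddball); for ordered compares the smi ToNumber
  // values loaded above are compared. The subtraction below encodes the result.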
2414   __ Ret(USE_DELAY_SLOT);
2415   __ Dsubu(v0, a1, a0);
2416 
2417   __ bind(&miss);
2418   GenerateMiss(masm);
2419 }
2420 
2421 
2422 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
2423   DCHECK(state() == CompareICState::SMI);
2424   Label miss;
2425   __ Or(a2, a1, a0);
2426   __ JumpIfNotSmi(a2, &miss);
2427 
2428   if (GetCondition() == eq) {
2429     // For equality we do not care about the sign of the result.
2430     __ Ret(USE_DELAY_SLOT);
2431     __ Dsubu(v0, a0, a1);
2432   } else {
2433     // Untag before subtracting to avoid handling overflow.
2434     __ SmiUntag(a1);
2435     __ SmiUntag(a0);
2436     __ Ret(USE_DELAY_SLOT);
2437     __ Dsubu(v0, a1, a0);
2438   }
2439 
2440   __ bind(&miss);
2441   GenerateMiss(masm);
2442 }
2443 
2444 
2445 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
2446   DCHECK(state() == CompareICState::NUMBER);
2447 
2448   Label generic_stub;
2449   Label unordered, maybe_undefined1, maybe_undefined2;
2450   Label miss;
2451 
2452   if (left() == CompareICState::SMI) {
2453     __ JumpIfNotSmi(a1, &miss);
2454   }
2455   if (right() == CompareICState::SMI) {
2456     __ JumpIfNotSmi(a0, &miss);
2457   }
2458 
2459   // Inlining the double comparison and falling back to the general compare
2460   // stub if NaN is involved.
2461   // Load left and right operand.
2462   Label done, left, left_smi, right_smi;
2463   __ JumpIfSmi(a0, &right_smi);
2464   __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
2465               DONT_DO_SMI_CHECK);
2466   __ Dsubu(a2, a0, Operand(kHeapObjectTag));
2467   __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
2468   __ Branch(&left);
2469   __ bind(&right_smi);
2470   __ SmiUntag(a2, a0);  // Can't clobber a0 yet.
2471   FPURegister single_scratch = f6;
2472   __ mtc1(a2, single_scratch);
2473   __ cvt_d_w(f2, single_scratch);
2474 
2475   __ bind(&left);
2476   __ JumpIfSmi(a1, &left_smi);
2477   __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
2478               DONT_DO_SMI_CHECK);
2479   __ Dsubu(a2, a1, Operand(kHeapObjectTag));
2480   __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
2481   __ Branch(&done);
2482   __ bind(&left_smi);
2483   __ SmiUntag(a2, a1);  // Can't clobber a1 yet.
2484   single_scratch = f8;
2485   __ mtc1(a2, single_scratch);
2486   __ cvt_d_w(f0, single_scratch);
2487 
2488   __ bind(&done);
2489 
2490   // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
2491   Label fpu_eq, fpu_lt;
2492   // Test if equal, and also handle the unordered/NaN case.
2493   __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
2494 
2495   // Test if less (unordered case is already handled).
2496   __ BranchF(&fpu_lt, NULL, lt, f0, f2);
2497 
2498   // Otherwise it's greater, so just fall thru, and return.
2499   DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
2500   __ Ret(USE_DELAY_SLOT);
2501   __ li(v0, Operand(GREATER));
2502 
2503   __ bind(&fpu_eq);
2504   __ Ret(USE_DELAY_SLOT);
2505   __ li(v0, Operand(EQUAL));
2506 
2507   __ bind(&fpu_lt);
2508   __ Ret(USE_DELAY_SLOT);
2509   __ li(v0, Operand(LESS));
2510 
2511   __ bind(&unordered);
2512   __ bind(&generic_stub);
2513   CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
2514                      CompareICState::GENERIC, CompareICState::GENERIC);
2515   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2516 
2517   __ bind(&maybe_undefined1);
2518   if (Token::IsOrderedRelationalCompareOp(op())) {
2519     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
2520     __ Branch(&miss, ne, a0, Operand(at));
2521     __ JumpIfSmi(a1, &unordered);
2522     __ GetObjectType(a1, a2, a2);
2523     __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
2524     __ jmp(&unordered);
2525   }
2526 
2527   __ bind(&maybe_undefined2);
2528   if (Token::IsOrderedRelationalCompareOp(op())) {
2529     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
2530     __ Branch(&unordered, eq, a1, Operand(at));
2531   }
2532 
2533   __ bind(&miss);
2534   GenerateMiss(masm);
2535 }
2536 
2537 
2538 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
2539   DCHECK(state() == CompareICState::INTERNALIZED_STRING);
2540   Label miss;
2541 
2542   // Registers containing left and right operands respectively.
2543   Register left = a1;
2544   Register right = a0;
2545   Register tmp1 = a2;
2546   Register tmp2 = a3;
2547 
2548   // Check that both operands are heap objects.
2549   __ JumpIfEitherSmi(left, right, &miss);
2550 
2551   // Check that both operands are internalized strings.
2552   __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2553   __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2554   __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2555   __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2556   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2557   __ Or(tmp1, tmp1, Operand(tmp2));
2558   __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
2559   __ Branch(&miss, ne, at, Operand(zero_reg));
2560 
2561   // Make sure a0 is non-zero. At this point input operands are
2562   // guaranteed to be non-zero.
2563   DCHECK(right.is(a0));
2564   STATIC_ASSERT(EQUAL == 0);
2565   STATIC_ASSERT(kSmiTag == 0);
2566   __ mov(v0, right);
2567   // Internalized strings are compared by identity.
2568   __ Ret(ne, left, Operand(right));
2569   DCHECK(is_int16(EQUAL));
2570   __ Ret(USE_DELAY_SLOT);
2571   __ li(v0, Operand(Smi::FromInt(EQUAL)));
2572 
2573   __ bind(&miss);
2574   GenerateMiss(masm);
2575 }
2576 
2577 
2578 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
2579   DCHECK(state() == CompareICState::UNIQUE_NAME);
2580   DCHECK(GetCondition() == eq);
2581   Label miss;
2582 
2583   // Registers containing left and right operands respectively.
2584   Register left = a1;
2585   Register right = a0;
2586   Register tmp1 = a2;
2587   Register tmp2 = a3;
2588 
2589   // Check that both operands are heap objects.
2590   __ JumpIfEitherSmi(left, right, &miss);
2591 
2592   // Check that both operands are unique names. This leaves the instance
2593   // types loaded in tmp1 and tmp2.
2594   __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2595   __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2596   __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2597   __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2598 
2599   __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
2600   __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
2601 
2602   // Use a0 as result
2603   __ mov(v0, a0);
2604 
2605   // Unique names are compared by identity.
2606   Label done;
2607   __ Branch(&done, ne, left, Operand(right));
2608   // Make sure a0 is non-zero. At this point input operands are
2609   // guaranteed to be non-zero.
2610   DCHECK(right.is(a0));
2611   STATIC_ASSERT(EQUAL == 0);
2612   STATIC_ASSERT(kSmiTag == 0);
2613   __ li(v0, Operand(Smi::FromInt(EQUAL)));
2614   __ bind(&done);
2615   __ Ret();
2616 
2617   __ bind(&miss);
2618   GenerateMiss(masm);
2619 }
2620 
2621 
2622 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
2623   DCHECK(state() == CompareICState::STRING);
2624   Label miss;
2625 
2626   bool equality = Token::IsEqualityOp(op());
2627 
2628   // Registers containing left and right operands respectively.
2629   Register left = a1;
2630   Register right = a0;
2631   Register tmp1 = a2;
2632   Register tmp2 = a3;
2633   Register tmp3 = a4;
2634   Register tmp4 = a5;
2635   Register tmp5 = a6;
2636 
2637   // Check that both operands are heap objects.
2638   __ JumpIfEitherSmi(left, right, &miss);
2639 
2640   // Check that both operands are strings. This leaves the instance
2641   // types loaded in tmp1 and tmp2.
2642   __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2643   __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2644   __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2645   __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2646   STATIC_ASSERT(kNotStringTag != 0);
2647   __ Or(tmp3, tmp1, tmp2);
2648   __ And(tmp5, tmp3, Operand(kIsNotStringMask));
2649   __ Branch(&miss, ne, tmp5, Operand(zero_reg));
2650 
2651   // Fast check for identical strings.
2652   Label left_ne_right;
2653   STATIC_ASSERT(EQUAL == 0);
2654   STATIC_ASSERT(kSmiTag == 0);
2655   __ Branch(&left_ne_right, ne, left, Operand(right));
2656   __ Ret(USE_DELAY_SLOT);
2657   __ mov(v0, zero_reg);  // In the delay slot.
2658   __ bind(&left_ne_right);
2659 
2660   // Handle not identical strings.
2661 
2662   // Check that both strings are internalized strings. If they are, we're done
2663   // because we already know they are not identical. We know they are both
2664   // strings.
2665   if (equality) {
2666     DCHECK(GetCondition() == eq);
2667     STATIC_ASSERT(kInternalizedTag == 0);
2668     __ Or(tmp3, tmp1, Operand(tmp2));
2669     __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
2670     Label is_symbol;
2671     __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
2672     // Make sure a0 is non-zero. At this point input operands are
2673     // guaranteed to be non-zero.
2674     DCHECK(right.is(a0));
2675     __ Ret(USE_DELAY_SLOT);
2676     __ mov(v0, a0);  // In the delay slot.
2677     __ bind(&is_symbol);
2678   }
2679 
2680   // Check that both strings are sequential one_byte.
2681   Label runtime;
2682   __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
2683                                                     &runtime);
2684 
2685   // Compare flat one_byte strings. Returns when done.
2686   if (equality) {
2687     StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
2688                                                   tmp3);
2689   } else {
2690     StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
2691                                                     tmp2, tmp3, tmp4);
2692   }
2693 
2694   // Handle more complex cases in runtime.
2695   __ bind(&runtime);
2696   if (equality) {
2697     {
2698       FrameScope scope(masm, StackFrame::INTERNAL);
2699       __ Push(left, right);
2700       __ CallRuntime(Runtime::kStringEqual);
2701     }
2702     __ LoadRoot(a0, Heap::kTrueValueRootIndex);
2703     __ Ret(USE_DELAY_SLOT);
2704     __ Subu(v0, v0, a0);  // In delay slot.
2705   } else {
2706     __ Push(left, right);
2707     __ TailCallRuntime(Runtime::kStringCompare);
2708   }
2709 
2710   __ bind(&miss);
2711   GenerateMiss(masm);
2712 }
2713 
2714 
2715 void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
2716   DCHECK_EQ(CompareICState::RECEIVER, state());
2717   Label miss;
2718   __ And(a2, a1, Operand(a0));
2719   __ JumpIfSmi(a2, &miss);
2720 
2721   STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2722   __ GetObjectType(a0, a2, a2);
2723   __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
2724   __ GetObjectType(a1, a2, a2);
2725   __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
2726 
2727   DCHECK_EQ(eq, GetCondition());
2728   __ Ret(USE_DELAY_SLOT);
2729   __ dsubu(v0, a0, a1);
2730 
2731   __ bind(&miss);
2732   GenerateMiss(masm);
2733 }
2734 
2735 
2736 void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
2737   Label miss;
2738   Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
2739   __ And(a2, a1, a0);
2740   __ JumpIfSmi(a2, &miss);
2741   __ GetWeakValue(a4, cell);
2742   __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
2743   __ ld(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
2744   __ Branch(&miss, ne, a2, Operand(a4));
2745   __ Branch(&miss, ne, a3, Operand(a4));
2746 
2747   if (Token::IsEqualityOp(op())) {
2748     __ Ret(USE_DELAY_SLOT);
2749     __ dsubu(v0, a0, a1);
2750   } else {
2751     if (op() == Token::LT || op() == Token::LTE) {
2752       __ li(a2, Operand(Smi::FromInt(GREATER)));
2753     } else {
2754       __ li(a2, Operand(Smi::FromInt(LESS)));
2755     }
2756     __ Push(a1, a0, a2);
2757     __ TailCallRuntime(Runtime::kCompare);
2758   }
2759 
2760   __ bind(&miss);
2761   GenerateMiss(masm);
2762 }
2763 
2764 
2765 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
2766   {
2767     // Call the runtime system in a fresh internal frame.
2768     FrameScope scope(masm, StackFrame::INTERNAL);
2769     __ Push(a1, a0);
2770     __ Push(ra, a1, a0);
2771     __ li(a4, Operand(Smi::FromInt(op())));
2772     __ daddiu(sp, sp, -kPointerSize);
2773     __ CallRuntime(Runtime::kCompareIC_Miss, 3, kDontSaveFPRegs,
2774                    USE_DELAY_SLOT);
2775     __ sd(a4, MemOperand(sp));  // In the delay slot.
2776     // Compute the entry point of the rewritten stub.
2777     __ Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
2778     // Restore registers.
2779     __ Pop(a1, a0, ra);
2780   }
2781   __ Jump(a2);
2782 }
2783 
2784 
2785 void DirectCEntryStub::Generate(MacroAssembler* masm) {
2786   // Make room for the arguments to fit the C calling convention. Most callers
2787   // of DirectCEntryStub::GenerateCall use EnterExitFrame/LeaveExitFrame, so
2788   // they handle restoring the stack and we don't have to do that here.
2789   // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
2790   // kCArgsSlotsSize stack space after the call.
2791   __ daddiu(sp, sp, -kCArgsSlotsSize);
2792   // Place the return address on the stack, making the call
2793   // GC safe. The RegExp backend also relies on this.
2794   __ sd(ra, MemOperand(sp, kCArgsSlotsSize));
2795   __ Call(t9);  // Call the C++ function.
2796   __ ld(t9, MemOperand(sp, kCArgsSlotsSize));
2797 
2798   if (FLAG_debug_code && FLAG_enable_slow_asserts) {
2799     // In case of an error the return address may point to a memory area
2800     // filled with kZapValue by the GC.
2801     // Dereference the address and check for this.
2802     __ Uld(a4, MemOperand(t9));
2803     __ Assert(ne, kReceivedInvalidReturnAddress, a4,
2804         Operand(reinterpret_cast<uint64_t>(kZapValue)));
2805   }
2806   __ Jump(t9);
2807 }
2808 
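// Illustrative sketch (not emitted anywhere): a typical use of this stub,
// assuming the caller has already set up an exit frame; the name
// some_function_address below is hypothetical.
//
//   DirectCEntryStub stub(isolate);
//   __ li(t9, Operand(some_function_address));  // C++ function to call.
//   stub.GenerateCall(masm, t9);
//   // GenerateCall reserves kCArgsSlotsSize on the stack; the caller (or
//   // LeaveExitFrame) is responsible for dropping it again afterwards.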
2809 
2810 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
2811                                     Register target) {
2812   intptr_t loc =
2813       reinterpret_cast<intptr_t>(GetCode().location());
2814   __ Move(t9, target);
2815   __ li(at, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
2816   __ Call(at);
2817 }
2818 
2819 
2820 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
2821                                                       Label* miss,
2822                                                       Label* done,
2823                                                       Register receiver,
2824                                                       Register properties,
2825                                                       Handle<Name> name,
2826                                                       Register scratch0) {
2827   DCHECK(name->IsUniqueName());
2828   // If the names of the slots in the range from 1 to kProbes - 1 for the hash
2829   // value are not equal to the name, and the kProbes-th slot is not used (its
2830   // name is the undefined value), then the hash table is guaranteed not to
2831   // contain the property. This holds even if some slots represent deleted
2832   // properties (their names are the hole value).
2833   for (int i = 0; i < kInlinedProbes; i++) {
2834     // scratch0 points to properties hash.
2835     // Compute the masked index: (hash + i + i * i) & mask.
2836     Register index = scratch0;
2837     // Capacity is smi 2^n.
2838     __ SmiLoadUntag(index, FieldMemOperand(properties, kCapacityOffset));
2839     __ Dsubu(index, index, Operand(1));
2840     __ And(index, index,
2841            Operand(name->Hash() + NameDictionary::GetProbeOffset(i)));
2842 
2843     // Scale the index by multiplying by the entry size.
2844     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
2845     __ Dlsa(index, index, index, 1);  // index *= 3.
2846 
2847     Register entity_name = scratch0;
2848     // Having undefined at this place means the name is not contained.
2849     STATIC_ASSERT(kSmiTagSize == 1);
2850     Register tmp = properties;
2851 
2852     __ Dlsa(tmp, properties, index, kPointerSizeLog2);
2853     __ ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
2854 
2855     DCHECK(!tmp.is(entity_name));
2856     __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
2857     __ Branch(done, eq, entity_name, Operand(tmp));
2858 
2859     // Load the hole ready for use below:
2860     __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
2861 
2862     // Stop if found the property.
2863     __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
2864 
2865     Label good;
2866     __ Branch(&good, eq, entity_name, Operand(tmp));
2867 
2868     // Check if the entry name is not a unique name.
2869     __ ld(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
2870     __ lbu(entity_name,
2871            FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
2872     __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
2873     __ bind(&good);
2874 
2875     // Restore the properties.
2876     __ ld(properties,
2877           FieldMemOperand(receiver, JSObject::kPropertiesOffset));
2878   }
2879 
2880   const int spill_mask =
2881       (ra.bit() | a6.bit() | a5.bit() | a4.bit() | a3.bit() |
2882        a2.bit() | a1.bit() | a0.bit() | v0.bit());
2883 
2884   __ MultiPush(spill_mask);
2885   __ ld(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
2886   __ li(a1, Operand(Handle<Name>(name)));
2887   NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
2888   __ CallStub(&stub);
2889   __ mov(at, v0);
2890   __ MultiPop(spill_mask);
2891 
2892   __ Branch(done, eq, at, Operand(zero_reg));
2893   __ Branch(miss, ne, at, Operand(zero_reg));
2894 }
2895 
2896 
2897 // Probe the name dictionary in the |elements| register. Jump to the
2898 // |done| label if a property with the given name is found. Jump to
2899 // the |miss| label otherwise.
2900 // If lookup was successful |scratch2| will be elements + kPointerSize * index.
2901 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
2902                                                       Label* miss,
2903                                                       Label* done,
2904                                                       Register elements,
2905                                                       Register name,
2906                                                       Register scratch1,
2907                                                       Register scratch2) {
2908   DCHECK(!elements.is(scratch1));
2909   DCHECK(!elements.is(scratch2));
2910   DCHECK(!name.is(scratch1));
2911   DCHECK(!name.is(scratch2));
2912 
2913   __ AssertName(name);
2914 
2915   // Compute the capacity mask.
2916   __ ld(scratch1, FieldMemOperand(elements, kCapacityOffset));
2917   __ SmiUntag(scratch1);
2918   __ Dsubu(scratch1, scratch1, Operand(1));
2919 
2920   // Generate an unrolled loop that performs a few probes before
2921   // giving up. Measurements done on Gmail indicate that 2 probes
2922   // cover ~93% of loads from dictionaries.
2923   for (int i = 0; i < kInlinedProbes; i++) {
2924     // Compute the masked index: (hash + i + i * i) & mask.
2925     __ lwu(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
2926     if (i > 0) {
2927       // Add the probe offset (i + i * i) left shifted to avoid right shifting
2928       // the hash in a separate instruction. The value hash + i + i * i is right
2929       // shifted in the following And instruction.
2930       DCHECK(NameDictionary::GetProbeOffset(i) <
2931              1 << (32 - Name::kHashFieldOffset));
2932       __ Daddu(scratch2, scratch2, Operand(
2933           NameDictionary::GetProbeOffset(i) << Name::kHashShift));
2934     }
2935     __ dsrl(scratch2, scratch2, Name::kHashShift);
2936     __ And(scratch2, scratch1, scratch2);
2937 
2938     // Scale the index by multiplying by the entry size.
2939     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
2940     // scratch2 = scratch2 * 3.
2941     __ Dlsa(scratch2, scratch2, scratch2, 1);
2942 
2943     // Check if the key is identical to the name.
2944     __ Dlsa(scratch2, elements, scratch2, kPointerSizeLog2);
2945     __ ld(at, FieldMemOperand(scratch2, kElementsStartOffset));
2946     __ Branch(done, eq, name, Operand(at));
2947   }
2948 
2949   const int spill_mask =
2950       (ra.bit() | a6.bit() | a5.bit() | a4.bit() |
2951        a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
2952       ~(scratch1.bit() | scratch2.bit());
2953 
2954   __ MultiPush(spill_mask);
2955   if (name.is(a0)) {
2956     DCHECK(!elements.is(a1));
2957     __ Move(a1, name);
2958     __ Move(a0, elements);
2959   } else {
2960     __ Move(a0, elements);
2961     __ Move(a1, name);
2962   }
2963   NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
2964   __ CallStub(&stub);
2965   __ mov(scratch2, a2);
2966   __ mov(at, v0);
2967   __ MultiPop(spill_mask);
2968 
2969   __ Branch(done, ne, at, Operand(zero_reg));
2970   __ Branch(miss, eq, at, Operand(zero_reg));
2971 }
2972 
2973 
2974 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
2975   // This stub overrides SometimesSetsUpAFrame() to return false.  That means
2976   // we cannot call anything that could cause a GC from this stub.
2977   // Registers:
2978   //  result (v0): holds the result of the lookup.
2979   //  dictionary (a0): NameDictionary to probe.
2980   //  key (a1): the name being looked up.
2981   //  index (a2): will hold the index of the entry if the lookup is successful;
2982   //              might alias with result.
2983   // Returns:
2984   //  result is zero if the lookup failed, non-zero otherwise.
2985 
2986   Register result = v0;
2987   Register dictionary = a0;
2988   Register key = a1;
2989   Register index = a2;
2990   Register mask = a3;
2991   Register hash = a4;
2992   Register undefined = a5;
2993   Register entry_key = a6;
2994 
2995   Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
2996 
2997   __ ld(mask, FieldMemOperand(dictionary, kCapacityOffset));
2998   __ SmiUntag(mask);
2999   __ Dsubu(mask, mask, Operand(1));
3000 
3001   __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
3002 
3003   __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
3004 
3005   for (int i = kInlinedProbes; i < kTotalProbes; i++) {
3006     // Compute the masked index: (hash + i + i * i) & mask.
3007     // Capacity is smi 2^n.
3008     if (i > 0) {
3009       // Add the probe offset (i + i * i) left shifted to avoid right shifting
3010       // the hash in a separate instruction. The value hash + i + i * i is right
3011       // shifted in the following And instruction.
3012       DCHECK(NameDictionary::GetProbeOffset(i) <
3013              1 << (32 - Name::kHashFieldOffset));
3014       __ Daddu(index, hash, Operand(
3015           NameDictionary::GetProbeOffset(i) << Name::kHashShift));
3016     } else {
3017       __ mov(index, hash);
3018     }
3019     __ dsrl(index, index, Name::kHashShift);
3020     __ And(index, mask, index);
3021 
3022     // Scale the index by multiplying by the entry size.
3023     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
3024     // index *= 3.
3025     __ Dlsa(index, index, index, 1);
3026 
3027     STATIC_ASSERT(kSmiTagSize == 1);
3028     __ Dlsa(index, dictionary, index, kPointerSizeLog2);
3029     __ ld(entry_key, FieldMemOperand(index, kElementsStartOffset));
3030 
3031     // Having undefined at this place means the name is not contained.
3032     __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
3033 
3034     // Stop if found the property.
3035     __ Branch(&in_dictionary, eq, entry_key, Operand(key));
3036 
3037     if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
3038       // Check if the entry name is not a unique name.
3039       __ ld(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
3040       __ lbu(entry_key,
3041              FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
3042       __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
3043     }
3044   }
3045 
3046   __ bind(&maybe_in_dictionary);
3047   // If we are doing negative lookup then probing failure should be
3048   // treated as a lookup success. For positive lookup probing failure
3049   // should be treated as lookup failure.
3050   if (mode() == POSITIVE_LOOKUP) {
3051     __ Ret(USE_DELAY_SLOT);
3052     __ mov(result, zero_reg);
3053   }
3054 
3055   __ bind(&in_dictionary);
3056   __ Ret(USE_DELAY_SLOT);
3057   __ li(result, 1);
3058 
3059   __ bind(&not_in_dictionary);
3060   __ Ret(USE_DELAY_SLOT);
3061   __ mov(result, zero_reg);
3062 }
3063 
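// For reference, the probing sequence implemented above corresponds roughly to
// the following sketch (illustrative only; the real layout is defined by
// NameDictionary, and FindEntry is a hypothetical name):
//
//   int FindEntry(uint32_t hash, uint32_t capacity) {  // capacity is 2^n.
//     uint32_t mask = capacity - 1;
//     for (int i = 0; i < kTotalProbes; i++) {
//       // Same "(hash + i + i * i) & mask" scheme as described above.
//       uint32_t index = (hash + NameDictionary::GetProbeOffset(i)) & mask;
//       int slot = index * NameDictionary::kEntrySize;  // kEntrySize == 3.
//       // The key to compare lives at kElementsStartIndex + slot; undefined
//       // there means "not present", the hole means "deleted".
//     }
//     return -1;  // Not found within kTotalProbes probes.
//   }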
3064 
3065 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
3066     Isolate* isolate) {
3067   StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
3068   stub1.GetCode();
3069   // Hydrogen code stubs need stub2 at snapshot time.
3070   StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
3071   stub2.GetCode();
3072 }
3073 
3074 
3075 // Takes the input in 3 registers: address_, value_ and object_.  A pointer to
3076 // the value has just been written into the object, and this stub makes sure
3077 // the GC is kept informed.  The word in the object where the value has been
3078 // written is in the address register.
3079 void RecordWriteStub::Generate(MacroAssembler* masm) {
3080   Label skip_to_incremental_noncompacting;
3081   Label skip_to_incremental_compacting;
3082 
3083   // The first two branch+nop instructions are generated with labels so as to
3084   // get the offset fixed up correctly by the bind(Label*) call.  We patch it
3085   // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
3086   // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
3087   // incremental heap marking.
3088   // See RecordWriteStub::Patch for details.
3089   __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
3090   __ nop();
3091   __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
3092   __ nop();
3093 
3094   if (remembered_set_action() == EMIT_REMEMBERED_SET) {
3095     __ RememberedSetHelper(object(),
3096                            address(),
3097                            value(),
3098                            save_fp_regs_mode(),
3099                            MacroAssembler::kReturnAtEnd);
3100   }
3101   __ Ret();
3102 
3103   __ bind(&skip_to_incremental_noncompacting);
3104   GenerateIncremental(masm, INCREMENTAL);
3105 
3106   __ bind(&skip_to_incremental_compacting);
3107   GenerateIncremental(masm, INCREMENTAL_COMPACTION);
3108 
3109   // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
3110   // Will be checked in IncrementalMarking::ActivateGeneratedStub.
3111 
3112   PatchBranchIntoNop(masm, 0);
3113   PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
3114 }
3115 
3116 
3117 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
3118   regs_.Save(masm);
3119 
3120   if (remembered_set_action() == EMIT_REMEMBERED_SET) {
3121     Label dont_need_remembered_set;
3122 
3123     __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
3124     __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
3125                            regs_.scratch0(),
3126                            &dont_need_remembered_set);
3127 
3128     __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
3129                         &dont_need_remembered_set);
3130 
3131     // First notify the incremental marker if necessary, then update the
3132     // remembered set.
3133     CheckNeedsToInformIncrementalMarker(
3134         masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
3135     InformIncrementalMarker(masm);
3136     regs_.Restore(masm);
3137     __ RememberedSetHelper(object(),
3138                            address(),
3139                            value(),
3140                            save_fp_regs_mode(),
3141                            MacroAssembler::kReturnAtEnd);
3142 
3143     __ bind(&dont_need_remembered_set);
3144   }
3145 
3146   CheckNeedsToInformIncrementalMarker(
3147       masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
3148   InformIncrementalMarker(masm);
3149   regs_.Restore(masm);
3150   __ Ret();
3151 }
3152 
3153 
3154 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
3155   regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
3156   int argument_count = 3;
3157   __ PrepareCallCFunction(argument_count, regs_.scratch0());
3158   Register address =
3159       a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
3160   DCHECK(!address.is(regs_.object()));
3161   DCHECK(!address.is(a0));
3162   __ Move(address, regs_.address());
3163   __ Move(a0, regs_.object());
3164   __ Move(a1, address);
3165   __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
3166 
3167   AllowExternalCallThatCantCauseGC scope(masm);
3168   __ CallCFunction(
3169       ExternalReference::incremental_marking_record_write_function(isolate()),
3170       argument_count);
3171   regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
3172 }
3173 
3174 
3175 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
3176     MacroAssembler* masm,
3177     OnNoNeedToInformIncrementalMarker on_no_need,
3178     Mode mode) {
3179   Label on_black;
3180   Label need_incremental;
3181   Label need_incremental_pop_scratch;
3182 
3183   // Let's look at the color of the object:  If it is not black we don't have
3184   // to inform the incremental marker.
3185   __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
3186 
3187   regs_.Restore(masm);
3188   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
3189     __ RememberedSetHelper(object(),
3190                            address(),
3191                            value(),
3192                            save_fp_regs_mode(),
3193                            MacroAssembler::kReturnAtEnd);
3194   } else {
3195     __ Ret();
3196   }
3197 
3198   __ bind(&on_black);
3199 
3200   // Get the value from the slot.
3201   __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
3202 
3203   if (mode == INCREMENTAL_COMPACTION) {
3204     Label ensure_not_white;
3205 
3206     __ CheckPageFlag(regs_.scratch0(),  // Contains value.
3207                      regs_.scratch1(),  // Scratch.
3208                      MemoryChunk::kEvacuationCandidateMask,
3209                      eq,
3210                      &ensure_not_white);
3211 
3212     __ CheckPageFlag(regs_.object(),
3213                      regs_.scratch1(),  // Scratch.
3214                      MemoryChunk::kSkipEvacuationSlotsRecordingMask,
3215                      eq,
3216                      &need_incremental);
3217 
3218     __ bind(&ensure_not_white);
3219   }
3220 
3221   // We need extra registers for this, so we push the object and the address
3222   // register temporarily.
3223   __ Push(regs_.object(), regs_.address());
3224   __ JumpIfWhite(regs_.scratch0(),  // The value.
3225                  regs_.scratch1(),  // Scratch.
3226                  regs_.object(),    // Scratch.
3227                  regs_.address(),   // Scratch.
3228                  &need_incremental_pop_scratch);
3229   __ Pop(regs_.object(), regs_.address());
3230 
3231   regs_.Restore(masm);
3232   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
3233     __ RememberedSetHelper(object(),
3234                            address(),
3235                            value(),
3236                            save_fp_regs_mode(),
3237                            MacroAssembler::kReturnAtEnd);
3238   } else {
3239     __ Ret();
3240   }
3241 
3242   __ bind(&need_incremental_pop_scratch);
3243   __ Pop(regs_.object(), regs_.address());
3244 
3245   __ bind(&need_incremental);
3246 
3247   // Fall through when we need to inform the incremental marker.
3248 }
3249 
3250 
3251 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
3252   CEntryStub ces(isolate(), 1, kSaveFPRegs);
3253   __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
3254   int parameter_count_offset =
3255       StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
3256   __ ld(a1, MemOperand(fp, parameter_count_offset));
3257   if (function_mode() == JS_FUNCTION_STUB_MODE) {
3258     __ Daddu(a1, a1, Operand(1));
3259   }
3260   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
3261   __ dsll(a1, a1, kPointerSizeLog2);
3262   __ Ret(USE_DELAY_SLOT);
3263   __ Daddu(sp, sp, a1);
3264 }
3265 
3266 void CallICTrampolineStub::Generate(MacroAssembler* masm) {
3267   __ EmitLoadTypeFeedbackVector(a2);
3268   CallICStub stub(isolate(), state());
3269   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
3270 }
3271 
3272 
3273 static void HandleArrayCases(MacroAssembler* masm, Register feedback,
3274                              Register receiver_map, Register scratch1,
3275                              Register scratch2, bool is_polymorphic,
3276                              Label* miss) {
3277   // feedback initially contains the feedback array
3278   Label next_loop, prepare_next;
3279   Label start_polymorphic;
3280 
3281   Register cached_map = scratch1;
3282 
3283   __ ld(cached_map,
3284         FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
3285   __ ld(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
3286   __ Branch(&start_polymorphic, ne, receiver_map, Operand(cached_map));
3287   // found, now call handler.
3288   Register handler = feedback;
3289   __ ld(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
3290   __ Daddu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
3291   __ Jump(t9);
3292 
3293   Register length = scratch2;
3294   __ bind(&start_polymorphic);
3295   __ ld(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
3296   if (!is_polymorphic) {
3297     // If the IC could be monomorphic we have to make sure we don't go past the
3298     // end of the feedback array.
3299     __ Branch(miss, eq, length, Operand(Smi::FromInt(2)));
3300   }
3301 
3302   Register too_far = length;
3303   Register pointer_reg = feedback;
3304 
3305   // +-----+------+------+-----+-----+ ... ----+
3306   // | map | len  | wm0  | h0  | wm1 |      hN |
3307   // +-----+------+------+-----+-----+ ... ----+
3308   //                 0      1     2        len-1
3309   //                              ^              ^
3310   //                              |              |
3311   //                         pointer_reg      too_far
3312   //                         aka feedback     scratch2
3313   // also need receiver_map
3314   // use cached_map (scratch1) to look in the weak map values.
3315   __ SmiScale(too_far, length, kPointerSizeLog2);
3316   __ Daddu(too_far, feedback, Operand(too_far));
3317   __ Daddu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3318   __ Daddu(pointer_reg, feedback,
3319            Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
3320 
3321   __ bind(&next_loop);
3322   __ ld(cached_map, MemOperand(pointer_reg));
3323   __ ld(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
3324   __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
3325   __ ld(handler, MemOperand(pointer_reg, kPointerSize));
3326   __ Daddu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
3327   __ Jump(t9);
3328 
3329   __ bind(&prepare_next);
3330   __ Daddu(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
3331   __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
3332 
3333   // We exhausted our array of map handler pairs.
3334   __ Branch(miss);
3335 }
3336 
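// Rough C++ equivalent of the loop above, walking the (weak map, handler)
// pairs pictured in the diagram (illustrative only; FindHandler is a
// hypothetical name):
//
//   Code* FindHandler(FixedArray* feedback, Map* receiver_map) {
//     for (int i = 2; i < feedback->length(); i += 2) {
//       WeakCell* cell = WeakCell::cast(feedback->get(i));
//       if (cell->value() == receiver_map) {
//         return Code::cast(feedback->get(i + 1));
//       }
//     }
//     return nullptr;  // Miss: none of the cached maps matched.
//   }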
3337 
3338 static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
3339                                   Register receiver_map, Register feedback,
3340                                   Register vector, Register slot,
3341                                   Register scratch, Label* compare_map,
3342                                   Label* load_smi_map, Label* try_array) {
3343   __ JumpIfSmi(receiver, load_smi_map);
3344   __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
3345   __ bind(compare_map);
3346   Register cached_map = scratch;
3347   // Move the weak map into the weak_cell register.
3348   __ ld(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
3349   __ Branch(try_array, ne, cached_map, Operand(receiver_map));
3350   Register handler = feedback;
3351   __ SmiScale(handler, slot, kPointerSizeLog2);
3352   __ Daddu(handler, vector, Operand(handler));
3353   __ ld(handler,
3354         FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
3355   __ Daddu(t9, handler, Code::kHeaderSize - kHeapObjectTag);
3356   __ Jump(t9);
3357 }
3358 
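// In feedback-vector terms, the monomorphic fast path above boils down to
// (illustrative only, hypothetical accessors):
//
//   if (WeakCell::cast(vector->get(slot))->value() == receiver->map()) {
//     Code* handler = Code::cast(vector->get(slot + 1));
//     // Tail-call the handler.
//   } else {
//     // Fall through to the array (polymorphic) case.
//   }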
3359 void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
3360   __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
3361   KeyedStoreICStub stub(isolate(), state());
3362   stub.GenerateForTrampoline(masm);
3363 }
3364 
3365 void KeyedStoreICStub::Generate(MacroAssembler* masm) {
3366   GenerateImpl(masm, false);
3367 }
3368 
3369 void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
3370   GenerateImpl(masm, true);
3371 }
3372 
3373 
3374 static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
3375                                        Register receiver_map, Register scratch1,
3376                                        Register scratch2, Label* miss) {
3377   // feedback initially contains the feedback array
3378   Label next_loop, prepare_next;
3379   Label start_polymorphic;
3380   Label transition_call;
3381 
3382   Register cached_map = scratch1;
3383   Register too_far = scratch2;
3384   Register pointer_reg = feedback;
3385 
3386   __ ld(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
3387 
3388   // +-----+------+------+-----+-----+-----+ ... ----+
3389   // | map | len  | wm0  | wt0 | h0  | wm1 |      hN |
3390   // +-----+------+------+-----+-----+ ----+ ... ----+
3391   //                 0      1     2              len-1
3392   //                 ^                                 ^
3393   //                 |                                 |
3394   //             pointer_reg                        too_far
3395   //             aka feedback                       scratch2
3396   // also need receiver_map
3397   // use cached_map (scratch1) to look in the weak map values.
3398   __ SmiScale(too_far, too_far, kPointerSizeLog2);
3399   __ Daddu(too_far, feedback, Operand(too_far));
3400   __ Daddu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3401   __ Daddu(pointer_reg, feedback,
3402            Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
3403 
3404   __ bind(&next_loop);
3405   __ ld(cached_map, MemOperand(pointer_reg));
3406   __ ld(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
3407   __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
3408   // Is it a transitioning store?
3409   __ ld(too_far, MemOperand(pointer_reg, kPointerSize));
3410   __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3411   __ Branch(&transition_call, ne, too_far, Operand(at));
3412 
3413   __ ld(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
3414   __ Daddu(t9, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
3415   __ Jump(t9);
3416 
3417   __ bind(&transition_call);
3418   __ ld(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
3419   __ JumpIfSmi(too_far, miss);
3420 
3421   __ ld(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
3422   // Load the map into the correct register.
3423   DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
3424   __ Move(feedback, too_far);
3425   __ Daddu(t9, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
3426   __ Jump(t9);
3427 
3428   __ bind(&prepare_next);
3429   __ Daddu(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
3430   __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
3431 
3432   // We exhausted our array of map handler pairs.
3433   __ Branch(miss);
3434 }
3435 
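// Rough C++ equivalent of the store-handler loop above; entries are
// (weak map, weak transition map or undefined, handler) triples
// (illustrative only, hypothetical code):
//
//   for (int i = 0; i < feedback->length(); i += 3) {
//     if (WeakCell::cast(feedback->get(i))->value() != receiver_map) continue;
//     Object* transition = feedback->get(i + 1);  // undefined or a WeakCell.
//     Code* handler = Code::cast(feedback->get(i + 2));
//     // For a transitioning store, the WeakCell supplies the new map, which
//     // is passed to the handler in the map register.
//     // Tail-call |handler|.
//   }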
3436 void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
3437   Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // a1
3438   Register key = StoreWithVectorDescriptor::NameRegister();           // a2
3439   Register vector = StoreWithVectorDescriptor::VectorRegister();      // a3
3440   Register slot = StoreWithVectorDescriptor::SlotRegister();          // a4
3441   DCHECK(StoreWithVectorDescriptor::ValueRegister().is(a0));          // a0
3442   Register feedback = a5;
3443   Register receiver_map = a6;
3444   Register scratch1 = a7;
3445 
3446   __ SmiScale(scratch1, slot, kPointerSizeLog2);
3447   __ Daddu(feedback, vector, Operand(scratch1));
3448   __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
3449 
3450   // Try to quickly handle the monomorphic case without knowing for sure
3451   // if we have a weak cell in feedback. We do know it's safe to look
3452   // at WeakCell::kValueOffset.
3453   Label try_array, load_smi_map, compare_map;
3454   Label not_array, miss;
3455   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
3456                         scratch1, &compare_map, &load_smi_map, &try_array);
3457 
3458   __ bind(&try_array);
3459   // Is it a fixed array?
3460   __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
3461   __ Branch(&not_array, ne, scratch1, Heap::kFixedArrayMapRootIndex);
3462 
3463   // We have a polymorphic element handler.
3464   Label try_poly_name;
3465 
3466   Register scratch2 = t0;
3467 
3468   HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
3469                              &miss);
3470 
3471   __ bind(&not_array);
3472   // Is it generic?
3473   __ Branch(&try_poly_name, ne, feedback, Heap::kmegamorphic_symbolRootIndex);
3474   Handle<Code> megamorphic_stub =
3475       KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
3476   __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
3477 
3478   __ bind(&try_poly_name);
3479   // We might have a name in feedback, and a fixed array in the next slot.
3480   __ Branch(&miss, ne, key, Operand(feedback));
3481   // If the name comparison succeeded, we know we have a fixed array with
3482   // at least one map/handler pair.
3483   __ SmiScale(scratch1, slot, kPointerSizeLog2);
3484   __ Daddu(feedback, vector, Operand(scratch1));
3485   __ ld(feedback,
3486         FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
3487   HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
3488                    &miss);
3489 
3490   __ bind(&miss);
3491   KeyedStoreIC::GenerateMiss(masm);
3492 
3493   __ bind(&load_smi_map);
3494   __ Branch(USE_DELAY_SLOT, &compare_map);
3495   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);  // In delay slot.
3496 }
3497 
3498 
3499 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
3500   if (masm->isolate()->function_entry_hook() != NULL) {
3501     ProfileEntryHookStub stub(masm->isolate());
3502     __ push(ra);
3503     __ CallStub(&stub);
3504     __ pop(ra);
3505   }
3506 }
3507 
3508 
3509 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
3510   // The entry hook is a "push ra" instruction, followed by a call.
3511   // Note: on MIPS a "push" is 2 instructions.
3512   const int32_t kReturnAddressDistanceFromFunctionStart =
3513       Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
3514 
3515   // This should contain all kJSCallerSaved registers.
3516   const RegList kSavedRegs =
3517      kJSCallerSaved |  // Caller saved registers.
3518      s5.bit();         // Saved stack pointer.
3519 
3520   // We also save ra, so the count here is one higher than the mask indicates.
3521   const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
3522 
3523   // Save all caller-save registers as this may be called from anywhere.
3524   __ MultiPush(kSavedRegs | ra.bit());
3525 
3526   // Compute the function's address for the first argument.
3527   __ Dsubu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
3528 
3529   // The caller's return address is above the saved temporaries.
3530   // Grab that for the second argument to the hook.
3531   __ Daddu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
3532 
3533   // Align the stack if necessary.
3534   int frame_alignment = masm->ActivationFrameAlignment();
3535   if (frame_alignment > kPointerSize) {
3536     __ mov(s5, sp);
3537     DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
3538     __ And(sp, sp, Operand(-frame_alignment));
3539   }
3540 
3541   __ Dsubu(sp, sp, kCArgsSlotsSize);
3542 #if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
3543   int64_t entry_hook =
3544       reinterpret_cast<int64_t>(isolate()->function_entry_hook());
3545   __ li(t9, Operand(entry_hook));
3546 #else
3547   // Under the simulator we need to indirect the entry hook through a
3548   // trampoline function at a known address.
3549   // It additionally takes an isolate as a third parameter.
3550   __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
3551 
3552   ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
3553   __ li(t9, Operand(ExternalReference(&dispatcher,
3554                                       ExternalReference::BUILTIN_CALL,
3555                                       isolate())));
3556 #endif
3557   // Call C function through t9 to conform ABI for PIC.
3558   __ Call(t9);
3559 
3560   // Restore the stack pointer if needed.
3561   if (frame_alignment > kPointerSize) {
3562     __ mov(sp, s5);
3563   } else {
3564     __ Daddu(sp, sp, kCArgsSlotsSize);
3565   }
3566 
3567   // Also pop ra to get Ret(0).
3568   __ MultiPop(kSavedRegs | ra.bit());
3569   __ Ret();
3570 }
3571 
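// The C function reached through t9 above is the embedder-supplied entry
// hook. In V8's public API it has roughly this shape:
//
//   typedef void (*FunctionEntryHook)(uintptr_t function,
//                                     uintptr_t return_addr_location);
//
// a0 carries |function| (the address of the instrumented code) and a1 carries
// |return_addr_location| (the stack slot holding the caller's return address);
// under the simulator, a2 additionally passes the isolate to the trampoline.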
3572 
3573 template<class T>
3574 static void CreateArrayDispatch(MacroAssembler* masm,
3575                                 AllocationSiteOverrideMode mode) {
3576   if (mode == DISABLE_ALLOCATION_SITES) {
3577     T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
3578     __ TailCallStub(&stub);
3579   } else if (mode == DONT_OVERRIDE) {
3580     int last_index = GetSequenceIndexFromFastElementsKind(
3581         TERMINAL_FAST_ELEMENTS_KIND);
3582     for (int i = 0; i <= last_index; ++i) {
3583       ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
3584       T stub(masm->isolate(), kind);
3585       __ TailCallStub(&stub, eq, a3, Operand(kind));
3586     }
3587 
3588     // If we reached this point there is a problem.
3589     __ Abort(kUnexpectedElementsKindInArrayConstructor);
3590   } else {
3591     UNREACHABLE();
3592   }
3593 }
3594 
3595 
3596 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
3597                                            AllocationSiteOverrideMode mode) {
3598   // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
3599   // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
3600   // a0 - number of arguments
3601   // a1 - constructor?
3602   // sp[0] - last argument
3603   Label normal_sequence;
3604   if (mode == DONT_OVERRIDE) {
3605     STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3606     STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3607     STATIC_ASSERT(FAST_ELEMENTS == 2);
3608     STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3609     STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
3610     STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
3611 
3612     // is the low bit set? If so, we are holey and that is good.
3613     __ And(at, a3, Operand(1));
3614     __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
3615   }
3616   // look at the first argument
3617   __ ld(a5, MemOperand(sp, 0));
3618   __ Branch(&normal_sequence, eq, a5, Operand(zero_reg));
3619 
3620   if (mode == DISABLE_ALLOCATION_SITES) {
3621     ElementsKind initial = GetInitialFastElementsKind();
3622     ElementsKind holey_initial = GetHoleyElementsKind(initial);
3623 
3624     ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
3625                                                   holey_initial,
3626                                                   DISABLE_ALLOCATION_SITES);
3627     __ TailCallStub(&stub_holey);
3628 
3629     __ bind(&normal_sequence);
3630     ArraySingleArgumentConstructorStub stub(masm->isolate(),
3631                                             initial,
3632                                             DISABLE_ALLOCATION_SITES);
3633     __ TailCallStub(&stub);
3634   } else if (mode == DONT_OVERRIDE) {
3635     // We are going to create a holey array, but our kind is non-holey.
3636     // Fix kind and retry (only if we have an allocation site in the slot).
3637     __ Daddu(a3, a3, Operand(1));
3638 
3639     if (FLAG_debug_code) {
3640       __ ld(a5, FieldMemOperand(a2, 0));
3641       __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
3642       __ Assert(eq, kExpectedAllocationSite, a5, Operand(at));
3643     }
3644 
3645     // Save the resulting elements kind in type info. We can't just store a3
3646     // in the AllocationSite::transition_info field because elements kind is
3647     // restricted to a portion of the field; the upper bits must be left alone.
3648     STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
3649     __ ld(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
3650     __ Daddu(a4, a4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
3651     __ sd(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
3652 
3653 
3654     __ bind(&normal_sequence);
3655     int last_index = GetSequenceIndexFromFastElementsKind(
3656         TERMINAL_FAST_ELEMENTS_KIND);
3657     for (int i = 0; i <= last_index; ++i) {
3658       ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
3659       ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
3660       __ TailCallStub(&stub, eq, a3, Operand(kind));
3661     }
3662 
3663     // If we reached this point there is a problem.
3664     __ Abort(kUnexpectedElementsKindInArrayConstructor);
3665   } else {
3666     UNREACHABLE();
3667   }
3668 }
3669 
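// The "fix kind and retry" path above relies on the ElementsKind numbering
// asserted at the top of this function: each holey kind is its packed
// counterpart plus one, so a packed kind can be turned holey by adding 1
// (equivalently, by setting the low bit). Sketch with a hypothetical helper:
//
//   ElementsKind ToHoley(ElementsKind packed) {
//     DCHECK_EQ(0, packed & 1);  // Packed kinds have the low bit clear.
//     return static_cast<ElementsKind>(packed + 1);
//   }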
3670 
3671 template<class T>
3672 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
3673   int to_index = GetSequenceIndexFromFastElementsKind(
3674       TERMINAL_FAST_ELEMENTS_KIND);
3675   for (int i = 0; i <= to_index; ++i) {
3676     ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
3677     T stub(isolate, kind);
3678     stub.GetCode();
3679     if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
3680       T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
3681       stub1.GetCode();
3682     }
3683   }
3684 }
3685 
3686 void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
3687   ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
3688       isolate);
3689   ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
3690       isolate);
3691   ArrayNArgumentsConstructorStub stub(isolate);
3692   stub.GetCode();
3693   ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
3694   for (int i = 0; i < 2; i++) {
3695     // For internal arrays we only need a few things.
3696     InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
3697     stubh1.GetCode();
3698     InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
3699     stubh2.GetCode();
3700   }
3701 }
3702 
3703 
3704 void ArrayConstructorStub::GenerateDispatchToArrayStub(
3705     MacroAssembler* masm,
3706     AllocationSiteOverrideMode mode) {
3707   Label not_zero_case, not_one_case;
3708   __ And(at, a0, a0);
3709   __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
3710   CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
3711 
3712   __ bind(&not_zero_case);
3713   __ Branch(&not_one_case, gt, a0, Operand(1));
3714   CreateArrayDispatchOneArgument(masm, mode);
3715 
3716   __ bind(&not_one_case);
3717   ArrayNArgumentsConstructorStub stub(masm->isolate());
3718   __ TailCallStub(&stub);
3719 }
3720 
3721 
3722 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
3723   // ----------- S t a t e -------------
3724   //  -- a0 : argc (only if argument_count() == ANY)
3725   //  -- a1 : constructor
3726   //  -- a2 : AllocationSite or undefined
3727   //  -- a3 : new target
3728   //  -- sp[0] : last argument
3729   // -----------------------------------
3730 
3731   if (FLAG_debug_code) {
3732     // The array construct code is only set for the global and natives
3733     // builtin Array functions which always have maps.
3734 
3735     // Initial map for the builtin Array function should be a map.
3736     __ ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
3737     // Will catch both a NULL and a Smi.
3738     __ SmiTst(a4, at);
3739     __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
3740         at, Operand(zero_reg));
3741     __ GetObjectType(a4, a4, a5);
3742     __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
3743         a5, Operand(MAP_TYPE));
3744 
3745     // We should either have undefined in a2 or a valid AllocationSite
3746     __ AssertUndefinedOrAllocationSite(a2, a4);
3747   }
3748 
3749   // Enter the context of the Array function.
3750   __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3751 
3752   Label subclassing;
3753   __ Branch(&subclassing, ne, a1, Operand(a3));
3754 
3755   Label no_info;
3756   // Get the elements kind and case on that.
3757   __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3758   __ Branch(&no_info, eq, a2, Operand(at));
3759 
3760   __ ld(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
3761   __ SmiUntag(a3);
3762   STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
3763   __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
3764   GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
3765 
3766   __ bind(&no_info);
3767   GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
3768 
3769   // Subclassing.
3770   __ bind(&subclassing);
3771   __ Dlsa(at, sp, a0, kPointerSizeLog2);
3772   __ sd(a1, MemOperand(at));
3773   __ li(at, Operand(3));
3774   __ Daddu(a0, a0, at);
3775   __ Push(a3, a2);
3776   __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
3777 }
3778 
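// The dispatch above reads the elements kind out of the low bits of the
// AllocationSite's transition info, roughly (illustrative only):
//
//   int info = Smi::cast(site->transition_info())->value();
//   ElementsKind kind = static_cast<ElementsKind>(
//       info & AllocationSite::ElementsKindBits::kMask);
//   // |kind| then selects the specialized constructor stub via
//   // GenerateDispatchToArrayStub(masm, DONT_OVERRIDE).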
3779 
3780 void InternalArrayConstructorStub::GenerateCase(
3781     MacroAssembler* masm, ElementsKind kind) {
3782 
3783   InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
3784   __ TailCallStub(&stub0, lo, a0, Operand(1));
3785 
3786   ArrayNArgumentsConstructorStub stubN(isolate());
3787   __ TailCallStub(&stubN, hi, a0, Operand(1));
3788 
3789   if (IsFastPackedElementsKind(kind)) {
3790     // We might need to create a holey array;
3791     // look at the first argument.
3792     __ ld(at, MemOperand(sp, 0));
3793 
3794     InternalArraySingleArgumentConstructorStub
3795         stub1_holey(isolate(), GetHoleyElementsKind(kind));
3796     __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
3797   }
3798 
3799   InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
3800   __ TailCallStub(&stub1);
3801 }
3802 
3803 
3804 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
3805   // ----------- S t a t e -------------
3806   //  -- a0 : argc
3807   //  -- a1 : constructor
3808   //  -- sp[0] : return address
3809   //  -- sp[4] : last argument
3810   // -----------------------------------
3811 
3812   if (FLAG_debug_code) {
3813     // The array construct code is only set for the global and natives
3814     // builtin Array functions which always have maps.
3815 
3816     // Initial map for the builtin Array function should be a map.
3817     __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
3818     // Will catch both a NULL and a Smi.
3819     __ SmiTst(a3, at);
3820     __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
3821         at, Operand(zero_reg));
3822     __ GetObjectType(a3, a3, a4);
3823     __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
3824         a4, Operand(MAP_TYPE));
3825   }
3826 
3827   // Figure out the right elements kind.
3828   __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
3829 
3830   // Load the map's "bit field 2" into a3. We only need the first byte,
3831   // but the following bit field extraction takes care of that anyway.
3832   __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
3833   // Retrieve elements_kind from bit field 2.
3834   __ DecodeField<Map::ElementsKindBits>(a3);
3835 
3836   if (FLAG_debug_code) {
3837     Label done;
3838     __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
3839     __ Assert(
3840         eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
3841         a3, Operand(FAST_HOLEY_ELEMENTS));
3842     __ bind(&done);
3843   }
3844 
3845   Label fast_elements_case;
3846   __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
3847   GenerateCase(masm, FAST_HOLEY_ELEMENTS);
3848 
3849   __ bind(&fast_elements_case);
3850   GenerateCase(masm, FAST_ELEMENTS);
3851 }
3852 
3853 
3854 void FastNewObjectStub::Generate(MacroAssembler* masm) {
3855   // ----------- S t a t e -------------
3856   //  -- a1 : target
3857   //  -- a3 : new target
3858   //  -- cp : context
3859   //  -- ra : return address
3860   // -----------------------------------
3861   __ AssertFunction(a1);
3862   __ AssertReceiver(a3);
3863 
3864   // Verify that the new target is a JSFunction.
3865   Label new_object;
3866   __ GetObjectType(a3, a2, a2);
3867   __ Branch(&new_object, ne, a2, Operand(JS_FUNCTION_TYPE));
3868 
3869   // Load the initial map and verify that it's in fact a map.
3870   __ ld(a2, FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
3871   __ JumpIfSmi(a2, &new_object);
3872   __ GetObjectType(a2, a0, a0);
3873   __ Branch(&new_object, ne, a0, Operand(MAP_TYPE));
3874 
3875   // Fall back to runtime if the target differs from the new target's
3876   // initial map constructor.
3877   __ ld(a0, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
3878   __ Branch(&new_object, ne, a0, Operand(a1));
3879 
3880   // Allocate the JSObject on the heap.
3881   Label allocate, done_allocate;
3882   __ lbu(a4, FieldMemOperand(a2, Map::kInstanceSizeOffset));
3883   __ Allocate(a4, v0, a5, a0, &allocate, SIZE_IN_WORDS);
3884   __ bind(&done_allocate);
3885 
3886   // Initialize the JSObject fields.
3887   __ sd(a2, FieldMemOperand(v0, JSObject::kMapOffset));
3888   __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
3889   __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
3890   __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
3891   STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
3892   __ Daddu(a1, v0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
3893 
3894   // ----------- S t a t e -------------
3895   //  -- v0 : result (tagged)
3896   //  -- a1 : result fields (untagged)
3897   //  -- a5 : result end (untagged)
3898   //  -- a2 : initial map
3899   //  -- cp : context
3900   //  -- ra : return address
3901   // -----------------------------------
3902 
3903   // Perform in-object slack tracking if requested.
3904   Label slack_tracking;
3905   STATIC_ASSERT(Map::kNoSlackTracking == 0);
3906   __ lwu(a3, FieldMemOperand(a2, Map::kBitField3Offset));
3907   __ And(at, a3, Operand(Map::ConstructionCounter::kMask));
3908   __ Branch(USE_DELAY_SLOT, &slack_tracking, ne, at, Operand(zero_reg));
3909   __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);  // In delay slot.
3910   {
3911     // Initialize all in-object fields with undefined.
3912     __ InitializeFieldsWithFiller(a1, a5, a0);
3913     __ Ret();
3914   }
3915   __ bind(&slack_tracking);
3916   {
3917     // Decrease generous allocation count.
3918     STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
3919     __ Subu(a3, a3, Operand(1 << Map::ConstructionCounter::kShift));
3920     __ sw(a3, FieldMemOperand(a2, Map::kBitField3Offset));
3921 
3922     // Initialize the in-object fields with undefined.
3923     __ lbu(a4, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
3924     __ dsll(a4, a4, kPointerSizeLog2);
3925     __ Dsubu(a4, a5, a4);
3926     __ InitializeFieldsWithFiller(a1, a4, a0);
3927 
3928     // Initialize the remaining (reserved) fields with one pointer filler map.
3929     __ LoadRoot(a0, Heap::kOnePointerFillerMapRootIndex);
3930     __ InitializeFieldsWithFiller(a1, a5, a0);
3931 
3932     // Check if we can finalize the instance size.
3933     Label finalize;
3934     STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
3935     __ And(a3, a3, Operand(Map::ConstructionCounter::kMask));
3936     __ Branch(&finalize, eq, a3, Operand(zero_reg));
3937     __ Ret();
3938 
3939     // Finalize the instance size.
3940     __ bind(&finalize);
3941     {
3942       FrameScope scope(masm, StackFrame::INTERNAL);
3943       __ Push(v0, a2);
3944       __ CallRuntime(Runtime::kFinalizeInstanceSize);
3945       __ Pop(v0);
3946     }
3947     __ Ret();
3948   }
3949 
3950   // Fall back to %AllocateInNewSpace.
3951   __ bind(&allocate);
3952   {
3953     FrameScope scope(masm, StackFrame::INTERNAL);
3954     STATIC_ASSERT(kSmiTag == 0);
3955     STATIC_ASSERT(kSmiTagSize == 1);
3956     __ dsll(a4, a4, kPointerSizeLog2 + kSmiShiftSize + kSmiTagSize);
3957     __ SmiTag(a4);
3958     __ Push(a2, a4);
3959     __ CallRuntime(Runtime::kAllocateInNewSpace);
3960     __ Pop(a2);
3961   }
3962   __ lbu(a5, FieldMemOperand(a2, Map::kInstanceSizeOffset));
3963   __ Dlsa(a5, v0, a5, kPointerSizeLog2);
3964   STATIC_ASSERT(kHeapObjectTag == 1);
3965   __ Dsubu(a5, a5, Operand(kHeapObjectTag));
3966   __ jmp(&done_allocate);
3967 
3968   // Fall back to %NewObject.
3969   __ bind(&new_object);
3970   __ Push(a1, a3);
3971   __ TailCallRuntime(Runtime::kNewObject);
3972 }
3973 
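// Slack-tracking sketch for the block above (hypothetical accessors; the
// counter lives in Map::ConstructionCounter inside bit field 3):
//
//   int counter = map->construction_counter();
//   map->set_construction_counter(counter - 1);
//   // Fields used so far are filled with undefined, the remaining (slack)
//   // fields with the one-pointer filler map, and once the counter reaches
//   // its end value Runtime::kFinalizeInstanceSize shrinks the map's
//   // instance size accordingly.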
3974 
3975 void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
3976   // ----------- S t a t e -------------
3977   //  -- a1 : function
3978   //  -- cp : context
3979   //  -- fp : frame pointer
3980   //  -- ra : return address
3981   // -----------------------------------
3982   __ AssertFunction(a1);
3983 
3984   // Make a2 point to the JavaScript frame.
3985   __ mov(a2, fp);
3986   if (skip_stub_frame()) {
3987     // For Ignition we need to skip the handler/stub frame to reach the
3988     // JavaScript frame for the function.
3989     __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
3990   }
3991   if (FLAG_debug_code) {
3992     Label ok;
3993     __ ld(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
3994     __ Branch(&ok, eq, a1, Operand(a3));
3995     __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
3996     __ bind(&ok);
3997   }
3998 
3999   // Check if we have rest parameters (only possible if we have an
4000   // arguments adaptor frame below the function frame).
4001   Label no_rest_parameters;
4002   __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
4003   __ ld(a3, MemOperand(a2, CommonFrameConstants::kContextOrFrameTypeOffset));
4004   __ Branch(&no_rest_parameters, ne, a3,
4005             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4006 
4007   // Check if the arguments adaptor frame contains more arguments than
4008   // specified by the function's internal formal parameter count.
4009   Label rest_parameters;
4010   __ SmiLoadUntag(
4011       a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4012   __ ld(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4013   __ lw(a3,
4014         FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
4015   __ Dsubu(a0, a0, Operand(a3));
4016   __ Branch(&rest_parameters, gt, a0, Operand(zero_reg));
4017 
4018   // Return an empty rest parameter array.
4019   __ bind(&no_rest_parameters);
4020   {
4021     // ----------- S t a t e -------------
4022     //  -- cp : context
4023     //  -- ra : return address
4024     // -----------------------------------
4025 
4026     // Allocate an empty rest parameter array.
4027     Label allocate, done_allocate;
4028     __ Allocate(JSArray::kSize, v0, a0, a1, &allocate, NO_ALLOCATION_FLAGS);
4029     __ bind(&done_allocate);
4030 
4031     // Setup the rest parameter array in v0.
4032     __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, a1);
4033     __ sd(a1, FieldMemOperand(v0, JSArray::kMapOffset));
4034     __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
4035     __ sd(a1, FieldMemOperand(v0, JSArray::kPropertiesOffset));
4036     __ sd(a1, FieldMemOperand(v0, JSArray::kElementsOffset));
4037     __ Move(a1, Smi::kZero);
4038     __ Ret(USE_DELAY_SLOT);
4039     __ sd(a1, FieldMemOperand(v0, JSArray::kLengthOffset));  // In delay slot
4040     STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
4041 
4042     // Fall back to %AllocateInNewSpace.
4043     __ bind(&allocate);
4044     {
4045       FrameScope scope(masm, StackFrame::INTERNAL);
4046       __ Push(Smi::FromInt(JSArray::kSize));
4047       __ CallRuntime(Runtime::kAllocateInNewSpace);
4048     }
4049     __ jmp(&done_allocate);
4050   }
4051 
4052   __ bind(&rest_parameters);
4053   {
4054     // Compute the pointer to the first rest parameter (skipping the receiver).
4055     __ Dlsa(a2, a2, a0, kPointerSizeLog2);
4056     __ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
4057                              1 * kPointerSize));
4058 
4059     // ----------- S t a t e -------------
4060     //  -- cp : context
4061     //  -- a0 : number of rest parameters
4062     //  -- a1 : function
4063     //  -- a2 : pointer to the first rest parameter
4064     //  -- ra : return address
4065     // -----------------------------------
4066 
4067     // Allocate space for the rest parameter array plus the backing store.
4068     Label allocate, done_allocate;
4069     __ li(a5, Operand(JSArray::kSize + FixedArray::kHeaderSize));
4070     __ Dlsa(a5, a5, a0, kPointerSizeLog2);
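    // a5 = JSArray::kSize + FixedArray::kHeaderSize + <rest count> *
    // kPointerSize: the JSArray and its elements backing store share a
    // single allocation.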
4071     __ Allocate(a5, v0, a3, a4, &allocate, NO_ALLOCATION_FLAGS);
4072     __ bind(&done_allocate);
4073 
4074     // Compute arguments.length in a4.
4075     __ SmiTag(a4, a0);
4076 
4077     // Setup the elements array in v0.
4078     __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4079     __ sd(at, FieldMemOperand(v0, FixedArray::kMapOffset));
4080     __ sd(a4, FieldMemOperand(v0, FixedArray::kLengthOffset));
4081     __ Daddu(a3, v0, Operand(FixedArray::kHeaderSize));
4082     {
4083       Label loop, done_loop;
4084       __ Dlsa(a1, a3, a0, kPointerSizeLog2);
4085       __ bind(&loop);
4086       __ Branch(&done_loop, eq, a1, Operand(a3));
4087       __ ld(at, MemOperand(a2, 0 * kPointerSize));
4088       __ sd(at, FieldMemOperand(a3, 0 * kPointerSize));
4089       __ Dsubu(a2, a2, Operand(1 * kPointerSize));
4090       __ Daddu(a3, a3, Operand(1 * kPointerSize));
4091       __ Branch(&loop);
4092       __ bind(&done_loop);
4093     }
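    // a3 now points just past the backing store; the JSArray itself is laid
    // out there, within the same allocation.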
4094 
4095     // Setup the rest parameter array in a3.
4096     __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, at);
4097     __ sd(at, FieldMemOperand(a3, JSArray::kMapOffset));
4098     __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
4099     __ sd(at, FieldMemOperand(a3, JSArray::kPropertiesOffset));
4100     __ sd(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
4101     __ sd(a4, FieldMemOperand(a3, JSArray::kLengthOffset));
4102     STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
4103     __ Ret(USE_DELAY_SLOT);
4104     __ mov(v0, a3);  // In delay slot
4105 
4106     // Fall back to %AllocateInNewSpace (if not too big).
4107     Label too_big_for_new_space;
4108     __ bind(&allocate);
4109     __ Branch(&too_big_for_new_space, gt, a5,
4110               Operand(kMaxRegularHeapObjectSize));
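    // The runtime can still serve the allocation from new space as long as
    // the total size fits a regular heap object; larger requests go straight
    // to the generic runtime implementation below.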
4111     {
4112       FrameScope scope(masm, StackFrame::INTERNAL);
4113       __ SmiTag(a0);
4114       __ SmiTag(a5);
4115       __ Push(a0, a2, a5);
4116       __ CallRuntime(Runtime::kAllocateInNewSpace);
4117       __ Pop(a0, a2);
4118       __ SmiUntag(a0);
4119     }
4120     __ jmp(&done_allocate);
4121 
4122     // Fall back to %NewRestParameter.
4123     __ bind(&too_big_for_new_space);
4124     __ Push(a1);
4125     __ TailCallRuntime(Runtime::kNewRestParameter);
4126   }
4127 }
4128 
4129 
4130 void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
4131   // ----------- S t a t e -------------
4132   //  -- a1 : function
4133   //  -- cp : context
4134   //  -- fp : frame pointer
4135   //  -- ra : return address
4136   // -----------------------------------
4137   __ AssertFunction(a1);
4138 
4139   // Make t0 point to the JavaScript frame.
4140   __ mov(t0, fp);
4141   if (skip_stub_frame()) {
4142     // For Ignition we need to skip the handler/stub frame to reach the
4143     // JavaScript frame for the function.
4144     __ ld(t0, MemOperand(t0, StandardFrameConstants::kCallerFPOffset));
4145   }
4146   if (FLAG_debug_code) {
4147     Label ok;
4148     __ ld(a3, MemOperand(t0, StandardFrameConstants::kFunctionOffset));
4149     __ Branch(&ok, eq, a1, Operand(a3));
4150     __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
4151     __ bind(&ok);
4152   }
4153 
4154   // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
4155   __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4156   __ lw(a2,
4157          FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
4158   __ Dlsa(a3, t0, a2, kPointerSizeLog2);
4159   __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4160   __ SmiTag(a2);
4161 
4162   // a1 : function
4163   // a2 : number of parameters (tagged)
4164   // a3 : parameters pointer
4165   // t0 : JavaScript frame pointer
4166   // Registers used over whole function:
4167   //  a5 : arguments count (tagged)
4168   //  a6 : mapped parameter count (tagged)
4169 
4170   // Check if the calling frame is an arguments adaptor frame.
4171   Label adaptor_frame, try_allocate, runtime;
4172   __ ld(a4, MemOperand(t0, StandardFrameConstants::kCallerFPOffset));
4173   __ ld(a0, MemOperand(a4, CommonFrameConstants::kContextOrFrameTypeOffset));
4174   __ Branch(&adaptor_frame, eq, a0,
4175             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4176 
4177   // No adaptor, parameter count = argument count.
4178   __ mov(a5, a2);
4179   __ Branch(USE_DELAY_SLOT, &try_allocate);
4180   __ mov(a6, a2);  // In delay slot.
4181 
4182   // We have an adaptor frame. Patch the parameters pointer.
4183   __ bind(&adaptor_frame);
4184   __ ld(a5, MemOperand(a4, ArgumentsAdaptorFrameConstants::kLengthOffset));
4185   __ SmiScale(t2, a5, kPointerSizeLog2);
4186   __ Daddu(a4, a4, Operand(t2));
4187   __ Daddu(a3, a4, Operand(StandardFrameConstants::kCallerSPOffset));
4188 
4189   // a5 = argument count (tagged)
4190   // a6 = parameter count (tagged)
4191   // Compute the mapped parameter count = min(a6, a5) in a6.
4192   __ mov(a6, a2);
4193   __ Branch(&try_allocate, le, a6, Operand(a5));
4194   __ mov(a6, a5);
4195 
4196   __ bind(&try_allocate);
4197 
4198   // Compute the sizes of backing store, parameter map, and arguments object.
4199   // 1. Parameter map, has 2 extra words containing context and backing store.
4200   const int kParameterMapHeaderSize =
4201       FixedArray::kHeaderSize + 2 * kPointerSize;
4202   // If there are no mapped parameters, we do not need the parameter_map.
4203   Label param_map_size;
4204   DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
4205   __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a6, Operand(zero_reg));
4206   __ mov(t1, zero_reg);  // In delay slot: param map size = 0 when a6 == 0.
4207   __ SmiScale(t1, a6, kPointerSizeLog2);
4208   __ daddiu(t1, t1, kParameterMapHeaderSize);
4209   __ bind(&param_map_size);
4210 
4211   // 2. Backing store.
4212   __ SmiScale(t2, a5, kPointerSizeLog2);
4213   __ Daddu(t1, t1, Operand(t2));
4214   __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize));
4215 
4216   // 3. Arguments object.
4217   __ Daddu(t1, t1, Operand(JSSloppyArgumentsObject::kSize));
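  // t1 now holds the total size: the parameter map (header plus two extra
  // words plus one slot per mapped parameter, or nothing when no parameters
  // are mapped), the backing store (header plus one slot per argument) and
  // the arguments object itself, all allocated contiguously.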
4218 
4219   // Do the allocation of all three objects in one go.
4220   __ Allocate(t1, v0, t1, a4, &runtime, NO_ALLOCATION_FLAGS);
4221 
4222   // v0 = address of new object(s) (tagged)
4223   // a2 = formal parameter count (smi-tagged)
4224   // Get the arguments boilerplate from the current native context into a4.
4225   const int kNormalOffset =
4226       Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
4227   const int kAliasedOffset =
4228       Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
4229 
4230   __ ld(a4, NativeContextMemOperand());
4231   Label skip2_ne, skip2_eq;
4232   __ Branch(&skip2_ne, ne, a6, Operand(zero_reg));
4233   __ ld(a4, MemOperand(a4, kNormalOffset));
4234   __ bind(&skip2_ne);
4235 
4236   __ Branch(&skip2_eq, eq, a6, Operand(zero_reg));
4237   __ ld(a4, MemOperand(a4, kAliasedOffset));
4238   __ bind(&skip2_eq);
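  // a4 now holds the map to use: the plain sloppy arguments map when nothing
  // is mapped, or the fast aliased arguments map otherwise.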
4239 
4240   // v0 = address of new object (tagged)
4241   // a2 = formal parameter count (smi-tagged)
4242   // a4 = address of arguments map (tagged)
4243   // a6 = mapped parameter count (tagged)
4244   __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
4245   __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
4246   __ sd(t1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
4247   __ sd(t1, FieldMemOperand(v0, JSObject::kElementsOffset));
4248 
4249   // Set up the callee in-object property.
4250   __ AssertNotSmi(a1);
4251   __ sd(a1, FieldMemOperand(v0, JSSloppyArgumentsObject::kCalleeOffset));
4252 
4253   // Use the length (smi tagged) and set that as an in-object property too.
4254   __ AssertSmi(a5);
4255   __ sd(a5, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
4256 
4257   // Set up the elements pointer in the allocated arguments object.
4258   // If we allocated a parameter map, a4 will point there, otherwise
4259   // it will point to the backing store.
4260   __ Daddu(a4, v0, Operand(JSSloppyArgumentsObject::kSize));
4261   __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
4262 
4263   // v0 = address of new object (tagged)
4264   // a2 = formal parameter count (tagged)
4265   // a4 = address of parameter map or backing store (tagged)
4266   // a6 = mapped parameter count (tagged)
4267   // Initialize parameter map. If there are no mapped arguments, we're done.
4268   Label skip_parameter_map;
4269   Label skip3;
4270   __ Branch(&skip3, ne, a6, Operand(Smi::kZero));
4271   // Move backing store address to a1, because it is
4272   // expected there when filling in the unmapped arguments.
4273   __ mov(a1, a4);
4274   __ bind(&skip3);
4275 
4276   __ Branch(&skip_parameter_map, eq, a6, Operand(Smi::kZero));
4277 
4278   __ LoadRoot(a5, Heap::kSloppyArgumentsElementsMapRootIndex);
4279   __ sd(a5, FieldMemOperand(a4, FixedArray::kMapOffset));
4280   __ Daddu(a5, a6, Operand(Smi::FromInt(2)));
4281   __ sd(a5, FieldMemOperand(a4, FixedArray::kLengthOffset));
4282   __ sd(cp, FieldMemOperand(a4, FixedArray::kHeaderSize + 0 * kPointerSize));
4283   __ SmiScale(t2, a6, kPointerSizeLog2);
4284   __ Daddu(a5, a4, Operand(t2));
4285   __ Daddu(a5, a5, Operand(kParameterMapHeaderSize));
4286   __ sd(a5, FieldMemOperand(a4, FixedArray::kHeaderSize + 1 * kPointerSize));
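  // The parameter map now reads: map | length (mapped count + 2) | context |
  // backing store pointer | <one slot per mapped parameter follows>; the
  // backing store itself starts directly after the map.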
4287 
4288   // Copy the parameter slots and the holes in the arguments.
4289   // We need to fill in mapped_parameter_count slots. They index the context,
4290   // where parameters are stored in reverse order, at
4291   //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
4292   // The mapped parameters thus need to get indices
4293   //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
4294   //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
4295   // We loop from right to left.
4296   Label parameters_loop, parameters_test;
4297   __ mov(a5, a6);
4298   __ Daddu(t1, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
4299   __ Dsubu(t1, t1, Operand(a6));
4300   __ LoadRoot(a7, Heap::kTheHoleValueRootIndex);
4301   __ SmiScale(t2, a5, kPointerSizeLog2);
4302   __ Daddu(a1, a4, Operand(t2));
4303   __ Daddu(a1, a1, Operand(kParameterMapHeaderSize));
4304 
4305   // a1 = address of backing store (tagged)
4306   // a4 = address of parameter map (tagged)
4307   // a0 = temporary scratch (e.g., for address calculation)
4308   // t1 = loop variable (tagged)
4309   // a7 = the hole value
4310   __ jmp(&parameters_test);
4311 
4312   __ bind(&parameters_loop);
4313   __ Dsubu(a5, a5, Operand(Smi::FromInt(1)));
4314   __ SmiScale(a0, a5, kPointerSizeLog2);
4315   __ Daddu(a0, a0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
4316   __ Daddu(t2, a4, a0);
4317   __ sd(t1, MemOperand(t2));
4318   __ Dsubu(a0, a0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
4319   __ Daddu(t2, a1, a0);
4320   __ sd(a7, MemOperand(t2));
4321   __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
4322   __ bind(&parameters_test);
4323   __ Branch(&parameters_loop, ne, a5, Operand(Smi::kZero));
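  // The loop above stores, for every mapped parameter, its context slot
  // index (t1, a Smi) into the parameter map and the hole into the backing
  // store; the value itself lives in the context and is found through the
  // map.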
4324 
4325   // Restore a5 = argument count (tagged).
4326   __ ld(a5, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
4327 
4328   __ bind(&skip_parameter_map);
4329   // v0 = address of new object (tagged)
4330   // a1 = address of backing store (tagged)
4331   // a5 = argument count (tagged)
4332   // a6 = mapped parameter count (tagged)
4333   // t1 = scratch
4334   // Copy arguments header and remaining slots (if there are any).
4335   __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
4336   __ sd(t1, FieldMemOperand(a1, FixedArray::kMapOffset));
4337   __ sd(a5, FieldMemOperand(a1, FixedArray::kLengthOffset));
4338 
4339   Label arguments_loop, arguments_test;
4340   __ SmiScale(t2, a6, kPointerSizeLog2);
4341   __ Dsubu(a3, a3, Operand(t2));
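  // Skip the mapped parameters in the caller frame; only the unmapped tail
  // (indices a6 .. a5 - 1) is copied into the backing store below.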
4342   __ jmp(&arguments_test);
4343 
4344   __ bind(&arguments_loop);
4345   __ Dsubu(a3, a3, Operand(kPointerSize));
4346   __ ld(a4, MemOperand(a3, 0));
4347   __ SmiScale(t2, a6, kPointerSizeLog2);
4348   __ Daddu(t1, a1, Operand(t2));
4349   __ sd(a4, FieldMemOperand(t1, FixedArray::kHeaderSize));
4350   __ Daddu(a6, a6, Operand(Smi::FromInt(1)));
4351 
4352   __ bind(&arguments_test);
4353   __ Branch(&arguments_loop, lt, a6, Operand(a5));
4354 
4355   // Return.
4356   __ Ret();
4357 
4358   // Do the runtime call to allocate the arguments object.
4359   // a5 = argument count (tagged)
4360   __ bind(&runtime);
4361   __ Push(a1, a3, a5);
4362   __ TailCallRuntime(Runtime::kNewSloppyArguments);
4363 }
4364 
4365 
4366 void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
4367   // ----------- S t a t e -------------
4368   //  -- a1 : function
4369   //  -- cp : context
4370   //  -- fp : frame pointer
4371   //  -- ra : return address
4372   // -----------------------------------
4373   __ AssertFunction(a1);
4374 
4375   // Make a2 point to the JavaScript frame.
4376   __ mov(a2, fp);
4377   if (skip_stub_frame()) {
4378     // For Ignition we need to skip the handler/stub frame to reach the
4379     // JavaScript frame for the function.
4380     __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
4381   }
4382   if (FLAG_debug_code) {
4383     Label ok;
4384     __ ld(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
4385     __ Branch(&ok, eq, a1, Operand(a3));
4386     __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
4387     __ bind(&ok);
4388   }
4389 
4390   // Check if we have an arguments adaptor frame below the function frame.
4391   Label arguments_adaptor, arguments_done;
4392   __ ld(a3, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
4393   __ ld(a0, MemOperand(a3, CommonFrameConstants::kContextOrFrameTypeOffset));
4394   __ Branch(&arguments_adaptor, eq, a0,
4395             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4396   {
4397     __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4398     __ lw(a0,
4399           FieldMemOperand(a4, SharedFunctionInfo::kFormalParameterCountOffset));
4400     __ Dlsa(a2, a2, a0, kPointerSizeLog2);
4401     __ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
4402                              1 * kPointerSize));
4403   }
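  // Without an adaptor frame the formal parameter count is used as the
  // argument count, and a2 is left pointing at the first argument (the copy
  // loop further down walks it towards lower addresses).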
4404   __ Branch(&arguments_done);
4405   __ bind(&arguments_adaptor);
4406   {
4407     __ SmiLoadUntag(
4408         a0, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4409     __ Dlsa(a2, a3, a0, kPointerSizeLog2);
4410     __ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
4411                              1 * kPointerSize));
4412   }
4413   __ bind(&arguments_done);
4414 
4415   // ----------- S t a t e -------------
4416   //  -- cp : context
4417   //  -- a0 : number of rest parameters
4418   //  -- a1 : function
4419   //  -- a2 : pointer to the first rest parameter
4420   //  -- ra : return address
4421   // -----------------------------------
4422 
4423   // Allocate space for the rest parameter array plus the backing store.
4424   Label allocate, done_allocate;
4425   __ li(a5, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
4426   __ Dlsa(a5, a5, a0, kPointerSizeLog2);
4427   __ Allocate(a5, v0, a3, a4, &allocate, NO_ALLOCATION_FLAGS);
4428   __ bind(&done_allocate);
4429 
4430   // Compute arguments.length in a4.
4431   __ SmiTag(a4, a0);
4432 
4433   // Setup the elements array in v0.
4434   __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4435   __ sd(at, FieldMemOperand(v0, FixedArray::kMapOffset));
4436   __ sd(a4, FieldMemOperand(v0, FixedArray::kLengthOffset));
4437   __ Daddu(a3, v0, Operand(FixedArray::kHeaderSize));
4438   {
4439     Label loop, done_loop;
4440     __ Dlsa(a1, a3, a0, kPointerSizeLog2);
4441     __ bind(&loop);
4442     __ Branch(&done_loop, eq, a1, Operand(a3));
4443     __ ld(at, MemOperand(a2, 0 * kPointerSize));
4444     __ sd(at, FieldMemOperand(a3, 0 * kPointerSize));
4445     __ Dsubu(a2, a2, Operand(1 * kPointerSize));
4446     __ Daddu(a3, a3, Operand(1 * kPointerSize));
4447     __ Branch(&loop);
4448     __ bind(&done_loop);
4449   }
4450 
4451   // Setup the strict arguments object in a3.
4452   __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, at);
4453   __ sd(at, FieldMemOperand(a3, JSStrictArgumentsObject::kMapOffset));
4454   __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
4455   __ sd(at, FieldMemOperand(a3, JSStrictArgumentsObject::kPropertiesOffset));
4456   __ sd(v0, FieldMemOperand(a3, JSStrictArgumentsObject::kElementsOffset));
4457   __ sd(a4, FieldMemOperand(a3, JSStrictArgumentsObject::kLengthOffset));
4458   STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
4459   __ Ret(USE_DELAY_SLOT);
4460   __ mov(v0, a3);  // In delay slot
4461 
4462   // Fall back to %AllocateInNewSpace (if not too big).
4463   Label too_big_for_new_space;
4464   __ bind(&allocate);
4465   __ Branch(&too_big_for_new_space, gt, a5, Operand(kMaxRegularHeapObjectSize));
4466   {
4467     FrameScope scope(masm, StackFrame::INTERNAL);
4468     __ SmiTag(a0);
4469     __ SmiTag(a5);
4470     __ Push(a0, a2, a5);
4471     __ CallRuntime(Runtime::kAllocateInNewSpace);
4472     __ Pop(a0, a2);
4473     __ SmiUntag(a0);
4474   }
4475   __ jmp(&done_allocate);
4476 
4477   // Fall back to %NewStrictArguments.
4478   __ bind(&too_big_for_new_space);
4479   __ Push(a1);
4480   __ TailCallRuntime(Runtime::kNewStrictArguments);
4481 }
4482 
4483 
4484 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
4485   int64_t offset = (ref0.address() - ref1.address());
4486   DCHECK(static_cast<int>(offset) == offset);
4487   return static_cast<int>(offset);
4488 }
4489 
4490 
4491 // Calls an API function.  Allocates HandleScope, extracts returned value
4492 // from handle and propagates exceptions.  Restores context.  stack_space
4493 // - space to be unwound on exit (includes the call JS arguments space and
4494 // the additional space allocated for the fast call).
4495 static void CallApiFunctionAndReturn(
4496     MacroAssembler* masm, Register function_address,
4497     ExternalReference thunk_ref, int stack_space, int32_t stack_space_offset,
4498     MemOperand return_value_operand, MemOperand* context_restore_operand) {
4499   Isolate* isolate = masm->isolate();
4500   ExternalReference next_address =
4501       ExternalReference::handle_scope_next_address(isolate);
4502   const int kNextOffset = 0;
4503   const int kLimitOffset = AddressOffset(
4504       ExternalReference::handle_scope_limit_address(isolate), next_address);
4505   const int kLevelOffset = AddressOffset(
4506       ExternalReference::handle_scope_level_address(isolate), next_address);
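  // next, limit and level live at small fixed offsets from one another, so a
  // single base register (s3) is enough to address all three fields below.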
4507 
4508   DCHECK(function_address.is(a1) || function_address.is(a2));
4509 
4510   Label profiler_disabled;
4511   Label end_profiler_check;
4512   __ li(t9, Operand(ExternalReference::is_profiling_address(isolate)));
4513   __ lb(t9, MemOperand(t9, 0));
4514   __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
4515 
4516   // Additional parameter is the address of the actual callback.
4517   __ li(t9, Operand(thunk_ref));
4518   __ jmp(&end_profiler_check);
4519 
4520   __ bind(&profiler_disabled);
4521   __ mov(t9, function_address);
4522   __ bind(&end_profiler_check);
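  // t9 now holds either the profiling thunk or the callback itself; it is
  // the target of the DirectCEntry call further down.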
4523 
4524   // Allocate HandleScope in callee-save registers.
4525   __ li(s3, Operand(next_address));
4526   __ ld(s0, MemOperand(s3, kNextOffset));
4527   __ ld(s1, MemOperand(s3, kLimitOffset));
4528   __ lw(s2, MemOperand(s3, kLevelOffset));
4529   __ Addu(s2, s2, Operand(1));
4530   __ sw(s2, MemOperand(s3, kLevelOffset));
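  // s0 (next) and s1 (limit) record the previous handle scope state in
  // callee-saved registers so it can be restored after the call; s2 keeps
  // the bumped level for the later consistency check.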
4531 
4532   if (FLAG_log_timer_events) {
4533     FrameScope frame(masm, StackFrame::MANUAL);
4534     __ PushSafepointRegisters();
4535     __ PrepareCallCFunction(1, a0);
4536     __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
4537     __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
4538                      1);
4539     __ PopSafepointRegisters();
4540   }
4541 
4542   // Native call returns to the DirectCEntry stub which redirects to the
4543   // return address pushed on stack (could have moved after GC).
4544   // DirectCEntry stub itself is generated early and never moves.
4545   DirectCEntryStub stub(isolate);
4546   stub.GenerateCall(masm, t9);
4547 
4548   if (FLAG_log_timer_events) {
4549     FrameScope frame(masm, StackFrame::MANUAL);
4550     __ PushSafepointRegisters();
4551     __ PrepareCallCFunction(1, a0);
4552     __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
4553     __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
4554                      1);
4555     __ PopSafepointRegisters();
4556   }
4557 
4558   Label promote_scheduled_exception;
4559   Label delete_allocated_handles;
4560   Label leave_exit_frame;
4561   Label return_value_loaded;
4562 
4563   // Load value from ReturnValue.
4564   __ ld(v0, return_value_operand);
4565   __ bind(&return_value_loaded);
4566 
4567   // No more valid handles (the result handle was the last one). Restore
4568   // previous handle scope.
4569   __ sd(s0, MemOperand(s3, kNextOffset));
4570   if (__ emit_debug_code()) {
4571     __ lw(a1, MemOperand(s3, kLevelOffset));
4572     __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
4573   }
4574   __ Subu(s2, s2, Operand(1));
4575   __ sw(s2, MemOperand(s3, kLevelOffset));
4576   __ ld(at, MemOperand(s3, kLimitOffset));
4577   __ Branch(&delete_allocated_handles, ne, s1, Operand(at));
4578 
4579   // Leave the API exit frame.
4580   __ bind(&leave_exit_frame);
4581 
4582   bool restore_context = context_restore_operand != NULL;
4583   if (restore_context) {
4584     __ ld(cp, *context_restore_operand);
4585   }
4586   if (stack_space_offset != kInvalidStackOffset) {
4587     DCHECK(kCArgsSlotsSize == 0);
4588     __ ld(s0, MemOperand(sp, stack_space_offset));
4589   } else {
4590     __ li(s0, Operand(stack_space));
4591   }
4592   __ LeaveExitFrame(false, s0, !restore_context, NO_EMIT_RETURN,
4593                     stack_space_offset != kInvalidStackOffset);
4594 
4595   // Check if the function scheduled an exception.
4596   __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
4597   __ li(at, Operand(ExternalReference::scheduled_exception_address(isolate)));
4598   __ ld(a5, MemOperand(at));
4599   __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
4600 
4601   __ Ret();
4602 
4603   // Re-throw by promoting a scheduled exception.
4604   __ bind(&promote_scheduled_exception);
4605   __ TailCallRuntime(Runtime::kPromoteScheduledException);
4606 
4607   // HandleScope limit has changed. Delete allocated extensions.
4608   __ bind(&delete_allocated_handles);
4609   __ sd(s1, MemOperand(s3, kLimitOffset));
4610   __ mov(s0, v0);
4611   __ mov(a0, v0);
4612   __ PrepareCallCFunction(1, s1);
4613   __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
4614   __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
4615                    1);
4616   __ mov(v0, s0);
4617   __ jmp(&leave_exit_frame);
4618 }
4619 
4620 void CallApiCallbackStub::Generate(MacroAssembler* masm) {
4621   // ----------- S t a t e -------------
4622   //  -- a0                  : callee
4623   //  -- a4                  : call_data
4624   //  -- a2                  : holder
4625   //  -- a1                  : api_function_address
4626   //  -- cp                  : context
4627   //  --
4628   //  -- sp[0]               : last argument
4629   //  -- ...
4630   //  -- sp[(argc - 1)* 8]   : first argument
4631   //  -- sp[argc * 8]        : receiver
4632   // -----------------------------------
4633 
4634   Register callee = a0;
4635   Register call_data = a4;
4636   Register holder = a2;
4637   Register api_function_address = a1;
4638   Register context = cp;
4639 
4640   typedef FunctionCallbackArguments FCA;
4641 
4642   STATIC_ASSERT(FCA::kContextSaveIndex == 6);
4643   STATIC_ASSERT(FCA::kCalleeIndex == 5);
4644   STATIC_ASSERT(FCA::kDataIndex == 4);
4645   STATIC_ASSERT(FCA::kReturnValueOffset == 3);
4646   STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
4647   STATIC_ASSERT(FCA::kIsolateIndex == 1);
4648   STATIC_ASSERT(FCA::kHolderIndex == 0);
4649   STATIC_ASSERT(FCA::kNewTargetIndex == 7);
4650   STATIC_ASSERT(FCA::kArgsLength == 8);
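  // The pushes below build the FunctionCallbackArguments implicit slots in
  // exactly this index order, which is what the asserts above pin down.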
4651 
4652   // new target
4653   __ PushRoot(Heap::kUndefinedValueRootIndex);
4654 
4655   // Save context, callee and call data.
4656   __ Push(context, callee, call_data);
4657   if (!is_lazy()) {
4658     // Load context from callee.
4659     __ ld(context, FieldMemOperand(callee, JSFunction::kContextOffset));
4660   }
4661 
4662   Register scratch = call_data;
4663   if (!call_data_undefined()) {
4664     __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4665   }
4666   // Push return value and default return value.
4667   __ Push(scratch, scratch);
4668   __ li(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
4669   // Push isolate and holder.
4670   __ Push(scratch, holder);
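  // sp now points at the holder slot (index 0); this address is stored as
  // FunctionCallbackInfo::implicit_args_ below.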
4671 
4672   // Prepare arguments.
4673   __ mov(scratch, sp);
4674 
4675   // Allocate the FunctionCallbackInfo structure in the arguments' space,
4676   // since it's not controlled by GC.
4677   const int kApiStackSpace = 3;
4678 
4679   FrameScope frame_scope(masm, StackFrame::MANUAL);
4680   __ EnterExitFrame(false, kApiStackSpace);
4681 
4682   DCHECK(!api_function_address.is(a0) && !scratch.is(a0));
4683   // a0 = FunctionCallbackInfo&
4684   // The arguments struct is located just above the return address.
4685   __ Daddu(a0, sp, Operand(1 * kPointerSize));
4686   // FunctionCallbackInfo::implicit_args_
4687   __ sd(scratch, MemOperand(a0, 0 * kPointerSize));
4688   // FunctionCallbackInfo::values_
4689   __ Daddu(at, scratch,
4690            Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
4691   __ sd(at, MemOperand(a0, 1 * kPointerSize));
4692   // FunctionCallbackInfo::length_ = argc
4693   // Stored as int field, 32-bit integers within struct on stack always left
4694   // justified by n64 ABI.
4695   __ li(at, Operand(argc()));
4696   __ sw(at, MemOperand(a0, 2 * kPointerSize));
4697 
4698   ExternalReference thunk_ref =
4699       ExternalReference::invoke_function_callback(masm->isolate());
4700 
4701   AllowExternalCallThatCantCauseGC scope(masm);
4702   MemOperand context_restore_operand(
4703       fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
4704   // Stores return the first JS argument.
4705   int return_value_offset = 0;
4706   if (is_store()) {
4707     return_value_offset = 2 + FCA::kArgsLength;
4708   } else {
4709     return_value_offset = 2 + FCA::kReturnValueOffset;
4710   }
4711   MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
4712   int stack_space = 0;
4713   int32_t stack_space_offset = 3 * kPointerSize;
4714   stack_space = argc() + FCA::kArgsLength + 1;
4715   // TODO(adamk): Why are we clobbering this immediately?
4716   stack_space_offset = kInvalidStackOffset;
4717   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
4718                            stack_space_offset, return_value_operand,
4719                            &context_restore_operand);
4720 }
4721 
4722 
4723 void CallApiGetterStub::Generate(MacroAssembler* masm) {
4724   // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
4725   // name below the exit frame to make GC aware of them.
4726   STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
4727   STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
4728   STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
4729   STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
4730   STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
4731   STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
4732   STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
4733   STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
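  // The stores below fill the PropertyCallbackArguments slots in the order
  // given by the indices above, with the property name added at sp[0].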
4734 
4735   Register receiver = ApiGetterDescriptor::ReceiverRegister();
4736   Register holder = ApiGetterDescriptor::HolderRegister();
4737   Register callback = ApiGetterDescriptor::CallbackRegister();
4738   Register scratch = a4;
4739   DCHECK(!AreAliased(receiver, holder, callback, scratch));
4740 
4741   Register api_function_address = a2;
4742 
4743   // Here and below +1 is for name() pushed after the args_ array.
4744   typedef PropertyCallbackArguments PCA;
4745   __ Dsubu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
4746   __ sd(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
4747   __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
4748   __ sd(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
4749   __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4750   __ sd(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
4751   __ sd(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
4752                                     kPointerSize));
4753   __ li(scratch, Operand(ExternalReference::isolate_address(isolate())));
4754   __ sd(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
4755   __ sd(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
4756   // should_throw_on_error -> false
4757   DCHECK(Smi::kZero == nullptr);
4758   __ sd(zero_reg,
4759         MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
4760   __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
4761   __ sd(scratch, MemOperand(sp, 0 * kPointerSize));
4762 
4763   // v8::PropertyCallbackInfo::args_ array and name handle.
4764   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
4765 
4766   // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
4767   __ mov(a0, sp);                               // a0 = Handle<Name>
4768   __ Daddu(a1, a0, Operand(1 * kPointerSize));  // a1 = v8::PCI::args_
4769 
4770   const int kApiStackSpace = 1;
4771   FrameScope frame_scope(masm, StackFrame::MANUAL);
4772   __ EnterExitFrame(false, kApiStackSpace);
4773 
4774   // Create the v8::PropertyCallbackInfo object on the stack and initialize
4775   // its args_ field.
4776   __ sd(a1, MemOperand(sp, 1 * kPointerSize));
4777   __ Daddu(a1, sp, Operand(1 * kPointerSize));
4778   // a1 = v8::PropertyCallbackInfo&
4779 
4780   ExternalReference thunk_ref =
4781       ExternalReference::invoke_accessor_getter_callback(isolate());
4782 
4783   __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
4784   __ ld(api_function_address,
4785         FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
4786 
4787   // +3 is to skip prolog, return address and name handle.
4788   MemOperand return_value_operand(
4789       fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
4790   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
4791                            kStackUnwindSpace, kInvalidStackOffset,
4792                            return_value_operand, NULL);
4793 }
4794 
4795 #undef __
4796 
4797 }  // namespace internal
4798 }  // namespace v8
4799 
4800 #endif  // V8_TARGET_ARCH_MIPS64
4801