// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_MIPS64

#include "src/code-stubs.h"
#include "src/api-arguments.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/mips64/code-stubs-mips64.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)
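
// Throughout this file `__` expands to ACCESS_MASM(masm). Note the MIPS
// branch-delay-slot idiom used below: an instruction emitted directly after
// `__ Ret(USE_DELAY_SLOT)` or a branch taking USE_DELAY_SLOT is placed in
// the delay slot and executes before the control transfer takes effect.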

void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
  __ dsll(t9, a0, kPointerSizeLog2);
  __ Daddu(t9, sp, t9);
  __ sd(a1, MemOperand(t9, 0));
  __ Push(a1);
  __ Push(a2);
  __ Daddu(a0, a0, 3);
  __ TailCallRuntime(Runtime::kNewArray);
}
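
// A note on the convention assumed above: a0 carries the argument count,
// a1 the constructor and a2 (presumably the AllocationSite) the type
// feedback. a1 is written into the stack slot at sp + argc * kPointerSize
// and then pushed again together with a2, so three extra values accompany
// the tail call, which is why a0 is bumped by 3 before Runtime::kNewArray
// runs.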

static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cc);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* rhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs);


void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                               ExternalReference miss) {
  // Update the static counter each time a new code stub is generated.
  isolate()->counters()->code_stubs()->Increment();

  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
  int param_count = descriptor.GetRegisterParameterCount();
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    DCHECK((param_count == 0) ||
           a0.is(descriptor.GetRegisterParameter(param_count - 1)));
    // Push arguments, adjust sp.
    __ Dsubu(sp, sp, Operand(param_count * kPointerSize));
    for (int i = 0; i < param_count; ++i) {
      // Store argument to stack.
      __ sd(descriptor.GetRegisterParameter(i),
            MemOperand(sp, (param_count - 1 - i) * kPointerSize));
    }
    __ CallExternalReference(miss, param_count);
  }

  __ Ret();
}
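
// The spill loop above writes the descriptor's register parameters to the
// stack in order, with parameter 0 ending up farthest from sp, so the miss
// handler sees an ordinary argument array. The DCHECK documents the
// convention that the last register parameter, if present, is a0.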


void DoubleToIStub::Generate(MacroAssembler* masm) {
  Label out_of_range, only_low, negate, done;
  Register input_reg = source();
  Register result_reg = destination();

  int double_offset = offset();
  // Account for saved regs if input is sp.
  if (input_reg.is(sp)) double_offset += 3 * kPointerSize;

  Register scratch =
      GetRegisterThatIsNotOneOf(input_reg, result_reg);
  Register scratch2 =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
  Register scratch3 =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
  DoubleRegister double_scratch = kLithiumScratchDouble;

  __ Push(scratch, scratch2, scratch3);
  if (!skip_fastpath()) {
    // Load double input.
    __ ldc1(double_scratch, MemOperand(input_reg, double_offset));

    // Clear cumulative exception flags and save the FCSR.
    __ cfc1(scratch2, FCSR);
    __ ctc1(zero_reg, FCSR);

    // Try a conversion to a signed integer.
    __ Trunc_w_d(double_scratch, double_scratch);
    // Move the converted value into the result register.
    __ mfc1(scratch3, double_scratch);

    // Retrieve and restore the FCSR.
    __ cfc1(scratch, FCSR);
    __ ctc1(scratch2, FCSR);

    // Check for overflow and NaNs.
    __ And(
        scratch, scratch,
        kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
           | kFCSRInvalidOpFlagMask);
    // If we had no exceptions then set result_reg and we are done.
    Label error;
    __ Branch(&error, ne, scratch, Operand(zero_reg));
    __ Move(result_reg, scratch3);
    __ Branch(&done);
    __ bind(&error);
  }

  // Load the double value and perform a manual truncation.
  Register input_high = scratch2;
  Register input_low = scratch3;

  __ lw(input_low,
        MemOperand(input_reg, double_offset + Register::kMantissaOffset));
  __ lw(input_high,
        MemOperand(input_reg, double_offset + Register::kExponentOffset));

  Label normal_exponent, restore_sign;
  // Extract the biased exponent in result.
  __ Ext(result_reg,
         input_high,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // Check for Infinity and NaNs, which should return 0.
  __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
  __ Movz(result_reg, zero_reg, scratch);
  __ Branch(&done, eq, scratch, Operand(zero_reg));

  // Express exponent as delta to (number of mantissa bits + 31).
  __ Subu(result_reg,
          result_reg,
          Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));

  // If the delta is strictly positive, all bits would be shifted away,
  // which means that we can return 0.
  __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
  __ mov(result_reg, zero_reg);
  __ Branch(&done);

  __ bind(&normal_exponent);
  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
  // Calculate shift.
  __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));

  // Save the sign.
  Register sign = result_reg;
  result_reg = no_reg;
  __ And(sign, input_high, Operand(HeapNumber::kSignMask));

  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
  // to check for this specific case.
  Label high_shift_needed, high_shift_done;
  __ Branch(&high_shift_needed, lt, scratch, Operand(32));
  __ mov(input_high, zero_reg);
  __ Branch(&high_shift_done);
  __ bind(&high_shift_needed);

  // Set the implicit 1 before the mantissa part in input_high.
  __ Or(input_high,
        input_high,
        Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  // Shift the mantissa bits to the correct position.
  // We don't need to clear non-mantissa bits as they will be shifted away.
  // If they weren't, it would mean that the answer is in the 32bit range.
  __ sllv(input_high, input_high, scratch);

  __ bind(&high_shift_done);

  // Replace the shifted bits with bits from the lower mantissa word.
  Label pos_shift, shift_done;
  __ li(at, 32);
  __ subu(scratch, at, scratch);
  __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));

  // Negate scratch.
  __ Subu(scratch, zero_reg, scratch);
  __ sllv(input_low, input_low, scratch);
  __ Branch(&shift_done);

  __ bind(&pos_shift);
  __ srlv(input_low, input_low, scratch);

  __ bind(&shift_done);
  __ Or(input_high, input_high, Operand(input_low));
  // Restore sign if necessary.
  __ mov(scratch, sign);
  result_reg = sign;
  sign = no_reg;
  __ Subu(result_reg, zero_reg, input_high);
  __ Movz(result_reg, input_high, scratch);

  __ bind(&done);

  __ Pop(scratch, scratch2, scratch3);
  __ Ret();
}
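
// For example, truncating the double 2^40 + 1 via the slow path above: the
// biased exponent is 1023 + 40 = 1063, the delta computed above is
// 1063 - (1023 + 52 + 31) = -43, and the shift amount becomes
// -43 + (11 + 52) = 20. The high mantissa word (with the implicit 1 OR-ed
// in) is shifted left by 20, the low word right by 32 - 20 = 12, and their
// OR yields the expected ToInt32 result of 1, with the sign reapplied by
// the conditional negate at the end.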


// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cc) {
  Label not_identical;
  Label heap_number, return_equal;
  Register exp_mask_reg = t1;

  __ Branch(&not_identical, ne, a0, Operand(a1));

  __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));

  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
  // so we do the second best thing - test it ourselves.
  // They are both equal and they are not both Smis so both of them are not
  // Smis. If it's not a heap number, then return equal.
  __ GetObjectType(a0, t0, t0);
  if (cc == less || cc == greater) {
    // Call runtime on identical JSObjects.
    __ Branch(slow, greater, t0, Operand(FIRST_JS_RECEIVER_TYPE));
    // Call runtime on identical symbols since we need to throw a TypeError.
    __ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
  } else {
    __ Branch(&heap_number, eq, t0, Operand(HEAP_NUMBER_TYPE));
    // Comparing JS objects with <=, >= is complicated.
    if (cc != eq) {
      __ Branch(slow, greater, t0, Operand(FIRST_JS_RECEIVER_TYPE));
      // Call runtime on identical symbols since we need to throw a TypeError.
      __ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
      // Normally here we fall through to return_equal, but undefined is
      // special: (undefined == undefined) == true, but
      // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
      if (cc == less_equal || cc == greater_equal) {
        __ Branch(&return_equal, ne, t0, Operand(ODDBALL_TYPE));
        __ LoadRoot(a6, Heap::kUndefinedValueRootIndex);
        __ Branch(&return_equal, ne, a0, Operand(a6));
        DCHECK(is_int16(GREATER) && is_int16(LESS));
        __ Ret(USE_DELAY_SLOT);
        if (cc == le) {
          // undefined <= undefined should fail.
          __ li(v0, Operand(GREATER));
        } else {
          // undefined >= undefined should fail.
          __ li(v0, Operand(LESS));
        }
      }
    }
  }

  __ bind(&return_equal);
  DCHECK(is_int16(GREATER) && is_int16(LESS));
  __ Ret(USE_DELAY_SLOT);
  if (cc == less) {
    __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cc == greater) {
    __ li(v0, Operand(LESS));     // Things aren't greater than themselves.
  } else {
    __ mov(v0, zero_reg);         // Things are <=, >=, ==, === themselves.
  }
  // For less and greater we don't have to check for NaN since the result of
  // x < x is false regardless.  For the others here is some code to check
  // for NaN.
  if (cc != lt && cc != gt) {
    __ bind(&heap_number);
    // It is a heap number, so return non-equal if it's NaN and equal if it's
    // not NaN.

    // The representation of NaN values has all exponent bits (52..62) set,
    // and not all mantissa bits (0..51) clear.
    // Read top bits of double representation (second word of value).
    __ lwu(a6, FieldMemOperand(a0, HeapNumber::kExponentOffset));
    // Test that exponent bits are all set.
    __ And(a7, a6, Operand(exp_mask_reg));
    // If all bits not set (ne cond), then not a NaN, objects are equal.
    __ Branch(&return_equal, ne, a7, Operand(exp_mask_reg));

    // Shift out flag and all exponent bits, retaining only mantissa.
    __ sll(a6, a6, HeapNumber::kNonMantissaBitsInTopWord);
    // Or with all low-bits of mantissa.
    __ lwu(a7, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
    __ Or(v0, a7, Operand(a6));
    // For equal we already have the right value in v0:  Return zero (equal)
    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
    // not (it's a NaN).  For <= and >= we need to load v0 with the failing
    // value if it's a NaN.
    if (cc != eq) {
      // All-zero means Infinity means equal.
      __ Ret(eq, v0, Operand(zero_reg));
      DCHECK(is_int16(GREATER) && is_int16(LESS));
      __ Ret(USE_DELAY_SLOT);
      if (cc == le) {
        __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
      } else {
        __ li(v0, Operand(LESS));     // NaN >= NaN should fail.
      }
    }
  }
  // No fall through here.

  __ bind(&not_identical);
}
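
// Note on the return convention shared by these comparison helpers: the
// result is delivered in v0 and is interpreted like a memcmp result, with
// EQUAL being zero, LESS negative and GREATER positive, so each path only
// needs a constant load or a subtraction.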


static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* both_loaded_as_doubles,
                                    Label* slow,
                                    bool strict) {
  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  Label lhs_is_smi;
  __ JumpIfSmi(lhs, &lhs_is_smi);
  // Rhs is a Smi.
  // Check whether the non-smi is a heap number.
  __ GetObjectType(lhs, t0, t0);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal (lhs is already not zero).
    __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE));
    __ mov(v0, lhs);
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE));
  }
  // Rhs is a smi, lhs is a number.
  // Convert smi rhs to double.
  __ SmiUntag(at, rhs);
  __ mtc1(at, f14);
  __ cvt_d_w(f14, f14);
  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));

  // We now have both loaded as doubles.
  __ jmp(both_loaded_as_doubles);

  __ bind(&lhs_is_smi);
  // Lhs is a Smi.  Check whether the non-smi is a heap number.
  __ GetObjectType(rhs, t0, t0);
  if (strict) {
    // If rhs was not a number and lhs was a Smi then strict equality cannot
    // succeed. Return non-equal.
    __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE));
    __ li(v0, Operand(1));
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE));
  }

  // Lhs is a smi, rhs is a number.
  // Convert smi lhs to double.
  __ SmiUntag(at, lhs);
  __ mtc1(at, f12);
  __ cvt_d_w(f12, f12);
  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  // Fall through to both_loaded_as_doubles.
}
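
// The smi-to-double conversions above move the untagged 32-bit integer
// into an FPU register with mtc1 and widen it with cvt_d_w, while the
// heap-number operand is loaded directly as a double from its value field,
// leaving the pair in f12/f14 for the caller.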


static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs) {
    // If either operand is a JS object or an oddball value, then they are
    // not equal since their pointers are different.
    // There is no test for undetectability in strict equality.
    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
    Label first_non_object;
    // Get the type of the first operand into a2 and compare it with
    // FIRST_JS_RECEIVER_TYPE.
    __ GetObjectType(lhs, a2, a2);
    __ Branch(&first_non_object, less, a2, Operand(FIRST_JS_RECEIVER_TYPE));

    // Return non-zero.
    Label return_not_equal;
    __ bind(&return_not_equal);
    __ Ret(USE_DELAY_SLOT);
    __ li(v0, Operand(1));

    __ bind(&first_non_object);
    // Check for oddballs: true, false, null, undefined.
    __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));

    __ GetObjectType(rhs, a3, a3);
    __ Branch(&return_not_equal, greater, a3, Operand(FIRST_JS_RECEIVER_TYPE));

    // Check for oddballs: true, false, null, undefined.
    __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));

    // Now that we have the types we might as well check for
    // internalized-internalized.
    STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
    __ Or(a2, a2, Operand(a3));
    __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
    __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
}
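
// The final check above exploits the instance-type encoding: since both
// kInternalizedTag and kStringTag are zero, OR-ing the two instance types
// and masking with kIsNotStringMask | kIsNotInternalizedMask yields zero
// exactly when both operands are internalized strings; pointer inequality
// (already established by the caller) then implies the values differ.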


static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
                                       Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers,
                                       Label* slow) {
  __ GetObjectType(lhs, a3, a2);
  __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
  __ ld(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
  // If first was a heap number & second wasn't, go to slow case.
  __ Branch(slow, ne, a3, Operand(a2));

  // Both are heap numbers. Load them up then jump to the code we have
  // for that.
  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));

  __ jmp(both_loaded_as_doubles);
}


// Fast negative check for internalized-to-internalized equality.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                     Register lhs, Register rhs,
                                                     Label* possible_strings,
                                                     Label* runtime_call) {
  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));
  // a2 is the object type of lhs (set up by the caller).
  Label object_test, return_equal, return_unequal, undetectable;
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ And(at, a2, Operand(kIsNotStringMask));
  __ Branch(&object_test, ne, at, Operand(zero_reg));
  __ And(at, a2, Operand(kIsNotInternalizedMask));
  __ Branch(possible_strings, ne, at, Operand(zero_reg));
  __ GetObjectType(rhs, a3, a3);
  __ Branch(runtime_call, ge, a3, Operand(FIRST_NONSTRING_TYPE));
  __ And(at, a3, Operand(kIsNotInternalizedMask));
  __ Branch(possible_strings, ne, at, Operand(zero_reg));

  // Both are internalized. We already checked they weren't the same pointer so
  // they are not equal. Return non-equal by returning the non-zero object
  // pointer in v0.
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);  // In delay slot.

  __ bind(&object_test);
  __ ld(a2, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ ld(a3, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ lbu(t0, FieldMemOperand(a2, Map::kBitFieldOffset));
  __ lbu(t1, FieldMemOperand(a3, Map::kBitFieldOffset));
  __ And(at, t0, Operand(1 << Map::kIsUndetectable));
  __ Branch(&undetectable, ne, at, Operand(zero_reg));
  __ And(at, t1, Operand(1 << Map::kIsUndetectable));
  __ Branch(&return_unequal, ne, at, Operand(zero_reg));

  __ GetInstanceType(a2, a2);
  __ Branch(runtime_call, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
  __ GetInstanceType(a3, a3);
  __ Branch(runtime_call, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE));

  __ bind(&return_unequal);
  // Return non-equal by returning the non-zero object pointer in v0.
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);  // In delay slot.

  __ bind(&undetectable);
  __ And(at, t1, Operand(1 << Map::kIsUndetectable));
  __ Branch(&return_unequal, eq, at, Operand(zero_reg));

  // If both sides are JSReceivers, then the result is false according to
  // the HTML specification, which says that only comparisons with null or
  // undefined are affected by special casing for document.all.
  __ GetInstanceType(a2, a2);
  __ Branch(&return_equal, eq, a2, Operand(ODDBALL_TYPE));
  __ GetInstanceType(a3, a3);
  __ Branch(&return_unequal, ne, a3, Operand(ODDBALL_TYPE));

  __ bind(&return_equal);
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(EQUAL));  // In delay slot.
}
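
// For reference, the undetectable handling above implements the HTML spec
// semantics for document.all: comparing it loosely against null or
// undefined (oddballs) yields true, while comparing two distinct
// undetectable receivers, or an undetectable receiver against an ordinary
// object, yields false.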


static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
                                         Register scratch,
                                         CompareICState::State expected,
                                         Label* fail) {
  Label ok;
  if (expected == CompareICState::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareICState::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
                DONT_DO_SMI_CHECK);
  }
  // We could be strict about internalized/string here, but as long as
  // hydrogen doesn't care, the stub doesn't have to care either.
  __ bind(&ok);
}


// On entry a1 (lhs) and a0 (rhs) are the values to be compared.
// On exit v0 is 0, positive or negative to indicate the result of
// the comparison.
void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
  Register lhs = a1;
  Register rhs = a0;
  Condition cc = GetCondition();

  Label miss;
  CompareICStub_CheckInputType(masm, lhs, a2, left(), &miss);
  CompareICStub_CheckInputType(masm, rhs, a3, right(), &miss);

  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles;

  Label not_two_smis, smi_done;
  __ Or(a2, a1, a0);
  __ JumpIfNotSmi(a2, &not_two_smis);
  __ SmiUntag(a1);
  __ SmiUntag(a0);

  __ Ret(USE_DELAY_SLOT);
  __ dsubu(v0, a1, a0);
  __ bind(&not_two_smis);

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical.  Either returns the answer
  // or goes to slow.  Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, &slow, cc);

  // If either is a Smi (we know that not both are), then they can only
  // be strictly equal if the other is a HeapNumber.
  STATIC_ASSERT(kSmiTag == 0);
  DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
  __ And(a6, lhs, Operand(rhs));
  __ JumpIfNotSmi(a6, &not_smis, a4);
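  // Smis have a zero tag bit, so the AND above has a zero tag bit exactly
  // when at least one operand is a smi; combined with the not_two_smis
  // fast case above, exactly one operand is a smi at this point.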
  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
  // 2) Go to slow.
  // 3) Fall through to both_loaded_as_doubles.
  // 4) Jump to rhs_not_nan.
  // In cases 3 and 4 we have found out we were dealing with a number-number
  // comparison and the numbers have been loaded into f12 and f14 as doubles,
  // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
  EmitSmiNonsmiComparison(masm, lhs, rhs,
                          &both_loaded_as_doubles, &slow, strict());

  __ bind(&both_loaded_as_doubles);
  // f12, f14 are the double representations of the left hand side
  // and the right hand side if we have FPU. Otherwise a2, a3 represent
  // left hand side and a0, a1 represent right hand side.

  Label nan;
  __ li(a4, Operand(LESS));
  __ li(a5, Operand(GREATER));
  __ li(a6, Operand(EQUAL));

  // Check if either rhs or lhs is NaN.
  __ BranchF(NULL, &nan, eq, f12, f14);

  // Check if LESS condition is satisfied. If true, move conditionally
  // result to v0.
  if (kArchVariant != kMips64r6) {
    __ c(OLT, D, f12, f14);
    __ Movt(v0, a4);
    // Use the previous check to conditionally store the opposite result
    // (GREATER) to v0. If rhs is equal to lhs, this will be corrected by
    // the next check.
    __ Movf(v0, a5);
    // Check if EQUAL condition is satisfied. If true, move conditionally
    // result to v0.
    __ c(EQ, D, f12, f14);
    __ Movt(v0, a6);
  } else {
    Label skip;
    __ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
    __ mov(v0, a4);  // Return LESS as result.

    __ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
    __ mov(v0, a6);  // Return EQUAL as result.

    __ mov(v0, a5);  // Return GREATER as result.
    __ bind(&skip);
  }
  __ Ret();

  __ bind(&nan);
  // NaN comparisons always fail.
  // Load whatever we need in v0 to make the comparison fail.
  DCHECK(is_int16(GREATER) && is_int16(LESS));
  __ Ret(USE_DELAY_SLOT);
  if (cc == lt || cc == le) {
    __ li(v0, Operand(GREATER));
  } else {
    __ li(v0, Operand(LESS));
  }


  __ bind(&not_smis);
  // At this point we know we are dealing with two different objects,
  // and neither of them is a Smi. The objects are in lhs_ and rhs_.
  if (strict()) {
    // This returns non-equal for some object types, or falls through if it
    // was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
  }

  Label check_for_internalized_strings;
  Label flat_string_check;
  // Check for heap-number-heap-number comparison. Can jump to slow case,
  // or load both doubles and jump to the code that handles
  // that case. If the inputs are not doubles then jumps to
  // check_for_internalized_strings.
  // In this case a2 will contain the type of lhs_.
  EmitCheckForTwoHeapNumbers(masm,
                             lhs,
                             rhs,
                             &both_loaded_as_doubles,
                             &check_for_internalized_strings,
                             &flat_string_check);

  __ bind(&check_for_internalized_strings);
  if (cc == eq && !strict()) {
    // Returns an answer for two internalized strings or two
    // detectable objects.
    // Otherwise jumps to string case or not both strings case.
    // Assumes that a2 is the type of lhs_ on entry.
    EmitCheckForInternalizedStringsOrObjects(
        masm, lhs, rhs, &flat_string_check, &slow);
  }

  // Check for both being sequential one-byte strings,
  // and inline if that is the case.
  __ bind(&flat_string_check);

  __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, a2, a3, &slow);

  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
                      a3);
  if (cc == eq) {
    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, a2, a3, a4);
  } else {
    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, a2, a3, a4,
                                                    a5);
  }
  // Never falls through to here.

  __ bind(&slow);
  if (cc == eq) {
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ Push(cp);
      __ Call(strict() ? isolate()->builtins()->StrictEqual()
                       : isolate()->builtins()->Equal(),
              RelocInfo::CODE_TARGET);
      __ Pop(cp);
    }
    // Turn true into 0 and false into some non-zero value.
    STATIC_ASSERT(EQUAL == 0);
    __ LoadRoot(a0, Heap::kTrueValueRootIndex);
    __ Ret(USE_DELAY_SLOT);
    __ subu(v0, v0, a0);  // In delay slot.
  } else {
    // Prepare for call to builtin. Push object pointers: lhs (a1) first,
    // rhs (a0) second.
    __ Push(lhs, rhs);
    int ncr;  // NaN compare result.
    if (cc == lt || cc == le) {
      ncr = GREATER;
    } else {
      DCHECK(cc == gt || cc == ge);  // Remaining cases.
      ncr = LESS;
    }
    __ li(a0, Operand(Smi::FromInt(ncr)));
    __ push(a0);

    // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
    // tagged as a small integer.
    __ TailCallRuntime(Runtime::kCompare);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ mov(t9, ra);
  __ pop(ra);
  __ PushSafepointRegisters();
  __ Jump(t9);
}


void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ mov(t9, ra);
  __ pop(ra);
  __ PopSafepointRegisters();
  __ Jump(t9);
}
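
// Both register-state stubs above use the same trampoline trick: the
// continuation address arrives in ra, the caller's original ra is popped
// back off the stack, and control leaves through t9, so the safepoint
// register block is pushed or popped in between without disturbing the
// normal return path.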


void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  __ MultiPush(kJSCallerSaved | ra.bit());
  if (save_doubles()) {
    __ MultiPushFPU(kCallerSavedFPU);
  }
  const int argument_count = 1;
  const int fp_argument_count = 0;
  const Register scratch = a1;

  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
  __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(isolate()),
      argument_count);
  if (save_doubles()) {
    __ MultiPopFPU(kCallerSavedFPU);
  }

  __ MultiPop(kJSCallerSaved | ra.bit());
  __ Ret();
}


void MathPowStub::Generate(MacroAssembler* masm) {
  const Register exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent.is(a2));
  const DoubleRegister double_base = f2;
  const DoubleRegister double_exponent = f4;
  const DoubleRegister double_result = f0;
  const DoubleRegister double_scratch = f6;
  const FPURegister single_scratch = f8;
  const Register scratch = t1;
  const Register scratch2 = a7;

  Label call_runtime, done, int_exponent;
  if (exponent_type() == TAGGED) {
    // Base is already in double_base.
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ ldc1(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type() != INTEGER) {
    Label int_exponent_convert;
    // Detect integer exponents stored as double.
    __ EmitFPUTruncate(kRoundToMinusInf,
                       scratch,
                       double_exponent,
                       at,
                       double_scratch,
                       scratch2,
                       kCheckForInexactConversion);
    // scratch2 == 0 means there was no conversion error.
    __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));

    __ push(ra);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch2);
      __ MovToFloatParameters(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()),
          0, 2);
    }
    __ pop(ra);
    __ MovFromFloatResult(double_result);
    __ jmp(&done);

    __ bind(&int_exponent_convert);
  }

  // Calculate power with integer exponent.
  __ bind(&int_exponent);

  // Get two copies of exponent in the registers scratch and exponent.
  if (exponent_type() == INTEGER) {
    __ mov(scratch, exponent);
  } else {
    // Exponent has previously been stored into scratch as untagged integer.
    __ mov(exponent, scratch);
  }

  __ mov_d(double_scratch, double_base);  // Back up base.
  __ Move(double_result, 1.0);

  // Get absolute value of exponent.
  Label positive_exponent, bail_out;
  __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
  __ Dsubu(scratch, zero_reg, scratch);
  // Check whether Dsubu overflowed and produced a negative result
  // (happens only when the input is MIN_INT).
  __ Branch(&bail_out, gt, zero_reg, Operand(scratch));
  __ bind(&positive_exponent);
  __ Assert(ge, kUnexpectedNegativeValue, scratch, Operand(zero_reg));

  Label while_true, no_carry, loop_end;
  __ bind(&while_true);

  __ And(scratch2, scratch, 1);

  __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
  __ mul_d(double_result, double_result, double_scratch);
  __ bind(&no_carry);

  __ dsra(scratch, scratch, 1);

  __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
  __ mul_d(double_scratch, double_scratch, double_scratch);

  __ Branch(&while_true);

  __ bind(&loop_end);

  __ Branch(&done, ge, exponent, Operand(zero_reg));
  __ Move(double_scratch, 1.0);
  __ div_d(double_result, double_scratch, double_result);
  // Test whether result is zero.  Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);

  // double_exponent may not contain the exponent value if the input was a
  // smi.  We set it with exponent value before bailing out.
  __ bind(&bail_out);
  __ mtc1(exponent, single_scratch);
  __ cvt_d_w(double_exponent, single_scratch);

  // Returning or bailing out.
  __ push(ra);
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ PrepareCallCFunction(0, 2, scratch);
    __ MovToFloatParameters(double_base, double_exponent);
    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
                     0, 2);
  }
  __ pop(ra);
  __ MovFromFloatResult(double_result);

  __ bind(&done);
  __ Ret();
}
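
// The integer-exponent loop above is binary exponentiation: on each
// iteration the low bit of the (absolute) exponent decides whether the
// current square of the base is multiplied into the result, then the
// exponent is halved. For exponent 5 (binary 101) that is two squarings
// and two multiplies instead of four sequential multiplies; a negative
// exponent is handled afterwards by taking the reciprocal.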

bool CEntryStub::NeedsImmovableCode() {
  return true;
}


void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  CEntryStub::GenerateAheadOfTime(isolate);
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
  CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
  CreateWeakCellStub::GenerateAheadOfTime(isolate);
  BinaryOpICStub::GenerateAheadOfTime(isolate);
  StoreRegistersStateStub::GenerateAheadOfTime(isolate);
  RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
  StoreFastElementStub::GenerateAheadOfTime(isolate);
}


void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  StoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  RestoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void CodeStub::GenerateFPStubs(Isolate* isolate) {
  // Generate if not already in cache.
  SaveFPRegsMode mode = kSaveFPRegs;
  CEntryStub(isolate, 1, mode).GetCode();
  StoreBufferOverflowStub(isolate, mode).GetCode();
}


void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
  stub.GetCode();
}


void CEntryStub::Generate(MacroAssembler* masm) {
  // Called from JavaScript; parameters are on stack as if calling JS function
  // a0: number of arguments including receiver
  // a1: pointer to builtin function
  // fp: frame pointer    (restored after C call)
  // sp: stack pointer    (restored as callee's sp after C call)
  // cp: current context  (C callee-saved)
  //
  // If argv_in_register():
  // a2: pointer to the first argument

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  if (argv_in_register()) {
    // Move argv into the correct register.
    __ mov(s1, a2);
  } else {
    // Compute the argv pointer in a callee-saved register.
    __ Dlsa(s1, sp, a0, kPointerSizeLog2);
    __ Dsubu(s1, s1, kPointerSize);
  }

  // Enter the exit frame that transitions from JavaScript to C++.
  FrameScope scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(save_doubles(), 0, is_builtin_exit()
                                           ? StackFrame::BUILTIN_EXIT
                                           : StackFrame::EXIT);

  // s0: number of arguments  including receiver (C callee-saved)
  // s1: pointer to first argument (C callee-saved)
  // s2: pointer to builtin function (C callee-saved)

  // Prepare arguments for C routine.
  // a0 = argc
  __ mov(s0, a0);
  __ mov(s2, a1);

  // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
  // also need to reserve the 4 argument slots on the stack.

  __ AssertStackIsAligned();

  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  int result_stack_size;
  if (result_size() <= 2) {
    // a0 = argc, a1 = argv, a2 = isolate
    __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
    __ mov(a1, s1);
    result_stack_size = 0;
  } else {
    DCHECK_EQ(3, result_size());
    // Allocate additional space for the result.
    result_stack_size =
        ((result_size() * kPointerSize) + frame_alignment_mask) &
        ~frame_alignment_mask;
    __ Dsubu(sp, sp, Operand(result_stack_size));

    // a0 = hidden result argument, a1 = argc, a2 = argv, a3 = isolate.
    __ li(a3, Operand(ExternalReference::isolate_address(isolate())));
    __ mov(a2, s1);
    __ mov(a1, a0);
    __ mov(a0, sp);
  }

  // To let the GC traverse the return address of the exit frames, we need to
  // know where the return address is. The CEntryStub is unmovable, so
  // we can store the address on the stack to be able to find it again and
  // we never have to restore it, because it will not change.
  { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
    int kNumInstructionsToJump = 4;
    Label find_ra;
    // Adjust the value in ra to point to the correct return location, 2nd
    // instruction past the real call into C code (the jalr(t9)), and push it.
    // This is the return address of the exit frame.
    if (kArchVariant >= kMips64r6) {
      __ addiupc(ra, kNumInstructionsToJump + 1);
    } else {
      // This branch-and-link sequence is needed to find the current PC on mips
      // before r6, saved to the ra register.
      __ bal(&find_ra);  // bal exposes branch delay slot.
      __ Daddu(ra, ra, kNumInstructionsToJump * Instruction::kInstrSize);
    }
    __ bind(&find_ra);

    // This spot was reserved in EnterExitFrame.
    __ sd(ra, MemOperand(sp, result_stack_size));
    // Stack space reservation moved to the branch delay slot below.
    // Stack is still aligned.

    // Call the C routine.
    __ mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
    __ jalr(t9);
    // Set up sp in the delay slot.
    __ daddiu(sp, sp, -kCArgsSlotsSize);
    // Make sure the stored 'ra' points to this position.
    DCHECK_EQ(kNumInstructionsToJump,
              masm->InstructionsGeneratedSince(&find_ra));
  }
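
  // To recap the pinned sequence above: ra was set to point four
  // instructions past find_ra, which is exactly the instruction following
  // the jalr/delay-slot pair, and the same address was stored into the exit
  // frame slot so the GC can locate the return address while the C call is
  // in progress; the DCHECK verifies the instruction count.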
  if (result_size() > 2) {
    DCHECK_EQ(3, result_size());
    // Read result values stored on stack.
    __ ld(a0, MemOperand(v0, 2 * kPointerSize));
    __ ld(v1, MemOperand(v0, 1 * kPointerSize));
    __ ld(v0, MemOperand(v0, 0 * kPointerSize));
  }
  // Result returned in v0, v1:v0 or a0:v1:v0 - do not destroy these registers!

  // Check result for exception sentinel.
  Label exception_returned;
  __ LoadRoot(a4, Heap::kExceptionRootIndex);
  __ Branch(&exception_returned, eq, a4, Operand(v0));

  // Check that there is no pending exception, otherwise we
  // should have returned the exception sentinel.
  if (FLAG_debug_code) {
    Label okay;
    ExternalReference pending_exception_address(
        Isolate::kPendingExceptionAddress, isolate());
    __ li(a2, Operand(pending_exception_address));
    __ ld(a2, MemOperand(a2));
    __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
    // Cannot use the Check macro here, as it attempts to generate a call
    // into the runtime.
    __ Branch(&okay, eq, a4, Operand(a2));
    __ stop("Unexpected pending exception");
    __ bind(&okay);
  }

  // Exit C frame and return.
  // v0:v1: result
  // sp: stack pointer
  // fp: frame pointer
  Register argc;
  if (argv_in_register()) {
    // We don't want to pop arguments so set argc to no_reg.
    argc = no_reg;
  } else {
    // s0: still holds argc (callee-saved).
    argc = s0;
  }
  __ LeaveExitFrame(save_doubles(), argc, true, EMIT_RETURN);

  // Handling of exception.
  __ bind(&exception_returned);

  ExternalReference pending_handler_context_address(
      Isolate::kPendingHandlerContextAddress, isolate());
  ExternalReference pending_handler_code_address(
      Isolate::kPendingHandlerCodeAddress, isolate());
  ExternalReference pending_handler_offset_address(
      Isolate::kPendingHandlerOffsetAddress, isolate());
  ExternalReference pending_handler_fp_address(
      Isolate::kPendingHandlerFPAddress, isolate());
  ExternalReference pending_handler_sp_address(
      Isolate::kPendingHandlerSPAddress, isolate());

  // Ask the runtime for help to determine the handler. This will set v0 to
  // contain the current pending exception, don't clobber it.
  ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
                                 isolate());
  {
    FrameScope scope(masm, StackFrame::MANUAL);
    __ PrepareCallCFunction(3, 0, a0);
    __ mov(a0, zero_reg);
    __ mov(a1, zero_reg);
    __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
    __ CallCFunction(find_handler, 3);
  }

  // Retrieve the handler context, SP and FP.
  __ li(cp, Operand(pending_handler_context_address));
  __ ld(cp, MemOperand(cp));
  __ li(sp, Operand(pending_handler_sp_address));
  __ ld(sp, MemOperand(sp));
  __ li(fp, Operand(pending_handler_fp_address));
  __ ld(fp, MemOperand(fp));

  // If the handler is a JS frame, restore the context to the frame. Note that
  // the context will be set to (cp == 0) for non-JS frames.
  Label zero;
  __ Branch(&zero, eq, cp, Operand(zero_reg));
  __ sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ bind(&zero);

  // Compute the handler entry address and jump to it.
  __ li(a1, Operand(pending_handler_code_address));
  __ ld(a1, MemOperand(a1));
  __ li(a2, Operand(pending_handler_offset_address));
  __ ld(a2, MemOperand(a2));
  __ Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Daddu(t9, a1, a2);
  __ Jump(t9);
}


void JSEntryStub::Generate(MacroAssembler* masm) {
  Label invoke, handler_entry, exit;
  Isolate* isolate = masm->isolate();

  // TODO(plind): unify the ABI description here.
  // Registers:
  // a0: entry address
  // a1: function
  // a2: receiver
  // a3: argc
  // a4: argv (on mips64)

  // Stack:
  // 0 arg slots on mips64 (4 args slots on mips)
  // args -- in register a4 on mips64, on stack on mips

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  // Save callee saved registers on the stack.
  __ MultiPush(kCalleeSaved | ra.bit());

  // Save callee-saved FPU registers.
  __ MultiPushFPU(kCalleeSavedFPU);
  // Set up the reserved register for 0.0.
  __ Move(kDoubleRegZero, 0.0);

  // Load argv in s0 register.
  __ mov(s0, a4);  // 5th parameter arrives in the a4 register on mips64.

  __ InitializeRootRegister();

  // We build an EntryFrame.
  __ li(a7, Operand(-1));  // Push a bad frame pointer to fail if it is used.
  StackFrame::Type marker = type();
  __ li(a6, Operand(StackFrame::TypeToMarker(marker)));
  __ li(a5, Operand(StackFrame::TypeToMarker(marker)));
  ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
  __ li(a4, Operand(c_entry_fp));
  __ ld(a4, MemOperand(a4));
  __ Push(a7, a6, a5, a4);
  // Set up frame pointer for the frame to be pushed.
  __ daddiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);

  // Registers:
  // a0: entry_address
  // a1: function
  // a2: receiver_pointer
  // a3: argc
  // s0: argv
  //
  // Stack:
  // caller fp          |
  // function slot      | entry frame
  // context slot       |
  // bad fp (0xff...f)  |
  // callee saved registers + ra
  // [ O32: 4 args slots]
  // args

  // If this is the outermost JS call, set js_entry_sp value.
  Label non_outermost_js;
  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
  __ li(a5, Operand(ExternalReference(js_entry_sp)));
  __ ld(a6, MemOperand(a5));
  __ Branch(&non_outermost_js, ne, a6, Operand(zero_reg));
  __ sd(fp, MemOperand(a5));
  __ li(a4, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
  Label cont;
  __ b(&cont);
  __ nop();   // Branch delay slot nop.
  __ bind(&non_outermost_js);
  __ li(a4, Operand(StackFrame::INNER_JSENTRY_FRAME));
  __ bind(&cont);
  __ push(a4);

  // Jump to a faked try block that does the invoke, with a faked catch
  // block that sets the pending exception.
  __ jmp(&invoke);
  __ bind(&handler_entry);
  handler_offset_ = handler_entry.pos();
  // Caught exception: Store result (exception) in the pending exception
  // field in the JSEnv and return a failure sentinel.  Coming in here the
  // fp will be invalid because the PushStackHandler below sets it to 0 to
  // signal the existence of the JSEntry frame.
  __ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                      isolate)));
  __ sd(v0, MemOperand(a4));  // We come back from 'invoke'. result is in v0.
  __ LoadRoot(v0, Heap::kExceptionRootIndex);
  __ b(&exit);  // b exposes branch delay slot.
  __ nop();   // Branch delay slot nop.

  // Invoke: Link this frame into the handler chain.
  __ bind(&invoke);
  __ PushStackHandler();
  // If an exception not caught by another handler occurs, this handler
  // returns control to the code after the bal(&invoke) above, which
  // restores all kCalleeSaved registers (including cp and fp) to their
  // saved values before returning a failure to C.

  // Invoke the function by calling through JS entry trampoline builtin.
  // Notice that we cannot store a reference to the trampoline code directly in
  // this stub, because runtime stubs are not traversed when doing GC.

  // Registers:
  // a0: entry_address
  // a1: function
  // a2: receiver_pointer
  // a3: argc
  // s0: argv
  //
  // Stack:
  // handler frame
  // entry frame
  // callee saved registers + ra
  // [ O32: 4 args slots]
  // args

  if (type() == StackFrame::ENTRY_CONSTRUCT) {
    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
                                      isolate);
    __ li(a4, Operand(construct_entry));
  } else {
    ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
    __ li(a4, Operand(entry));
  }
  __ ld(t9, MemOperand(a4));  // Deref address.
  // Call JSEntryTrampoline.
  __ daddiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
  __ Call(t9);

  // Unlink this frame from the handler chain.
  __ PopStackHandler();

  __ bind(&exit);  // v0 holds result
  // Check if the current stack frame is marked as the outermost JS frame.
  Label non_outermost_js_2;
  __ pop(a5);
  __ Branch(&non_outermost_js_2, ne, a5,
            Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
  __ li(a5, Operand(ExternalReference(js_entry_sp)));
  __ sd(zero_reg, MemOperand(a5));
  __ bind(&non_outermost_js_2);

  // Restore the top frame descriptors from the stack.
  __ pop(a5);
  __ li(a4, Operand(ExternalReference(Isolate::kCEntryFPAddress,
                                      isolate)));
  __ sd(a5, MemOperand(a4));

  // Reset the stack to the callee saved registers.
  __ daddiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);

  // Restore callee-saved fpu registers.
  __ MultiPopFPU(kCalleeSavedFPU);

  // Restore callee saved registers from the stack.
  __ MultiPop(kCalleeSaved | ra.bit());
  // Return.
  __ Jump(ra);
}
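
// Note on the js_entry_sp bookkeeping above: the frame pointer of the
// outermost JSEntry frame is recorded only when the slot is still zero and
// is cleared again on the matching exit, so nested JSEntry invocations
// leave it untouched.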

void RegExpExecStub::Generate(MacroAssembler* masm) {
  // Jump straight to the runtime if native RegExp is not selected at
  // compile time, or if entering the regexp code generated for it is
  // turned off by a runtime switch.
#ifdef V8_INTERPRETED_REGEXP
  __ TailCallRuntime(Runtime::kRegExpExec);
#else  // V8_INTERPRETED_REGEXP

  // Stack frame on entry.
  //  sp[0]:  last_match_info (expected JSArray)
  //  sp[8]:  previous index
  //  sp[16]: subject string
  //  sp[24]: JSRegExp object
1282 
1283   const int kLastMatchInfoOffset = 0 * kPointerSize;
1284   const int kPreviousIndexOffset = 1 * kPointerSize;
1285   const int kSubjectOffset = 2 * kPointerSize;
1286   const int kJSRegExpOffset = 3 * kPointerSize;
1287 
1288   Label runtime;
1289   // Allocation of registers for this function. These are in callee save
1290   // registers and will be preserved by the call to the native RegExp code, as
1291   // this code is called using the normal C calling convention. When calling
1292   // directly from generated code the native RegExp code will not do a GC and
1293   // therefore the content of these registers are safe to use after the call.
1294   // MIPS - using s0..s2, since we are not using CEntry Stub.
1295   Register subject = s0;
1296   Register regexp_data = s1;
1297   Register last_match_info_elements = s2;
1298 
1299   // Ensure that a RegExp stack is allocated.
1300   ExternalReference address_of_regexp_stack_memory_address =
1301       ExternalReference::address_of_regexp_stack_memory_address(
1302           isolate());
1303   ExternalReference address_of_regexp_stack_memory_size =
1304       ExternalReference::address_of_regexp_stack_memory_size(isolate());
1305   __ li(a0, Operand(address_of_regexp_stack_memory_size));
1306   __ ld(a0, MemOperand(a0, 0));
1307   __ Branch(&runtime, eq, a0, Operand(zero_reg));
1308 
1309   // Check that the first argument is a JSRegExp object.
1310   __ ld(a0, MemOperand(sp, kJSRegExpOffset));
1311   STATIC_ASSERT(kSmiTag == 0);
1312   __ JumpIfSmi(a0, &runtime);
1313   __ GetObjectType(a0, a1, a1);
1314   __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
1315 
1316   // Check that the RegExp has been compiled (data contains a fixed array).
1317   __ ld(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
1318   if (FLAG_debug_code) {
1319     __ SmiTst(regexp_data, a4);
1320     __ Check(nz,
1321              kUnexpectedTypeForRegExpDataFixedArrayExpected,
1322              a4,
1323              Operand(zero_reg));
1324     __ GetObjectType(regexp_data, a0, a0);
1325     __ Check(eq,
1326              kUnexpectedTypeForRegExpDataFixedArrayExpected,
1327              a0,
1328              Operand(FIXED_ARRAY_TYPE));
1329   }
1330 
1331   // regexp_data: RegExp data (FixedArray)
1332   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
1333   __ ld(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
1334   __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
1335 
1336   // regexp_data: RegExp data (FixedArray)
1337   // Check that the number of captures fit in the static offsets vector buffer.
1338   __ ld(a2,
1339          FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
1340   // Check (number_of_captures + 1) * 2 <= offsets vector size
1341   // Or          number_of_captures * 2 <= offsets vector size - 2
1342   // Or          number_of_captures     <= offsets vector size / 2 - 1
1343   // Multiplying by 2 comes for free since a2 is smi-tagged.
1344   STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
1345   int temp = Isolate::kJSRegexpStaticOffsetsVectorSize / 2 - 1;
1346   __ Branch(&runtime, hi, a2, Operand(Smi::FromInt(temp)));
1347 
1348   // Reset offset for possibly sliced string.
1349   __ mov(t0, zero_reg);
1350   __ ld(subject, MemOperand(sp, kSubjectOffset));
1351   __ JumpIfSmi(subject, &runtime);
1352   __ mov(a3, subject);  // Make a copy of the original subject string.
1353 
1354   // subject: subject string
1355   // a3: subject string
1356   // regexp_data: RegExp data (FixedArray)
1357   // Handle subject string according to its encoding and representation:
1358   // (1) Sequential string?  If yes, go to (4).
1359   // (2) Sequential or cons?  If not, go to (5).
1360   // (3) Cons string.  If the string is flat, replace subject with first string
1361   //     and go to (1). Otherwise bail out to runtime.
1362   // (4) Sequential string.  Load regexp code according to encoding.
1363   // (E) Carry on.
1364   /// [...]
1365 
1366   // Deferred code at the end of the stub:
1367   // (5) Long external string?  If not, go to (7).
1368   // (6) External string.  Make it, offset-wise, look like a sequential string.
1369   //     Go to (4).
1370   // (7) Short external string or not a string?  If yes, bail out to runtime.
1371   // (8) Sliced or thin string.  Replace subject with parent.  Go to (1).
1372 
1373   Label check_underlying;   // (1)
1374   Label seq_string;         // (4)
1375   Label not_seq_nor_cons;   // (5)
1376   Label external_string;    // (6)
1377   Label not_long_external;  // (7)
1378 
1379   __ bind(&check_underlying);
1380   __ ld(a2, FieldMemOperand(subject, HeapObject::kMapOffset));
1381   __ lbu(a0, FieldMemOperand(a2, Map::kInstanceTypeOffset));
1382 
1383   // (1) Sequential string?  If yes, go to (4).
1384   __ And(a1,
1385          a0,
1386          Operand(kIsNotStringMask |
1387                  kStringRepresentationMask |
1388                  kShortExternalStringMask));
1389   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
1390   __ Branch(&seq_string, eq, a1, Operand(zero_reg));  // Go to (4).
1391 
1392   // (2) Sequential or cons?  If not, go to (5).
1393   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
1394   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
1395   STATIC_ASSERT(kThinStringTag > kExternalStringTag);
1396   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
1397   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
1398   // Go to (5).
1399   __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
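  // The STATIC_ASSERTs above are what make this single `ge` comparison
  // sound: every case still to be handled (external, sliced, thin, short
  // external, not-a-string) has a tag value >= kExternalStringTag, while
  // cons strings sort below it and sequential strings were dispatched
  // already. Sketch with assumed tag values (not the authoritative
  // encoding): seq = 0, cons = 1, external = 2, sliced = 3, thin = 5;
  // a1 < 2 falls through to the cons-string case (3), a1 >= 2 defers to
  // cases (5)-(8).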
1400 
1401   // (3) Cons string.  Check that it's flat.
1402   // Replace subject with first string and reload instance type.
1403   __ ld(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
1404   __ LoadRoot(a1, Heap::kempty_stringRootIndex);
1405   __ Branch(&runtime, ne, a0, Operand(a1));
1406   __ ld(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
1407   __ jmp(&check_underlying);
1408 
1409   // (4) Sequential string.  Load regexp code according to encoding.
1410   __ bind(&seq_string);
1411   // subject: sequential subject string (or look-alike, external string)
1412   // a3: original subject string
  // Load the previous index and check its range before a3 is overwritten. We
  // have to use a3 instead of subject here, because subject might have only
  // been made to look like a sequential string when it is actually an
  // external string.
1416   __ ld(a1, MemOperand(sp, kPreviousIndexOffset));
1417   __ JumpIfNotSmi(a1, &runtime);
1418   __ ld(a3, FieldMemOperand(a3, String::kLengthOffset));
1419   __ Branch(&runtime, ls, a3, Operand(a1));
1420   __ SmiUntag(a1);
1421 
1422   STATIC_ASSERT(kStringEncodingMask == 8);
1423   STATIC_ASSERT(kOneByteStringTag == 8);
1424   STATIC_ASSERT(kTwoByteStringTag == 0);
1425   __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for one_byte.
1426   __ ld(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
1427   __ dsra(a3, a0, 3);  // a3 is 1 for one_byte, 0 for UC16 (used below).
1428   __ ld(a5, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
  __ Movz(t9, a5, a0);  // If UC16 (a0 is 0), replace t9 with the UC16 code.
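  // Movz(t9, a5, a0) is a conditional move: t9 = (a0 == 0) ? a5 : t9. For
  // UC16 subjects the encoding bit in a0 is zero, so t9 ends up holding the
  // UC16 code object loaded into a5; for one-byte subjects it keeps the
  // one-byte code object loaded above.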
1430 
1431   // (E) Carry on.  String handling is done.
1432   // t9: irregexp code
  // Check that the irregexp code has been generated for the actual string
  // encoding. If it has, the field contains a code object; otherwise it
  // contains a smi (code flushing support).
1436   __ JumpIfSmi(t9, &runtime);
1437 
1438   // a1: previous index
1439   // a3: encoding of subject string (1 if one_byte, 0 if two_byte);
1440   // t9: code
1441   // subject: Subject string
1442   // regexp_data: RegExp data (FixedArray)
1443   // All checks done. Now push arguments for native regexp code.
1444   __ IncrementCounter(isolate()->counters()->regexp_entry_native(),
1445                       1, a0, a2);
1446 
1447   // Isolates: note we add an additional parameter here (isolate pointer).
1448   const int kRegExpExecuteArguments = 9;
1449   const int kParameterRegisters = 8;
1450   __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
1451 
1452   // Stack pointer now points to cell where return address is to be written.
1453   // Arguments are before that on the stack or in registers, meaning we
1454   // treat the return address as argument 5. Thus every argument after that
1455   // needs to be shifted back by 1. Since DirectCEntryStub will handle
1456   // allocating space for the c argument slots, we don't need to calculate
1457   // that into the argument positions on the stack. This is how the stack will
1458   // look (sp meaning the value of sp at this moment):
1459   // Abi n64:
1460   //   [sp + 1] - Argument 9
1461   //   [sp + 0] - saved ra
1462   // Abi O32:
1463   //   [sp + 5] - Argument 9
1464   //   [sp + 4] - Argument 8
1465   //   [sp + 3] - Argument 7
1466   //   [sp + 2] - Argument 6
1467   //   [sp + 1] - Argument 5
1468   //   [sp + 0] - saved ra
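  // Under the n64 ABI the first eight integer arguments travel in registers
  // a0..a7, so of the nine regexp-entry arguments only argument 9 spills to
  // the stack; that is why EnterExitFrame above reserved exactly
  // kRegExpExecuteArguments - kParameterRegisters == 1 slot.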
1469 
1470   // Argument 9: Pass current isolate address.
1471   __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
1472   __ sd(a0, MemOperand(sp, 1 * kPointerSize));
1473 
1474   // Argument 8: Indicate that this is a direct call from JavaScript.
1475   __ li(a7, Operand(1));
1476 
1477   // Argument 7: Start (high end) of backtracking stack memory area.
1478   __ li(a0, Operand(address_of_regexp_stack_memory_address));
1479   __ ld(a0, MemOperand(a0, 0));
1480   __ li(a2, Operand(address_of_regexp_stack_memory_size));
1481   __ ld(a2, MemOperand(a2, 0));
1482   __ daddu(a6, a0, a2);
1483 
1484   // Argument 6: Set the number of capture registers to zero to force global
1485   // regexps to behave as non-global. This does not affect non-global regexps.
1486   __ mov(a5, zero_reg);
1487 
1488   // Argument 5: static offsets vector buffer.
1489   __ li(
1490       a4,
1491       Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
1492 
  // For arguments 4 and 3, get the string length, calculate the start of the
  // string data, and calculate the shift of the index (0 for one_byte and 1
  // for two_byte).
1495   __ Daddu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
1496   __ Xor(a3, a3, Operand(1));  // 1 for 2-byte str, 0 for 1-byte.
1497   // Load the length from the original subject string from the previous stack
1498   // frame. Therefore we have to use fp, which points exactly to two pointer
1499   // sizes below the previous sp. (Because creating a new stack frame pushes
1500   // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
1501   __ ld(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
1502   // If slice offset is not 0, load the length from the original sliced string.
1503   // Argument 4, a3: End of string data
1504   // Argument 3, a2: Start of string data
1505   // Prepare start and end index of the input.
1506   __ dsllv(t1, t0, a3);
1507   __ daddu(t0, t2, t1);
1508   __ dsllv(t1, a1, a3);
1509   __ daddu(a2, t0, t1);
1510 
1511   __ ld(t2, FieldMemOperand(subject, String::kLengthOffset));
1512 
1513   __ SmiUntag(t2);
1514   __ dsllv(t1, t2, a3);
1515   __ daddu(a3, t0, t1);
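  // Worked example of the address arithmetic above, for a one-byte subject
  // (a3 == 0 after the Xor, so the dsllv shifts are no-ops):
  //   t0 = data_start + slice_offset   // absolute start of the characters
  //   a2 = t0 + previous_index         // argument 3: start of input
  //   a3 = t0 + subject_length         // argument 4: end of input
  // For a two-byte subject a3 == 1 and every index is scaled by two bytes.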
1516   // Argument 2 (a1): Previous index.
1517   // Already there
1518 
1519   // Argument 1 (a0): Subject string.
1520   __ mov(a0, subject);
1521 
1522   // Locate the code entry and call it.
1523   __ Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
1524   DirectCEntryStub stub(isolate());
1525   stub.GenerateCall(masm, t9);
1526 
1527   __ LeaveExitFrame(false, no_reg, true);
1528 
1529   // v0: result
1530   // subject: subject string (callee saved)
1531   // regexp_data: RegExp data (callee saved)
1532   // last_match_info_elements: Last match info elements (callee saved)
1533   // Check the result.
1534   Label success;
1535   __ Branch(&success, eq, v0, Operand(1));
1536   // We expect exactly one result since we force the called regexp to behave
1537   // as non-global.
1538   Label failure;
1539   __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
  // If it is not an exception, it can only be retry. Handle that in the
  // runtime system.
1541   __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
  // The result must now be an exception. If there is no pending exception
  // already, a stack overflow (on the backtrack stack) was detected in the
  // RegExp code, but the exception has not been created yet. Handle that in
  // the runtime system.
  // TODO(592): Rerun the RegExp to get the stack overflow exception.
1546   __ li(a1, Operand(isolate()->factory()->the_hole_value()));
1547   __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1548                                       isolate())));
1549   __ ld(v0, MemOperand(a2, 0));
1550   __ Branch(&runtime, eq, v0, Operand(a1));
1551 
1552   // For exception, throw the exception again.
1553   __ TailCallRuntime(Runtime::kRegExpExecReThrow);
1554 
1555   __ bind(&failure);
  // For failure, return null.
1557   __ li(v0, Operand(isolate()->factory()->null_value()));
1558   __ DropAndRet(4);
1559 
1560   // Process the result from the native regexp code.
1561   __ bind(&success);
1562 
1563   __ lw(a1, UntagSmiFieldMemOperand(
1564       regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
1565   // Calculate number of capture registers (number_of_captures + 1) * 2.
1566   __ Daddu(a1, a1, Operand(1));
1567   __ dsll(a1, a1, 1);  // Multiply by 2.
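  // E.g. a regexp with one explicit capture group reports
  // number_of_captures == 1, giving (1 + 1) * 2 == 4 registers: start/end
  // offsets for the implicit whole-match capture plus start/end offsets
  // for the explicit group.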
1568 
1569   // Check that the last match info is a FixedArray.
1570   __ ld(last_match_info_elements, MemOperand(sp, kLastMatchInfoOffset));
1571   __ JumpIfSmi(last_match_info_elements, &runtime);
1572   // Check that the object has fast elements.
1573   __ ld(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
1574   __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
1575   __ Branch(&runtime, ne, a0, Operand(at));
1576   // Check that the last match info has space for the capture registers and the
1577   // additional information.
1578   __ ld(a0,
1579         FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
1580   __ Daddu(a2, a1, Operand(RegExpMatchInfo::kLastMatchOverhead));
1581 
1582   __ SmiUntag(at, a0);
1583   __ Branch(&runtime, gt, a2, Operand(at));
1584 
1585   // a1: number of capture registers
1586   // subject: subject string
1587   // Store the capture count.
1588   __ SmiTag(a2, a1);  // To smi.
1589   __ sd(a2, FieldMemOperand(last_match_info_elements,
1590                             RegExpMatchInfo::kNumberOfCapturesOffset));
1591   // Store last subject and last input.
1592   __ sd(subject, FieldMemOperand(last_match_info_elements,
1593                                  RegExpMatchInfo::kLastSubjectOffset));
1594   __ mov(a2, subject);
1595   __ RecordWriteField(last_match_info_elements,
1596                       RegExpMatchInfo::kLastSubjectOffset, subject, a7,
1597                       kRAHasNotBeenSaved, kDontSaveFPRegs);
1598   __ mov(subject, a2);
1599   __ sd(subject, FieldMemOperand(last_match_info_elements,
1600                                  RegExpMatchInfo::kLastInputOffset));
1601   __ RecordWriteField(last_match_info_elements,
1602                       RegExpMatchInfo::kLastInputOffset, subject, a7,
1603                       kRAHasNotBeenSaved, kDontSaveFPRegs);
1604 
1605   // Get the static offsets vector filled by the native regexp code.
1606   ExternalReference address_of_static_offsets_vector =
1607       ExternalReference::address_of_static_offsets_vector(isolate());
1608   __ li(a2, Operand(address_of_static_offsets_vector));
1609 
1610   // a1: number of capture registers
1611   // a2: offsets vector
1612   Label next_capture, done;
  // The capture register counter starts at the number of capture registers
  // and counts down; the loop below exits once it drops below zero.
1615   __ Daddu(a0, last_match_info_elements,
1616            Operand(RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag));
1617   __ bind(&next_capture);
1618   __ Dsubu(a1, a1, Operand(1));
1619   __ Branch(&done, lt, a1, Operand(zero_reg));
1620   // Read the value from the static offsets vector buffer.
1621   __ lw(a3, MemOperand(a2, 0));
1622   __ daddiu(a2, a2, kIntSize);
1623   // Store the smi value in the last match info.
1624   __ SmiTag(a3);
1625   __ sd(a3, MemOperand(a0, 0));
1626   __ Branch(&next_capture, USE_DELAY_SLOT);
1627   __ daddiu(a0, a0, kPointerSize);  // In branch delay slot.
1628 
1629   __ bind(&done);
1630 
1631   // Return last match info.
1632   __ mov(v0, last_match_info_elements);
1633   __ DropAndRet(4);
1634 
1635   // Do the runtime call to execute the regexp.
1636   __ bind(&runtime);
1637   __ TailCallRuntime(Runtime::kRegExpExec);
1638 
1639   // Deferred code for string handling.
1640   // (5) Long external string?  If not, go to (7).
1641   __ bind(&not_seq_nor_cons);
1642   // Go to (7).
1643   __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));
1644 
1645   // (6) External string.  Make it, offset-wise, look like a sequential string.
1646   __ bind(&external_string);
1647   __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
1648   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
1649   if (FLAG_debug_code) {
1650     // Assert that we do not have a cons or slice (indirect strings) here.
1651     // Sequential strings have already been ruled out.
1652     __ And(at, a0, Operand(kIsIndirectStringMask));
1653     __ Assert(eq,
1654               kExternalStringExpectedButNotFound,
1655               at,
1656               Operand(zero_reg));
1657   }
1658   __ ld(subject,
1659         FieldMemOperand(subject, ExternalString::kResourceDataOffset));
1660   // Move the pointer so that offset-wise, it looks like a sequential string.
1661   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ Dsubu(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
1665   __ jmp(&seq_string);  // Go to (4).
1666 
1667   // (7) Short external string or not a string?  If yes, bail out to runtime.
1668   __ bind(&not_long_external);
  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
1670   __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
1671   __ Branch(&runtime, ne, at, Operand(zero_reg));
1672 
1673   // (8) Sliced or thin string.  Replace subject with parent.  Go to (4).
1674   Label thin_string;
1675   __ Branch(&thin_string, eq, a1, Operand(kThinStringTag));
1676   // Load offset into t0 and replace subject string with parent.
1677   __ ld(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
1678   __ SmiUntag(t0);
1679   __ ld(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
1680   __ jmp(&check_underlying);  // Go to (1).
1681 
1682   __ bind(&thin_string);
1683   __ ld(subject, FieldMemOperand(subject, ThinString::kActualOffset));
1684   __ jmp(&check_underlying);  // Go to (1).
1685 #endif  // V8_INTERPRETED_REGEXP
1686 }
1687 
1688 
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
1690   // a0 : number of arguments to the construct function
1691   // a2 : feedback vector
1692   // a3 : slot in feedback vector (Smi)
1693   // a1 : the function to call
1694   FrameScope scope(masm, StackFrame::INTERNAL);
1695   const RegList kSavedRegs = 1 << 4 |  // a0
1696                              1 << 5 |  // a1
1697                              1 << 6 |  // a2
1698                              1 << 7 |  // a3
1699                              1 << cp.code();
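  // On MIPS the argument registers a0..a3 are the general-purpose registers
  // $4..$7, so the bits 1 << 4 .. 1 << 7 above select exactly a0..a3.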
1700 
1701   // Number-of-arguments register must be smi-tagged to call out.
1702   __ SmiTag(a0);
1703   __ MultiPush(kSavedRegs);
1704 
1705   __ CallStub(stub);
1706 
1707   __ MultiPop(kSavedRegs);
1708   __ SmiUntag(a0);
1709 }
1710 
1711 
static void GenerateRecordCallTarget(MacroAssembler* masm) {
1713   // Cache the called function in a feedback vector slot.  Cache states
1714   // are uninitialized, monomorphic (indicated by a JSFunction), and
1715   // megamorphic.
1716   // a0 : number of arguments to the construct function
1717   // a1 : the function to call
1718   // a2 : feedback vector
1719   // a3 : slot in feedback vector (Smi)
1720   Label initialize, done, miss, megamorphic, not_array_function;
1721 
1722   DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
1723             masm->isolate()->heap()->megamorphic_symbol());
1724   DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
1725             masm->isolate()->heap()->uninitialized_symbol());
1726 
1727   // Load the cache state into a5.
1728   __ dsrl(a5, a3, 32 - kPointerSizeLog2);
1729   __ Daddu(a5, a2, Operand(a5));
1730   __ ld(a5, FieldMemOperand(a5, FixedArray::kHeaderSize));
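  // a3 is a smi whose 32-bit payload lives in the upper word (the mips64
  // smi encoding), so the logical right shift by 32 - kPointerSizeLog2
  // converts it to a byte offset in one instruction:
  // (index << 32) >> 29 == index << 3 == index * kPointerSize.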
1731 
1732   // A monomorphic cache hit or an already megamorphic state: invoke the
1733   // function without changing the state.
1734   // We don't know if a5 is a WeakCell or a Symbol, but it's harmless to read at
1735   // this position in a symbol (see static asserts in feedback-vector.h).
1736   Label check_allocation_site;
1737   Register feedback_map = a6;
1738   Register weak_value = t0;
1739   __ ld(weak_value, FieldMemOperand(a5, WeakCell::kValueOffset));
1740   __ Branch(&done, eq, a1, Operand(weak_value));
1741   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
1742   __ Branch(&done, eq, a5, Operand(at));
1743   __ ld(feedback_map, FieldMemOperand(a5, HeapObject::kMapOffset));
1744   __ LoadRoot(at, Heap::kWeakCellMapRootIndex);
1745   __ Branch(&check_allocation_site, ne, feedback_map, Operand(at));
1746 
1747   // If the weak cell is cleared, we have a new chance to become monomorphic.
1748   __ JumpIfSmi(weak_value, &initialize);
1749   __ jmp(&megamorphic);
1750 
1751   __ bind(&check_allocation_site);
1752   // If we came here, we need to see if we are the array function.
  // If we didn't have a matching function and we didn't find the megamorphic
  // sentinel, then the slot holds either some other function or an
  // AllocationSite.
1756   __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
1757   __ Branch(&miss, ne, feedback_map, Operand(at));
1758 
1759   // Make sure the function is the Array() function
1760   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5);
1761   __ Branch(&megamorphic, ne, a1, Operand(a5));
1762   __ jmp(&done);
1763 
1764   __ bind(&miss);
1765 
  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
  // megamorphic.
1768   __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
1769   __ Branch(&initialize, eq, a5, Operand(at));
  // MegamorphicSentinel is an immortal immovable object (the megamorphic
  // symbol), so no write-barrier is needed.
1772   __ bind(&megamorphic);
1773   __ dsrl(a5, a3, 32 - kPointerSizeLog2);
1774   __ Daddu(a5, a2, Operand(a5));
1775   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
1776   __ sd(at, FieldMemOperand(a5, FixedArray::kHeaderSize));
1777   __ jmp(&done);
1778 
1779   // An uninitialized cache is patched with the function.
1780   __ bind(&initialize);
1781   // Make sure the function is the Array() function.
1782   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5);
1783   __ Branch(&not_array_function, ne, a1, Operand(a5));
1784 
  // The target function is the Array constructor. Create an AllocationSite
  // if we don't already have one, and store it in the slot.
1788   CreateAllocationSiteStub create_stub(masm->isolate());
1789   CallStubInRecordCallTarget(masm, &create_stub);
1790   __ Branch(&done);
1791 
1792   __ bind(&not_array_function);
1793 
1794   CreateWeakCellStub weak_cell_stub(masm->isolate());
1795   CallStubInRecordCallTarget(masm, &weak_cell_stub);
1796 
1797   __ bind(&done);
1798 
1799   // Increment the call count for all function calls.
1800   __ SmiScale(a4, a3, kPointerSizeLog2);
1801   __ Daddu(a5, a2, Operand(a4));
1802   __ ld(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
1803   __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
1804   __ sd(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
1805 }
1806 
1807 
void CallConstructStub::Generate(MacroAssembler* masm) {
1809   // a0 : number of arguments
1810   // a1 : the function to call
1811   // a2 : feedback vector
1812   // a3 : slot in feedback vector (Smi, for RecordCallTarget)
1813 
1814   Label non_function;
1815   // Check that the function is not a smi.
1816   __ JumpIfSmi(a1, &non_function);
1817   // Check that the function is a JSFunction.
1818   __ GetObjectType(a1, a5, a5);
1819   __ Branch(&non_function, ne, a5, Operand(JS_FUNCTION_TYPE));
1820 
1821   GenerateRecordCallTarget(masm);
1822 
1823   __ dsrl(at, a3, 32 - kPointerSizeLog2);
1824   __ Daddu(a5, a2, at);
1825   Label feedback_register_initialized;
1826   // Put the AllocationSite from the feedback vector into a2, or undefined.
1827   __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize));
1828   __ ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
1829   __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
1830   __ Branch(&feedback_register_initialized, eq, a5, Operand(at));
1831   __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
1832   __ bind(&feedback_register_initialized);
1833 
1834   __ AssertUndefinedOrAllocationSite(a2, a5);
1835 
1836   // Pass function as new target.
1837   __ mov(a3, a1);
1838 
1839   // Tail call to the function-specific construct stub (still in the caller
1840   // context at this point).
1841   __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
1842   __ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
1843   __ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
1844   __ Jump(at);
1845 
1846   __ bind(&non_function);
1847   __ mov(a3, a1);
1848   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
1849 }
1850 
1851 
1852 // StringCharCodeAtGenerator.
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
1854   DCHECK(!a4.is(index_));
1855   DCHECK(!a4.is(result_));
1856   DCHECK(!a4.is(object_));
1857 
  // If the receiver is a smi, trigger the non-string case.
1859   if (check_mode_ == RECEIVER_IS_UNKNOWN) {
1860     __ JumpIfSmi(object_, receiver_not_string_);
1861 
1862     // Fetch the instance type of the receiver into result register.
1863     __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
1864     __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
    // If the receiver is not a string, trigger the non-string case.
1866     __ And(a4, result_, Operand(kIsNotStringMask));
1867     __ Branch(receiver_not_string_, ne, a4, Operand(zero_reg));
1868   }
1869 
  // If the index is not a smi, trigger the non-smi case.
1871   __ JumpIfNotSmi(index_, &index_not_smi_);
1872 
1873   __ bind(&got_smi_index_);
1874 
1875   // Check for index out of range.
1876   __ ld(a4, FieldMemOperand(object_, String::kLengthOffset));
1877   __ Branch(index_out_of_range_, ls, a4, Operand(index_));
1878 
1879   __ SmiUntag(index_);
1880 
1881   StringCharLoadGenerator::Generate(masm,
1882                                     object_,
1883                                     index_,
1884                                     result_,
1885                                     &call_runtime_);
1886 
1887   __ SmiTag(result_);
1888   __ bind(&exit_);
1889 }
1890 
void StringCharCodeAtGenerator::GenerateSlow(
1892     MacroAssembler* masm, EmbedMode embed_mode,
1893     const RuntimeCallHelper& call_helper) {
1894   __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
1895 
1896   // Index is not a smi.
1897   __ bind(&index_not_smi_);
1898   // If index is a heap number, try converting it to an integer.
1899   __ CheckMap(index_,
1900               result_,
1901               Heap::kHeapNumberMapRootIndex,
1902               index_not_number_,
1903               DONT_DO_SMI_CHECK);
1904   call_helper.BeforeCall(masm);
1905   // Consumed by runtime conversion function:
1906   if (embed_mode == PART_OF_IC_HANDLER) {
1907     __ Push(LoadWithVectorDescriptor::VectorRegister(),
1908             LoadWithVectorDescriptor::SlotRegister(), object_, index_);
1909   } else {
1910     __ Push(object_, index_);
1911   }
1912   __ CallRuntime(Runtime::kNumberToSmi);
1913 
1914   // Save the conversion result before the pop instructions below
1915   // have a chance to overwrite it.
1916 
1917   __ Move(index_, v0);
1918   if (embed_mode == PART_OF_IC_HANDLER) {
1919     __ Pop(LoadWithVectorDescriptor::VectorRegister(),
1920            LoadWithVectorDescriptor::SlotRegister(), object_);
1921   } else {
1922     __ pop(object_);
1923   }
1924   // Reload the instance type.
1925   __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
1926   __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
1927   call_helper.AfterCall(masm);
1928   // If index is still not a smi, it must be out of range.
1929   __ JumpIfNotSmi(index_, index_out_of_range_);
1930   // Otherwise, return to the fast path.
1931   __ Branch(&got_smi_index_);
1932 
  // Call the runtime. We get here when the receiver is a string and the
  // index is a number, but the code for getting the actual character
  // is too complex (e.g., when the string needs to be flattened).
1936   __ bind(&call_runtime_);
1937   call_helper.BeforeCall(masm);
1938   __ SmiTag(index_);
1939   __ Push(object_, index_);
1940   __ CallRuntime(Runtime::kStringCharCodeAtRT);
1941 
1942   __ Move(result_, v0);
1943 
1944   call_helper.AfterCall(masm);
1945   __ jmp(&exit_);
1946 
1947   __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
1948 }
1949 
void StringHelper::GenerateFlatOneByteStringEquals(
1951     MacroAssembler* masm, Register left, Register right, Register scratch1,
1952     Register scratch2, Register scratch3) {
1953   Register length = scratch1;
1954 
1955   // Compare lengths.
1956   Label strings_not_equal, check_zero_length;
1957   __ ld(length, FieldMemOperand(left, String::kLengthOffset));
1958   __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
1959   __ Branch(&check_zero_length, eq, length, Operand(scratch2));
1960   __ bind(&strings_not_equal);
  // Cannot put li in the delay slot; it expands to multiple instructions.
1962   __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
1963   __ Ret();
1964 
1965   // Check if the length is zero.
1966   Label compare_chars;
1967   __ bind(&check_zero_length);
1968   STATIC_ASSERT(kSmiTag == 0);
1969   __ Branch(&compare_chars, ne, length, Operand(zero_reg));
1970   DCHECK(is_int16((intptr_t)Smi::FromInt(EQUAL)));
1971   __ Ret(USE_DELAY_SLOT);
1972   __ li(v0, Operand(Smi::FromInt(EQUAL)));
1973 
1974   // Compare characters.
1975   __ bind(&compare_chars);
1976 
1977   GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
1978                                   v0, &strings_not_equal);
1979 
1980   // Characters are equal.
1981   __ Ret(USE_DELAY_SLOT);
1982   __ li(v0, Operand(Smi::FromInt(EQUAL)));
1983 }
1984 
1985 
void StringHelper::GenerateCompareFlatOneByteStrings(
1987     MacroAssembler* masm, Register left, Register right, Register scratch1,
1988     Register scratch2, Register scratch3, Register scratch4) {
1989   Label result_not_equal, compare_lengths;
1990   // Find minimum length and length difference.
1991   __ ld(scratch1, FieldMemOperand(left, String::kLengthOffset));
1992   __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
1993   __ Dsubu(scratch3, scratch1, Operand(scratch2));
1994   Register length_delta = scratch3;
1995   __ slt(scratch4, scratch2, scratch1);
1996   __ Movn(scratch1, scratch2, scratch4);
1997   Register min_length = scratch1;
1998   STATIC_ASSERT(kSmiTag == 0);
1999   __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
2000 
2001   // Compare loop.
2002   GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
2003                                   scratch4, v0, &result_not_equal);
2004 
2005   // Compare lengths - strings up to min-length are equal.
2006   __ bind(&compare_lengths);
2007   DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
2008   // Use length_delta as result if it's zero.
2009   __ mov(scratch2, length_delta);
2010   __ mov(scratch4, zero_reg);
2011   __ mov(v0, zero_reg);
2012 
2013   __ bind(&result_not_equal);
  // Conditionally update the result based on either length_delta or
  // the last comparison performed in the loop above.
2016   Label ret;
2017   __ Branch(&ret, eq, scratch2, Operand(scratch4));
2018   __ li(v0, Operand(Smi::FromInt(GREATER)));
2019   __ Branch(&ret, gt, scratch2, Operand(scratch4));
2020   __ li(v0, Operand(Smi::FromInt(LESS)));
2021   __ bind(&ret);
2022   __ Ret();
2023 }
2024 
2025 
void StringHelper::GenerateOneByteCharsCompareLoop(
2027     MacroAssembler* masm, Register left, Register right, Register length,
2028     Register scratch1, Register scratch2, Register scratch3,
2029     Label* chars_not_equal) {
  // Change index to run from -length to -1 by adding length to the string
  // start. This means that the loop ends when index reaches zero, which
  // doesn't need an additional compare.
2033   __ SmiUntag(length);
  __ Daddu(scratch1, length,
           Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
2036   __ Daddu(left, left, Operand(scratch1));
2037   __ Daddu(right, right, Operand(scratch1));
2038   __ Dsubu(length, zero_reg, length);
2039   Register index = length;  // index = -length;
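  // E.g. for length == 3 the loop below reads the characters at offsets
  // -3, -2 and -1 relative to the adjusted string ends and terminates as
  // soon as the incremented index reaches zero, with no extra bounds
  // compare.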
2040 
2041 
2042   // Compare loop.
2043   Label loop;
2044   __ bind(&loop);
2045   __ Daddu(scratch3, left, index);
2046   __ lbu(scratch1, MemOperand(scratch3));
2047   __ Daddu(scratch3, right, index);
2048   __ lbu(scratch2, MemOperand(scratch3));
2049   __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
2050   __ Daddu(index, index, 1);
2051   __ Branch(&loop, ne, index, Operand(zero_reg));
2052 }
2053 
2054 
void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
2056   // ----------- S t a t e -------------
2057   //  -- a1    : left
2058   //  -- a0    : right
2059   //  -- ra    : return address
2060   // -----------------------------------
2061 
2062   // Load a2 with the allocation site. We stick an undefined dummy value here
2063   // and replace it with the real allocation site later when we instantiate this
2064   // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
2065   __ li(a2, isolate()->factory()->undefined_value());
2066 
2067   // Make sure that we actually patched the allocation site.
2068   if (FLAG_debug_code) {
2069     __ And(at, a2, Operand(kSmiTagMask));
2070     __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
2071     __ ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset));
2072     __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2073     __ Assert(eq, kExpectedAllocationSite, a4, Operand(at));
2074   }
2075 
2076   // Tail call into the stub that handles binary operations with allocation
2077   // sites.
2078   BinaryOpWithAllocationSiteStub stub(isolate(), state());
2079   __ TailCallStub(&stub);
2080 }
2081 
2082 
void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
2084   DCHECK_EQ(CompareICState::BOOLEAN, state());
2085   Label miss;
2086 
2087   __ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
2088   __ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
2089   if (!Token::IsEqualityOp(op())) {
2090     __ ld(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
2091     __ AssertSmi(a1);
2092     __ ld(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
2093     __ AssertSmi(a0);
2094   }
2095   __ Ret(USE_DELAY_SLOT);
2096   __ Dsubu(v0, a1, a0);
2097 
2098   __ bind(&miss);
2099   GenerateMiss(masm);
2100 }
2101 
2102 
void CompareICStub::GenerateSmis(MacroAssembler* masm) {
2104   DCHECK(state() == CompareICState::SMI);
2105   Label miss;
2106   __ Or(a2, a1, a0);
2107   __ JumpIfNotSmi(a2, &miss);
2108 
2109   if (GetCondition() == eq) {
2110     // For equality we do not care about the sign of the result.
2111     __ Ret(USE_DELAY_SLOT);
2112     __ Dsubu(v0, a0, a1);
2113   } else {
2114     // Untag before subtracting to avoid handling overflow.
2115     __ SmiUntag(a1);
2116     __ SmiUntag(a0);
2117     __ Ret(USE_DELAY_SLOT);
2118     __ Dsubu(v0, a1, a0);
2119   }
2120 
2121   __ bind(&miss);
2122   GenerateMiss(masm);
2123 }
2124 
2125 
void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
2127   DCHECK(state() == CompareICState::NUMBER);
2128 
2129   Label generic_stub;
2130   Label unordered, maybe_undefined1, maybe_undefined2;
2131   Label miss;
2132 
2133   if (left() == CompareICState::SMI) {
2134     __ JumpIfNotSmi(a1, &miss);
2135   }
2136   if (right() == CompareICState::SMI) {
2137     __ JumpIfNotSmi(a0, &miss);
2138   }
2139 
  // Inline the double comparison and fall back to the general compare
  // stub if NaN is involved.
2142   // Load left and right operand.
2143   Label done, left, left_smi, right_smi;
2144   __ JumpIfSmi(a0, &right_smi);
2145   __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
2146               DONT_DO_SMI_CHECK);
2147   __ Dsubu(a2, a0, Operand(kHeapObjectTag));
2148   __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
2149   __ Branch(&left);
2150   __ bind(&right_smi);
2151   __ SmiUntag(a2, a0);  // Can't clobber a0 yet.
2152   FPURegister single_scratch = f6;
2153   __ mtc1(a2, single_scratch);
2154   __ cvt_d_w(f2, single_scratch);
2155 
2156   __ bind(&left);
2157   __ JumpIfSmi(a1, &left_smi);
2158   __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
2159               DONT_DO_SMI_CHECK);
2160   __ Dsubu(a2, a1, Operand(kHeapObjectTag));
2161   __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
2162   __ Branch(&done);
2163   __ bind(&left_smi);
2164   __ SmiUntag(a2, a1);  // Can't clobber a1 yet.
2165   single_scratch = f8;
2166   __ mtc1(a2, single_scratch);
2167   __ cvt_d_w(f0, single_scratch);
2168 
2169   __ bind(&done);
2170 
2171   // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
2172   Label fpu_eq, fpu_lt;
2173   // Test if equal, and also handle the unordered/NaN case.
2174   __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
2175 
2176   // Test if less (unordered case is already handled).
2177   __ BranchF(&fpu_lt, NULL, lt, f0, f2);
2178 
  // Otherwise it's greater, so just fall through, and return.
2180   DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
2181   __ Ret(USE_DELAY_SLOT);
2182   __ li(v0, Operand(GREATER));
2183 
2184   __ bind(&fpu_eq);
2185   __ Ret(USE_DELAY_SLOT);
2186   __ li(v0, Operand(EQUAL));
2187 
2188   __ bind(&fpu_lt);
2189   __ Ret(USE_DELAY_SLOT);
2190   __ li(v0, Operand(LESS));
2191 
2192   __ bind(&unordered);
2193   __ bind(&generic_stub);
2194   CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
2195                      CompareICState::GENERIC, CompareICState::GENERIC);
2196   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2197 
2198   __ bind(&maybe_undefined1);
2199   if (Token::IsOrderedRelationalCompareOp(op())) {
2200     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
2201     __ Branch(&miss, ne, a0, Operand(at));
2202     __ JumpIfSmi(a1, &unordered);
2203     __ GetObjectType(a1, a2, a2);
2204     __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
2205     __ jmp(&unordered);
2206   }
2207 
2208   __ bind(&maybe_undefined2);
2209   if (Token::IsOrderedRelationalCompareOp(op())) {
2210     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
2211     __ Branch(&unordered, eq, a1, Operand(at));
2212   }
2213 
2214   __ bind(&miss);
2215   GenerateMiss(masm);
2216 }
2217 
2218 
void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
2220   DCHECK(state() == CompareICState::INTERNALIZED_STRING);
2221   Label miss;
2222 
2223   // Registers containing left and right operands respectively.
2224   Register left = a1;
2225   Register right = a0;
2226   Register tmp1 = a2;
2227   Register tmp2 = a3;
2228 
2229   // Check that both operands are heap objects.
2230   __ JumpIfEitherSmi(left, right, &miss);
2231 
2232   // Check that both operands are internalized strings.
2233   __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2234   __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2235   __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2236   __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2237   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2238   __ Or(tmp1, tmp1, Operand(tmp2));
2239   __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
2240   __ Branch(&miss, ne, at, Operand(zero_reg));
2241 
2242   // Make sure a0 is non-zero. At this point input operands are
2243   // guaranteed to be non-zero.
2244   DCHECK(right.is(a0));
2245   STATIC_ASSERT(EQUAL == 0);
2246   STATIC_ASSERT(kSmiTag == 0);
2247   __ mov(v0, right);
2248   // Internalized strings are compared by identity.
2249   __ Ret(ne, left, Operand(right));
2250   DCHECK(is_int16(EQUAL));
2251   __ Ret(USE_DELAY_SLOT);
2252   __ li(v0, Operand(Smi::FromInt(EQUAL)));
2253 
2254   __ bind(&miss);
2255   GenerateMiss(masm);
2256 }
2257 
2258 
void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
2260   DCHECK(state() == CompareICState::UNIQUE_NAME);
2261   DCHECK(GetCondition() == eq);
2262   Label miss;
2263 
2264   // Registers containing left and right operands respectively.
2265   Register left = a1;
2266   Register right = a0;
2267   Register tmp1 = a2;
2268   Register tmp2 = a3;
2269 
2270   // Check that both operands are heap objects.
2271   __ JumpIfEitherSmi(left, right, &miss);
2272 
2273   // Check that both operands are unique names. This leaves the instance
2274   // types loaded in tmp1 and tmp2.
2275   __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2276   __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2277   __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2278   __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2279 
2280   __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
2281   __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
2282 
2283   // Use a0 as result
2284   __ mov(v0, a0);
2285 
2286   // Unique names are compared by identity.
2287   Label done;
2288   __ Branch(&done, ne, left, Operand(right));
2289   // Make sure a0 is non-zero. At this point input operands are
2290   // guaranteed to be non-zero.
2291   DCHECK(right.is(a0));
2292   STATIC_ASSERT(EQUAL == 0);
2293   STATIC_ASSERT(kSmiTag == 0);
2294   __ li(v0, Operand(Smi::FromInt(EQUAL)));
2295   __ bind(&done);
2296   __ Ret();
2297 
2298   __ bind(&miss);
2299   GenerateMiss(masm);
2300 }
2301 
2302 
void CompareICStub::GenerateStrings(MacroAssembler* masm) {
2304   DCHECK(state() == CompareICState::STRING);
2305   Label miss;
2306 
2307   bool equality = Token::IsEqualityOp(op());
2308 
2309   // Registers containing left and right operands respectively.
2310   Register left = a1;
2311   Register right = a0;
2312   Register tmp1 = a2;
2313   Register tmp2 = a3;
2314   Register tmp3 = a4;
2315   Register tmp4 = a5;
2316   Register tmp5 = a6;
2317 
2318   // Check that both operands are heap objects.
2319   __ JumpIfEitherSmi(left, right, &miss);
2320 
2321   // Check that both operands are strings. This leaves the instance
2322   // types loaded in tmp1 and tmp2.
2323   __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2324   __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2325   __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2326   __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2327   STATIC_ASSERT(kNotStringTag != 0);
2328   __ Or(tmp3, tmp1, tmp2);
2329   __ And(tmp5, tmp3, Operand(kIsNotStringMask));
2330   __ Branch(&miss, ne, tmp5, Operand(zero_reg));
2331 
2332   // Fast check for identical strings.
2333   Label left_ne_right;
2334   STATIC_ASSERT(EQUAL == 0);
2335   STATIC_ASSERT(kSmiTag == 0);
2336   __ Branch(&left_ne_right, ne, left, Operand(right));
2337   __ Ret(USE_DELAY_SLOT);
2338   __ mov(v0, zero_reg);  // In the delay slot.
2339   __ bind(&left_ne_right);
2340 
2341   // Handle not identical strings.
2342 
2343   // Check that both strings are internalized strings. If they are, we're done
2344   // because we already know they are not identical. We know they are both
2345   // strings.
2346   if (equality) {
2347     DCHECK(GetCondition() == eq);
2348     STATIC_ASSERT(kInternalizedTag == 0);
2349     __ Or(tmp3, tmp1, Operand(tmp2));
2350     __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
2351     Label is_symbol;
2352     __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
2353     // Make sure a0 is non-zero. At this point input operands are
2354     // guaranteed to be non-zero.
2355     DCHECK(right.is(a0));
2356     __ Ret(USE_DELAY_SLOT);
2357     __ mov(v0, a0);  // In the delay slot.
2358     __ bind(&is_symbol);
2359   }
2360 
2361   // Check that both strings are sequential one_byte.
2362   Label runtime;
2363   __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
2364                                                     &runtime);
2365 
2366   // Compare flat one_byte strings. Returns when done.
2367   if (equality) {
2368     StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
2369                                                   tmp3);
2370   } else {
2371     StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
2372                                                     tmp2, tmp3, tmp4);
2373   }
2374 
2375   // Handle more complex cases in runtime.
2376   __ bind(&runtime);
2377   if (equality) {
2378     {
2379       FrameScope scope(masm, StackFrame::INTERNAL);
2380       __ Push(left, right);
2381       __ CallRuntime(Runtime::kStringEqual);
2382     }
2383     __ LoadRoot(a0, Heap::kTrueValueRootIndex);
2384     __ Ret(USE_DELAY_SLOT);
2385     __ Subu(v0, v0, a0);  // In delay slot.
2386   } else {
2387     __ Push(left, right);
2388     __ TailCallRuntime(Runtime::kStringCompare);
2389   }
2390 
2391   __ bind(&miss);
2392   GenerateMiss(masm);
2393 }
2394 
2395 
void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
2397   DCHECK_EQ(CompareICState::RECEIVER, state());
2398   Label miss;
2399   __ And(a2, a1, Operand(a0));
2400   __ JumpIfSmi(a2, &miss);
2401 
2402   STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2403   __ GetObjectType(a0, a2, a2);
2404   __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
2405   __ GetObjectType(a1, a2, a2);
2406   __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
2407 
2408   DCHECK_EQ(eq, GetCondition());
2409   __ Ret(USE_DELAY_SLOT);
2410   __ dsubu(v0, a0, a1);
2411 
2412   __ bind(&miss);
2413   GenerateMiss(masm);
2414 }
2415 
2416 
void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
2418   Label miss;
2419   Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
2420   __ And(a2, a1, a0);
2421   __ JumpIfSmi(a2, &miss);
2422   __ GetWeakValue(a4, cell);
2423   __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
2424   __ ld(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
2425   __ Branch(&miss, ne, a2, Operand(a4));
2426   __ Branch(&miss, ne, a3, Operand(a4));
2427 
2428   if (Token::IsEqualityOp(op())) {
2429     __ Ret(USE_DELAY_SLOT);
2430     __ dsubu(v0, a0, a1);
2431   } else {
2432     if (op() == Token::LT || op() == Token::LTE) {
2433       __ li(a2, Operand(Smi::FromInt(GREATER)));
2434     } else {
2435       __ li(a2, Operand(Smi::FromInt(LESS)));
2436     }
2437     __ Push(a1, a0, a2);
2438     __ TailCallRuntime(Runtime::kCompare);
2439   }
2440 
2441   __ bind(&miss);
2442   GenerateMiss(masm);
2443 }
2444 
2445 
void CompareICStub::GenerateMiss(MacroAssembler* masm) {
2447   {
2448     // Call the runtime system in a fresh internal frame.
2449     FrameScope scope(masm, StackFrame::INTERNAL);
2450     __ Push(a1, a0);
2451     __ Push(ra, a1, a0);
2452     __ li(a4, Operand(Smi::FromInt(op())));
2453     __ daddiu(sp, sp, -kPointerSize);
2454     __ CallRuntime(Runtime::kCompareIC_Miss, 3, kDontSaveFPRegs,
2455                    USE_DELAY_SLOT);
2456     __ sd(a4, MemOperand(sp));  // In the delay slot.
2457     // Compute the entry point of the rewritten stub.
2458     __ Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
2459     // Restore registers.
2460     __ Pop(a1, a0, ra);
2461   }
2462   __ Jump(a2);
2463 }
2464 
2465 
void DirectCEntryStub::Generate(MacroAssembler* masm) {
  // Make room for the arguments required by the C calling convention. Most
  // callers of DirectCEntryStub::GenerateCall use EnterExitFrame /
  // LeaveExitFrame, so they handle stack restoring and we don't have to do
  // that here.
2470   // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
2471   // kCArgsSlotsSize stack space after the call.
2472   __ daddiu(sp, sp, -kCArgsSlotsSize);
2473   // Place the return address on the stack, making the call
2474   // GC safe. The RegExp backend also relies on this.
2475   __ sd(ra, MemOperand(sp, kCArgsSlotsSize));
2476   __ Call(t9);  // Call the C++ function.
2477   __ ld(t9, MemOperand(sp, kCArgsSlotsSize));
2478 
2479   if (FLAG_debug_code && FLAG_enable_slow_asserts) {
2480     // In case of an error the return address may point to a memory area
2481     // filled with kZapValue by the GC.
2482     // Dereference the address and check for this.
2483     __ Uld(a4, MemOperand(t9));
2484     __ Assert(ne, kReceivedInvalidReturnAddress, a4,
2485         Operand(reinterpret_cast<uint64_t>(kZapValue)));
2486   }
2487   __ Jump(t9);
2488 }
2489 
2490 
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
2492                                     Register target) {
2493   intptr_t loc =
2494       reinterpret_cast<intptr_t>(GetCode().location());
2495   __ Move(t9, target);
2496   __ li(at, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
2497   __ Call(at);
2498 }
2499 
2500 
void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
2502                                                       Label* miss,
2503                                                       Label* done,
2504                                                       Register receiver,
2505                                                       Register properties,
2506                                                       Handle<Name> name,
2507                                                       Register scratch0) {
2508   DCHECK(name->IsUniqueName());
  // If the names of the slots in the range from 1 to kProbes - 1 for the
  // hash value are not equal to the name, and the kProbes-th slot is not
  // used (its name is the undefined value), then the hash table is
  // guaranteed not to contain the property. This holds even if some slots
  // represent deleted properties (their names are the hole value).
2514   for (int i = 0; i < kInlinedProbes; i++) {
2515     // scratch0 points to properties hash.
2516     // Compute the masked index: (hash + i + i * i) & mask.
2517     Register index = scratch0;
2518     // Capacity is smi 2^n.
2519     __ SmiLoadUntag(index, FieldMemOperand(properties, kCapacityOffset));
2520     __ Dsubu(index, index, Operand(1));
2521     __ And(index, index,
2522            Operand(name->Hash() + NameDictionary::GetProbeOffset(i)));
2523 
2524     // Scale the index by multiplying by the entry size.
2525     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
2526     __ Dlsa(index, index, index, 1);  // index *= 3.
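    // Dlsa(index, index, index, 1) computes index + (index << 1), i.e.
    // index * 3, matching the kEntrySize of 3 asserted above.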
2527 
2528     Register entity_name = scratch0;
2529     // Having undefined at this place means the name is not contained.
2530     STATIC_ASSERT(kSmiTagSize == 1);
2531     Register tmp = properties;
2532 
2533     __ Dlsa(tmp, properties, index, kPointerSizeLog2);
2534     __ ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
2535 
2536     DCHECK(!tmp.is(entity_name));
2537     __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
2538     __ Branch(done, eq, entity_name, Operand(tmp));
2539 
2540     // Load the hole ready for use below:
2541     __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
2542 
2543     // Stop if found the property.
2544     __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
2545 
2546     Label good;
2547     __ Branch(&good, eq, entity_name, Operand(tmp));
2548 
2549     // Check if the entry name is not a unique name.
2550     __ ld(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
2551     __ lbu(entity_name,
2552            FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
2553     __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
2554     __ bind(&good);
2555 
2556     // Restore the properties.
2557     __ ld(properties,
2558           FieldMemOperand(receiver, JSObject::kPropertiesOffset));
2559   }
2560 
2561   const int spill_mask =
2562       (ra.bit() | a6.bit() | a5.bit() | a4.bit() | a3.bit() |
2563        a2.bit() | a1.bit() | a0.bit() | v0.bit());
2564 
2565   __ MultiPush(spill_mask);
2566   __ ld(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
2567   __ li(a1, Operand(Handle<Name>(name)));
2568   NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
2569   __ CallStub(&stub);
2570   __ mov(at, v0);
2571   __ MultiPop(spill_mask);
2572 
2573   __ Branch(done, eq, at, Operand(zero_reg));
2574   __ Branch(miss, ne, at, Operand(zero_reg));
2575 }
2576 
void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
2578   // This stub overrides SometimesSetsUpAFrame() to return false.  That means
2579   // we cannot call anything that could cause a GC from this stub.
  // Registers:
  //  dictionary (a0): NameDictionary to probe.
  //  key (a1): the name to look up.
  //  index (a2): will hold the index of the entry if the lookup is
  //              successful; may alias the result register.
  // Returns:
  //  result (v0) is zero if the lookup failed, non-zero otherwise.
2588 
2589   Register result = v0;
2590   Register dictionary = a0;
2591   Register key = a1;
2592   Register index = a2;
2593   Register mask = a3;
2594   Register hash = a4;
2595   Register undefined = a5;
2596   Register entry_key = a6;
2597 
2598   Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
2599 
2600   __ ld(mask, FieldMemOperand(dictionary, kCapacityOffset));
2601   __ SmiUntag(mask);
2602   __ Dsubu(mask, mask, Operand(1));
2603 
2604   __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
2605 
2606   __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
2607 
2608   for (int i = kInlinedProbes; i < kTotalProbes; i++) {
2609     // Compute the masked index: (hash + i + i * i) & mask.
2610     // Capacity is smi 2^n.
2611     if (i > 0) {
      // Add the probe offset (i + i * i) left shifted to avoid right
      // shifting the hash in a separate instruction. The combined value is
      // right shifted by the dsrl below and masked by the And that follows.
2615       DCHECK(NameDictionary::GetProbeOffset(i) <
2616              1 << (32 - Name::kHashFieldOffset));
2617       __ Daddu(index, hash, Operand(
2618           NameDictionary::GetProbeOffset(i) << Name::kHashShift));
2619     } else {
2620       __ mov(index, hash);
2621     }
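    // Adding the probe offset pre-shift costs only one extra instruction
    // per probe, because
    // (hash + (offset << kHashShift)) >> kHashShift
    //     == (hash >> kHashShift) + offset
    // (the low kHashShift bits of hash are unaffected by the addition).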
2622     __ dsrl(index, index, Name::kHashShift);
2623     __ And(index, mask, index);
2624 
2625     // Scale the index by multiplying by the entry size.
2626     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
2627     // index *= 3.
2628     __ Dlsa(index, index, index, 1);
2629 
2630     STATIC_ASSERT(kSmiTagSize == 1);
2631     __ Dlsa(index, dictionary, index, kPointerSizeLog2);
2632     __ ld(entry_key, FieldMemOperand(index, kElementsStartOffset));
2633 
2634     // Having undefined at this place means the name is not contained.
2635     __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
2636 
2637     // Stop if found the property.
2638     __ Branch(&in_dictionary, eq, entry_key, Operand(key));
2639 
2640     if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
2641       // Check if the entry name is not a unique name.
2642       __ ld(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
2643       __ lbu(entry_key,
2644              FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
2645       __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
2646     }
2647   }
2648 
2649   __ bind(&maybe_in_dictionary);
  // If we are doing a negative lookup, then probing failure should be
  // treated as a lookup success. For a positive lookup, probing failure
  // should be treated as lookup failure.
2653   if (mode() == POSITIVE_LOOKUP) {
2654     __ Ret(USE_DELAY_SLOT);
2655     __ mov(result, zero_reg);
2656   }
2657 
2658   __ bind(&in_dictionary);
2659   __ Ret(USE_DELAY_SLOT);
2660   __ li(result, 1);
2661 
2662   __ bind(&not_in_dictionary);
2663   __ Ret(USE_DELAY_SLOT);
2664   __ mov(result, zero_reg);
2665 }
2666 
2667 
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
2669     Isolate* isolate) {
2670   StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
2671   stub1.GetCode();
2672   // Hydrogen code stubs need stub2 at snapshot time.
2673   StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
2674   stub2.GetCode();
2675 }
2676 
2677 
// Takes the input in 3 registers: address_, value_, and object_.  A pointer
// to the value has just been written into the object; now this stub makes
// sure we keep the GC informed.  The word in the object where the value has
// been written is in the address register.
void RecordWriteStub::Generate(MacroAssembler* masm) {
2683   Label skip_to_incremental_noncompacting;
2684   Label skip_to_incremental_compacting;
2685 
2686   // The first two branch+nop instructions are generated with labels so as to
2687   // get the offset fixed up correctly by the bind(Label*) call.  We patch it
2688   // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
2689   // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
2690   // incremental heap marking.
2691   // See RecordWriteStub::Patch for details.
2692   __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
2693   __ nop();
2694   __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
2695   __ nop();
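  // Each patch site above is one branch plus its delay-slot nop, so the
  // second site starts at byte offset 2 * Assembler::kInstrSize; these are
  // exactly the offsets handed to PatchBranchIntoNop at the end of this
  // function.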
2696 
2697   if (remembered_set_action() == EMIT_REMEMBERED_SET) {
2698     __ RememberedSetHelper(object(),
2699                            address(),
2700                            value(),
2701                            save_fp_regs_mode(),
2702                            MacroAssembler::kReturnAtEnd);
2703   }
2704   __ Ret();
2705 
2706   __ bind(&skip_to_incremental_noncompacting);
2707   GenerateIncremental(masm, INCREMENTAL);
2708 
2709   __ bind(&skip_to_incremental_compacting);
2710   GenerateIncremental(masm, INCREMENTAL_COMPACTION);
2711 
2712   // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
2713   // Will be checked in IncrementalMarking::ActivateGeneratedStub.
2714 
2715   PatchBranchIntoNop(masm, 0);
2716   PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
2717 }
2718 
2719 
void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
2721   regs_.Save(masm);
2722 
2723   if (remembered_set_action() == EMIT_REMEMBERED_SET) {
2724     Label dont_need_remembered_set;
2725 
2726     __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
2727     __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
2728                            regs_.scratch0(),
2729                            &dont_need_remembered_set);
2730 
2731     __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
2732                         &dont_need_remembered_set);
2733 
2734     // First notify the incremental marker if necessary, then update the
2735     // remembered set.
2736     CheckNeedsToInformIncrementalMarker(
2737         masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
2738     InformIncrementalMarker(masm);
2739     regs_.Restore(masm);
2740     __ RememberedSetHelper(object(),
2741                            address(),
2742                            value(),
2743                            save_fp_regs_mode(),
2744                            MacroAssembler::kReturnAtEnd);
2745 
2746     __ bind(&dont_need_remembered_set);
2747   }
2748 
2749   CheckNeedsToInformIncrementalMarker(
2750       masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
2751   InformIncrementalMarker(masm);
2752   regs_.Restore(masm);
2753   __ Ret();
2754 }
2755 
2756 
InformIncrementalMarker(MacroAssembler * masm)2757 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
2758   regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
2759   int argument_count = 3;
2760   __ PrepareCallCFunction(argument_count, regs_.scratch0());
2761   Register address =
2762       a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
2763   DCHECK(!address.is(regs_.object()));
2764   DCHECK(!address.is(a0));
2765   __ Move(address, regs_.address());
2766   __ Move(a0, regs_.object());
2767   __ Move(a1, address);
2768   __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
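
  // The callee is expected to have (roughly, in this era of the GC) the
  // signature
  //   void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
  //                                                Object** slot,
  //                                                Isolate* isolate);
  // which is what the a0/a1/a2 setup above provides.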

  AllowExternalCallThatCantCauseGC scope(masm);
  __ CallCFunction(
      ExternalReference::incremental_marking_record_write_function(isolate()),
      argument_count);
  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}


void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
    MacroAssembler* masm,
    OnNoNeedToInformIncrementalMarker on_no_need,
    Mode mode) {
  Label on_black;
  Label need_incremental;
  Label need_incremental_pop_scratch;
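
  // In outline, the code below does the following (a sketch of the control
  // flow, not generated code):
  //   if (!IsBlack(object)) goto no_need;  // Marker has not visited object.
  //   value = *address;
  //   if (mode == INCREMENTAL_COMPACTION && OnEvacuationCandidate(value) &&
  //       !PageSkipsEvacuationSlotRecording(object)) goto need_incremental;
  //   if (IsWhite(value)) goto need_incremental;
  //   no_need: update the remembered set and/or return.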

  // Let's look at the color of the object: if it is not black we don't have
  // to inform the incremental marker.
  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object(),
                           address(),
                           value(),
                           save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&on_black);

  // Get the value from the slot.
  __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));

  if (mode == INCREMENTAL_COMPACTION) {
    Label ensure_not_white;

    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kEvacuationCandidateMask,
                     eq,
                     &ensure_not_white);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
                     eq,
                     &need_incremental);

    __ bind(&ensure_not_white);
  }

  // We need extra registers for this, so we push the object and the address
  // register temporarily.
  __ Push(regs_.object(), regs_.address());
  __ JumpIfWhite(regs_.scratch0(),  // The value.
                 regs_.scratch1(),  // Scratch.
                 regs_.object(),    // Scratch.
                 regs_.address(),   // Scratch.
                 &need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object(),
                           address(),
                           value(),
                           save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  __ bind(&need_incremental);

  // Fall through when we need to inform the incremental marker.
}


void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
  CEntryStub ces(isolate(), 1, kSaveFPRegs);
  __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
  int parameter_count_offset =
      StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
  __ ld(a1, MemOperand(fp, parameter_count_offset));
  if (function_mode() == JS_FUNCTION_STUB_MODE) {
    __ Daddu(a1, a1, Operand(1));
  }
  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
  __ dsll(a1, a1, kPointerSizeLog2);
  __ Ret(USE_DELAY_SLOT);
  __ Daddu(sp, sp, a1);
}

void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
  if (masm->isolate()->function_entry_hook() != NULL) {
    ProfileEntryHookStub stub(masm->isolate());
    __ push(ra);
    __ CallStub(&stub);
    __ pop(ra);
  }
}


void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
  // The entry hook is a "push ra" instruction, followed by a call.
  // Note: on MIPS a "push" is 2 instructions.
  const int32_t kReturnAddressDistanceFromFunctionStart =
      Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);

  // This should contain all kJSCallerSaved registers.
  const RegList kSavedRegs =
      kJSCallerSaved |  // Caller saved registers.
      s5.bit();         // Saved stack pointer.

  // We also save ra, so the count here is one higher than the mask indicates.
  const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;

  // Save all caller-save registers as this may be called from anywhere.
  __ MultiPush(kSavedRegs | ra.bit());

  // Compute the function's address for the first argument.
  __ Dsubu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));

  // The caller's return address is above the saved temporaries.
  // Grab that for the second argument to the hook.
  __ Daddu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
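
  // a0/a1 now match the entry hook's public signature, declared in
  // include/v8.h (in this era of the API) as:
  //   typedef void (*FunctionEntryHook)(uintptr_t function,
  //                                     uintptr_t return_addr_location);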

  // Align the stack if necessary.
  int frame_alignment = masm->ActivationFrameAlignment();
  if (frame_alignment > kPointerSize) {
    __ mov(s5, sp);
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    __ And(sp, sp, Operand(-frame_alignment));
  }

  __ Dsubu(sp, sp, kCArgsSlotsSize);
#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
  int64_t entry_hook =
      reinterpret_cast<int64_t>(isolate()->function_entry_hook());
  __ li(t9, Operand(entry_hook));
#else
  // Under the simulator we need to indirect the entry hook through a
  // trampoline function at a known address.
  // It additionally takes an isolate as a third parameter.
  __ li(a2, Operand(ExternalReference::isolate_address(isolate())));

  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
  __ li(t9, Operand(ExternalReference(&dispatcher,
                                      ExternalReference::BUILTIN_CALL,
                                      isolate())));
#endif
  // Call the C function through t9 to conform to the ABI for PIC.
  __ Call(t9);

  // Restore the stack pointer if needed.
  if (frame_alignment > kPointerSize) {
    __ mov(sp, s5);
  } else {
    __ Daddu(sp, sp, kCArgsSlotsSize);
  }

  // Also pop ra to get Ret(0).
  __ MultiPop(kSavedRegs | ra.bit());
  __ Ret();
}


template<class T>
static void CreateArrayDispatch(MacroAssembler* masm,
                                AllocationSiteOverrideMode mode) {
  if (mode == DISABLE_ALLOCATION_SITES) {
    T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    int last_index = GetSequenceIndexFromFastElementsKind(
        TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      T stub(masm->isolate(), kind);
      __ TailCallStub(&stub, eq, a3, Operand(kind));
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}


static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
                                           AllocationSiteOverrideMode mode) {
  // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
  // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
  // a0 - number of arguments
  // a1 - constructor?
  // sp[0] - last argument
  Label normal_sequence;
  if (mode == DONT_OVERRIDE) {
    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
    STATIC_ASSERT(FAST_ELEMENTS == 2);
    STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
    STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
    STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
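
    // These asserts pin down the property the test below relies on: packed
    // kinds are even, each holey counterpart is the next (odd) value, and so
    // the low bit of the kind by itself tells us whether it is already holey.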

    // Is the low bit set? If so, we are holey and that is good.
    __ And(at, a3, Operand(1));
    __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
  }
  // Look at the first argument.
  __ ld(a5, MemOperand(sp, 0));
  __ Branch(&normal_sequence, eq, a5, Operand(zero_reg));

  if (mode == DISABLE_ALLOCATION_SITES) {
    ElementsKind initial = GetInitialFastElementsKind();
    ElementsKind holey_initial = GetHoleyElementsKind(initial);

    ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
                                                  holey_initial,
                                                  DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub_holey);

    __ bind(&normal_sequence);
    ArraySingleArgumentConstructorStub stub(masm->isolate(),
                                            initial,
                                            DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    // We are going to create a holey array, but our kind is non-holey.
    // Fix kind and retry (only if we have an allocation site in the slot).
    __ Daddu(a3, a3, Operand(1));

    if (FLAG_debug_code) {
      __ ld(a5, FieldMemOperand(a2, 0));
      __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
      __ Assert(eq, kExpectedAllocationSite, a5, Operand(at));
    }

    // Save the resulting elements kind in type info. We can't just store a3
    // in the AllocationSite::transition_info field because the elements kind
    // is restricted to a portion of the field; the upper bits need to be
    // left alone.
    STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
    __ ld(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
    __ Daddu(a4, a4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
    __ sd(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
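
    // Worked example (assuming kFastElementsKindPackedToHoley == 1, the
    // distance from a packed kind to its holey counterpart): a site that
    // recorded FAST_SMI_ELEMENTS (0) now records FAST_HOLEY_SMI_ELEMENTS (1).
    // Because the recorded kind is packed (even), adding the Smi-tagged 1
    // cannot carry into the upper bits of transition_info.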

    __ bind(&normal_sequence);
    int last_index = GetSequenceIndexFromFastElementsKind(
        TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
      __ TailCallStub(&stub, eq, a3, Operand(kind));
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}


template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
  int to_index = GetSequenceIndexFromFastElementsKind(
      TERMINAL_FAST_ELEMENTS_KIND);
  for (int i = 0; i <= to_index; ++i) {
    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
    T stub(isolate, kind);
    stub.GetCode();
    if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
      T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
      stub1.GetCode();
    }
  }
}

void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
      isolate);
  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
      isolate);
  ArrayNArgumentsConstructorStub stub(isolate);
  stub.GetCode();
  ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
  for (int i = 0; i < 2; i++) {
    // For internal arrays we only need a few things.
    InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
    stubh1.GetCode();
    InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
    stubh2.GetCode();
  }
}


void ArrayConstructorStub::GenerateDispatchToArrayStub(
    MacroAssembler* masm,
    AllocationSiteOverrideMode mode) {
  Label not_zero_case, not_one_case;
  __ And(at, a0, a0);
  __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);

  __ bind(&not_zero_case);
  __ Branch(&not_one_case, gt, a0, Operand(1));
  CreateArrayDispatchOneArgument(masm, mode);

  __ bind(&not_one_case);
  ArrayNArgumentsConstructorStub stub(masm->isolate());
  __ TailCallStub(&stub);
}


void ArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0 : argc (only if argument_count() == ANY)
  //  -- a1 : constructor
  //  -- a2 : AllocationSite or undefined
  //  -- a3 : new target
  //  -- sp[0] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions, which always have maps.

    // The initial map for the builtin Array function should be a map.
    __ ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
    // The SmiTst check will catch both a NULL pointer and a Smi.
    __ SmiTst(a4, at);
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
        at, Operand(zero_reg));
    __ GetObjectType(a4, a4, a5);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
        a5, Operand(MAP_TYPE));

    // We should either have undefined in a2 or a valid AllocationSite.
    __ AssertUndefinedOrAllocationSite(a2, a4);
  }

  // Enter the context of the Array function.
  __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));

  Label subclassing;
  __ Branch(&subclassing, ne, a1, Operand(a3));

  Label no_info;
  // Get the elements kind and dispatch on it.
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  __ Branch(&no_info, eq, a2, Operand(at));

  __ ld(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
  __ SmiUntag(a3);
  STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
  __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);

  __ bind(&no_info);
  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);

  // Subclassing.
  __ bind(&subclassing);
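  // What follows overwrites the receiver slot (which should sit at
  // sp + argc * kPointerSize) with the constructor, then pushes the new
  // target and the AllocationSite, so Runtime::kNewArray sees argc + 3
  // stack arguments.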
  __ Dlsa(at, sp, a0, kPointerSizeLog2);
  __ sd(a1, MemOperand(at));
  __ li(at, Operand(3));
  __ Daddu(a0, a0, at);
  __ Push(a3, a2);
  __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}


void InternalArrayConstructorStub::GenerateCase(
    MacroAssembler* masm, ElementsKind kind) {

  InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
  __ TailCallStub(&stub0, lo, a0, Operand(1));

  ArrayNArgumentsConstructorStub stubN(isolate());
  __ TailCallStub(&stubN, hi, a0, Operand(1));

  if (IsFastPackedElementsKind(kind)) {
    // We might need to create a holey array; look at the first argument.
    __ ld(at, MemOperand(sp, 0));

    InternalArraySingleArgumentConstructorStub
        stub1_holey(isolate(), GetHoleyElementsKind(kind));
    __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
  }

  InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
  __ TailCallStub(&stub1);
}


void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0 : argc
  //  -- a1 : constructor
  //  -- sp[0] : return address
  //  -- sp[4] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions, which always have maps.

    // The initial map for the builtin Array function should be a map.
    __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
    // The SmiTst check will catch both a NULL pointer and a Smi.
    __ SmiTst(a3, at);
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
        at, Operand(zero_reg));
    __ GetObjectType(a3, a3, a4);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
        a4, Operand(MAP_TYPE));
  }

  // Figure out the right elements kind.
  __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));

  // Load the map's "bit field 2" into a3. We only need the first byte,
  // but the following bit field extraction takes care of that anyway.
  __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ DecodeField<Map::ElementsKindBits>(a3);

  if (FLAG_debug_code) {
    Label done;
    __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
    __ Assert(
        eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
        a3, Operand(FAST_HOLEY_ELEMENTS));
    __ bind(&done);
  }

  Label fast_elements_case;
  __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
  GenerateCase(masm, FAST_HOLEY_ELEMENTS);

  __ bind(&fast_elements_case);
  GenerateCase(masm, FAST_ELEMENTS);
}

static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  int64_t offset = (ref0.address() - ref1.address());
  DCHECK(static_cast<int>(offset) == offset);
  return static_cast<int>(offset);
}


// Calls an API function.  Allocates a HandleScope, extracts the returned
// value from the handle, and propagates exceptions.  Restores the context.
// stack_space is the space to be unwound on exit (includes the call JS
// arguments space and the additional space allocated for the fast call).
static void CallApiFunctionAndReturn(
    MacroAssembler* masm, Register function_address,
    ExternalReference thunk_ref, int stack_space, int32_t stack_space_offset,
    MemOperand return_value_operand, MemOperand* context_restore_operand) {
  Isolate* isolate = masm->isolate();
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate);
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(isolate), next_address);
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(isolate), next_address);
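
  // All three addresses above point into the isolate's HandleScopeData,
  // whose layout (in this era, roughly) begins:
  //   struct HandleScopeData {
  //     Object** next;   // kNextOffset == 0
  //     Object** limit;  // kLimitOffset
  //     int level;       // kLevelOffset; a 32-bit field, hence lw/sw below
  //     ...
  //   };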

  DCHECK(function_address.is(a1) || function_address.is(a2));

  Label profiler_disabled;
  Label end_profiler_check;
  __ li(t9, Operand(ExternalReference::is_profiling_address(isolate)));
  __ lb(t9, MemOperand(t9, 0));
  __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));

  // Additional parameter is the address of the actual callback.
  __ li(t9, Operand(thunk_ref));
  __ jmp(&end_profiler_check);

  __ bind(&profiler_disabled);
  __ mov(t9, function_address);
  __ bind(&end_profiler_check);

  // Allocate HandleScope in callee-save registers.
  __ li(s3, Operand(next_address));
  __ ld(s0, MemOperand(s3, kNextOffset));
  __ ld(s1, MemOperand(s3, kLimitOffset));
  __ lw(s2, MemOperand(s3, kLevelOffset));
  __ Addu(s2, s2, Operand(1));
  __ sw(s2, MemOperand(s3, kLevelOffset));

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, a0);
    __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  // The native call returns to the DirectCEntry stub, which redirects to the
  // return address pushed on the stack (it could have moved after GC).
  // The DirectCEntry stub itself is generated early and never moves.
  DirectCEntryStub stub(isolate);
  stub.GenerateCall(masm, t9);

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, a0);
    __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;

  // Load value from ReturnValue.
  __ ld(v0, return_value_operand);
  __ bind(&return_value_loaded);

  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  __ sd(s0, MemOperand(s3, kNextOffset));
  if (__ emit_debug_code()) {
    __ lw(a1, MemOperand(s3, kLevelOffset));
    __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
  }
  __ Subu(s2, s2, Operand(1));
  __ sw(s2, MemOperand(s3, kLevelOffset));
  __ ld(at, MemOperand(s3, kLimitOffset));
  __ Branch(&delete_allocated_handles, ne, s1, Operand(at));

  // Leave the API exit frame.
  __ bind(&leave_exit_frame);

  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    __ ld(cp, *context_restore_operand);
  }
  if (stack_space_offset != kInvalidStackOffset) {
    DCHECK(kCArgsSlotsSize == 0);
    __ ld(s0, MemOperand(sp, stack_space_offset));
  } else {
    __ li(s0, Operand(stack_space));
  }
  __ LeaveExitFrame(false, s0, !restore_context, NO_EMIT_RETURN,
                    stack_space_offset != kInvalidStackOffset);

  // Check if the function scheduled an exception.
  __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
  __ li(at, Operand(ExternalReference::scheduled_exception_address(isolate)));
  __ ld(a5, MemOperand(at));
  __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));

  __ Ret();

  // Re-throw by promoting a scheduled exception.
  __ bind(&promote_scheduled_exception);
  __ TailCallRuntime(Runtime::kPromoteScheduledException);

  // HandleScope limit has changed. Delete allocated extensions.
  __ bind(&delete_allocated_handles);
  __ sd(s1, MemOperand(s3, kLimitOffset));
  __ mov(s0, v0);
  __ mov(a0, v0);
  __ PrepareCallCFunction(1, s1);
  __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
  __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
                   1);
  __ mov(v0, s0);
  __ jmp(&leave_exit_frame);
}

void CallApiCallbackStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0                  : callee
  //  -- a4                  : call_data
  //  -- a2                  : holder
  //  -- a1                  : api_function_address
  //  -- cp                  : context
  //  --
  //  -- sp[0]               : last argument
  //  -- ...
  //  -- sp[(argc - 1) * 8]  : first argument
  //  -- sp[argc * 8]        : receiver
  // -----------------------------------

  Register callee = a0;
  Register call_data = a4;
  Register holder = a2;
  Register api_function_address = a1;
  Register context = cp;

  typedef FunctionCallbackArguments FCA;

  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
  STATIC_ASSERT(FCA::kCalleeIndex == 5);
  STATIC_ASSERT(FCA::kDataIndex == 4);
  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
  STATIC_ASSERT(FCA::kIsolateIndex == 1);
  STATIC_ASSERT(FCA::kHolderIndex == 0);
  STATIC_ASSERT(FCA::kNewTargetIndex == 7);
  STATIC_ASSERT(FCA::kArgsLength == 8);
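
  // After the pushes below, the implicit_args array (a sketch; one pointer
  // per slot, sp-relative, matching the indices asserted above) should be:
  //   sp[7 * kPointerSize] : new target       (undefined)
  //   sp[6 * kPointerSize] : context
  //   sp[5 * kPointerSize] : callee
  //   sp[4 * kPointerSize] : call data
  //   sp[3 * kPointerSize] : return value     (undefined)
  //   sp[2 * kPointerSize] : return default   (undefined)
  //   sp[1 * kPointerSize] : isolate
  //   sp[0 * kPointerSize] : holder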

  // new target
  __ PushRoot(Heap::kUndefinedValueRootIndex);

  // Save context, callee and call data.
  __ Push(context, callee, call_data);
  if (!is_lazy()) {
    // Load context from callee.
    __ ld(context, FieldMemOperand(callee, JSFunction::kContextOffset));
  }

  Register scratch = call_data;
  if (!call_data_undefined()) {
    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  }
  // Push return value and default return value.
  __ Push(scratch, scratch);
  __ li(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
  // Push isolate and holder.
  __ Push(scratch, holder);

  // Prepare arguments.
  __ mov(scratch, sp);

  // Allocate the v8::FunctionCallbackInfo structure in the arguments' space,
  // since it's not controlled by the GC.
  const int kApiStackSpace = 3;

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  DCHECK(!api_function_address.is(a0) && !scratch.is(a0));
  // a0 = FunctionCallbackInfo&
  // The arguments structure sits just above the return address.
  __ Daddu(a0, sp, Operand(1 * kPointerSize));
  // FunctionCallbackInfo::implicit_args_
  __ sd(scratch, MemOperand(a0, 0 * kPointerSize));
  // FunctionCallbackInfo::values_
  __ Daddu(at, scratch,
           Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
  __ sd(at, MemOperand(a0, 1 * kPointerSize));
  // FunctionCallbackInfo::length_ = argc
  // Stored as an int field; 32-bit integers within a struct on the stack are
  // always left-justified by the n64 ABI.
  __ li(at, Operand(argc()));
  __ sw(at, MemOperand(a0, 2 * kPointerSize));

  ExternalReference thunk_ref =
      ExternalReference::invoke_function_callback(masm->isolate());

  AllowExternalCallThatCantCauseGC scope(masm);
  MemOperand context_restore_operand(
      fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
  // Stores return the first JS argument (the stored value) as their result.
  int return_value_offset = 0;
  if (is_store()) {
    return_value_offset = 2 + FCA::kArgsLength;
  } else {
    return_value_offset = 2 + FCA::kReturnValueOffset;
  }
  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
  int stack_space = 0;
  int32_t stack_space_offset = 3 * kPointerSize;
  stack_space = argc() + FCA::kArgsLength + 1;
  // TODO(adamk): Why are we clobbering this immediately?
  stack_space_offset = kInvalidStackOffset;
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
                           stack_space_offset, return_value_operand,
                           &context_restore_operand);
}


void CallApiGetterStub::Generate(MacroAssembler* masm) {
  // Build the v8::PropertyCallbackInfo::args_ array on the stack and push the
  // property name below the exit frame so the GC is aware of them.
  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
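
  // After the stores below, the stack (a sketch; name handle at sp[0], args_
  // array above it, matching the indices asserted above plus one) should be:
  //   sp[7 * kPointerSize] : this (receiver)
  //   sp[6 * kPointerSize] : data
  //   sp[5 * kPointerSize] : return value     (undefined)
  //   sp[4 * kPointerSize] : return default   (undefined)
  //   sp[3 * kPointerSize] : isolate
  //   sp[2 * kPointerSize] : holder
  //   sp[1 * kPointerSize] : should_throw_on_error (false, i.e. Smi zero)
  //   sp[0 * kPointerSize] : name handle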

  Register receiver = ApiGetterDescriptor::ReceiverRegister();
  Register holder = ApiGetterDescriptor::HolderRegister();
  Register callback = ApiGetterDescriptor::CallbackRegister();
  Register scratch = a4;
  DCHECK(!AreAliased(receiver, holder, callback, scratch));

  Register api_function_address = a2;

  // Here and below, +1 is for name() pushed after the args_ array.
  typedef PropertyCallbackArguments PCA;
  __ Dsubu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
  __ sd(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
  __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
  __ sd(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ sd(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
  __ sd(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
                                    kPointerSize));
  __ li(scratch, Operand(ExternalReference::isolate_address(isolate())));
  __ sd(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
  __ sd(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
  // should_throw_on_error -> false
  DCHECK(Smi::kZero == nullptr);
  __ sd(zero_reg,
        MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
  __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
  __ sd(scratch, MemOperand(sp, 0 * kPointerSize));

  // v8::PropertyCallbackInfo::args_ array and name handle.
  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;

  // Load the address of the v8::PropertyCallbackInfo::args_ array and the
  // name handle.
  __ mov(a0, sp);                               // a0 = Handle<Name>
  __ Daddu(a1, a0, Operand(1 * kPointerSize));  // a1 = v8::PCI::args_

  const int kApiStackSpace = 1;
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  // Create the v8::PropertyCallbackInfo object on the stack and initialize
  // its args_ field.
  __ sd(a1, MemOperand(sp, 1 * kPointerSize));
  __ Daddu(a1, sp, Operand(1 * kPointerSize));
  // a1 = v8::PropertyCallbackInfo&

  ExternalReference thunk_ref =
      ExternalReference::invoke_accessor_getter_callback(isolate());

  __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
  __ ld(api_function_address,
        FieldMemOperand(scratch, Foreign::kForeignAddressOffset));

  // +3 is to skip the prolog, the return address, and the name handle.
  MemOperand return_value_operand(
      fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                           kStackUnwindSpace, kInvalidStackOffset,
                           return_value_operand, NULL);
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS64