1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #if V8_TARGET_ARCH_MIPS
6
7 #include "src/code-stubs.h"
8 #include "src/api-arguments.h"
9 #include "src/base/bits.h"
10 #include "src/bootstrapper.h"
11 #include "src/codegen.h"
12 #include "src/ic/handler-compiler.h"
13 #include "src/ic/ic.h"
14 #include "src/ic/stub-cache.h"
15 #include "src/isolate.h"
16 #include "src/mips/code-stubs-mips.h"
17 #include "src/regexp/jsregexp.h"
18 #include "src/regexp/regexp-macro-assembler.h"
19 #include "src/runtime/runtime.h"
20
21 namespace v8 {
22 namespace internal {
23
24 #define __ ACCESS_MASM(masm)
25
26 void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
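  // A sketch of the incoming state (assuming the usual ArrayNArguments
  // calling convention): a0 holds the argument count, while a1 and a2
  // presumably carry the constructor and the allocation site. The code below
  // overwrites the receiver slot at sp + argc * kPointerSize with a1, pushes
  // a1 and a2, and bumps the argument count by 3 before tail-calling
  // Runtime::kNewArray.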
27 __ sll(t9, a0, kPointerSizeLog2);
28 __ Addu(t9, sp, t9);
29 __ sw(a1, MemOperand(t9, 0));
30 __ Push(a1);
31 __ Push(a2);
32 __ Addu(a0, a0, Operand(3));
33 __ TailCallRuntime(Runtime::kNewArray);
34 }
35
36 void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
37 Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
38 descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
39 }
40
41 void FastFunctionBindStub::InitializeDescriptor(
42 CodeStubDescriptor* descriptor) {
43 Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
44 descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
45 }
46
47 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
48 Condition cc);
49 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
50 Register lhs,
51 Register rhs,
52 Label* rhs_not_nan,
53 Label* slow,
54 bool strict);
55 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
56 Register lhs,
57 Register rhs);
58
59
60 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
61 ExternalReference miss) {
62 // Update the static counter each time a new code stub is generated.
63 isolate()->counters()->code_stubs()->Increment();
64
65 CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
66 int param_count = descriptor.GetRegisterParameterCount();
67 {
68 // Call the runtime system in a fresh internal frame.
69 FrameScope scope(masm, StackFrame::INTERNAL);
70 DCHECK(param_count == 0 ||
71 a0.is(descriptor.GetRegisterParameter(param_count - 1)));
72 // Push arguments, adjust sp.
73 __ Subu(sp, sp, Operand(param_count * kPointerSize));
74 for (int i = 0; i < param_count; ++i) {
75 // Store argument to stack.
76 __ sw(descriptor.GetRegisterParameter(i),
77 MemOperand(sp, (param_count - 1 - i) * kPointerSize));
78 }
79 __ CallExternalReference(miss, param_count);
80 }
81
82 __ Ret();
83 }
84
85
86 void DoubleToIStub::Generate(MacroAssembler* masm) {
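  // Truncates a double to a 32-bit integer. The fast path converts with
  // Trunc_w_d and checks the FCSR exception flags; if the FPU conversion
  // overflows or the input is a NaN (or the fast path is skipped), the slow
  // path below recomputes the low 32 bits of the truncated value directly
  // from the exponent and mantissa words.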
87 Label out_of_range, only_low, negate, done;
88 Register input_reg = source();
89 Register result_reg = destination();
90
91 int double_offset = offset();
92 // Account for saved regs if input is sp.
93 if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
94
95 Register scratch =
96 GetRegisterThatIsNotOneOf(input_reg, result_reg);
97 Register scratch2 =
98 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
99 Register scratch3 =
100 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
101 DoubleRegister double_scratch = kLithiumScratchDouble;
102
103 __ Push(scratch, scratch2, scratch3);
104
105 if (!skip_fastpath()) {
106 // Load double input.
107 __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
108
109 // Clear cumulative exception flags and save the FCSR.
110 __ cfc1(scratch2, FCSR);
111 __ ctc1(zero_reg, FCSR);
112
113 // Try a conversion to a signed integer.
114 __ Trunc_w_d(double_scratch, double_scratch);
115 // Move the converted value into the result register.
116 __ mfc1(scratch3, double_scratch);
117
118 // Retrieve and restore the FCSR.
119 __ cfc1(scratch, FCSR);
120 __ ctc1(scratch2, FCSR);
121
122 // Check for overflow and NaNs.
123 __ And(
124 scratch, scratch,
125 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
126 | kFCSRInvalidOpFlagMask);
127 // If we had no exceptions then set result_reg and we are done.
128 Label error;
129 __ Branch(&error, ne, scratch, Operand(zero_reg));
130 __ Move(result_reg, scratch3);
131 __ Branch(&done);
132 __ bind(&error);
133 }
134
135 // Load the double value and perform a manual truncation.
136 Register input_high = scratch2;
137 Register input_low = scratch3;
138
139 __ lw(input_low,
140 MemOperand(input_reg, double_offset + Register::kMantissaOffset));
141 __ lw(input_high,
142 MemOperand(input_reg, double_offset + Register::kExponentOffset));
143
144 Label normal_exponent, restore_sign;
145 // Extract the biased exponent in result.
146 __ Ext(result_reg,
147 input_high,
148 HeapNumber::kExponentShift,
149 HeapNumber::kExponentBits);
150
151 // Check for Infinity and NaNs, which should return 0.
152 __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
153 __ Movz(result_reg, zero_reg, scratch);
154 __ Branch(&done, eq, scratch, Operand(zero_reg));
155
156 // Express exponent as delta to (number of mantissa bits + 31).
157 __ Subu(result_reg,
158 result_reg,
159 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
160
161 // If the delta is strictly positive, all bits would be shifted away,
162 // which means that we can return 0.
163 __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
164 __ mov(result_reg, zero_reg);
165 __ Branch(&done);
166
167 __ bind(&normal_exponent);
168 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
169 // Calculate shift.
170 __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
171
172 // Save the sign.
173 Register sign = result_reg;
174 result_reg = no_reg;
175 __ And(sign, input_high, Operand(HeapNumber::kSignMask));
176
177 // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
178 // to check for this specific case.
179 Label high_shift_needed, high_shift_done;
180 __ Branch(&high_shift_needed, lt, scratch, Operand(32));
181 __ mov(input_high, zero_reg);
182 __ Branch(&high_shift_done);
183 __ bind(&high_shift_needed);
184
185 // Set the implicit 1 before the mantissa part in input_high.
186 __ Or(input_high,
187 input_high,
188 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
189 // Shift the mantissa bits to the correct position.
190 // We don't need to clear non-mantissa bits as they will be shifted away.
191 // If they weren't, it would mean that the answer is in the 32bit range.
192 __ sllv(input_high, input_high, scratch);
193
194 __ bind(&high_shift_done);
195
196 // Replace the shifted bits with bits from the lower mantissa word.
197 Label pos_shift, shift_done;
198 __ li(at, 32);
199 __ subu(scratch, at, scratch);
200 __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
201
202 // Negate scratch.
203 __ Subu(scratch, zero_reg, scratch);
204 __ sllv(input_low, input_low, scratch);
205 __ Branch(&shift_done);
206
207 __ bind(&pos_shift);
208 __ srlv(input_low, input_low, scratch);
209
210 __ bind(&shift_done);
211 __ Or(input_high, input_high, Operand(input_low));
212 // Restore sign if necessary.
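  // scratch is non-zero iff the sign bit was set: Subu produces the negated
  // value, and Movz overwrites it with the positive value only when scratch
  // (the saved sign) is zero.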
213 __ mov(scratch, sign);
214 result_reg = sign;
215 sign = no_reg;
216 __ Subu(result_reg, zero_reg, input_high);
217 __ Movz(result_reg, input_high, scratch);
218
219 __ bind(&done);
220
221 __ Pop(scratch, scratch2, scratch3);
222 __ Ret();
223 }
224
225
226 // Handle the case where the lhs and rhs are the same object.
227 // Equality is almost reflexive (everything but NaN), so this is a test
228 // for "identity and not NaN".
229 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
230 Condition cc) {
231 Label not_identical;
232 Label heap_number, return_equal;
233 Register exp_mask_reg = t5;
234
235   __ Branch(&not_identical, ne, a0, Operand(a1));
236
237 __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
238
239   // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
240   // so we do the second best thing - test it ourselves.
241   // The operands are identical and not both Smis, so neither of them is a
242   // Smi. If the object is not a heap number, then return equal.
243 __ GetObjectType(a0, t4, t4);
244 if (cc == less || cc == greater) {
245 // Call runtime on identical JSObjects.
246 __ Branch(slow, greater, t4, Operand(FIRST_JS_RECEIVER_TYPE));
247 // Call runtime on identical symbols since we need to throw a TypeError.
248 __ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
249 // Call runtime on identical SIMD values since we must throw a TypeError.
250 __ Branch(slow, eq, t4, Operand(SIMD128_VALUE_TYPE));
251 } else {
252 __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
253 // Comparing JS objects with <=, >= is complicated.
254 if (cc != eq) {
255 __ Branch(slow, greater, t4, Operand(FIRST_JS_RECEIVER_TYPE));
256 // Call runtime on identical symbols since we need to throw a TypeError.
257 __ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
258 // Call runtime on identical SIMD values since we must throw a TypeError.
259 __ Branch(slow, eq, t4, Operand(SIMD128_VALUE_TYPE));
260 // Normally here we fall through to return_equal, but undefined is
261 // special: (undefined == undefined) == true, but
262 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
263 if (cc == less_equal || cc == greater_equal) {
264 __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
265 __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
266 __ Branch(&return_equal, ne, a0, Operand(t2));
267 DCHECK(is_int16(GREATER) && is_int16(LESS));
268 __ Ret(USE_DELAY_SLOT);
269 if (cc == le) {
270 // undefined <= undefined should fail.
271 __ li(v0, Operand(GREATER));
272 } else {
273 // undefined >= undefined should fail.
274 __ li(v0, Operand(LESS));
275 }
276 }
277 }
278 }
279
280 __ bind(&return_equal);
281 DCHECK(is_int16(GREATER) && is_int16(LESS));
282 __ Ret(USE_DELAY_SLOT);
283 if (cc == less) {
284 __ li(v0, Operand(GREATER)); // Things aren't less than themselves.
285 } else if (cc == greater) {
286 __ li(v0, Operand(LESS)); // Things aren't greater than themselves.
287 } else {
288 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
289 }
290
291 // For less and greater we don't have to check for NaN since the result of
292 // x < x is false regardless. For the others here is some code to check
293 // for NaN.
294 if (cc != lt && cc != gt) {
295 __ bind(&heap_number);
296 // It is a heap number, so return non-equal if it's NaN and equal if it's
297 // not NaN.
298
299 // The representation of NaN values has all exponent bits (52..62) set,
300 // and not all mantissa bits (0..51) clear.
301 // Read top bits of double representation (second word of value).
302 __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
303 // Test that exponent bits are all set.
304 __ And(t3, t2, Operand(exp_mask_reg));
305 // If all bits not set (ne cond), then not a NaN, objects are equal.
306 __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
307
308 // Shift out flag and all exponent bits, retaining only mantissa.
309 __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
310 // Or with all low-bits of mantissa.
311 __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
312 __ Or(v0, t3, Operand(t2));
313 // For equal we already have the right value in v0: Return zero (equal)
314 // if all bits in mantissa are zero (it's an Infinity) and non-zero if
315 // not (it's a NaN). For <= and >= we need to load v0 with the failing
316 // value if it's a NaN.
317 if (cc != eq) {
318 // All-zero means Infinity means equal.
319 __ Ret(eq, v0, Operand(zero_reg));
320 DCHECK(is_int16(GREATER) && is_int16(LESS));
321 __ Ret(USE_DELAY_SLOT);
322 if (cc == le) {
323 __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
324 } else {
325 __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
326 }
327 }
328 }
329 // No fall through here.
330
331   __ bind(&not_identical);
332 }
333
334
335 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
336 Register lhs,
337 Register rhs,
338 Label* both_loaded_as_doubles,
339 Label* slow,
340 bool strict) {
341 DCHECK((lhs.is(a0) && rhs.is(a1)) ||
342 (lhs.is(a1) && rhs.is(a0)));
343
344 Label lhs_is_smi;
345 __ JumpIfSmi(lhs, &lhs_is_smi);
346 // Rhs is a Smi.
347 // Check whether the non-smi is a heap number.
348 __ GetObjectType(lhs, t4, t4);
349 if (strict) {
350 // If lhs was not a number and rhs was a Smi then strict equality cannot
351 // succeed. Return non-equal (lhs is already not zero).
352 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
353 __ mov(v0, lhs);
354 } else {
355 // Smi compared non-strictly with a non-Smi non-heap-number. Call
356 // the runtime.
357 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
358 }
359
360 // Rhs is a smi, lhs is a number.
361 // Convert smi rhs to double.
362 __ sra(at, rhs, kSmiTagSize);
363 __ mtc1(at, f14);
364 __ cvt_d_w(f14, f14);
365 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
366
367 // We now have both loaded as doubles.
368 __ jmp(both_loaded_as_doubles);
369
370 __ bind(&lhs_is_smi);
371 // Lhs is a Smi. Check whether the non-smi is a heap number.
372 __ GetObjectType(rhs, t4, t4);
373 if (strict) {
374     // If rhs was not a number and lhs was a Smi then strict equality cannot
375     // succeed. Return non-equal.
376 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
377 __ li(v0, Operand(1));
378 } else {
379 // Smi compared non-strictly with a non-Smi non-heap-number. Call
380 // the runtime.
381 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
382 }
383
384 // Lhs is a smi, rhs is a number.
385 // Convert smi lhs to double.
386 __ sra(at, lhs, kSmiTagSize);
387 __ mtc1(at, f12);
388 __ cvt_d_w(f12, f12);
389 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
390 // Fall through to both_loaded_as_doubles.
391 }
392
393
394 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
395 Register lhs,
396 Register rhs) {
397 // If either operand is a JS object or an oddball value, then they are
398 // not equal since their pointers are different.
399 // There is no test for undetectability in strict equality.
400 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
401 Label first_non_object;
402 // Get the type of the first operand into a2 and compare it with
403 // FIRST_JS_RECEIVER_TYPE.
404 __ GetObjectType(lhs, a2, a2);
405 __ Branch(&first_non_object, less, a2, Operand(FIRST_JS_RECEIVER_TYPE));
406
407 // Return non-zero.
408 Label return_not_equal;
409 __ bind(&return_not_equal);
410 __ Ret(USE_DELAY_SLOT);
411 __ li(v0, Operand(1));
412
413 __ bind(&first_non_object);
414 // Check for oddballs: true, false, null, undefined.
415 __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
416
417 __ GetObjectType(rhs, a3, a3);
418 __ Branch(&return_not_equal, greater, a3, Operand(FIRST_JS_RECEIVER_TYPE));
419
420 // Check for oddballs: true, false, null, undefined.
421 __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
422
423 // Now that we have the types we might as well check for
424 // internalized-internalized.
425 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
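  // OR the two instance types together: the combined value has both the
  // "not a string" and "not internalized" bits clear only if both operands
  // are internalized strings, and two distinct internalized strings can
  // never be equal.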
426 __ Or(a2, a2, Operand(a3));
427 __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
428 __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
429 }
430
431
432 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
433 Register lhs,
434 Register rhs,
435 Label* both_loaded_as_doubles,
436 Label* not_heap_numbers,
437 Label* slow) {
438 __ GetObjectType(lhs, a3, a2);
439 __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
440 __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
441 // If first was a heap number & second wasn't, go to slow case.
442 __ Branch(slow, ne, a3, Operand(a2));
443
444 // Both are heap numbers. Load them up then jump to the code we have
445 // for that.
446 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
447 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
448
449 __ jmp(both_loaded_as_doubles);
450 }
451
452
453 // Fast negative check for internalized-to-internalized equality.
454 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
455 Register lhs, Register rhs,
456 Label* possible_strings,
457 Label* runtime_call) {
458 DCHECK((lhs.is(a0) && rhs.is(a1)) ||
459 (lhs.is(a1) && rhs.is(a0)));
460
461   // a2 is the object type of lhs.
462 Label object_test, return_equal, return_unequal, undetectable;
463 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
464 __ And(at, a2, Operand(kIsNotStringMask));
465 __ Branch(&object_test, ne, at, Operand(zero_reg));
466 __ And(at, a2, Operand(kIsNotInternalizedMask));
467 __ Branch(possible_strings, ne, at, Operand(zero_reg));
468 __ GetObjectType(rhs, a3, a3);
469 __ Branch(runtime_call, ge, a3, Operand(FIRST_NONSTRING_TYPE));
470 __ And(at, a3, Operand(kIsNotInternalizedMask));
471 __ Branch(possible_strings, ne, at, Operand(zero_reg));
472
473 // Both are internalized. We already checked they weren't the same pointer so
474 // they are not equal. Return non-equal by returning the non-zero object
475 // pointer in v0.
476 __ Ret(USE_DELAY_SLOT);
477 __ mov(v0, a0); // In delay slot.
478
479 __ bind(&object_test);
480 __ lw(a2, FieldMemOperand(lhs, HeapObject::kMapOffset));
481 __ lw(a3, FieldMemOperand(rhs, HeapObject::kMapOffset));
482 __ lbu(t0, FieldMemOperand(a2, Map::kBitFieldOffset));
483 __ lbu(t1, FieldMemOperand(a3, Map::kBitFieldOffset));
484 __ And(at, t0, Operand(1 << Map::kIsUndetectable));
485 __ Branch(&undetectable, ne, at, Operand(zero_reg));
486 __ And(at, t1, Operand(1 << Map::kIsUndetectable));
487 __ Branch(&return_unequal, ne, at, Operand(zero_reg));
488
489 __ GetInstanceType(a2, a2);
490 __ Branch(runtime_call, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
491 __ GetInstanceType(a3, a3);
492 __ Branch(runtime_call, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE));
493
494 __ bind(&return_unequal);
495 // Return non-equal by returning the non-zero object pointer in v0.
496 __ Ret(USE_DELAY_SLOT);
497 __ mov(v0, a0); // In delay slot.
498
499 __ bind(&undetectable);
500 __ And(at, t1, Operand(1 << Map::kIsUndetectable));
501 __ Branch(&return_unequal, eq, at, Operand(zero_reg));
502
503 // If both sides are JSReceivers, then the result is false according to
504 // the HTML specification, which says that only comparisons with null or
505 // undefined are affected by special casing for document.all.
506 __ GetInstanceType(a2, a2);
507 __ Branch(&return_equal, eq, a2, Operand(ODDBALL_TYPE));
508 __ GetInstanceType(a3, a3);
509 __ Branch(&return_unequal, ne, a3, Operand(ODDBALL_TYPE));
510
511 __ bind(&return_equal);
512 __ Ret(USE_DELAY_SLOT);
513 __ li(v0, Operand(EQUAL)); // In delay slot.
514 }
515
516
517 static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
518 Register scratch,
519 CompareICState::State expected,
520 Label* fail) {
521 Label ok;
522 if (expected == CompareICState::SMI) {
523 __ JumpIfNotSmi(input, fail);
524 } else if (expected == CompareICState::NUMBER) {
525 __ JumpIfSmi(input, &ok);
526 __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
527 DONT_DO_SMI_CHECK);
528 }
529 // We could be strict about internalized/string here, but as long as
530 // hydrogen doesn't care, the stub doesn't have to care either.
531 __ bind(&ok);
532 }
533
534
535 // On entry a1 (lhs) and a0 (rhs) are the values to be compared.
536 // On exit v0 is 0, positive or negative to indicate the result of
537 // the comparison.
538 void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
539 Register lhs = a1;
540 Register rhs = a0;
541 Condition cc = GetCondition();
542
543 Label miss;
544 CompareICStub_CheckInputType(masm, lhs, a2, left(), &miss);
545 CompareICStub_CheckInputType(masm, rhs, a3, right(), &miss);
546
547 Label slow; // Call builtin.
548 Label not_smis, both_loaded_as_doubles;
549
550 Label not_two_smis, smi_done;
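  // Fast path: if both operands are Smis, untag them and return their
  // difference; its sign encodes the result of the comparison.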
551 __ Or(a2, a1, a0);
552   __ JumpIfNotSmi(a2, &not_two_smis);
553 __ sra(a1, a1, 1);
554 __ sra(a0, a0, 1);
555 __ Ret(USE_DELAY_SLOT);
556 __ subu(v0, a1, a0);
557   __ bind(&not_two_smis);
558
559 // NOTICE! This code is only reached after a smi-fast-case check, so
560 // it is certain that at least one operand isn't a smi.
561
562 // Handle the case where the objects are identical. Either returns the answer
563 // or goes to slow. Only falls through if the objects were not identical.
564 EmitIdenticalObjectComparison(masm, &slow, cc);
565
566 // If either is a Smi (we know that not both are), then they can only
567 // be strictly equal if the other is a HeapNumber.
568 STATIC_ASSERT(kSmiTag == 0);
569 DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
570 __ And(t2, lhs, Operand(rhs));
571   __ JumpIfNotSmi(t2, &not_smis, t0);
572 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
573 // 1) Return the answer.
574 // 2) Go to slow.
575 // 3) Fall through to both_loaded_as_doubles.
576 // 4) Jump to rhs_not_nan.
577 // In cases 3 and 4 we have found out we were dealing with a number-number
578 // comparison and the numbers have been loaded into f12 and f14 as doubles,
579 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
580 EmitSmiNonsmiComparison(masm, lhs, rhs,
581 &both_loaded_as_doubles, &slow, strict());
582
583 __ bind(&both_loaded_as_doubles);
584 // f12, f14 are the double representations of the left hand side
585 // and the right hand side if we have FPU. Otherwise a2, a3 represent
586 // left hand side and a0, a1 represent right hand side.
587 Label nan;
588 __ li(t0, Operand(LESS));
589 __ li(t1, Operand(GREATER));
590 __ li(t2, Operand(EQUAL));
591
592 // Check if either rhs or lhs is NaN.
593 __ BranchF(NULL, &nan, eq, f12, f14);
594
595 // Check if LESS condition is satisfied. If true, move conditionally
596 // result to v0.
597 if (!IsMipsArchVariant(kMips32r6)) {
598 __ c(OLT, D, f12, f14);
599 __ Movt(v0, t0);
600     // Use the previous check to conditionally store the opposite condition
601     // (GREATER) into v0. If rhs is equal to lhs, this will be corrected in
602     // the next check.
603 __ Movf(v0, t1);
604 // Check if EQUAL condition is satisfied. If true, move conditionally
605 // result to v0.
606 __ c(EQ, D, f12, f14);
607 __ Movt(v0, t2);
608 } else {
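    // The r6 path avoids c.cond.fmt/Movt (removed in MIPS32r6) and selects
    // the result with compare-and-branch (BranchF) instead.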
609 Label skip;
610 __ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
611 __ mov(v0, t0); // Return LESS as result.
612
613 __ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
614 __ mov(v0, t2); // Return EQUAL as result.
615
616 __ mov(v0, t1); // Return GREATER as result.
617 __ bind(&skip);
618 }
619
620 __ Ret();
621
622 __ bind(&nan);
623 // NaN comparisons always fail.
624 // Load whatever we need in v0 to make the comparison fail.
625 DCHECK(is_int16(GREATER) && is_int16(LESS));
626 __ Ret(USE_DELAY_SLOT);
627 if (cc == lt || cc == le) {
628 __ li(v0, Operand(GREATER));
629 } else {
630 __ li(v0, Operand(LESS));
631 }
632
633
634   __ bind(&not_smis);
635 // At this point we know we are dealing with two different objects,
636 // and neither of them is a Smi. The objects are in lhs_ and rhs_.
637 if (strict()) {
638 // This returns non-equal for some object types, or falls through if it
639 // was not lucky.
640 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
641 }
642
643 Label check_for_internalized_strings;
644 Label flat_string_check;
645 // Check for heap-number-heap-number comparison. Can jump to slow case,
646 // or load both doubles and jump to the code that handles
647 // that case. If the inputs are not doubles then jumps to
648 // check_for_internalized_strings.
649 // In this case a2 will contain the type of lhs_.
650 EmitCheckForTwoHeapNumbers(masm,
651 lhs,
652 rhs,
653 &both_loaded_as_doubles,
654 &check_for_internalized_strings,
655 &flat_string_check);
656
657 __ bind(&check_for_internalized_strings);
658 if (cc == eq && !strict()) {
659 // Returns an answer for two internalized strings or two
660 // detectable objects.
661 // Otherwise jumps to string case or not both strings case.
662 // Assumes that a2 is the type of lhs_ on entry.
663 EmitCheckForInternalizedStringsOrObjects(
664 masm, lhs, rhs, &flat_string_check, &slow);
665 }
666
667 // Check for both being sequential one-byte strings,
668 // and inline if that is the case.
669 __ bind(&flat_string_check);
670
671 __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, a2, a3, &slow);
672
673 __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
674 a3);
675 if (cc == eq) {
676 StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, a2, a3, t0);
677 } else {
678 StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, a2, a3, t0,
679 t1);
680 }
681 // Never falls through to here.
682
683 __ bind(&slow);
684 if (cc == eq) {
685 {
686 FrameScope scope(masm, StackFrame::INTERNAL);
687 __ Push(lhs, rhs);
688 __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
689 }
690 // Turn true into 0 and false into some non-zero value.
691 STATIC_ASSERT(EQUAL == 0);
692 __ LoadRoot(a0, Heap::kTrueValueRootIndex);
693 __ Ret(USE_DELAY_SLOT);
694 __ subu(v0, v0, a0); // In delay slot.
695 } else {
696     // Prepare for call to builtin. Push object pointers, a1 (lhs) first,
697     // a0 (rhs) second.
698 __ Push(lhs, rhs);
699 int ncr; // NaN compare result.
700 if (cc == lt || cc == le) {
701 ncr = GREATER;
702 } else {
703 DCHECK(cc == gt || cc == ge); // Remaining cases.
704 ncr = LESS;
705 }
706 __ li(a0, Operand(Smi::FromInt(ncr)));
707 __ push(a0);
708
709 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
710 // tagged as a small integer.
711 __ TailCallRuntime(Runtime::kCompare);
712 }
713
714 __ bind(&miss);
715 GenerateMiss(masm);
716 }
717
718
719 void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
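  // Stash this stub's return address in t9, pop back the ra value the caller
  // is expected to have pushed before calling, save the safepoint registers,
  // and return to the caller through t9.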
720 __ mov(t9, ra);
721 __ pop(ra);
722 __ PushSafepointRegisters();
723 __ Jump(t9);
724 }
725
726
727 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
728 __ mov(t9, ra);
729 __ pop(ra);
730 __ PopSafepointRegisters();
731 __ Jump(t9);
732 }
733
734
735 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
736 // We don't allow a GC during a store buffer overflow so there is no need to
737 // store the registers in any particular way, but we do have to store and
738 // restore them.
739 __ MultiPush(kJSCallerSaved | ra.bit());
740 if (save_doubles()) {
741 __ MultiPushFPU(kCallerSavedFPU);
742 }
743 const int argument_count = 1;
744 const int fp_argument_count = 0;
745 const Register scratch = a1;
746
747 AllowExternalCallThatCantCauseGC scope(masm);
748 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
749 __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
750 __ CallCFunction(
751 ExternalReference::store_buffer_overflow_function(isolate()),
752 argument_count);
753 if (save_doubles()) {
754 __ MultiPopFPU(kCallerSavedFPU);
755 }
756
757 __ MultiPop(kJSCallerSaved | ra.bit());
758 __ Ret();
759 }
760
761
762 void MathPowStub::Generate(MacroAssembler* masm) {
763 const Register exponent = MathPowTaggedDescriptor::exponent();
764 DCHECK(exponent.is(a2));
765 const DoubleRegister double_base = f2;
766 const DoubleRegister double_exponent = f4;
767 const DoubleRegister double_result = f0;
768 const DoubleRegister double_scratch = f6;
769 const FPURegister single_scratch = f8;
770 const Register scratch = t5;
771 const Register scratch2 = t3;
772
773 Label call_runtime, done, int_exponent;
774 if (exponent_type() == TAGGED) {
775 // Base is already in double_base.
776 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
777
778 __ ldc1(double_exponent,
779 FieldMemOperand(exponent, HeapNumber::kValueOffset));
780 }
781
782 if (exponent_type() != INTEGER) {
783 Label int_exponent_convert;
784 // Detect integer exponents stored as double.
785 __ EmitFPUTruncate(kRoundToMinusInf,
786 scratch,
787 double_exponent,
788 at,
789 double_scratch,
790 scratch2,
791 kCheckForInexactConversion);
792 // scratch2 == 0 means there was no conversion error.
793 __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
794
795 __ push(ra);
796 {
797 AllowExternalCallThatCantCauseGC scope(masm);
798 __ PrepareCallCFunction(0, 2, scratch2);
799 __ MovToFloatParameters(double_base, double_exponent);
800 __ CallCFunction(
801 ExternalReference::power_double_double_function(isolate()),
802 0, 2);
803 }
804 __ pop(ra);
805 __ MovFromFloatResult(double_result);
806 __ jmp(&done);
807
808 __ bind(&int_exponent_convert);
809 }
810
811 // Calculate power with integer exponent.
812 __ bind(&int_exponent);
813
814 // Get two copies of exponent in the registers scratch and exponent.
815 if (exponent_type() == INTEGER) {
816 __ mov(scratch, exponent);
817 } else {
818 // Exponent has previously been stored into scratch as untagged integer.
819 __ mov(exponent, scratch);
820 }
821
822 __ mov_d(double_scratch, double_base); // Back up base.
823 __ Move(double_result, 1.0);
824
825 // Get absolute value of exponent.
826 Label positive_exponent, bail_out;
827 __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
828 __ Subu(scratch, zero_reg, scratch);
829 // Check when Subu overflows and we get negative result
830 // (happens only when input is MIN_INT).
831 __ Branch(&bail_out, gt, zero_reg, Operand(scratch));
832 __ bind(&positive_exponent);
833 __ Assert(ge, kUnexpectedNegativeValue, scratch, Operand(zero_reg));
834
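  // Compute base^|exponent| by square-and-multiply: each iteration tests the
  // low bit of scratch to decide whether double_result picks up the current
  // power, then shifts the exponent right and squares double_scratch.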
835 Label while_true, no_carry, loop_end;
836 __ bind(&while_true);
837
838 __ And(scratch2, scratch, 1);
839
840 __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
841 __ mul_d(double_result, double_result, double_scratch);
842 __ bind(&no_carry);
843
844 __ sra(scratch, scratch, 1);
845
846 __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
847 __ mul_d(double_scratch, double_scratch, double_scratch);
848
849 __ Branch(&while_true);
850
851 __ bind(&loop_end);
852
853 __ Branch(&done, ge, exponent, Operand(zero_reg));
854 __ Move(double_scratch, 1.0);
855 __ div_d(double_result, double_scratch, double_result);
856 // Test whether result is zero. Bail out to check for subnormal result.
857 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
858 __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
859
860 // double_exponent may not contain the exponent value if the input was a
861 // smi. We set it with exponent value before bailing out.
862 __ bind(&bail_out);
863 __ mtc1(exponent, single_scratch);
864 __ cvt_d_w(double_exponent, single_scratch);
865
866 // Returning or bailing out.
867 __ push(ra);
868 {
869 AllowExternalCallThatCantCauseGC scope(masm);
870 __ PrepareCallCFunction(0, 2, scratch);
871 __ MovToFloatParameters(double_base, double_exponent);
872 __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
873 0, 2);
874 }
875 __ pop(ra);
876 __ MovFromFloatResult(double_result);
877
878 __ bind(&done);
879 __ Ret();
880 }
881
882 bool CEntryStub::NeedsImmovableCode() {
883 return true;
884 }
885
886
887 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
888 CEntryStub::GenerateAheadOfTime(isolate);
889 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
890 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
891 CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
892 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
893 CreateWeakCellStub::GenerateAheadOfTime(isolate);
894 BinaryOpICStub::GenerateAheadOfTime(isolate);
895 StoreRegistersStateStub::GenerateAheadOfTime(isolate);
896 RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
897 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
898 StoreFastElementStub::GenerateAheadOfTime(isolate);
899 }
900
901
902 void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
903 StoreRegistersStateStub stub(isolate);
904 stub.GetCode();
905 }
906
907
908 void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
909 RestoreRegistersStateStub stub(isolate);
910 stub.GetCode();
911 }
912
913
914 void CodeStub::GenerateFPStubs(Isolate* isolate) {
915 // Generate if not already in cache.
916 SaveFPRegsMode mode = kSaveFPRegs;
917 CEntryStub(isolate, 1, mode).GetCode();
918 StoreBufferOverflowStub(isolate, mode).GetCode();
919 isolate->set_fp_stubs_generated(true);
920 }
921
922
923 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
924 CEntryStub stub(isolate, 1, kDontSaveFPRegs);
925 stub.GetCode();
926 }
927
928
929 void CEntryStub::Generate(MacroAssembler* masm) {
930 // Called from JavaScript; parameters are on stack as if calling JS function
931 // a0: number of arguments including receiver
932 // a1: pointer to builtin function
933 // fp: frame pointer (restored after C call)
934 // sp: stack pointer (restored as callee's sp after C call)
935 // cp: current context (C callee-saved)
936 //
937 // If argv_in_register():
938 // a2: pointer to the first argument
939
940 ProfileEntryHookStub::MaybeCallEntryHook(masm);
941
942 if (argv_in_register()) {
943 // Move argv into the correct register.
944 __ mov(s1, a2);
945 } else {
946 // Compute the argv pointer in a callee-saved register.
947 __ Lsa(s1, sp, a0, kPointerSizeLog2);
948 __ Subu(s1, s1, kPointerSize);
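    // s1 = sp + (argc - 1) * kPointerSize; since the arguments sit above sp,
    // this is the address of the first argument (see register comments below).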
949 }
950
951 // Enter the exit frame that transitions from JavaScript to C++.
952 FrameScope scope(masm, StackFrame::MANUAL);
953 __ EnterExitFrame(save_doubles(), 0, is_builtin_exit()
954 ? StackFrame::BUILTIN_EXIT
955 : StackFrame::EXIT);
956
957 // s0: number of arguments including receiver (C callee-saved)
958 // s1: pointer to first argument (C callee-saved)
959 // s2: pointer to builtin function (C callee-saved)
960
961 // Prepare arguments for C routine.
962 // a0 = argc
963 __ mov(s0, a0);
964 __ mov(s2, a1);
965
966 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
967 // also need to reserve the 4 argument slots on the stack.
968
969 __ AssertStackIsAligned();
970
971 int frame_alignment = MacroAssembler::ActivationFrameAlignment();
972 int frame_alignment_mask = frame_alignment - 1;
973 int result_stack_size;
974 if (result_size() <= 2) {
975 // a0 = argc, a1 = argv, a2 = isolate
976 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
977 __ mov(a1, s1);
978 result_stack_size = 0;
979 } else {
980 DCHECK_EQ(3, result_size());
981 // Allocate additional space for the result.
982 result_stack_size =
983 ((result_size() * kPointerSize) + frame_alignment_mask) &
984 ~frame_alignment_mask;
985 __ Subu(sp, sp, Operand(result_stack_size));
986
987 // a0 = hidden result argument, a1 = argc, a2 = argv, a3 = isolate.
988 __ li(a3, Operand(ExternalReference::isolate_address(isolate())));
989 __ mov(a2, s1);
990 __ mov(a1, a0);
991 __ mov(a0, sp);
992 }
993
994 // To let the GC traverse the return address of the exit frames, we need to
995 // know where the return address is. The CEntryStub is unmovable, so
996 // we can store the address on the stack to be able to find it again and
997 // we never have to restore it, because it will not change.
998 { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
999 int kNumInstructionsToJump = 4;
1000 Label find_ra;
1001 // Adjust the value in ra to point to the correct return location, 2nd
1002 // instruction past the real call into C code (the jalr(t9)), and push it.
1003 // This is the return address of the exit frame.
1004 if (kArchVariant >= kMips32r6) {
1005 __ addiupc(ra, kNumInstructionsToJump + 1);
1006 } else {
1007 // This branch-and-link sequence is needed to find the current PC on mips
1008 // before r6, saved to the ra register.
1009 __ bal(&find_ra); // bal exposes branch delay slot.
1010 __ Addu(ra, ra, kNumInstructionsToJump * Instruction::kInstrSize);
1011 }
1012 __ bind(&find_ra);
1013
1014 // This spot was reserved in EnterExitFrame.
1015 __ sw(ra, MemOperand(sp, result_stack_size));
1016 // Stack space reservation moved to the branch delay slot below.
1017 // Stack is still aligned.
1018
1019 // Call the C routine.
1020 __ mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
1021 __ jalr(t9);
1022 // Set up sp in the delay slot.
1023 __ addiu(sp, sp, -kCArgsSlotsSize);
1024 // Make sure the stored 'ra' points to this position.
1025 DCHECK_EQ(kNumInstructionsToJump,
1026 masm->InstructionsGeneratedSince(&find_ra));
1027 }
1028 if (result_size() > 2) {
1029 DCHECK_EQ(3, result_size());
1030 // Read result values stored on stack.
1031 __ lw(a0, MemOperand(v0, 2 * kPointerSize));
1032 __ lw(v1, MemOperand(v0, 1 * kPointerSize));
1033 __ lw(v0, MemOperand(v0, 0 * kPointerSize));
1034 }
1035 // Result returned in v0, v1:v0 or a0:v1:v0 - do not destroy these registers!
1036
1037 // Check result for exception sentinel.
1038 Label exception_returned;
1039 __ LoadRoot(t0, Heap::kExceptionRootIndex);
1040 __ Branch(&exception_returned, eq, t0, Operand(v0));
1041
1042 // Check that there is no pending exception, otherwise we
1043 // should have returned the exception sentinel.
1044 if (FLAG_debug_code) {
1045 Label okay;
1046 ExternalReference pending_exception_address(
1047 Isolate::kPendingExceptionAddress, isolate());
1048 __ li(a2, Operand(pending_exception_address));
1049 __ lw(a2, MemOperand(a2));
1050 __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
1051 // Cannot use check here as it attempts to generate call into runtime.
1052 __ Branch(&okay, eq, t0, Operand(a2));
1053 __ stop("Unexpected pending exception");
1054 __ bind(&okay);
1055 }
1056
1057 // Exit C frame and return.
1058 // v0:v1: result
1059 // sp: stack pointer
1060 // fp: frame pointer
1061 Register argc;
1062 if (argv_in_register()) {
1063 // We don't want to pop arguments so set argc to no_reg.
1064 argc = no_reg;
1065 } else {
1066 // s0: still holds argc (callee-saved).
1067 argc = s0;
1068 }
1069 __ LeaveExitFrame(save_doubles(), argc, true, EMIT_RETURN);
1070
1071 // Handling of exception.
1072 __ bind(&exception_returned);
1073
1074 ExternalReference pending_handler_context_address(
1075 Isolate::kPendingHandlerContextAddress, isolate());
1076 ExternalReference pending_handler_code_address(
1077 Isolate::kPendingHandlerCodeAddress, isolate());
1078 ExternalReference pending_handler_offset_address(
1079 Isolate::kPendingHandlerOffsetAddress, isolate());
1080 ExternalReference pending_handler_fp_address(
1081 Isolate::kPendingHandlerFPAddress, isolate());
1082 ExternalReference pending_handler_sp_address(
1083 Isolate::kPendingHandlerSPAddress, isolate());
1084
1085 // Ask the runtime for help to determine the handler. This will set v0 to
1086 // contain the current pending exception, don't clobber it.
1087 ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
1088 isolate());
1089 {
1090 FrameScope scope(masm, StackFrame::MANUAL);
1091 __ PrepareCallCFunction(3, 0, a0);
1092 __ mov(a0, zero_reg);
1093 __ mov(a1, zero_reg);
1094 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
1095 __ CallCFunction(find_handler, 3);
1096 }
1097
1098 // Retrieve the handler context, SP and FP.
1099 __ li(cp, Operand(pending_handler_context_address));
1100 __ lw(cp, MemOperand(cp));
1101 __ li(sp, Operand(pending_handler_sp_address));
1102 __ lw(sp, MemOperand(sp));
1103 __ li(fp, Operand(pending_handler_fp_address));
1104 __ lw(fp, MemOperand(fp));
1105
1106 // If the handler is a JS frame, restore the context to the frame. Note that
1107 // the context will be set to (cp == 0) for non-JS frames.
1108 Label zero;
1109 __ Branch(&zero, eq, cp, Operand(zero_reg));
1110 __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
1111 __ bind(&zero);
1112
1113 // Compute the handler entry address and jump to it.
1114 __ li(a1, Operand(pending_handler_code_address));
1115 __ lw(a1, MemOperand(a1));
1116 __ li(a2, Operand(pending_handler_offset_address));
1117 __ lw(a2, MemOperand(a2));
1118 __ Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
1119 __ Addu(t9, a1, a2);
1120 __ Jump(t9);
1121 }
1122
1123
1124 void JSEntryStub::Generate(MacroAssembler* masm) {
1125 Label invoke, handler_entry, exit;
1126 Isolate* isolate = masm->isolate();
1127
1128 // Registers:
1129 // a0: entry address
1130 // a1: function
1131 // a2: receiver
1132 // a3: argc
1133 //
1134 // Stack:
1135 // 4 args slots
1136 // args
1137
1138 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1139
1140 // Save callee saved registers on the stack.
1141 __ MultiPush(kCalleeSaved | ra.bit());
1142
1143 // Save callee-saved FPU registers.
1144 __ MultiPushFPU(kCalleeSavedFPU);
1145 // Set up the reserved register for 0.0.
1146 __ Move(kDoubleRegZero, 0.0);
1147
1148
1149 // Load argv in s0 register.
1150 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
1151 offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
1152
1153 __ InitializeRootRegister();
1154 __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
1155
1156 // We build an EntryFrame.
1157 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
1158 int marker = type();
1159 __ li(t2, Operand(Smi::FromInt(marker)));
1160 __ li(t1, Operand(Smi::FromInt(marker)));
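  // The frame marker is pushed twice so that it fills both the function and
  // the context slot of the entry frame laid out below.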
1161 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
1162 isolate)));
1163 __ lw(t0, MemOperand(t0));
1164 __ Push(t3, t2, t1, t0);
1165 // Set up frame pointer for the frame to be pushed.
1166 __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
1167
1168 // Registers:
1169 // a0: entry_address
1170 // a1: function
1171 // a2: receiver_pointer
1172 // a3: argc
1173 // s0: argv
1174 //
1175 // Stack:
1176 // caller fp |
1177 // function slot | entry frame
1178 // context slot |
1179 // bad fp (0xff...f) |
1180 // callee saved registers + ra
1181 // 4 args slots
1182 // args
1183
1184 // If this is the outermost JS call, set js_entry_sp value.
1185 Label non_outermost_js;
1186 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
1187 __ li(t1, Operand(ExternalReference(js_entry_sp)));
1188 __ lw(t2, MemOperand(t1));
1189 __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
1190 __ sw(fp, MemOperand(t1));
1191 __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1192 Label cont;
1193 __ b(&cont);
1194 __ nop(); // Branch delay slot nop.
1195 __ bind(&non_outermost_js);
1196 __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
1197 __ bind(&cont);
1198 __ push(t0);
1199
1200 // Jump to a faked try block that does the invoke, with a faked catch
1201 // block that sets the pending exception.
1202 __ jmp(&invoke);
1203 __ bind(&handler_entry);
1204 handler_offset_ = handler_entry.pos();
1205 // Caught exception: Store result (exception) in the pending exception
1206 // field in the JSEnv and return a failure sentinel. Coming in here the
1207 // fp will be invalid because the PushStackHandler below sets it to 0 to
1208 // signal the existence of the JSEntry frame.
1209 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1210 isolate)));
1211 __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
1212 __ LoadRoot(v0, Heap::kExceptionRootIndex);
1213 __ b(&exit); // b exposes branch delay slot.
1214 __ nop(); // Branch delay slot nop.
1215
1216 // Invoke: Link this frame into the handler chain.
1217 __ bind(&invoke);
1218 __ PushStackHandler();
1219 // If an exception not caught by another handler occurs, this handler
1220 // returns control to the code after the bal(&invoke) above, which
1221 // restores all kCalleeSaved registers (including cp and fp) to their
1222 // saved values before returning a failure to C.
1223
1224 // Invoke the function by calling through JS entry trampoline builtin.
1225 // Notice that we cannot store a reference to the trampoline code directly in
1226 // this stub, because runtime stubs are not traversed when doing GC.
1227
1228 // Registers:
1229 // a0: entry_address
1230 // a1: function
1231 // a2: receiver_pointer
1232 // a3: argc
1233 // s0: argv
1234 //
1235 // Stack:
1236 // handler frame
1237 // entry frame
1238 // callee saved registers + ra
1239 // 4 args slots
1240 // args
1241
1242 if (type() == StackFrame::ENTRY_CONSTRUCT) {
1243 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
1244 isolate);
1245 __ li(t0, Operand(construct_entry));
1246 } else {
1247 ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
1248 __ li(t0, Operand(entry));
1249 }
1250 __ lw(t9, MemOperand(t0)); // Deref address.
1251
1252 // Call JSEntryTrampoline.
1253 __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
1254 __ Call(t9);
1255
1256 // Unlink this frame from the handler chain.
1257 __ PopStackHandler();
1258
1259 __ bind(&exit); // v0 holds result
1260 // Check if the current stack frame is marked as the outermost JS frame.
1261 Label non_outermost_js_2;
1262 __ pop(t1);
1263 __ Branch(&non_outermost_js_2,
1264 ne,
1265 t1,
1266 Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1267 __ li(t1, Operand(ExternalReference(js_entry_sp)));
1268 __ sw(zero_reg, MemOperand(t1));
1269 __ bind(&non_outermost_js_2);
1270
1271 // Restore the top frame descriptors from the stack.
1272 __ pop(t1);
1273 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
1274 isolate)));
1275 __ sw(t1, MemOperand(t0));
1276
1277 // Reset the stack to the callee saved registers.
1278 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
1279
1280 // Restore callee-saved fpu registers.
1281 __ MultiPopFPU(kCalleeSavedFPU);
1282
1283 // Restore callee saved registers from the stack.
1284 __ MultiPop(kCalleeSaved | ra.bit());
1285 // Return.
1286 __ Jump(ra);
1287 }
1288
1289
1290 void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
1291 // Return address is in ra.
1292 Label miss;
1293
1294 Register receiver = LoadDescriptor::ReceiverRegister();
1295 Register index = LoadDescriptor::NameRegister();
1296 Register scratch = t1;
1297 Register result = v0;
1298 DCHECK(!scratch.is(receiver) && !scratch.is(index));
1299 DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()));
1300
1301 StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
1302 &miss, // When not a string.
1303 &miss, // When not a number.
1304 &miss, // When index out of range.
1305 RECEIVER_IS_STRING);
1306 char_at_generator.GenerateFast(masm);
1307 __ Ret();
1308
1309 StubRuntimeCallHelper call_helper;
1310 char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
1311
1312 __ bind(&miss);
1313 PropertyAccessCompiler::TailCallBuiltin(
1314 masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1315 }
1316
1317
1318 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
1319 Label miss;
1320 Register receiver = LoadDescriptor::ReceiverRegister();
1321 // Ensure that the vector and slot registers won't be clobbered before
1322 // calling the miss handler.
1323 DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::VectorRegister(),
1324 LoadWithVectorDescriptor::SlotRegister()));
1325
1326 NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, t0,
1327 t1, &miss);
1328 __ bind(&miss);
1329 PropertyAccessCompiler::TailCallBuiltin(
1330 masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
1331 }
1332
1333 void RegExpExecStub::Generate(MacroAssembler* masm) {
1334   // Just jump directly to runtime if native RegExp is not selected at compile
1335   // time, or if the regexp entry in generated code has been turned off by a
1336   // runtime switch or at compilation.
1337 #ifdef V8_INTERPRETED_REGEXP
1338 __ TailCallRuntime(Runtime::kRegExpExec);
1339 #else // V8_INTERPRETED_REGEXP
1340
1341 // Stack frame on entry.
1342 // sp[0]: last_match_info (expected JSArray)
1343 // sp[4]: previous index
1344 // sp[8]: subject string
1345 // sp[12]: JSRegExp object
1346
1347 const int kLastMatchInfoOffset = 0 * kPointerSize;
1348 const int kPreviousIndexOffset = 1 * kPointerSize;
1349 const int kSubjectOffset = 2 * kPointerSize;
1350 const int kJSRegExpOffset = 3 * kPointerSize;
1351
1352 Label runtime;
1353 // Allocation of registers for this function. These are in callee save
1354 // registers and will be preserved by the call to the native RegExp code, as
1355 // this code is called using the normal C calling convention. When calling
1356 // directly from generated code the native RegExp code will not do a GC and
1357 // therefore the content of these registers are safe to use after the call.
1358 // MIPS - using s0..s2, since we are not using CEntry Stub.
1359 Register subject = s0;
1360 Register regexp_data = s1;
1361 Register last_match_info_elements = s2;
1362
1363 // Ensure that a RegExp stack is allocated.
1364 ExternalReference address_of_regexp_stack_memory_address =
1365 ExternalReference::address_of_regexp_stack_memory_address(isolate());
1366 ExternalReference address_of_regexp_stack_memory_size =
1367 ExternalReference::address_of_regexp_stack_memory_size(isolate());
1368 __ li(a0, Operand(address_of_regexp_stack_memory_size));
1369 __ lw(a0, MemOperand(a0, 0));
1370 __ Branch(&runtime, eq, a0, Operand(zero_reg));
1371
1372 // Check that the first argument is a JSRegExp object.
1373 __ lw(a0, MemOperand(sp, kJSRegExpOffset));
1374 STATIC_ASSERT(kSmiTag == 0);
1375 __ JumpIfSmi(a0, &runtime);
1376 __ GetObjectType(a0, a1, a1);
1377 __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
1378
1379 // Check that the RegExp has been compiled (data contains a fixed array).
1380 __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
1381 if (FLAG_debug_code) {
1382 __ SmiTst(regexp_data, t0);
1383 __ Check(nz,
1384 kUnexpectedTypeForRegExpDataFixedArrayExpected,
1385 t0,
1386 Operand(zero_reg));
1387 __ GetObjectType(regexp_data, a0, a0);
1388 __ Check(eq,
1389 kUnexpectedTypeForRegExpDataFixedArrayExpected,
1390 a0,
1391 Operand(FIXED_ARRAY_TYPE));
1392 }
1393
1394 // regexp_data: RegExp data (FixedArray)
1395 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
1396 __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
1397 __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
1398
1399 // regexp_data: RegExp data (FixedArray)
1400 // Check that the number of captures fit in the static offsets vector buffer.
1401 __ lw(a2,
1402 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
1403 // Check (number_of_captures + 1) * 2 <= offsets vector size
1404 // Or number_of_captures * 2 <= offsets vector size - 2
1405 // Multiplying by 2 comes for free since a2 is smi-tagged.
1406 STATIC_ASSERT(kSmiTag == 0);
1407 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
1408 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
1409 __ Branch(
1410 &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
1411
1412 // Reset offset for possibly sliced string.
1413 __ mov(t0, zero_reg);
1414 __ lw(subject, MemOperand(sp, kSubjectOffset));
1415 __ JumpIfSmi(subject, &runtime);
1416 __ mov(a3, subject); // Make a copy of the original subject string.
1417 // subject: subject string
1418 // a3: subject string
1419 // regexp_data: RegExp data (FixedArray)
1420 // Handle subject string according to its encoding and representation:
1421 // (1) Sequential string? If yes, go to (4).
1422 // (2) Sequential or cons? If not, go to (5).
1423 // (3) Cons string. If the string is flat, replace subject with first string
1424 // and go to (1). Otherwise bail out to runtime.
1425 // (4) Sequential string. Load regexp code according to encoding.
1426 // (E) Carry on.
1427 /// [...]
1428
1429 // Deferred code at the end of the stub:
1430 // (5) Long external string? If not, go to (7).
1431 // (6) External string. Make it, offset-wise, look like a sequential string.
1432 // Go to (4).
1433 // (7) Short external string or not a string? If yes, bail out to runtime.
1434 // (8) Sliced string. Replace subject with parent. Go to (1).
1435
1436 Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
1437 not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
1438
1439 __ bind(&check_underlying);
1440 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
1441 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
1442
1443 // (1) Sequential string? If yes, go to (4).
1444 __ And(a1,
1445 a0,
1446 Operand(kIsNotStringMask |
1447 kStringRepresentationMask |
1448 kShortExternalStringMask));
1449 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
1450   __ Branch(&seq_string, eq, a1, Operand(zero_reg));  // Go to (4).
1451
1452 // (2) Sequential or cons? If not, go to (5).
1453 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
1454 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
1455 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
1456 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
1457 // Go to (5).
1458   __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
1459
1460 // (3) Cons string. Check that it's flat.
1461 // Replace subject with first string and reload instance type.
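  // A flattened cons string has the empty string as its second half; any
  // other second half means the string is not flat and we bail to runtime.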
1462 __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
1463 __ LoadRoot(a1, Heap::kempty_stringRootIndex);
1464 __ Branch(&runtime, ne, a0, Operand(a1));
1465 __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
1466 __ jmp(&check_underlying);
1467
1468 // (4) Sequential string. Load regexp code according to encoding.
1469 __ bind(&seq_string);
1470 // subject: sequential subject string (or look-alike, external string)
1471 // a3: original subject string
1472 // Load previous index and check range before a3 is overwritten. We have to
1473 // use a3 instead of subject here because subject might have been only made
1474 // to look like a sequential string when it actually is an external string.
1475 __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
1476 __ JumpIfNotSmi(a1, &runtime);
1477 __ lw(a3, FieldMemOperand(a3, String::kLengthOffset));
1478 __ Branch(&runtime, ls, a3, Operand(a1));
1479 __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
1480
1481 STATIC_ASSERT(kStringEncodingMask == 4);
1482 STATIC_ASSERT(kOneByteStringTag == 4);
1483 STATIC_ASSERT(kTwoByteStringTag == 0);
1484 __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for one-byte.
1485 __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
1486 __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
1487 __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
1488 __ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
1489
1490 // (E) Carry on. String handling is done.
1491 // t9: irregexp code
1492 // Check that the irregexp code has been generated for the actual string
1493 // encoding. If it has, the field contains a code object otherwise it contains
1494 // a smi (code flushing support).
1495 __ JumpIfSmi(t9, &runtime);
1496
1497 // a1: previous index
1498 // a3: encoding of subject string (1 if one_byte, 0 if two_byte);
1499 // t9: code
1500 // subject: Subject string
1501 // regexp_data: RegExp data (FixedArray)
1502 // All checks done. Now push arguments for native regexp code.
1503 __ IncrementCounter(isolate()->counters()->regexp_entry_native(),
1504 1, a0, a2);
1505
1506 // Isolates: note we add an additional parameter here (isolate pointer).
1507 const int kRegExpExecuteArguments = 9;
1508 const int kParameterRegisters = 4;
1509 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
1510
1511 // Stack pointer now points to cell where return address is to be written.
1512 // Arguments are before that on the stack or in registers, meaning we
1513 // treat the return address as argument 5. Thus every argument after that
1514 // needs to be shifted back by 1. Since DirectCEntryStub will handle
1515 // allocating space for the c argument slots, we don't need to calculate
1516 // that into the argument positions on the stack. This is how the stack will
1517 // look (sp meaning the value of sp at this moment):
1518 // [sp + 5] - Argument 9
1519 // [sp + 4] - Argument 8
1520 // [sp + 3] - Argument 7
1521 // [sp + 2] - Argument 6
1522 // [sp + 1] - Argument 5
1523 // [sp + 0] - saved ra
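  // Arguments 1-4 are passed in registers a0-a3 (set up below), so stack
  // argument n (for n in 5..9) is written at MemOperand(sp, (n - 4) *
  // kPointerSize), which matches the stores that follow.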
1524
1525 // Argument 9: Pass current isolate address.
1526 // CFunctionArgumentOperand handles MIPS stack argument slots.
1527 __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
1528 __ sw(a0, MemOperand(sp, 5 * kPointerSize));
1529
1530 // Argument 8: Indicate that this is a direct call from JavaScript.
1531 __ li(a0, Operand(1));
1532 __ sw(a0, MemOperand(sp, 4 * kPointerSize));
1533
1534 // Argument 7: Start (high end) of backtracking stack memory area.
1535 __ li(a0, Operand(address_of_regexp_stack_memory_address));
1536 __ lw(a0, MemOperand(a0, 0));
1537 __ li(a2, Operand(address_of_regexp_stack_memory_size));
1538 __ lw(a2, MemOperand(a2, 0));
1539 __ addu(a0, a0, a2);
1540 __ sw(a0, MemOperand(sp, 3 * kPointerSize));
1541
1542 // Argument 6: Set the number of capture registers to zero to force global
1543 // regexps to behave as non-global. This does not affect non-global regexps.
1544 __ mov(a0, zero_reg);
1545 __ sw(a0, MemOperand(sp, 2 * kPointerSize));
1546
1547 // Argument 5: static offsets vector buffer.
1548 __ li(a0, Operand(
1549 ExternalReference::address_of_static_offsets_vector(isolate())));
1550 __ sw(a0, MemOperand(sp, 1 * kPointerSize));
1551
1552   // For arguments 4 and 3, get the string length, calculate the start of the
1553   // string data, and calculate the shift of the index (0 for one-byte, 1 for two-byte).
1554 __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
1555 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
1556 // Load the length from the original subject string from the previous stack
1557 // frame. Therefore we have to use fp, which points exactly to two pointer
1558 // sizes below the previous sp. (Because creating a new stack frame pushes
1559 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
1560 __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
1561 // If slice offset is not 0, load the length from the original sliced string.
1562 // Argument 4, a3: End of string data
1563 // Argument 3, a2: Start of string data
1564 // Prepare start and end index of the input.
1565 __ sllv(t1, t0, a3);
1566 __ addu(t0, t2, t1);
1567 __ sllv(t1, a1, a3);
1568 __ addu(a2, t0, t1);
1569
1570 __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
1571 __ sra(t2, t2, kSmiTagSize);
1572 __ sllv(t1, t2, a3);
1573 __ addu(a3, t0, t1);
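  // Summary of the address arithmetic above (the character size shift is 0
  // for one-byte and 1 for two-byte; t0 presumably holds the slice offset set
  // up earlier in this stub, zero for non-sliced strings):
  //   t0 = string data start + (slice offset << shift)
  //   a2 = t0 + (previous index << shift)    (Argument 3)
  //   a3 = t0 + (string length << shift)     (Argument 4, end of string data)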
1574 // Argument 2 (a1): Previous index.
1575 // Already there
1576
1577 // Argument 1 (a0): Subject string.
1578 __ mov(a0, subject);
1579
1580 // Locate the code entry and call it.
1581 __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
1582 DirectCEntryStub stub(isolate());
1583 stub.GenerateCall(masm, t9);
1584
1585 __ LeaveExitFrame(false, no_reg, true);
1586
1587 // v0: result
1588 // subject: subject string (callee saved)
1589 // regexp_data: RegExp data (callee saved)
1590 // last_match_info_elements: Last match info elements (callee saved)
1591 // Check the result.
1592 Label success;
1593 __ Branch(&success, eq, v0, Operand(1));
1594 // We expect exactly one result since we force the called regexp to behave
1595 // as non-global.
1596 Label failure;
1597 __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
1598   // If not an exception, it can only be a retry. Handle that in the runtime system.
1599 __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
1600   // Result must now be exception. If there is no pending exception already, a
1601   // stack overflow (on the backtrack stack) was detected in RegExp code but
1602   // the exception has not been created yet. Handle that in the runtime system.
1603 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
1604 __ li(a1, Operand(isolate()->factory()->the_hole_value()));
1605 __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1606 isolate())));
1607 __ lw(v0, MemOperand(a2, 0));
1608 __ Branch(&runtime, eq, v0, Operand(a1));
1609
1610 // For exception, throw the exception again.
1611 __ TailCallRuntime(Runtime::kRegExpExecReThrow);
1612
1613 __ bind(&failure);
1614   // For failure, return null.
1615 __ li(v0, Operand(isolate()->factory()->null_value()));
1616 __ DropAndRet(4);
1617
1618 // Process the result from the native regexp code.
1619 __ bind(&success);
1620 __ lw(a1,
1621 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
1622 // Calculate number of capture registers (number_of_captures + 1) * 2.
1623   // Multiplying by 2 comes for free since a1 is smi-tagged.
1624 STATIC_ASSERT(kSmiTag == 0);
1625 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
1626 __ Addu(a1, a1, Operand(2)); // a1 was a smi.
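  // a1 arrived as Smi(capture_count), i.e. capture_count << 1, so adding 2
  // yields (capture_count + 1) * 2 untagged. For example, 3 capture groups
  // arrive as Smi(3) == 6, and 6 + 2 == 8 == (3 + 1) * 2 capture registers.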
1627
1628 // Check that the last match info is a FixedArray.
1629 __ lw(last_match_info_elements, MemOperand(sp, kLastMatchInfoOffset));
1630 __ JumpIfSmi(last_match_info_elements, &runtime);
1631 // Check that the object has fast elements.
1632 __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
1633 __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
1634 __ Branch(&runtime, ne, a0, Operand(at));
1635 // Check that the last match info has space for the capture registers and the
1636 // additional information.
1637 __ lw(a0,
1638 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
1639 __ Addu(a2, a1, Operand(RegExpMatchInfo::kLastMatchOverhead));
1640 __ sra(at, a0, kSmiTagSize);
1641 __ Branch(&runtime, gt, a2, Operand(at));
1642
1643 // a1: number of capture registers
1644 // subject: subject string
1645 // Store the capture count.
1646 __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
1647 __ sw(a2, FieldMemOperand(last_match_info_elements,
1648 RegExpMatchInfo::kNumberOfCapturesOffset));
1649 // Store last subject and last input.
1650 __ sw(subject, FieldMemOperand(last_match_info_elements,
1651 RegExpMatchInfo::kLastSubjectOffset));
1652 __ mov(a2, subject);
1653 __ RecordWriteField(last_match_info_elements,
1654 RegExpMatchInfo::kLastSubjectOffset, subject, t3,
1655 kRAHasNotBeenSaved, kDontSaveFPRegs);
1656 __ mov(subject, a2);
1657 __ sw(subject, FieldMemOperand(last_match_info_elements,
1658 RegExpMatchInfo::kLastInputOffset));
1659 __ RecordWriteField(last_match_info_elements,
1660 RegExpMatchInfo::kLastInputOffset, subject, t3,
1661 kRAHasNotBeenSaved, kDontSaveFPRegs);
1662
1663 // Get the static offsets vector filled by the native regexp code.
1664 ExternalReference address_of_static_offsets_vector =
1665 ExternalReference::address_of_static_offsets_vector(isolate());
1666 __ li(a2, Operand(address_of_static_offsets_vector));
1667
1668 // a1: number of capture registers
1669 // a2: offsets vector
1670 Label next_capture, done;
1671 // Capture register counter starts from number of capture registers and
1672 // counts down until wrapping after zero.
1673 __ Addu(a0, last_match_info_elements,
1674 Operand(RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag));
1675 __ bind(&next_capture);
1676 __ Subu(a1, a1, Operand(1));
1677 __ Branch(&done, lt, a1, Operand(zero_reg));
1678 // Read the value from the static offsets vector buffer.
1679 __ lw(a3, MemOperand(a2, 0));
1680 __ addiu(a2, a2, kPointerSize);
1681 // Store the smi value in the last match info.
1682 __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
1683 __ sw(a3, MemOperand(a0, 0));
1684 __ Branch(&next_capture, USE_DELAY_SLOT);
1685 __ addiu(a0, a0, kPointerSize); // In branch delay slot.
1686
1687 __ bind(&done);
1688
1689 // Return last match info.
1690 __ mov(v0, last_match_info_elements);
1691 __ DropAndRet(4);
1692
1693 // Do the runtime call to execute the regexp.
1694 __ bind(&runtime);
1695 __ TailCallRuntime(Runtime::kRegExpExec);
1696
1697 // Deferred code for string handling.
1698 // (5) Long external string? If not, go to (7).
1699   __ bind(&not_seq_nor_cons);
1700 // Go to (7).
1701   __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));
1702
1703 // (6) External string. Make it, offset-wise, look like a sequential string.
1704 __ bind(&external_string);
1705 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
1706 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
1707 if (FLAG_debug_code) {
1708 // Assert that we do not have a cons or slice (indirect strings) here.
1709 // Sequential strings have already been ruled out.
1710 __ And(at, a0, Operand(kIsIndirectStringMask));
1711 __ Assert(eq,
1712 kExternalStringExpectedButNotFound,
1713 at,
1714 Operand(zero_reg));
1715 }
1716 __ lw(subject,
1717 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
1718 // Move the pointer so that offset-wise, it looks like a sequential string.
1719 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
1720 __ Subu(subject,
1721 subject,
1722 SeqTwoByteString::kHeaderSize - kHeapObjectTag);
1723   __ jmp(&seq_string);  // Go to (4).
1724
1725 // (7) Short external string or not a string? If yes, bail out to runtime.
1726   __ bind(&not_long_external);
1727   STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
1728 __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
1729 __ Branch(&runtime, ne, at, Operand(zero_reg));
1730
1731   // (8) Sliced string. Replace subject with parent. Go to (1).
1732 // Load offset into t0 and replace subject string with parent.
1733 __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
1734 __ sra(t0, t0, kSmiTagSize);
1735 __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
1736   __ jmp(&check_underlying);  // Go to (1).
1737 #endif // V8_INTERPRETED_REGEXP
1738 }
1739
1740
1741 static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
1742 // a0 : number of arguments to the construct function
1743 // a2 : feedback vector
1744 // a3 : slot in feedback vector (Smi)
1745 // a1 : the function to call
1746 FrameScope scope(masm, StackFrame::INTERNAL);
1747 const RegList kSavedRegs = 1 << 4 | // a0
1748 1 << 5 | // a1
1749 1 << 6 | // a2
1750 1 << 7 | // a3
1751 1 << cp.code();
1752
1753 // Number-of-arguments register must be smi-tagged to call out.
1754 __ SmiTag(a0);
1755 __ MultiPush(kSavedRegs);
1756
1757 __ CallStub(stub);
1758
1759 __ MultiPop(kSavedRegs);
1760 __ SmiUntag(a0);
1761 }
1762
1763
1764 static void GenerateRecordCallTarget(MacroAssembler* masm) {
1765 // Cache the called function in a feedback vector slot. Cache states
1766 // are uninitialized, monomorphic (indicated by a JSFunction), and
1767 // megamorphic.
1768 // a0 : number of arguments to the construct function
1769 // a1 : the function to call
1770 // a2 : feedback vector
1771 // a3 : slot in feedback vector (Smi)
1772 Label initialize, done, miss, megamorphic, not_array_function;
1773
1774 DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
1775 masm->isolate()->heap()->megamorphic_symbol());
1776 DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
1777 masm->isolate()->heap()->uninitialized_symbol());
1778
1779 // Load the cache state into t2.
1780 __ Lsa(t2, a2, a3, kPointerSizeLog2 - kSmiTagSize);
1781 __ lw(t2, FieldMemOperand(t2, FixedArray::kHeaderSize));
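  // The Lsa above adds its third operand, shifted left by the given amount,
  // to the second: t2 = a2 + (a3 << (kPointerSizeLog2 - kSmiTagSize)). Since
  // a3 is a Smi (slot << 1 on 32-bit), one extra shift turns it into the byte
  // offset slot * kPointerSize into the feedback vector.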
1782
1783 // A monomorphic cache hit or an already megamorphic state: invoke the
1784 // function without changing the state.
1785 // We don't know if t2 is a WeakCell or a Symbol, but it's harmless to read at
1786 // this position in a symbol (see static asserts in type-feedback-vector.h).
1787 Label check_allocation_site;
1788 Register feedback_map = t1;
1789 Register weak_value = t4;
1790 __ lw(weak_value, FieldMemOperand(t2, WeakCell::kValueOffset));
1791 __ Branch(&done, eq, a1, Operand(weak_value));
1792 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
1793 __ Branch(&done, eq, t2, Operand(at));
1794 __ lw(feedback_map, FieldMemOperand(t2, HeapObject::kMapOffset));
1795 __ LoadRoot(at, Heap::kWeakCellMapRootIndex);
1796 __ Branch(&check_allocation_site, ne, feedback_map, Operand(at));
1797
1798 // If the weak cell is cleared, we have a new chance to become monomorphic.
1799 __ JumpIfSmi(weak_value, &initialize);
1800 __ jmp(&megamorphic);
1801
1802 __ bind(&check_allocation_site);
1803 // If we came here, we need to see if we are the array function.
1804   // If we didn't have a matching function, and we didn't find the megamorphic
1805 // sentinel, then we have in the slot either some other function or an
1806 // AllocationSite.
1807 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
1808 __ Branch(&miss, ne, feedback_map, Operand(at));
1809
1810 // Make sure the function is the Array() function
1811 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2);
1812 __ Branch(&megamorphic, ne, a1, Operand(t2));
1813 __ jmp(&done);
1814
1815 __ bind(&miss);
1816
1817   // A monomorphic miss (i.e., the cache is not uninitialized) goes
1818 // megamorphic.
1819 __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
1820 __ Branch(&initialize, eq, t2, Operand(at));
1821 // MegamorphicSentinel is an immortal immovable object (undefined) so no
1822 // write-barrier is needed.
1823 __ bind(&megamorphic);
1824 __ Lsa(t2, a2, a3, kPointerSizeLog2 - kSmiTagSize);
1825 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
1826 __ sw(at, FieldMemOperand(t2, FixedArray::kHeaderSize));
1827 __ jmp(&done);
1828
1829 // An uninitialized cache is patched with the function.
1830 __ bind(&initialize);
1831 // Make sure the function is the Array() function.
1832 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2);
1833   __ Branch(&not_array_function, ne, a1, Operand(t2));
1834
1835   // The target function is the Array constructor.
1836   // Create an AllocationSite if we don't already have it, and store it in the
1837 // slot.
1838 CreateAllocationSiteStub create_stub(masm->isolate());
1839 CallStubInRecordCallTarget(masm, &create_stub);
1840 __ Branch(&done);
1841
1842   __ bind(&not_array_function);
1843 CreateWeakCellStub weak_cell_stub(masm->isolate());
1844 CallStubInRecordCallTarget(masm, &weak_cell_stub);
1845
1846 __ bind(&done);
1847
1848 // Increment the call count for all function calls.
1849 __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
1850 __ lw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
1851 __ Addu(t0, t0, Operand(Smi::FromInt(1)));
1852 __ sw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
1853 }
1854
1855
1856 void CallConstructStub::Generate(MacroAssembler* masm) {
1857 // a0 : number of arguments
1858 // a1 : the function to call
1859 // a2 : feedback vector
1860 // a3 : slot in feedback vector (Smi, for RecordCallTarget)
1861
1862 Label non_function;
1863 // Check that the function is not a smi.
1864 __ JumpIfSmi(a1, &non_function);
1865 // Check that the function is a JSFunction.
1866 __ GetObjectType(a1, t1, t1);
1867 __ Branch(&non_function, ne, t1, Operand(JS_FUNCTION_TYPE));
1868
1869 GenerateRecordCallTarget(masm);
1870
1871 __ Lsa(t1, a2, a3, kPointerSizeLog2 - kSmiTagSize);
1872 Label feedback_register_initialized;
1873 // Put the AllocationSite from the feedback vector into a2, or undefined.
1874 __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
1875 __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
1876 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
1877 __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
1878 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
1879 __ bind(&feedback_register_initialized);
1880
1881 __ AssertUndefinedOrAllocationSite(a2, t1);
1882
1883 // Pass function as new target.
1884 __ mov(a3, a1);
1885
1886 // Tail call to the function-specific construct stub (still in the caller
1887 // context at this point).
1888 __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
1889 __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
1890 __ Addu(at, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
1891 __ Jump(at);
1892
1893 __ bind(&non_function);
1894 __ mov(a3, a1);
1895 __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
1896 }
1897
1898 // Note: feedback_vector and slot are clobbered after the call.
1899 static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
1900 Register slot) {
1901 __ Lsa(at, feedback_vector, slot, kPointerSizeLog2 - kSmiTagSize);
1902 __ lw(slot, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
1903 __ Addu(slot, slot, Operand(Smi::FromInt(1)));
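  // With kSmiTag == 0 the tagged representation of n is n << 1, so adding the
  // raw value of Smi::FromInt(1) (which is 2) produces the tagged value of
  // n + 1 without untagging the count.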
1904 __ sw(slot, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
1905 }
1906
1907 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
1908 // a0 - number of arguments
1909 // a1 - function
1910 // a3 - slot id
1911 // a2 - vector
1912 // t0 - loaded from vector[slot]
1913 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, at);
1914 __ Branch(miss, ne, a1, Operand(at));
1915
1916 // Increment the call count for monomorphic function calls.
1917 IncrementCallCount(masm, a2, a3);
1918
1919 __ mov(a2, t0);
1920 __ mov(a3, a1);
1921 ArrayConstructorStub stub(masm->isolate());
1922 __ TailCallStub(&stub);
1923 }
1924
1925
1926 void CallICStub::Generate(MacroAssembler* masm) {
1927 // a0 - number of arguments
1928 // a1 - function
1929 // a3 - slot id (Smi)
1930 // a2 - vector
1931 Label extra_checks_or_miss, call, call_function, call_count_incremented;
1932
1933   // The checks. First, does a1 match the recorded monomorphic target?
1934 __ Lsa(t0, a2, a3, kPointerSizeLog2 - kSmiTagSize);
1935 __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
1936
1937 // We don't know that we have a weak cell. We might have a private symbol
1938 // or an AllocationSite, but the memory is safe to examine.
1939 // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
1940 // FixedArray.
1941 // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
1942 // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
1943 // computed, meaning that it can't appear to be a pointer. If the low bit is
1944 // 0, then hash is computed, but the 0 bit prevents the field from appearing
1945 // to be a pointer.
1946 STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
1947 STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
1948 WeakCell::kValueOffset &&
1949 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
1950
1951 __ lw(t1, FieldMemOperand(t0, WeakCell::kValueOffset));
1952 __ Branch(&extra_checks_or_miss, ne, a1, Operand(t1));
1953
1954 // The compare above could have been a SMI/SMI comparison. Guard against this
1955 // convincing us that we have a monomorphic JSFunction.
1956 __ JumpIfSmi(a1, &extra_checks_or_miss);
1957
1958 __ bind(&call_function);
1959
1960 // Increment the call count for monomorphic function calls.
1961 IncrementCallCount(masm, a2, a3);
1962
1963 __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
1964 tail_call_mode()),
1965 RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
1966
1967 __ bind(&extra_checks_or_miss);
1968 Label uninitialized, miss, not_allocation_site;
1969
1970 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
1971 __ Branch(&call, eq, t0, Operand(at));
1972
1973 // Verify that t0 contains an AllocationSite
1974 __ lw(t1, FieldMemOperand(t0, HeapObject::kMapOffset));
1975 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
1976   __ Branch(&not_allocation_site, ne, t1, Operand(at));
1977
1978 HandleArrayCase(masm, &miss);
1979
1980   __ bind(&not_allocation_site);
1981
1982 // The following cases attempt to handle MISS cases without going to the
1983 // runtime.
1984 if (FLAG_trace_ic) {
1985 __ Branch(&miss);
1986 }
1987
1988 __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
1989 __ Branch(&uninitialized, eq, t0, Operand(at));
1990
1991 // We are going megamorphic. If the feedback is a JSFunction, it is fine
1992 // to handle it here. More complex cases are dealt with in the runtime.
1993 __ AssertNotSmi(t0);
1994 __ GetObjectType(t0, t1, t1);
1995 __ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE));
1996 __ Lsa(t0, a2, a3, kPointerSizeLog2 - kSmiTagSize);
1997 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
1998 __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
1999
2000 __ bind(&call);
2001 IncrementCallCount(masm, a2, a3);
2002
2003 __ bind(&call_count_incremented);
2004
2005 __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
2006 RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
2007
2008 __ bind(&uninitialized);
2009
2010 // We are going monomorphic, provided we actually have a JSFunction.
2011 __ JumpIfSmi(a1, &miss);
2012
2013 // Goto miss case if we do not have a function.
2014 __ GetObjectType(a1, t0, t0);
2015 __ Branch(&miss, ne, t0, Operand(JS_FUNCTION_TYPE));
2016
2017 // Make sure the function is not the Array() function, which requires special
2018 // behavior on MISS.
2019 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t0);
2020 __ Branch(&miss, eq, a1, Operand(t0));
2021
2022 // Make sure the function belongs to the same native context.
2023 __ lw(t0, FieldMemOperand(a1, JSFunction::kContextOffset));
2024 __ lw(t0, ContextMemOperand(t0, Context::NATIVE_CONTEXT_INDEX));
2025 __ lw(t1, NativeContextMemOperand());
2026 __ Branch(&miss, ne, t0, Operand(t1));
2027
2028 // Store the function. Use a stub since we need a frame for allocation.
2029 // a2 - vector
2030 // a3 - slot
2031 // a1 - function
2032 {
2033 FrameScope scope(masm, StackFrame::INTERNAL);
2034 CreateWeakCellStub create_stub(masm->isolate());
2035 __ SmiTag(a0);
2036 __ Push(a0);
2037 __ Push(a2, a3);
2038 __ Push(cp, a1);
2039 __ CallStub(&create_stub);
2040 __ Pop(cp, a1);
2041 __ Pop(a2, a3);
2042 __ Pop(a0);
2043 __ SmiUntag(a0);
2044 }
2045
2046 __ Branch(&call_function);
2047
2048 // We are here because tracing is on or we encountered a MISS case we can't
2049 // handle here.
2050 __ bind(&miss);
2051 GenerateMiss(masm);
2052
2053 __ Branch(&call_count_incremented);
2054 }
2055
2056
2057 void CallICStub::GenerateMiss(MacroAssembler* masm) {
2058 FrameScope scope(masm, StackFrame::INTERNAL);
2059
2060 // Preserve the number of arguments as Smi.
2061 __ SmiTag(a0);
2062 __ Push(a0);
2063
2064 // Push the receiver and the function and feedback info.
2065 __ Push(a1, a2, a3);
2066
2067 // Call the entry.
2068 __ CallRuntime(Runtime::kCallIC_Miss);
2069
2070 // Move result to a1 and exit the internal frame.
2071 __ mov(a1, v0);
2072
2073 // Restore number of arguments.
2074 __ Pop(a0);
2075 __ SmiUntag(a0);
2076 }
2077
2078
2079 // StringCharCodeAtGenerator.
2080 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
2081 DCHECK(!t0.is(index_));
2082 DCHECK(!t0.is(result_));
2083 DCHECK(!t0.is(object_));
2084 if (check_mode_ == RECEIVER_IS_UNKNOWN) {
2085 // If the receiver is a smi trigger the non-string case.
2086 __ JumpIfSmi(object_, receiver_not_string_);
2087
2088 // Fetch the instance type of the receiver into result register.
2089 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2090 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2091 // If the receiver is not a string trigger the non-string case.
2092 __ And(t0, result_, Operand(kIsNotStringMask));
2093 __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
2094 }
2095
2096 // If the index is non-smi trigger the non-smi case.
2097 __ JumpIfNotSmi(index_, &index_not_smi_);
2098
2099 __ bind(&got_smi_index_);
2100
2101 // Check for index out of range.
2102 __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
2103 __ Branch(index_out_of_range_, ls, t0, Operand(index_));
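  // Both the length in t0 and index_ are still Smi-tagged here; tagging only
  // shifts the (non-negative) values left by one bit, so the unsigned
  // comparison on tagged values gives the same result as on untagged ones.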
2104
2105 __ sra(index_, index_, kSmiTagSize);
2106
2107 StringCharLoadGenerator::Generate(masm,
2108 object_,
2109 index_,
2110 result_,
2111 &call_runtime_);
2112
2113 __ sll(result_, result_, kSmiTagSize);
2114 __ bind(&exit_);
2115 }
2116
2117
2118 void StringCharCodeAtGenerator::GenerateSlow(
2119 MacroAssembler* masm, EmbedMode embed_mode,
2120 const RuntimeCallHelper& call_helper) {
2121 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
2122
2123 // Index is not a smi.
2124 __ bind(&index_not_smi_);
2125 // If index is a heap number, try converting it to an integer.
2126 __ CheckMap(index_,
2127 result_,
2128 Heap::kHeapNumberMapRootIndex,
2129 index_not_number_,
2130 DONT_DO_SMI_CHECK);
2131 call_helper.BeforeCall(masm);
2132 // Consumed by runtime conversion function:
2133 if (embed_mode == PART_OF_IC_HANDLER) {
2134 __ Push(LoadWithVectorDescriptor::VectorRegister(),
2135 LoadWithVectorDescriptor::SlotRegister(), object_, index_);
2136 } else {
2137 __ Push(object_, index_);
2138 }
2139 __ CallRuntime(Runtime::kNumberToSmi);
2140
2141 // Save the conversion result before the pop instructions below
2142 // have a chance to overwrite it.
2143 __ Move(index_, v0);
2144 if (embed_mode == PART_OF_IC_HANDLER) {
2145 __ Pop(LoadWithVectorDescriptor::VectorRegister(),
2146 LoadWithVectorDescriptor::SlotRegister(), object_);
2147 } else {
2148 __ pop(object_);
2149 }
2150 // Reload the instance type.
2151 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2152 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2153 call_helper.AfterCall(masm);
2154 // If index is still not a smi, it must be out of range.
2155 __ JumpIfNotSmi(index_, index_out_of_range_);
2156 // Otherwise, return to the fast path.
2157 __ Branch(&got_smi_index_);
2158
2159 // Call runtime. We get here when the receiver is a string and the
2160   // index is a number, but the code for getting the actual character
2161 // is too complex (e.g., when the string needs to be flattened).
2162 __ bind(&call_runtime_);
2163 call_helper.BeforeCall(masm);
2164 __ sll(index_, index_, kSmiTagSize);
2165 __ Push(object_, index_);
2166 __ CallRuntime(Runtime::kStringCharCodeAtRT);
2167
2168 __ Move(result_, v0);
2169
2170 call_helper.AfterCall(masm);
2171 __ jmp(&exit_);
2172
2173 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
2174 }
2175
2176
2177 // -------------------------------------------------------------------------
2178 // StringCharFromCodeGenerator
2179
2180 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
2181 // Fast case of Heap::LookupSingleCharacterStringFromCode.
2182
2183 DCHECK(!t0.is(result_));
2184 DCHECK(!t0.is(code_));
2185
2186 STATIC_ASSERT(kSmiTag == 0);
2187 STATIC_ASSERT(kSmiShiftSize == 0);
2188 DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
2189 __ And(t0, code_, Operand(kSmiTagMask |
2190 ((~String::kMaxOneByteCharCodeU) << kSmiTagSize)));
2191 __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
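  // The single And covers both checks: the kSmiTagMask bit is set if code_ is
  // not a Smi, and the bits of ~String::kMaxOneByteCharCodeU shifted past the
  // tag are set only if the untagged value exceeds kMaxOneByteCharCodeU. This
  // relies on kMaxOneByteCharCodeU + 1 being a power of two, as the DCHECK
  // above asserts.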
2192
2193 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
2194 // At this point code register contains smi tagged one-byte char code.
2195 STATIC_ASSERT(kSmiTag == 0);
2196 __ Lsa(result_, result_, code_, kPointerSizeLog2 - kSmiTagSize);
2197 __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
2198 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
2199 __ Branch(&slow_case_, eq, result_, Operand(t0));
2200 __ bind(&exit_);
2201 }
2202
2203
2204 void StringCharFromCodeGenerator::GenerateSlow(
2205 MacroAssembler* masm,
2206 const RuntimeCallHelper& call_helper) {
2207 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
2208
2209 __ bind(&slow_case_);
2210 call_helper.BeforeCall(masm);
2211 __ push(code_);
2212 __ CallRuntime(Runtime::kStringCharFromCode);
2213 __ Move(result_, v0);
2214
2215 call_helper.AfterCall(masm);
2216 __ Branch(&exit_);
2217
2218 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
2219 }
2220
2221
2222 enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
2223
2224
2225 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
2226 Register dest,
2227 Register src,
2228 Register count,
2229 Register scratch,
2230 String::Encoding encoding) {
2231 if (FLAG_debug_code) {
2232 // Check that destination is word aligned.
2233 __ And(scratch, dest, Operand(kPointerAlignmentMask));
2234 __ Check(eq,
2235 kDestinationOfCopyNotAligned,
2236 scratch,
2237 Operand(zero_reg));
2238 }
2239
2240 // Assumes word reads and writes are little endian.
2241 // Nothing to do for zero characters.
2242 Label done;
2243
2244 if (encoding == String::TWO_BYTE_ENCODING) {
2245 __ Addu(count, count, count);
2246 }
2247
2248 Register limit = count; // Read until dest equals this.
2249 __ Addu(limit, dest, Operand(count));
2250
2251 Label loop_entry, loop;
2252 // Copy bytes from src to dest until dest hits limit.
2253 __ Branch(&loop_entry);
2254 __ bind(&loop);
2255 __ lbu(scratch, MemOperand(src));
2256 __ Addu(src, src, Operand(1));
2257 __ sb(scratch, MemOperand(dest));
2258 __ Addu(dest, dest, Operand(1));
2259 __ bind(&loop_entry);
2260 __ Branch(&loop, lt, dest, Operand(limit));
2261
2262 __ bind(&done);
2263 }
2264
2265
2266 void StringHelper::GenerateFlatOneByteStringEquals(
2267 MacroAssembler* masm, Register left, Register right, Register scratch1,
2268 Register scratch2, Register scratch3) {
2269 Register length = scratch1;
2270
2271 // Compare lengths.
2272 Label strings_not_equal, check_zero_length;
2273 __ lw(length, FieldMemOperand(left, String::kLengthOffset));
2274 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
2275 __ Branch(&check_zero_length, eq, length, Operand(scratch2));
2276 __ bind(&strings_not_equal);
2277 DCHECK(is_int16(NOT_EQUAL));
2278 __ Ret(USE_DELAY_SLOT);
2279 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
2280
2281 // Check if the length is zero.
2282 Label compare_chars;
2283 __ bind(&check_zero_length);
2284 STATIC_ASSERT(kSmiTag == 0);
2285 __ Branch(&compare_chars, ne, length, Operand(zero_reg));
2286 DCHECK(is_int16(EQUAL));
2287 __ Ret(USE_DELAY_SLOT);
2288 __ li(v0, Operand(Smi::FromInt(EQUAL)));
2289
2290 // Compare characters.
2291 __ bind(&compare_chars);
2292
2293 GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
2294 v0, &strings_not_equal);
2295
2296 // Characters are equal.
2297 __ Ret(USE_DELAY_SLOT);
2298 __ li(v0, Operand(Smi::FromInt(EQUAL)));
2299 }
2300
2301
2302 void StringHelper::GenerateCompareFlatOneByteStrings(
2303 MacroAssembler* masm, Register left, Register right, Register scratch1,
2304 Register scratch2, Register scratch3, Register scratch4) {
2305 Label result_not_equal, compare_lengths;
2306 // Find minimum length and length difference.
2307 __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
2308 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
2309 __ Subu(scratch3, scratch1, Operand(scratch2));
2310 Register length_delta = scratch3;
2311 __ slt(scratch4, scratch2, scratch1);
2312 __ Movn(scratch1, scratch2, scratch4);
2313 Register min_length = scratch1;
2314 STATIC_ASSERT(kSmiTag == 0);
2315 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
2316
2317 // Compare loop.
2318 GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
2319 scratch4, v0, &result_not_equal);
2320
2321 // Compare lengths - strings up to min-length are equal.
2322 __ bind(&compare_lengths);
2323 DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
2324 // Use length_delta as result if it's zero.
2325 __ mov(scratch2, length_delta);
2326 __ mov(scratch4, zero_reg);
2327 __ mov(v0, zero_reg);
2328
2329 __ bind(&result_not_equal);
2330   // Conditionally update the result based on either length_delta or
2331   // the last comparison performed in the loop above.
2332 Label ret;
2333 __ Branch(&ret, eq, scratch2, Operand(scratch4));
2334 __ li(v0, Operand(Smi::FromInt(GREATER)));
2335 __ Branch(&ret, gt, scratch2, Operand(scratch4));
2336 __ li(v0, Operand(Smi::FromInt(LESS)));
2337 __ bind(&ret);
2338 __ Ret();
2339 }
2340
2341
2342 void StringHelper::GenerateOneByteCharsCompareLoop(
2343 MacroAssembler* masm, Register left, Register right, Register length,
2344 Register scratch1, Register scratch2, Register scratch3,
2345 Label* chars_not_equal) {
2346 // Change index to run from -length to -1 by adding length to string
2347 // start. This means that loop ends when index reaches zero, which
2348 // doesn't need an additional compare.
2349 __ SmiUntag(length);
2350 __ Addu(scratch1, length,
2351 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
2352 __ Addu(left, left, Operand(scratch1));
2353 __ Addu(right, right, Operand(scratch1));
2354 __ Subu(length, zero_reg, length);
2355 Register index = length; // index = -length;
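  // For example, with length == 2, left and right now point just past their
  // last characters and index starts at -2, so left + index addresses the
  // first character; the loop below stops once index reaches zero.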
2356
2357
2358 // Compare loop.
2359 Label loop;
2360 __ bind(&loop);
2361 __ Addu(scratch3, left, index);
2362 __ lbu(scratch1, MemOperand(scratch3));
2363 __ Addu(scratch3, right, index);
2364 __ lbu(scratch2, MemOperand(scratch3));
2365 __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
2366 __ Addu(index, index, 1);
2367 __ Branch(&loop, ne, index, Operand(zero_reg));
2368 }
2369
2370
2371 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
2372 // ----------- S t a t e -------------
2373 // -- a1 : left
2374 // -- a0 : right
2375 // -- ra : return address
2376 // -----------------------------------
2377
2378 // Load a2 with the allocation site. We stick an undefined dummy value here
2379 // and replace it with the real allocation site later when we instantiate this
2380 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
2381 __ li(a2, isolate()->factory()->undefined_value());
2382
2383 // Make sure that we actually patched the allocation site.
2384 if (FLAG_debug_code) {
2385 __ And(at, a2, Operand(kSmiTagMask));
2386 __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
2387 __ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
2388 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2389 __ Assert(eq, kExpectedAllocationSite, t0, Operand(at));
2390 }
2391
2392 // Tail call into the stub that handles binary operations with allocation
2393 // sites.
2394 BinaryOpWithAllocationSiteStub stub(isolate(), state());
2395 __ TailCallStub(&stub);
2396 }
2397
2398
2399 void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
2400 DCHECK_EQ(CompareICState::BOOLEAN, state());
2401 Label miss;
2402
2403 __ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
2404 __ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
2405 if (!Token::IsEqualityOp(op())) {
2406 __ lw(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
2407 __ AssertSmi(a1);
2408 __ lw(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
2409 __ AssertSmi(a0);
2410 }
2411 __ Ret(USE_DELAY_SLOT);
2412 __ Subu(v0, a1, a0);
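  // For equality ops the operands are the boolean objects themselves, so the
  // difference is zero exactly when they are identical; for relational ops
  // they were replaced above by their Smi ToNumber values (0 or 1), whose
  // difference carries the expected sign.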
2413
2414 __ bind(&miss);
2415 GenerateMiss(masm);
2416 }
2417
2418
2419 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
2420 DCHECK(state() == CompareICState::SMI);
2421 Label miss;
2422 __ Or(a2, a1, a0);
2423 __ JumpIfNotSmi(a2, &miss);
2424
2425 if (GetCondition() == eq) {
2426 // For equality we do not care about the sign of the result.
2427 __ Ret(USE_DELAY_SLOT);
2428 __ Subu(v0, a0, a1);
2429 } else {
2430 // Untag before subtracting to avoid handling overflow.
2431 __ SmiUntag(a1);
2432 __ SmiUntag(a0);
2433 __ Ret(USE_DELAY_SLOT);
2434 __ Subu(v0, a1, a0);
2435 }
2436
2437 __ bind(&miss);
2438 GenerateMiss(masm);
2439 }
2440
2441
2442 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
2443 DCHECK(state() == CompareICState::NUMBER);
2444
2445 Label generic_stub;
2446 Label unordered, maybe_undefined1, maybe_undefined2;
2447 Label miss;
2448
2449 if (left() == CompareICState::SMI) {
2450 __ JumpIfNotSmi(a1, &miss);
2451 }
2452 if (right() == CompareICState::SMI) {
2453 __ JumpIfNotSmi(a0, &miss);
2454 }
2455
2456 // Inlining the double comparison and falling back to the general compare
2457 // stub if NaN is involved.
2458 // Load left and right operand.
2459 Label done, left, left_smi, right_smi;
2460 __ JumpIfSmi(a0, &right_smi);
2461 __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
2462 DONT_DO_SMI_CHECK);
2463 __ Subu(a2, a0, Operand(kHeapObjectTag));
2464 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
2465 __ Branch(&left);
2466 __ bind(&right_smi);
2467 __ SmiUntag(a2, a0); // Can't clobber a0 yet.
2468 FPURegister single_scratch = f6;
2469 __ mtc1(a2, single_scratch);
2470 __ cvt_d_w(f2, single_scratch);
2471
2472 __ bind(&left);
2473 __ JumpIfSmi(a1, &left_smi);
2474 __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
2475 DONT_DO_SMI_CHECK);
2476 __ Subu(a2, a1, Operand(kHeapObjectTag));
2477 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
2478 __ Branch(&done);
2479 __ bind(&left_smi);
2480 __ SmiUntag(a2, a1); // Can't clobber a1 yet.
2481 single_scratch = f8;
2482 __ mtc1(a2, single_scratch);
2483 __ cvt_d_w(f0, single_scratch);
2484
2485 __ bind(&done);
2486
2487 // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
2488 Label fpu_eq, fpu_lt;
2489 // Test if equal, and also handle the unordered/NaN case.
2490 __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
2491
2492 // Test if less (unordered case is already handled).
2493 __ BranchF(&fpu_lt, NULL, lt, f0, f2);
2494
2495 // Otherwise it's greater, so just fall thru, and return.
2496 DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
2497 __ Ret(USE_DELAY_SLOT);
2498 __ li(v0, Operand(GREATER));
2499
2500 __ bind(&fpu_eq);
2501 __ Ret(USE_DELAY_SLOT);
2502 __ li(v0, Operand(EQUAL));
2503
2504 __ bind(&fpu_lt);
2505 __ Ret(USE_DELAY_SLOT);
2506 __ li(v0, Operand(LESS));
2507
2508 __ bind(&unordered);
2509 __ bind(&generic_stub);
2510 CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
2511 CompareICState::GENERIC, CompareICState::GENERIC);
2512 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2513
2514 __ bind(&maybe_undefined1);
2515 if (Token::IsOrderedRelationalCompareOp(op())) {
2516 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
2517 __ Branch(&miss, ne, a0, Operand(at));
2518 __ JumpIfSmi(a1, &unordered);
2519 __ GetObjectType(a1, a2, a2);
2520 __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
2521 __ jmp(&unordered);
2522 }
2523
2524 __ bind(&maybe_undefined2);
2525 if (Token::IsOrderedRelationalCompareOp(op())) {
2526 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
2527 __ Branch(&unordered, eq, a1, Operand(at));
2528 }
2529
2530 __ bind(&miss);
2531 GenerateMiss(masm);
2532 }
2533
2534
2535 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
2536 DCHECK(state() == CompareICState::INTERNALIZED_STRING);
2537 Label miss;
2538
2539 // Registers containing left and right operands respectively.
2540 Register left = a1;
2541 Register right = a0;
2542 Register tmp1 = a2;
2543 Register tmp2 = a3;
2544
2545 // Check that both operands are heap objects.
2546 __ JumpIfEitherSmi(left, right, &miss);
2547
2548 // Check that both operands are internalized strings.
2549 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2550 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2551 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2552 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2553 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2554 __ Or(tmp1, tmp1, Operand(tmp2));
2555 __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
2556 __ Branch(&miss, ne, at, Operand(zero_reg));
2557
2558 // Make sure a0 is non-zero. At this point input operands are
2559 // guaranteed to be non-zero.
2560 DCHECK(right.is(a0));
2561 STATIC_ASSERT(EQUAL == 0);
2562 STATIC_ASSERT(kSmiTag == 0);
2563 __ mov(v0, right);
2564 // Internalized strings are compared by identity.
2565 __ Ret(ne, left, Operand(right));
2566 DCHECK(is_int16(EQUAL));
2567 __ Ret(USE_DELAY_SLOT);
2568 __ li(v0, Operand(Smi::FromInt(EQUAL)));
2569
2570 __ bind(&miss);
2571 GenerateMiss(masm);
2572 }
2573
2574
2575 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
2576 DCHECK(state() == CompareICState::UNIQUE_NAME);
2577 DCHECK(GetCondition() == eq);
2578 Label miss;
2579
2580 // Registers containing left and right operands respectively.
2581 Register left = a1;
2582 Register right = a0;
2583 Register tmp1 = a2;
2584 Register tmp2 = a3;
2585
2586 // Check that both operands are heap objects.
2587 __ JumpIfEitherSmi(left, right, &miss);
2588
2589 // Check that both operands are unique names. This leaves the instance
2590 // types loaded in tmp1 and tmp2.
2591 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2592 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2593 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2594 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2595
2596 __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
2597 __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
2598
2599 // Use a0 as result
2600 __ mov(v0, a0);
2601
2602 // Unique names are compared by identity.
2603 Label done;
2604 __ Branch(&done, ne, left, Operand(right));
2605 // Make sure a0 is non-zero. At this point input operands are
2606 // guaranteed to be non-zero.
2607 DCHECK(right.is(a0));
2608 STATIC_ASSERT(EQUAL == 0);
2609 STATIC_ASSERT(kSmiTag == 0);
2610 __ li(v0, Operand(Smi::FromInt(EQUAL)));
2611 __ bind(&done);
2612 __ Ret();
2613
2614 __ bind(&miss);
2615 GenerateMiss(masm);
2616 }
2617
2618
2619 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
2620 DCHECK(state() == CompareICState::STRING);
2621 Label miss;
2622
2623 bool equality = Token::IsEqualityOp(op());
2624
2625 // Registers containing left and right operands respectively.
2626 Register left = a1;
2627 Register right = a0;
2628 Register tmp1 = a2;
2629 Register tmp2 = a3;
2630 Register tmp3 = t0;
2631 Register tmp4 = t1;
2632 Register tmp5 = t2;
2633
2634 // Check that both operands are heap objects.
2635 __ JumpIfEitherSmi(left, right, &miss);
2636
2637 // Check that both operands are strings. This leaves the instance
2638 // types loaded in tmp1 and tmp2.
2639 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2640 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2641 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2642 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2643 STATIC_ASSERT(kNotStringTag != 0);
2644 __ Or(tmp3, tmp1, tmp2);
2645 __ And(tmp5, tmp3, Operand(kIsNotStringMask));
2646 __ Branch(&miss, ne, tmp5, Operand(zero_reg));
2647
2648 // Fast check for identical strings.
2649 Label left_ne_right;
2650 STATIC_ASSERT(EQUAL == 0);
2651 STATIC_ASSERT(kSmiTag == 0);
2652 __ Branch(&left_ne_right, ne, left, Operand(right));
2653 __ Ret(USE_DELAY_SLOT);
2654 __ mov(v0, zero_reg); // In the delay slot.
2655 __ bind(&left_ne_right);
2656
2657 // Handle not identical strings.
2658
2659 // Check that both strings are internalized strings. If they are, we're done
2660 // because we already know they are not identical. We know they are both
2661 // strings.
2662 if (equality) {
2663 DCHECK(GetCondition() == eq);
2664 STATIC_ASSERT(kInternalizedTag == 0);
2665 __ Or(tmp3, tmp1, Operand(tmp2));
2666 __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
2667 Label is_symbol;
2668 __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
2669 // Make sure a0 is non-zero. At this point input operands are
2670 // guaranteed to be non-zero.
2671 DCHECK(right.is(a0));
2672 __ Ret(USE_DELAY_SLOT);
2673 __ mov(v0, a0); // In the delay slot.
2674 __ bind(&is_symbol);
2675 }
2676
2677 // Check that both strings are sequential one-byte.
2678 Label runtime;
2679 __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
2680 &runtime);
2681
2682 // Compare flat one-byte strings. Returns when done.
2683 if (equality) {
2684 StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
2685 tmp3);
2686 } else {
2687 StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
2688 tmp2, tmp3, tmp4);
2689 }
2690
2691 // Handle more complex cases in runtime.
2692 __ bind(&runtime);
2693 if (equality) {
2694 {
2695 FrameScope scope(masm, StackFrame::INTERNAL);
2696 __ Push(left, right);
2697 __ CallRuntime(Runtime::kStringEqual);
2698 }
2699 __ LoadRoot(a0, Heap::kTrueValueRootIndex);
2700 __ Ret(USE_DELAY_SLOT);
2701 __ Subu(v0, v0, a0); // In delay slot.
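    // The runtime call is expected to leave a boolean in v0; subtracting the
    // true value yields zero exactly when the strings compared equal, which
    // matches the zero-means-equal convention the other paths of this stub
    // use.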
2702 } else {
2703 __ Push(left, right);
2704 __ TailCallRuntime(Runtime::kStringCompare);
2705 }
2706
2707 __ bind(&miss);
2708 GenerateMiss(masm);
2709 }
2710
2711
2712 void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
2713 DCHECK_EQ(CompareICState::RECEIVER, state());
2714 Label miss;
2715 __ And(a2, a1, Operand(a0));
2716 __ JumpIfSmi(a2, &miss);
2717
2718 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2719 __ GetObjectType(a0, a2, a2);
2720 __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
2721 __ GetObjectType(a1, a2, a2);
2722 __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
2723
2724 DCHECK_EQ(eq, GetCondition());
2725 __ Ret(USE_DELAY_SLOT);
2726 __ subu(v0, a0, a1);
2727
2728 __ bind(&miss);
2729 GenerateMiss(masm);
2730 }
2731
2732
2733 void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
2734 Label miss;
2735 Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
2736 __ And(a2, a1, a0);
2737 __ JumpIfSmi(a2, &miss);
2738 __ GetWeakValue(t0, cell);
2739 __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
2740 __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
2741 __ Branch(&miss, ne, a2, Operand(t0));
2742 __ Branch(&miss, ne, a3, Operand(t0));
2743
2744 if (Token::IsEqualityOp(op())) {
2745 __ Ret(USE_DELAY_SLOT);
2746 __ subu(v0, a0, a1);
2747 } else {
2748 if (op() == Token::LT || op() == Token::LTE) {
2749 __ li(a2, Operand(Smi::FromInt(GREATER)));
2750 } else {
2751 __ li(a2, Operand(Smi::FromInt(LESS)));
2752 }
2753 __ Push(a1, a0, a2);
2754 __ TailCallRuntime(Runtime::kCompare);
2755 }
2756
2757 __ bind(&miss);
2758 GenerateMiss(masm);
2759 }
2760
2761
2762 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
2763 {
2764 // Call the runtime system in a fresh internal frame.
2765 FrameScope scope(masm, StackFrame::INTERNAL);
2766 __ Push(a1, a0);
2767 __ Push(ra, a1, a0);
2768 __ li(t0, Operand(Smi::FromInt(op())));
2769 __ addiu(sp, sp, -kPointerSize);
2770 __ CallRuntime(Runtime::kCompareIC_Miss, 3, kDontSaveFPRegs,
2771 USE_DELAY_SLOT);
2772 __ sw(t0, MemOperand(sp)); // In the delay slot.
2773 // Compute the entry point of the rewritten stub.
2774 __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
2775 // Restore registers.
2776 __ Pop(a1, a0, ra);
2777 }
2778 __ Jump(a2);
2779 }
2780
2781
2782 void DirectCEntryStub::Generate(MacroAssembler* masm) {
2783   // Make room for the arguments to fit the C calling convention. Most of the
2784   // callers of DirectCEntryStub::GenerateCall use EnterExitFrame/LeaveExitFrame,
2785   // so they handle stack restoring and we don't have to do that here.
2786 // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
2787 // kCArgsSlotsSize stack space after the call.
2788 __ Subu(sp, sp, Operand(kCArgsSlotsSize));
2789 // Place the return address on the stack, making the call
2790 // GC safe. The RegExp backend also relies on this.
2791 __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
2792 __ Call(t9); // Call the C++ function.
2793 __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
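  // The return address is reloaded from the stack rather than kept in ra so
  // that a GC triggered during the C++ call can relocate it if the calling
  // code object moves; t9 then holds the (possibly updated) address to jump
  // back to.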
2794
2795 if (FLAG_debug_code && FLAG_enable_slow_asserts) {
2796 // In case of an error the return address may point to a memory area
2797 // filled with kZapValue by the GC.
2798 // Dereference the address and check for this.
2799 __ lw(t0, MemOperand(t9));
2800 __ Assert(ne, kReceivedInvalidReturnAddress, t0,
2801 Operand(reinterpret_cast<uint32_t>(kZapValue)));
2802 }
2803 __ Jump(t9);
2804 }
2805
2806
2807 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
2808 Register target) {
2809 intptr_t loc =
2810 reinterpret_cast<intptr_t>(GetCode().location());
2811 __ Move(t9, target);
2812 __ li(at, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
2813 __ Call(at);
2814 }
2815
2816
2817 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
2818 Label* miss,
2819 Label* done,
2820 Register receiver,
2821 Register properties,
2822 Handle<Name> name,
2823 Register scratch0) {
2824 DCHECK(name->IsUniqueName());
2825 // If names of slots in range from 1 to kProbes - 1 for the hash value are
2826 // not equal to the name and kProbes-th slot is not used (its name is the
2827 // undefined value), it guarantees the hash table doesn't contain the
2828 // property. It's true even if some slots represent deleted properties
2829 // (their names are the hole value).
2830 for (int i = 0; i < kInlinedProbes; i++) {
2831 // scratch0 points to properties hash.
2832 // Compute the masked index: (hash + i + i * i) & mask.
2833 Register index = scratch0;
2834 // Capacity is smi 2^n.
2835 __ lw(index, FieldMemOperand(properties, kCapacityOffset));
2836 __ Subu(index, index, Operand(1));
2837 __ And(index, index, Operand(
2838 Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
2839
2840 // Scale the index by multiplying by the entry size.
2841 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
2842 __ Lsa(index, index, index, 1);
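    // Lsa(index, index, index, 1) computes index + (index << 1), i.e.
    // index * 3, matching NameDictionary::kEntrySize == 3 asserted above.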
2843
2844 Register entity_name = scratch0;
2845 // Having undefined at this place means the name is not contained.
2846 STATIC_ASSERT(kSmiTagSize == 1);
2847 Register tmp = properties;
2848 __ Lsa(tmp, properties, index, 1);
2849 __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
2850
2851 DCHECK(!tmp.is(entity_name));
2852 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
2853 __ Branch(done, eq, entity_name, Operand(tmp));
2854
2855 // Load the hole ready for use below:
2856 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
2857
2858 // Stop if found the property.
2859 __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
2860
2861 Label good;
2862 __ Branch(&good, eq, entity_name, Operand(tmp));
2863
2864 // Check if the entry name is not a unique name.
2865 __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
2866 __ lbu(entity_name,
2867 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
2868 __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
2869 __ bind(&good);
2870
2871 // Restore the properties.
2872 __ lw(properties,
2873 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
2874 }
2875
2876 const int spill_mask =
2877 (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
2878 a2.bit() | a1.bit() | a0.bit() | v0.bit());
2879
2880 __ MultiPush(spill_mask);
2881 __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
2882 __ li(a1, Operand(Handle<Name>(name)));
2883 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
2884 __ CallStub(&stub);
2885 __ mov(at, v0);
2886 __ MultiPop(spill_mask);
2887
2888 __ Branch(done, eq, at, Operand(zero_reg));
2889 __ Branch(miss, ne, at, Operand(zero_reg));
2890 }
2891
2892
2893 // Probe the name dictionary in the |elements| register. Jump to the
2894 // |done| label if a property with the given name is found. Jump to
2895 // the |miss| label otherwise.
2896 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
2897 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
2898 Label* miss,
2899 Label* done,
2900 Register elements,
2901 Register name,
2902 Register scratch1,
2903 Register scratch2) {
2904 DCHECK(!elements.is(scratch1));
2905 DCHECK(!elements.is(scratch2));
2906 DCHECK(!name.is(scratch1));
2907 DCHECK(!name.is(scratch2));
2908
2909 __ AssertName(name);
2910
2911 // Compute the capacity mask.
2912 __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
2913 __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
2914 __ Subu(scratch1, scratch1, Operand(1));
2915
2916 // Generate an unrolled loop that performs a few probes before
2917 // giving up. Measurements done on Gmail indicate that 2 probes
2918 // cover ~93% of loads from dictionaries.
2919 for (int i = 0; i < kInlinedProbes; i++) {
2920 // Compute the masked index: (hash + i + i * i) & mask.
2921 __ lw(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
2922 if (i > 0) {
2923 // Add the probe offset (i + i * i) left shifted to avoid right shifting
2924       // the hash in a separate instruction. The value hash + i + i * i is
2925       // right-shifted and masked by the srl/And instructions below.
2926 DCHECK(NameDictionary::GetProbeOffset(i) <
2927 1 << (32 - Name::kHashFieldOffset));
2928 __ Addu(scratch2, scratch2, Operand(
2929 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
2930 }
2931 __ srl(scratch2, scratch2, Name::kHashShift);
2932 __ And(scratch2, scratch1, scratch2);
2933
2934 // Scale the index by multiplying by the element size.
2935 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
2936 // scratch2 = scratch2 * 3.
2937
2938 __ Lsa(scratch2, scratch2, scratch2, 1);
2939
2940 // Check if the key is identical to the name.
2941 __ Lsa(scratch2, elements, scratch2, 2);
2942 __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
2943 __ Branch(done, eq, name, Operand(at));
2944 }
2945
2946 const int spill_mask =
2947 (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
2948 a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
2949 ~(scratch1.bit() | scratch2.bit());
2950
2951 __ MultiPush(spill_mask);
2952 if (name.is(a0)) {
2953 DCHECK(!elements.is(a1));
2954 __ Move(a1, name);
2955 __ Move(a0, elements);
2956 } else {
2957 __ Move(a0, elements);
2958 __ Move(a1, name);
2959 }
2960 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
2961 __ CallStub(&stub);
2962 __ mov(scratch2, a2);
2963 __ mov(at, v0);
2964 __ MultiPop(spill_mask);
2965
2966 __ Branch(done, ne, at, Operand(zero_reg));
2967 __ Branch(miss, eq, at, Operand(zero_reg));
2968 }
2969
2970
2971 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
2972 // This stub overrides SometimesSetsUpAFrame() to return false. That means
2973 // we cannot call anything that could cause a GC from this stub.
2974 // Registers:
2975   //  result: will hold the lookup result; zero if the lookup failed.
2976   //  key: the name to look up.
2977 // dictionary: NameDictionary to probe.
2978 // index: will hold an index of entry if lookup is successful.
2979 // might alias with result_.
2980 // Returns:
2981 // result_ is zero if lookup failed, non zero otherwise.
2982
2983 Register result = v0;
2984 Register dictionary = a0;
2985 Register key = a1;
2986 Register index = a2;
2987 Register mask = a3;
2988 Register hash = t0;
2989 Register undefined = t1;
2990 Register entry_key = t2;
2991
2992 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
2993
2994 __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
2995 __ sra(mask, mask, kSmiTagSize);
2996 __ Subu(mask, mask, Operand(1));
2997
2998 __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
2999
3000 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
3001
3002 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
3003 // Compute the masked index: (hash + i + i * i) & mask.
3004 // Capacity is smi 2^n.
3005 if (i > 0) {
3006       // Add the probe offset (i + i * i) left shifted to avoid right shifting
3007       // the hash in a separate instruction. The value hash + i + i * i is
3008       // right shifted below and then masked by the And instruction.
3009 DCHECK(NameDictionary::GetProbeOffset(i) <
3010 1 << (32 - Name::kHashFieldOffset));
3011 __ Addu(index, hash, Operand(
3012 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
3013 } else {
3014 __ mov(index, hash);
3015 }
3016 __ srl(index, index, Name::kHashShift);
3017 __ And(index, mask, index);
3018
3019 // Scale the index by multiplying by the entry size.
3020 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
3021 // index *= 3.
3022 __ Lsa(index, index, index, 1);
3023
3024 STATIC_ASSERT(kSmiTagSize == 1);
3025 __ Lsa(index, dictionary, index, 2);
3026 __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
3027
3028 // Having undefined at this place means the name is not contained.
3029     __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
3030
3031 // Stop if found the property.
3032 __ Branch(&in_dictionary, eq, entry_key, Operand(key));
3033
3034 if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
3035 // Check if the entry name is not a unique name.
3036 __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
3037 __ lbu(entry_key,
3038 FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
3039 __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
3040 }
3041 }
3042
3043 __ bind(&maybe_in_dictionary);
3044 // If we are doing negative lookup then probing failure should be
3045 // treated as a lookup success. For positive lookup probing failure
3046 // should be treated as lookup failure.
3047 if (mode() == POSITIVE_LOOKUP) {
3048 __ Ret(USE_DELAY_SLOT);
3049 __ mov(result, zero_reg);
3050 }
3051
3052 __ bind(&in_dictionary);
3053 __ Ret(USE_DELAY_SLOT);
3054 __ li(result, 1);
3055
3056   __ bind(&not_in_dictionary);
3057 __ Ret(USE_DELAY_SLOT);
3058 __ mov(result, zero_reg);
3059 }
3060
3061
3062 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
3063 Isolate* isolate) {
3064 StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
3065 stub1.GetCode();
3066 // Hydrogen code stubs need stub2 at snapshot time.
3067 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
3068 stub2.GetCode();
3069 }
3070
3071
3072 // Takes the input in 3 registers: address_, value_ and object_. A pointer to
3073 // the value has just been written into the object; now this stub makes sure
3074 // we keep the GC informed. The word in the object where the value has been
3075 // written is in the address register.
3076 void RecordWriteStub::Generate(MacroAssembler* masm) {
3077 Label skip_to_incremental_noncompacting;
3078 Label skip_to_incremental_compacting;
3079
3080 // The first two branch+nop instructions are generated with labels so as to
3081 // get the offset fixed up correctly by the bind(Label*) call. We patch it
3082 // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
3083 // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
3084 // incremental heap marking.
3085 // See RecordWriteStub::Patch for details.
3086 __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
3087 __ nop();
3088 __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
3089 __ nop();
3090
3091 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
3092 __ RememberedSetHelper(object(),
3093 address(),
3094 value(),
3095 save_fp_regs_mode(),
3096 MacroAssembler::kReturnAtEnd);
3097 }
3098 __ Ret();
3099
3100 __ bind(&skip_to_incremental_noncompacting);
3101 GenerateIncremental(masm, INCREMENTAL);
3102
3103 __ bind(&skip_to_incremental_compacting);
3104 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
3105
3106 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
3107 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
3108
3109 PatchBranchIntoNop(masm, 0);
3110 PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
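  // Both branches emitted at the top of the stub are now patched into nops,
  // matching the initial STORE_BUFFER_ONLY mode.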
3111 }
3112
3113
3114 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
3115 regs_.Save(masm);
3116
3117 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
3118 Label dont_need_remembered_set;
3119
3120 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
3121 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
3122 regs_.scratch0(),
3123 &dont_need_remembered_set);
3124
3125 __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
3126 &dont_need_remembered_set);
3127
3128 // First notify the incremental marker if necessary, then update the
3129 // remembered set.
3130 CheckNeedsToInformIncrementalMarker(
3131 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
3132 InformIncrementalMarker(masm);
3133 regs_.Restore(masm);
3134 __ RememberedSetHelper(object(),
3135 address(),
3136 value(),
3137 save_fp_regs_mode(),
3138 MacroAssembler::kReturnAtEnd);
3139
3140 __ bind(&dont_need_remembered_set);
3141 }
3142
3143 CheckNeedsToInformIncrementalMarker(
3144 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
3145 InformIncrementalMarker(masm);
3146 regs_.Restore(masm);
3147 __ Ret();
3148 }
3149
3150
3151 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
3152 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
3153 int argument_count = 3;
3154 __ PrepareCallCFunction(argument_count, regs_.scratch0());
3155 Register address =
3156 a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
3157 DCHECK(!address.is(regs_.object()));
3158 DCHECK(!address.is(a0));
3159 __ Move(address, regs_.address());
3160 __ Move(a0, regs_.object());
3161 __ Move(a1, address);
3162 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
3163
3164 AllowExternalCallThatCantCauseGC scope(masm);
3165 __ CallCFunction(
3166 ExternalReference::incremental_marking_record_write_function(isolate()),
3167 argument_count);
3168 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
3169 }
3170
3171
3172 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
3173 MacroAssembler* masm,
3174 OnNoNeedToInformIncrementalMarker on_no_need,
3175 Mode mode) {
3176 Label on_black;
3177 Label need_incremental;
3178 Label need_incremental_pop_scratch;
3179
3180 // Let's look at the color of the object: If it is not black we don't have
3181 // to inform the incremental marker.
3182 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
3183
3184 regs_.Restore(masm);
3185 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
3186 __ RememberedSetHelper(object(),
3187 address(),
3188 value(),
3189 save_fp_regs_mode(),
3190 MacroAssembler::kReturnAtEnd);
3191 } else {
3192 __ Ret();
3193 }
3194
3195 __ bind(&on_black);
3196
3197 // Get the value from the slot.
3198 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
3199
3200 if (mode == INCREMENTAL_COMPACTION) {
3201 Label ensure_not_white;
3202
3203 __ CheckPageFlag(regs_.scratch0(), // Contains value.
3204 regs_.scratch1(), // Scratch.
3205 MemoryChunk::kEvacuationCandidateMask,
3206 eq,
3207 &ensure_not_white);
3208
3209 __ CheckPageFlag(regs_.object(),
3210 regs_.scratch1(), // Scratch.
3211 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
3212 eq,
3213 &need_incremental);
3214
3215 __ bind(&ensure_not_white);
3216 }
3217
3218 // We need extra registers for this, so we push the object and the address
3219 // register temporarily.
3220 __ Push(regs_.object(), regs_.address());
3221 __ JumpIfWhite(regs_.scratch0(), // The value.
3222 regs_.scratch1(), // Scratch.
3223 regs_.object(), // Scratch.
3224 regs_.address(), // Scratch.
3225 &need_incremental_pop_scratch);
3226 __ Pop(regs_.object(), regs_.address());
3227
3228 regs_.Restore(masm);
3229 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
3230 __ RememberedSetHelper(object(),
3231 address(),
3232 value(),
3233 save_fp_regs_mode(),
3234 MacroAssembler::kReturnAtEnd);
3235 } else {
3236 __ Ret();
3237 }
3238
3239 __ bind(&need_incremental_pop_scratch);
3240 __ Pop(regs_.object(), regs_.address());
3241
3242 __ bind(&need_incremental);
3243
3244 // Fall through when we need to inform the incremental marker.
3245 }
3246
3247
3248 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
3249 CEntryStub ces(isolate(), 1, kSaveFPRegs);
3250 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
3251 int parameter_count_offset =
3252 StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
3253 __ lw(a1, MemOperand(fp, parameter_count_offset));
3254 if (function_mode() == JS_FUNCTION_STUB_MODE) {
3255 __ Addu(a1, a1, Operand(1));
3256 }
3257 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
3258 __ sll(a1, a1, kPointerSizeLog2);
3259 __ Ret(USE_DELAY_SLOT);
3260 __ Addu(sp, sp, a1);
3261 }
3262
3263 void CallICTrampolineStub::Generate(MacroAssembler* masm) {
3264 __ EmitLoadTypeFeedbackVector(a2);
3265 CallICStub stub(isolate(), state());
3266 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
3267 }
3268
3269
3270 static void HandleArrayCases(MacroAssembler* masm, Register feedback,
3271 Register receiver_map, Register scratch1,
3272 Register scratch2, bool is_polymorphic,
3273 Label* miss) {
3274 // feedback initially contains the feedback array
3275 Label next_loop, prepare_next;
3276 Label start_polymorphic;
3277
3278 Register cached_map = scratch1;
3279
3280 __ lw(cached_map,
3281 FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
3282 __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
3283 __ Branch(&start_polymorphic, ne, receiver_map, Operand(cached_map));
3284 // found, now call handler.
3285 Register handler = feedback;
3286 __ lw(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
3287 __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
3288 __ Jump(t9);
3289
3290
3291 Register length = scratch2;
3292 __ bind(&start_polymorphic);
3293 __ lw(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
3294 if (!is_polymorphic) {
3295 // If the IC could be monomorphic we have to make sure we don't go past the
3296 // end of the feedback array.
3297 __ Branch(miss, eq, length, Operand(Smi::FromInt(2)));
3298 }
3299
3300 Register too_far = length;
3301 Register pointer_reg = feedback;
3302
3303 // +-----+------+------+-----+-----+ ... ----+
3304 // | map | len | wm0 | h0 | wm1 | hN |
3305 // +-----+------+------+-----+-----+ ... ----+
3306 // 0 1 2 len-1
3307 // ^ ^
3308 // | |
3309 // pointer_reg too_far
3310 // aka feedback scratch2
3311 // also need receiver_map
3312 // use cached_map (scratch1) to look in the weak map values.
3313 __ Lsa(too_far, feedback, length, kPointerSizeLog2 - kSmiTagSize);
3314 __ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3315 __ Addu(pointer_reg, feedback,
3316 Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
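  // too_far now points just past the last (map, handler) pair and pointer_reg
  // at the first cached map (element 2); the loop below advances pointer_reg
  // by two pointers per iteration until it reaches too_far.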
3317
3318 __ bind(&next_loop);
3319 __ lw(cached_map, MemOperand(pointer_reg));
3320 __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
3321 __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
3322 __ lw(handler, MemOperand(pointer_reg, kPointerSize));
3323 __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
3324 __ Jump(t9);
3325
3326 __ bind(&prepare_next);
3327 __ Addu(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
3328 __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
3329
3330 // We exhausted our array of map handler pairs.
3331 __ jmp(miss);
3332 }
3333
3334
3335 static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
3336 Register receiver_map, Register feedback,
3337 Register vector, Register slot,
3338 Register scratch, Label* compare_map,
3339 Label* load_smi_map, Label* try_array) {
3340 __ JumpIfSmi(receiver, load_smi_map);
3341 __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
3342 __ bind(compare_map);
3343 Register cached_map = scratch;
3344   // Load the map held by the WeakCell into cached_map.
3345 __ lw(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
3346 __ Branch(try_array, ne, cached_map, Operand(receiver_map));
3347 Register handler = feedback;
3348
3349 __ Lsa(handler, vector, slot, kPointerSizeLog2 - kSmiTagSize);
3350 __ lw(handler,
3351 FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
3352 __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
3353 __ Jump(t9);
3354 }
3355
3356 void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
3357 __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
3358 KeyedStoreICStub stub(isolate(), state());
3359 stub.GenerateForTrampoline(masm);
3360 }
3361
3362 void KeyedStoreICStub::Generate(MacroAssembler* masm) {
3363 GenerateImpl(masm, false);
3364 }
3365
3366 void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
3367 GenerateImpl(masm, true);
3368 }
3369
3370
3371 static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
3372 Register receiver_map, Register scratch1,
3373 Register scratch2, Label* miss) {
3374 // feedback initially contains the feedback array
3375 Label next_loop, prepare_next;
3376 Label start_polymorphic;
3377 Label transition_call;
3378
3379 Register cached_map = scratch1;
3380 Register too_far = scratch2;
3381 Register pointer_reg = feedback;
3382 __ lw(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
3383
3384 // +-----+------+------+-----+-----+-----+ ... ----+
3385 // | map | len | wm0 | wt0 | h0 | wm1 | hN |
3386 // +-----+------+------+-----+-----+ ----+ ... ----+
3387 // 0 1 2 len-1
3388 // ^ ^
3389 // | |
3390 // pointer_reg too_far
3391 // aka feedback scratch2
3392 // also need receiver_map
3393 // use cached_map (scratch1) to look in the weak map values.
3394 __ Lsa(too_far, feedback, too_far, kPointerSizeLog2 - kSmiTagSize);
3395 __ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3396 __ Addu(pointer_reg, feedback,
3397 Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
3398
3399 __ bind(&next_loop);
3400 __ lw(cached_map, MemOperand(pointer_reg));
3401 __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
3402 __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
3403 // Is it a transitioning store?
3404 __ lw(too_far, MemOperand(pointer_reg, kPointerSize));
3405 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3406 __ Branch(&transition_call, ne, too_far, Operand(at));
3407 __ lw(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
3408 __ Addu(t9, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
3409 __ Jump(t9);
3410
3411 __ bind(&transition_call);
3412 __ lw(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
3413 __ JumpIfSmi(too_far, miss);
3414
3415 __ lw(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
3416
3417 // Load the map into the correct register.
3418 DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
3419 __ mov(feedback, too_far);
3420
3421 __ Addu(t9, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
3422 __ Jump(t9);
3423
3424 __ bind(&prepare_next);
3425 __ Addu(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
3426 __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
3427
3428 // We exhausted our array of map handler pairs.
3429 __ jmp(miss);
3430 }
3431
3432 void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
3433 Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // a1
3434 Register key = StoreWithVectorDescriptor::NameRegister(); // a2
3435 Register vector = StoreWithVectorDescriptor::VectorRegister(); // a3
3436 Register slot = StoreWithVectorDescriptor::SlotRegister(); // t0
3437 DCHECK(StoreWithVectorDescriptor::ValueRegister().is(a0)); // a0
3438 Register feedback = t1;
3439 Register receiver_map = t2;
3440 Register scratch1 = t5;
3441
3442 __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
3443 __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
3444
3445 // Try to quickly handle the monomorphic case without knowing for sure
3446 // if we have a weak cell in feedback. We do know it's safe to look
3447 // at WeakCell::kValueOffset.
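  // If the feedback slot holds something other than a weak cell (e.g. a fixed
  // array or a symbol), the word loaded from WeakCell::kValueOffset will not
  // match the receiver map and we simply fall through to the checks below.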
3448 Label try_array, load_smi_map, compare_map;
3449 Label not_array, miss;
3450 HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
3451 scratch1, &compare_map, &load_smi_map, &try_array);
3452
3453 __ bind(&try_array);
3454 // Is it a fixed array?
3455 __ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
3456 __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
3457   __ Branch(&not_array, ne, scratch1, Operand(at));
3458
3459 // We have a polymorphic element handler.
3460 Label polymorphic, try_poly_name;
3461 __ bind(&polymorphic);
3462
3463 Register scratch2 = t4;
3464
3465 HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
3466 &miss);
3467
3468   __ bind(&not_array);
3469 // Is it generic?
3470 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
3471 __ Branch(&try_poly_name, ne, feedback, Operand(at));
3472 Handle<Code> megamorphic_stub =
3473 KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
3474 __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
3475
3476 __ bind(&try_poly_name);
3477 // We might have a name in feedback, and a fixed array in the next slot.
3478 __ Branch(&miss, ne, key, Operand(feedback));
3479 // If the name comparison succeeded, we know we have a fixed array with
3480 // at least one map/handler pair.
3481 __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
3482 __ lw(feedback,
3483 FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
3484 HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
3485 &miss);
3486
3487 __ bind(&miss);
3488 KeyedStoreIC::GenerateMiss(masm);
3489
3490 __ bind(&load_smi_map);
3491 __ Branch(USE_DELAY_SLOT, &compare_map);
3492 __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); // In delay slot.
3493 }
3494
3495
3496 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
3497 if (masm->isolate()->function_entry_hook() != NULL) {
3498 ProfileEntryHookStub stub(masm->isolate());
3499 __ push(ra);
3500 __ CallStub(&stub);
3501 __ pop(ra);
3502 }
3503 }
3504
3505
3506 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
3507 // The entry hook is a "push ra" instruction, followed by a call.
3508   // Note: on MIPS "push" is 2 instructions.
3509 const int32_t kReturnAddressDistanceFromFunctionStart =
3510 Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
3511
3512 // This should contain all kJSCallerSaved registers.
3513 const RegList kSavedRegs =
3514 kJSCallerSaved | // Caller saved registers.
3515 s5.bit(); // Saved stack pointer.
3516
3517 // We also save ra, so the count here is one higher than the mask indicates.
3518 const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
3519
3520 // Save all caller-save registers as this may be called from anywhere.
3521 __ MultiPush(kSavedRegs | ra.bit());
3522
3523 // Compute the function's address for the first argument.
3524 __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
3525
3526 // The caller's return address is above the saved temporaries.
3527 // Grab that for the second argument to the hook.
3528 __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
3529
3530 // Align the stack if necessary.
3531 int frame_alignment = masm->ActivationFrameAlignment();
3532 if (frame_alignment > kPointerSize) {
3533 __ mov(s5, sp);
3534 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
3535 __ And(sp, sp, Operand(-frame_alignment));
3536 }
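  // For example, with an 8-byte activation frame alignment the And above
  // clears the low three bits of sp; the original sp was saved in s5 and is
  // restored after the call.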
3537 __ Subu(sp, sp, kCArgsSlotsSize);
3538 #if defined(V8_HOST_ARCH_MIPS)
3539 int32_t entry_hook =
3540 reinterpret_cast<int32_t>(isolate()->function_entry_hook());
3541 __ li(t9, Operand(entry_hook));
3542 #else
3543 // Under the simulator we need to indirect the entry hook through a
3544 // trampoline function at a known address.
3545 // It additionally takes an isolate as a third parameter.
3546 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
3547
3548 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
3549 __ li(t9, Operand(ExternalReference(&dispatcher,
3550 ExternalReference::BUILTIN_CALL,
3551 isolate())));
3552 #endif
3553   // Call the C function through t9 to conform to the ABI for PIC code.
3554 __ Call(t9);
3555
3556 // Restore the stack pointer if needed.
3557 if (frame_alignment > kPointerSize) {
3558 __ mov(sp, s5);
3559 } else {
3560 __ Addu(sp, sp, kCArgsSlotsSize);
3561 }
3562
3563 // Also pop ra to get Ret(0).
3564 __ MultiPop(kSavedRegs | ra.bit());
3565 __ Ret();
3566 }
3567
3568
3569 template<class T>
3570 static void CreateArrayDispatch(MacroAssembler* masm,
3571 AllocationSiteOverrideMode mode) {
3572 if (mode == DISABLE_ALLOCATION_SITES) {
3573 T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
3574 __ TailCallStub(&stub);
3575 } else if (mode == DONT_OVERRIDE) {
3576 int last_index = GetSequenceIndexFromFastElementsKind(
3577 TERMINAL_FAST_ELEMENTS_KIND);
3578 for (int i = 0; i <= last_index; ++i) {
3579 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
3580 T stub(masm->isolate(), kind);
3581 __ TailCallStub(&stub, eq, a3, Operand(kind));
3582 }
3583
3584 // If we reached this point there is a problem.
3585 __ Abort(kUnexpectedElementsKindInArrayConstructor);
3586 } else {
3587 UNREACHABLE();
3588 }
3589 }
3590
3591
3592 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
3593 AllocationSiteOverrideMode mode) {
3594 // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
3595 // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
3596 // a0 - number of arguments
3597 // a1 - constructor?
3598 // sp[0] - last argument
3599 Label normal_sequence;
3600 if (mode == DONT_OVERRIDE) {
3601 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3602 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3603 STATIC_ASSERT(FAST_ELEMENTS == 2);
3604 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3605 STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
3606 STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
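    // With the ordering asserted above, each holey kind is its packed
    // counterpart plus one, so the low bit of the kind distinguishes packed
    // (0) from holey (1).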
3607
3608 // is the low bit set? If so, we are holey and that is good.
3609 __ And(at, a3, Operand(1));
3610 __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
3611 }
3612
3613 // look at the first argument
3614 __ lw(t1, MemOperand(sp, 0));
3615 __ Branch(&normal_sequence, eq, t1, Operand(zero_reg));
3616
3617 if (mode == DISABLE_ALLOCATION_SITES) {
3618 ElementsKind initial = GetInitialFastElementsKind();
3619 ElementsKind holey_initial = GetHoleyElementsKind(initial);
3620
3621 ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
3622 holey_initial,
3623 DISABLE_ALLOCATION_SITES);
3624 __ TailCallStub(&stub_holey);
3625
3626 __ bind(&normal_sequence);
3627 ArraySingleArgumentConstructorStub stub(masm->isolate(),
3628 initial,
3629 DISABLE_ALLOCATION_SITES);
3630 __ TailCallStub(&stub);
3631 } else if (mode == DONT_OVERRIDE) {
3632 // We are going to create a holey array, but our kind is non-holey.
3633 // Fix kind and retry (only if we have an allocation site in the slot).
3634 __ Addu(a3, a3, Operand(1));
3635
3636 if (FLAG_debug_code) {
3637 __ lw(t1, FieldMemOperand(a2, 0));
3638 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
3639 __ Assert(eq, kExpectedAllocationSite, t1, Operand(at));
3640 }
3641
3642 // Save the resulting elements kind in type info. We can't just store a3
3643 // in the AllocationSite::transition_info field because elements kind is
3644 // restricted to a portion of the field...upper bits need to be left alone.
3645 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
3646 __ lw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
3647 __ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
3648 __ sw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
3649
3650
3651 __ bind(&normal_sequence);
3652 int last_index = GetSequenceIndexFromFastElementsKind(
3653 TERMINAL_FAST_ELEMENTS_KIND);
3654 for (int i = 0; i <= last_index; ++i) {
3655 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
3656 ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
3657 __ TailCallStub(&stub, eq, a3, Operand(kind));
3658 }
3659
3660 // If we reached this point there is a problem.
3661 __ Abort(kUnexpectedElementsKindInArrayConstructor);
3662 } else {
3663 UNREACHABLE();
3664 }
3665 }
3666
3667
3668 template<class T>
3669 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
3670 int to_index = GetSequenceIndexFromFastElementsKind(
3671 TERMINAL_FAST_ELEMENTS_KIND);
3672 for (int i = 0; i <= to_index; ++i) {
3673 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
3674 T stub(isolate, kind);
3675 stub.GetCode();
3676 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
3677 T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
3678 stub1.GetCode();
3679 }
3680 }
3681 }
3682
3683 void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
3684 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
3685 isolate);
3686 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
3687 isolate);
3688 ArrayNArgumentsConstructorStub stub(isolate);
3689 stub.GetCode();
3690 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
3691 for (int i = 0; i < 2; i++) {
3692 // For internal arrays we only need a few things.
3693 InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
3694 stubh1.GetCode();
3695 InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
3696 stubh2.GetCode();
3697 }
3698 }
3699
3700
3701 void ArrayConstructorStub::GenerateDispatchToArrayStub(
3702 MacroAssembler* masm,
3703 AllocationSiteOverrideMode mode) {
3704 Label not_zero_case, not_one_case;
3705 __ And(at, a0, a0);
3706   __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
3707 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
3708
3709   __ bind(&not_zero_case);
3710   __ Branch(&not_one_case, gt, a0, Operand(1));
3711 CreateArrayDispatchOneArgument(masm, mode);
3712
3713   __ bind(&not_one_case);
3714 ArrayNArgumentsConstructorStub stub(masm->isolate());
3715 __ TailCallStub(&stub);
3716 }
3717
3718
3719 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
3720 // ----------- S t a t e -------------
3721 // -- a0 : argc (only if argument_count() is ANY or MORE_THAN_ONE)
3722 // -- a1 : constructor
3723 // -- a2 : AllocationSite or undefined
3724 // -- a3 : Original constructor
3725 // -- sp[0] : last argument
3726 // -----------------------------------
3727
3728 if (FLAG_debug_code) {
3729 // The array construct code is only set for the global and natives
3730 // builtin Array functions which always have maps.
3731
3732 // Initial map for the builtin Array function should be a map.
3733 __ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
3734     // Will catch both a NULL pointer and a Smi.
3735 __ SmiTst(t0, at);
3736 __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
3737 at, Operand(zero_reg));
3738 __ GetObjectType(t0, t0, t1);
3739 __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
3740 t1, Operand(MAP_TYPE));
3741
3742 // We should either have undefined in a2 or a valid AllocationSite
3743 __ AssertUndefinedOrAllocationSite(a2, t0);
3744 }
3745
3746 // Enter the context of the Array function.
3747 __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3748
3749 Label subclassing;
3750 __ Branch(&subclassing, ne, a1, Operand(a3));
3751
3752 Label no_info;
3753 // Get the elements kind and case on that.
3754 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3755 __ Branch(&no_info, eq, a2, Operand(at));
3756
3757 __ lw(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
3758 __ SmiUntag(a3);
3759 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
3760 __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
3761 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
3762
3763 __ bind(&no_info);
3764 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
3765
3766 // Subclassing.
3767 __ bind(&subclassing);
3768 __ Lsa(at, sp, a0, kPointerSizeLog2);
3769 __ sw(a1, MemOperand(at));
3770 __ li(at, Operand(3));
3771 __ addu(a0, a0, at);
3772 __ Push(a3, a2);
3773 __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
3774 }
3775
3776
3777 void InternalArrayConstructorStub::GenerateCase(
3778 MacroAssembler* masm, ElementsKind kind) {
3779
3780 InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
3781 __ TailCallStub(&stub0, lo, a0, Operand(1));
3782
3783 ArrayNArgumentsConstructorStub stubN(isolate());
3784 __ TailCallStub(&stubN, hi, a0, Operand(1));
3785
3786 if (IsFastPackedElementsKind(kind)) {
3787 // We might need to create a holey array
3788 // look at the first argument.
3789 __ lw(at, MemOperand(sp, 0));
3790
3791 InternalArraySingleArgumentConstructorStub
3792 stub1_holey(isolate(), GetHoleyElementsKind(kind));
3793 __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
3794 }
3795
3796 InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
3797 __ TailCallStub(&stub1);
3798 }
3799
3800
3801 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
3802 // ----------- S t a t e -------------
3803 // -- a0 : argc
3804 // -- a1 : constructor
3805 // -- sp[0] : return address
3806 // -- sp[4] : last argument
3807 // -----------------------------------
3808
3809 if (FLAG_debug_code) {
3810 // The array construct code is only set for the global and natives
3811 // builtin Array functions which always have maps.
3812
3813 // Initial map for the builtin Array function should be a map.
3814 __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
3815     // Will catch both a NULL pointer and a Smi.
3816 __ SmiTst(a3, at);
3817 __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
3818 at, Operand(zero_reg));
3819 __ GetObjectType(a3, a3, t0);
3820 __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
3821 t0, Operand(MAP_TYPE));
3822 }
3823
3824 // Figure out the right elements kind.
3825 __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
3826
3827 // Load the map's "bit field 2" into a3. We only need the first byte,
3828 // but the following bit field extraction takes care of that anyway.
3829 __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
3830 // Retrieve elements_kind from bit field 2.
3831 __ DecodeField<Map::ElementsKindBits>(a3);
3832
3833 if (FLAG_debug_code) {
3834 Label done;
3835 __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
3836 __ Assert(
3837 eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
3838 a3, Operand(FAST_HOLEY_ELEMENTS));
3839 __ bind(&done);
3840 }
3841
3842 Label fast_elements_case;
3843 __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
3844 GenerateCase(masm, FAST_HOLEY_ELEMENTS);
3845
3846 __ bind(&fast_elements_case);
3847 GenerateCase(masm, FAST_ELEMENTS);
3848 }
3849
3850
3851 void FastNewObjectStub::Generate(MacroAssembler* masm) {
3852 // ----------- S t a t e -------------
3853 // -- a1 : target
3854 // -- a3 : new target
3855 // -- cp : context
3856 // -- ra : return address
3857 // -----------------------------------
3858 __ AssertFunction(a1);
3859 __ AssertReceiver(a3);
3860
3861 // Verify that the new target is a JSFunction.
3862 Label new_object;
3863 __ GetObjectType(a3, a2, a2);
3864 __ Branch(&new_object, ne, a2, Operand(JS_FUNCTION_TYPE));
3865
3866 // Load the initial map and verify that it's in fact a map.
3867 __ lw(a2, FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
3868 __ JumpIfSmi(a2, &new_object);
3869 __ GetObjectType(a2, a0, a0);
3870 __ Branch(&new_object, ne, a0, Operand(MAP_TYPE));
3871
3872 // Fall back to runtime if the target differs from the new target's
3873 // initial map constructor.
3874 __ lw(a0, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
3875 __ Branch(&new_object, ne, a0, Operand(a1));
3876
3877 // Allocate the JSObject on the heap.
3878 Label allocate, done_allocate;
3879 __ lbu(t0, FieldMemOperand(a2, Map::kInstanceSizeOffset));
3880 __ Allocate(t0, v0, t1, a0, &allocate, SIZE_IN_WORDS);
3881 __ bind(&done_allocate);
3882
3883 // Initialize the JSObject fields.
3884 __ sw(a2, FieldMemOperand(v0, JSObject::kMapOffset));
3885 __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
3886 __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
3887 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
3888 STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
3889 __ Addu(a1, v0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
3890
3891 // ----------- S t a t e -------------
3892 // -- v0 : result (tagged)
3893 // -- a1 : result fields (untagged)
3894 // -- t1 : result end (untagged)
3895 // -- a2 : initial map
3896 // -- cp : context
3897 // -- ra : return address
3898 // -----------------------------------
3899
3900 // Perform in-object slack tracking if requested.
3901 Label slack_tracking;
3902 STATIC_ASSERT(Map::kNoSlackTracking == 0);
3903 __ lw(a3, FieldMemOperand(a2, Map::kBitField3Offset));
3904 __ And(at, a3, Operand(Map::ConstructionCounter::kMask));
3905 __ Branch(USE_DELAY_SLOT, &slack_tracking, ne, at, Operand(0));
3906 __ LoadRoot(a0, Heap::kUndefinedValueRootIndex); // In delay slot.
3907 {
3908 // Initialize all in-object fields with undefined.
3909 __ InitializeFieldsWithFiller(a1, t1, a0);
3910 __ Ret();
3911 }
3912 __ bind(&slack_tracking);
3913 {
3914 // Decrease generous allocation count.
3915 STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
3916 __ Subu(a3, a3, Operand(1 << Map::ConstructionCounter::kShift));
3917 __ sw(a3, FieldMemOperand(a2, Map::kBitField3Offset));
3918
3919 // Initialize the in-object fields with undefined.
3920 __ lbu(t0, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
3921 __ sll(t0, t0, kPointerSizeLog2);
3922 __ subu(t0, t1, t0);
3923 __ InitializeFieldsWithFiller(a1, t0, a0);
3924
3925 // Initialize the remaining (reserved) fields with one pointer filler map.
3926 __ LoadRoot(a0, Heap::kOnePointerFillerMapRootIndex);
3927 __ InitializeFieldsWithFiller(a1, t1, a0);
3928
3929 // Check if we can finalize the instance size.
3930 Label finalize;
3931 STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
3932 __ And(a3, a3, Operand(Map::ConstructionCounter::kMask));
3933 __ Branch(&finalize, eq, a3, Operand(zero_reg));
3934 __ Ret();
3935
3936 // Finalize the instance size.
3937 __ bind(&finalize);
3938 {
3939 FrameScope scope(masm, StackFrame::INTERNAL);
3940 __ Push(v0, a2);
3941 __ CallRuntime(Runtime::kFinalizeInstanceSize);
3942 __ Pop(v0);
3943 }
3944 __ Ret();
3945 }
3946
3947 // Fall back to %AllocateInNewSpace.
3948 __ bind(&allocate);
3949 {
3950 FrameScope scope(masm, StackFrame::INTERNAL);
3951 STATIC_ASSERT(kSmiTag == 0);
3952 STATIC_ASSERT(kSmiTagSize == 1);
3953 __ sll(t0, t0, kPointerSizeLog2 + kSmiTagSize);
3954 __ Push(a2, t0);
3955 __ CallRuntime(Runtime::kAllocateInNewSpace);
3956 __ Pop(a2);
3957 }
3958 __ lbu(t1, FieldMemOperand(a2, Map::kInstanceSizeOffset));
3959 __ Lsa(t1, v0, t1, kPointerSizeLog2);
3960 STATIC_ASSERT(kHeapObjectTag == 1);
3961 __ Subu(t1, t1, Operand(kHeapObjectTag));
3962 __ jmp(&done_allocate);
3963
3964 // Fall back to %NewObject.
3965 __ bind(&new_object);
3966 __ Push(a1, a3);
3967 __ TailCallRuntime(Runtime::kNewObject);
3968 }
3969
3970
3971 void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
3972 // ----------- S t a t e -------------
3973 // -- a1 : function
3974 // -- cp : context
3975 // -- fp : frame pointer
3976 // -- ra : return address
3977 // -----------------------------------
3978 __ AssertFunction(a1);
3979
3980 // Make a2 point to the JavaScript frame.
3981 __ mov(a2, fp);
3982 if (skip_stub_frame()) {
3983 // For Ignition we need to skip the handler/stub frame to reach the
3984 // JavaScript frame for the function.
3985 __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
3986 }
3987 if (FLAG_debug_code) {
3988 Label ok;
3989 __ lw(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
3990 __ Branch(&ok, eq, a1, Operand(a3));
3991 __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
3992 __ bind(&ok);
3993 }
3994
3995 // Check if we have rest parameters (only possible if we have an
3996 // arguments adaptor frame below the function frame).
3997 Label no_rest_parameters;
3998 __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
3999 __ lw(a3, MemOperand(a2, CommonFrameConstants::kContextOrFrameTypeOffset));
4000 __ Branch(&no_rest_parameters, ne, a3,
4001 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4002
4003 // Check if the arguments adaptor frame contains more arguments than
4004 // specified by the function's internal formal parameter count.
4005 Label rest_parameters;
4006 __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4007 __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4008 __ lw(a3,
4009 FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
4010 __ Subu(a0, a0, Operand(a3));
4011 __ Branch(&rest_parameters, gt, a0, Operand(zero_reg));
4012
4013 // Return an empty rest parameter array.
4014 __ bind(&no_rest_parameters);
4015 {
4016 // ----------- S t a t e -------------
4017 // -- cp : context
4018 // -- ra : return address
4019 // -----------------------------------
4020
4021 // Allocate an empty rest parameter array.
4022 Label allocate, done_allocate;
4023 __ Allocate(JSArray::kSize, v0, a0, a1, &allocate, NO_ALLOCATION_FLAGS);
4024 __ bind(&done_allocate);
4025
4026 // Setup the rest parameter array in v0.
4027 __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, a1);
4028 __ sw(a1, FieldMemOperand(v0, JSArray::kMapOffset));
4029 __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
4030 __ sw(a1, FieldMemOperand(v0, JSArray::kPropertiesOffset));
4031 __ sw(a1, FieldMemOperand(v0, JSArray::kElementsOffset));
4032 __ Move(a1, Smi::kZero);
4033 __ Ret(USE_DELAY_SLOT);
4034 __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset)); // In delay slot
4035 STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
4036
4037 // Fall back to %AllocateInNewSpace.
4038 __ bind(&allocate);
4039 {
4040 FrameScope scope(masm, StackFrame::INTERNAL);
4041 __ Push(Smi::FromInt(JSArray::kSize));
4042 __ CallRuntime(Runtime::kAllocateInNewSpace);
4043 }
4044 __ jmp(&done_allocate);
4045 }
4046
4047 __ bind(&rest_parameters);
4048 {
4049     // Compute the pointer to the first rest parameter (skipping the receiver).
4050 __ Lsa(a2, a2, a0, kPointerSizeLog2 - 1);
4051 __ Addu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
4052 1 * kPointerSize));
4053
4054 // ----------- S t a t e -------------
4055 // -- cp : context
4056 // -- a0 : number of rest parameters (tagged)
4057 // -- a1 : function
4058 // -- a2 : pointer to first rest parameters
4059 // -- ra : return address
4060 // -----------------------------------
4061
4062 // Allocate space for the rest parameter array plus the backing store.
4063 Label allocate, done_allocate;
4064 __ li(t0, Operand(JSArray::kSize + FixedArray::kHeaderSize));
4065 __ Lsa(t0, t0, a0, kPointerSizeLog2 - 1);
4066 __ Allocate(t0, v0, a3, t1, &allocate, NO_ALLOCATION_FLAGS);
4067 __ bind(&done_allocate);
4068
4069 // Setup the elements array in v0.
4070 __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4071 __ sw(at, FieldMemOperand(v0, FixedArray::kMapOffset));
4072 __ sw(a0, FieldMemOperand(v0, FixedArray::kLengthOffset));
4073 __ Addu(a3, v0, Operand(FixedArray::kHeaderSize));
4074 {
4075 Label loop, done_loop;
4076 __ sll(at, a0, kPointerSizeLog2 - 1);
4077 __ Addu(a1, a3, at);
4078 __ bind(&loop);
4079 __ Branch(&done_loop, eq, a1, Operand(a3));
4080 __ lw(at, MemOperand(a2, 0 * kPointerSize));
4081 __ sw(at, FieldMemOperand(a3, 0 * kPointerSize));
4082 __ Subu(a2, a2, Operand(1 * kPointerSize));
4083 __ Addu(a3, a3, Operand(1 * kPointerSize));
4084 __ jmp(&loop);
4085 __ bind(&done_loop);
4086 }
4087
4088 // Setup the rest parameter array in a3.
4089 __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, at);
4090 __ sw(at, FieldMemOperand(a3, JSArray::kMapOffset));
4091 __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
4092 __ sw(at, FieldMemOperand(a3, JSArray::kPropertiesOffset));
4093 __ sw(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
4094 __ sw(a0, FieldMemOperand(a3, JSArray::kLengthOffset));
4095 STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
4096 __ Ret(USE_DELAY_SLOT);
4097 __ mov(v0, a3); // In delay slot
4098
4099 // Fall back to %AllocateInNewSpace (if not too big).
4100 Label too_big_for_new_space;
4101 __ bind(&allocate);
4102 __ Branch(&too_big_for_new_space, gt, t0,
4103 Operand(kMaxRegularHeapObjectSize));
4104 {
4105 FrameScope scope(masm, StackFrame::INTERNAL);
4106 __ SmiTag(t0);
4107 __ Push(a0, a2, t0);
4108 __ CallRuntime(Runtime::kAllocateInNewSpace);
4109 __ Pop(a0, a2);
4110 }
4111 __ jmp(&done_allocate);
4112
4113 // Fall back to %NewStrictArguments.
4114 __ bind(&too_big_for_new_space);
4115 __ Push(a1);
4116 __ TailCallRuntime(Runtime::kNewStrictArguments);
4117 }
4118 }
4119
4120
4121 void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
4122 // ----------- S t a t e -------------
4123 // -- a1 : function
4124 // -- cp : context
4125 // -- fp : frame pointer
4126 // -- ra : return address
4127 // -----------------------------------
4128 __ AssertFunction(a1);
4129
4130 // Make t0 point to the JavaScript frame.
4131 __ mov(t0, fp);
4132 if (skip_stub_frame()) {
4133 // For Ignition we need to skip the handler/stub frame to reach the
4134 // JavaScript frame for the function.
4135 __ lw(t0, MemOperand(t0, StandardFrameConstants::kCallerFPOffset));
4136 }
4137 if (FLAG_debug_code) {
4138 Label ok;
4139 __ lw(a3, MemOperand(t0, StandardFrameConstants::kFunctionOffset));
4140 __ Branch(&ok, eq, a1, Operand(a3));
4141 __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
4142 __ bind(&ok);
4143 }
4144
4145 // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
4146 __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4147 __ lw(a2,
4148 FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
4149 __ Lsa(a3, t0, a2, kPointerSizeLog2 - 1);
4150 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4151
4152 // a1 : function
4153 // a2 : number of parameters (tagged)
4154 // a3 : parameters pointer
4155   // t0 : JavaScript frame pointer
4156 // Registers used over whole function:
4157 // t1 : arguments count (tagged)
4158 // t2 : mapped parameter count (tagged)
4159
4160 // Check if the calling frame is an arguments adaptor frame.
4161 Label adaptor_frame, try_allocate, runtime;
4162 __ lw(t0, MemOperand(t0, StandardFrameConstants::kCallerFPOffset));
4163 __ lw(a0, MemOperand(t0, CommonFrameConstants::kContextOrFrameTypeOffset));
4164 __ Branch(&adaptor_frame, eq, a0,
4165 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4166
4167 // No adaptor, parameter count = argument count.
4168 __ mov(t1, a2);
4169 __ Branch(USE_DELAY_SLOT, &try_allocate);
4170 __ mov(t2, a2); // In delay slot.
4171
4172 // We have an adaptor frame. Patch the parameters pointer.
4173 __ bind(&adaptor_frame);
4174 __ lw(t1, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
4175 __ Lsa(t0, t0, t1, 1);
4176 __ Addu(a3, t0, Operand(StandardFrameConstants::kCallerSPOffset));
4177
4178 // t1 = argument count (tagged)
4179 // t2 = parameter count (tagged)
4180 // Compute the mapped parameter count = min(t2, t1) in t2.
4181 __ mov(t2, a2);
4182 __ Branch(&try_allocate, le, t2, Operand(t1));
4183 __ mov(t2, t1);
4184
4185 __ bind(&try_allocate);
4186
4187 // Compute the sizes of backing store, parameter map, and arguments object.
4188   // 1. Parameter map: has 2 extra words containing context and backing store.
4189 const int kParameterMapHeaderSize =
4190 FixedArray::kHeaderSize + 2 * kPointerSize;
4191 // If there are no mapped parameters, we do not need the parameter_map.
4192 Label param_map_size;
4193 DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
4194   __ Branch(USE_DELAY_SLOT, &param_map_size, eq, t2, Operand(zero_reg));
4195 __ mov(t5, zero_reg); // In delay slot: param map size = 0 when t2 == 0.
4196 __ sll(t5, t2, 1);
4197 __ addiu(t5, t5, kParameterMapHeaderSize);
4198   __ bind(&param_map_size);
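  // t5 now holds the parameter map size in bytes: zero when there are no
  // mapped parameters, otherwise kParameterMapHeaderSize plus one pointer per
  // mapped parameter (t2 is a smi, so the shift above scales it to bytes).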
4199
4200 // 2. Backing store.
4201 __ Lsa(t5, t5, t1, 1);
4202 __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
4203
4204 // 3. Arguments object.
4205 __ Addu(t5, t5, Operand(JSSloppyArgumentsObject::kSize));
4206
4207 // Do the allocation of all three objects in one go.
4208 __ Allocate(t5, v0, t5, t0, &runtime, NO_ALLOCATION_FLAGS);
4209
4210 // v0 = address of new object(s) (tagged)
4211 // a2 = argument count (smi-tagged)
4212 // Get the arguments boilerplate from the current native context into t0.
4213 const int kNormalOffset =
4214 Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
4215 const int kAliasedOffset =
4216 Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
4217
4218 __ lw(t0, NativeContextMemOperand());
4219 Label skip2_ne, skip2_eq;
4220 __ Branch(&skip2_ne, ne, t2, Operand(zero_reg));
4221 __ lw(t0, MemOperand(t0, kNormalOffset));
4222 __ bind(&skip2_ne);
4223
4224 __ Branch(&skip2_eq, eq, t2, Operand(zero_reg));
4225 __ lw(t0, MemOperand(t0, kAliasedOffset));
4226 __ bind(&skip2_eq);
4227
4228 // v0 = address of new object (tagged)
4229 // a2 = argument count (smi-tagged)
4230 // t0 = address of arguments map (tagged)
4231 // t2 = mapped parameter count (tagged)
4232 __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
4233 __ LoadRoot(t5, Heap::kEmptyFixedArrayRootIndex);
4234 __ sw(t5, FieldMemOperand(v0, JSObject::kPropertiesOffset));
4235 __ sw(t5, FieldMemOperand(v0, JSObject::kElementsOffset));
4236
4237 // Set up the callee in-object property.
4238 __ AssertNotSmi(a1);
4239 __ sw(a1, FieldMemOperand(v0, JSSloppyArgumentsObject::kCalleeOffset));
4240
4241 // Use the length (smi tagged) and set that as an in-object property too.
4242 __ AssertSmi(t1);
4243 __ sw(t1, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
4244
4245 // Set up the elements pointer in the allocated arguments object.
4246 // If we allocated a parameter map, t0 will point there, otherwise
4247 // it will point to the backing store.
4248 __ Addu(t0, v0, Operand(JSSloppyArgumentsObject::kSize));
4249 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
4250
4251 // v0 = address of new object (tagged)
4252 // a2 = argument count (tagged)
4253 // t0 = address of parameter map or backing store (tagged)
4254 // t2 = mapped parameter count (tagged)
4255 // Initialize parameter map. If there are no mapped arguments, we're done.
4256 Label skip_parameter_map;
4257 Label skip3;
4258 __ Branch(&skip3, ne, t2, Operand(Smi::kZero));
4259 // Move backing store address to a1, because it is
4260 // expected there when filling in the unmapped arguments.
4261 __ mov(a1, t0);
4262 __ bind(&skip3);
4263
4264 __ Branch(&skip_parameter_map, eq, t2, Operand(Smi::kZero));
4265
4266 __ LoadRoot(t1, Heap::kSloppyArgumentsElementsMapRootIndex);
4267 __ sw(t1, FieldMemOperand(t0, FixedArray::kMapOffset));
4268 __ Addu(t1, t2, Operand(Smi::FromInt(2)));
4269 __ sw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
4270 __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
4271 __ Lsa(t1, t0, t2, 1);
4272 __ Addu(t1, t1, Operand(kParameterMapHeaderSize));
4273 __ sw(t1, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
4274
4275 // Copy the parameter slots and the holes in the arguments.
4276 // We need to fill in mapped_parameter_count slots. They index the context,
4277 // where parameters are stored in reverse order, at
4278 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
4279   // The mapped parameters thus need to get indices
4280 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
4281 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
4282 // We loop from right to left.
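  // For illustration: with 3 formal parameters of which 2 are mapped, the loop
  // stores context index MIN_CONTEXT_SLOTS + 1 (as a smi) into map slot 1 and
  // MIN_CONTEXT_SLOTS + 2 into map slot 0, while the matching backing store
  // slots receive the hole.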
4283 Label parameters_loop, parameters_test;
4284 __ mov(t1, t2);
4285 __ Addu(t5, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
4286 __ Subu(t5, t5, Operand(t2));
4287 __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
4288 __ Lsa(a1, t0, t1, 1);
4289 __ Addu(a1, a1, Operand(kParameterMapHeaderSize));
4290
4291 // a1 = address of backing store (tagged)
4292 // t0 = address of parameter map (tagged)
4293 // a0 = temporary scratch (a.o., for address calculation)
4294 // t1 = loop variable (tagged)
4295 // t3 = the hole value
4296   __ jmp(&parameters_test);
4297
4298   __ bind(&parameters_loop);
4299 __ Subu(t1, t1, Operand(Smi::FromInt(1)));
4300 __ sll(a0, t1, 1);
4301 __ Addu(a0, a0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
4302 __ Addu(t6, t0, a0);
4303 __ sw(t5, MemOperand(t6));
4304 __ Subu(a0, a0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
4305 __ Addu(t6, a1, a0);
4306 __ sw(t3, MemOperand(t6));
4307 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
4308   __ bind(&parameters_test);
4309 __ Branch(¶meters_loop, ne, t1, Operand(Smi::kZero));
4310
4311 // t1 = argument count (tagged).
4312 __ lw(t1, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
4313
4314 __ bind(&skip_parameter_map);
4315 // v0 = address of new object (tagged)
4316 // a1 = address of backing store (tagged)
4317 // t1 = argument count (tagged)
4318 // t2 = mapped parameter count (tagged)
4319 // t5 = scratch
4320 // Copy arguments header and remaining slots (if there are any).
4321 __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
4322 __ sw(t5, FieldMemOperand(a1, FixedArray::kMapOffset));
4323 __ sw(t1, FieldMemOperand(a1, FixedArray::kLengthOffset));
4324
4325 Label arguments_loop, arguments_test;
4326 __ sll(t6, t2, 1);
4327 __ Subu(a3, a3, Operand(t6));
4328 __ jmp(&arguments_test);
4329
4330 __ bind(&arguments_loop);
4331 __ Subu(a3, a3, Operand(kPointerSize));
4332 __ lw(t0, MemOperand(a3, 0));
4333 __ Lsa(t5, a1, t2, 1);
4334 __ sw(t0, FieldMemOperand(t5, FixedArray::kHeaderSize));
4335 __ Addu(t2, t2, Operand(Smi::FromInt(1)));
4336
4337 __ bind(&arguments_test);
4338 __ Branch(&arguments_loop, lt, t2, Operand(t1));
4339
4340 // Return.
4341 __ Ret();
4342
4343 // Do the runtime call to allocate the arguments object.
4344 // t1 = argument count (tagged)
4345 __ bind(&runtime);
4346 __ Push(a1, a3, t1);
4347 __ TailCallRuntime(Runtime::kNewSloppyArguments);
4348 }
4349
4350
4351 void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
4352 // ----------- S t a t e -------------
4353 // -- a1 : function
4354 // -- cp : context
4355 // -- fp : frame pointer
4356 // -- ra : return address
4357 // -----------------------------------
4358 __ AssertFunction(a1);
4359
4360 // Make a2 point to the JavaScript frame.
4361 __ mov(a2, fp);
4362 if (skip_stub_frame()) {
4363 // For Ignition we need to skip the handler/stub frame to reach the
4364 // JavaScript frame for the function.
4365 __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
4366 }
4367 if (FLAG_debug_code) {
4368 Label ok;
4369 __ lw(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
4370 __ Branch(&ok, eq, a1, Operand(a3));
4371 __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
4372 __ bind(&ok);
4373 }
4374
4375 // Check if we have an arguments adaptor frame below the function frame.
4376 Label arguments_adaptor, arguments_done;
4377 __ lw(a3, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
4378 __ lw(a0, MemOperand(a3, CommonFrameConstants::kContextOrFrameTypeOffset));
4379 __ Branch(&arguments_adaptor, eq, a0,
4380 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4381 {
4382 __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4383 __ lw(a0,
4384 FieldMemOperand(t0, SharedFunctionInfo::kFormalParameterCountOffset));
4385 __ Lsa(a2, a2, a0, kPointerSizeLog2 - 1);
4386 __ Addu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
4387 1 * kPointerSize));
4388 }
4389 __ Branch(&arguments_done);
4390 __ bind(&arguments_adaptor);
4391 {
4392 __ lw(a0, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4393 __ Lsa(a2, a3, a0, kPointerSizeLog2 - 1);
4394 __ Addu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
4395 1 * kPointerSize));
4396 }
4397 __ bind(&arguments_done);
4398
4399 // ----------- S t a t e -------------
4400 // -- cp : context
4401 // -- a0 : number of rest parameters (tagged)
4402 // -- a1 : function
4403 // -- a2 : pointer to first rest parameters
4404 // -- ra : return address
4405 // -----------------------------------
4406
4407 // Allocate space for the strict arguments object plus the backing store.
4408 Label allocate, done_allocate;
4409 __ li(t0, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
4410 __ Lsa(t0, t0, a0, kPointerSizeLog2 - 1);
4411 __ Allocate(t0, v0, a3, t1, &allocate, NO_ALLOCATION_FLAGS);
4412 __ bind(&done_allocate);
4413
4414 // Setup the elements array in v0.
4415 __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4416 __ sw(at, FieldMemOperand(v0, FixedArray::kMapOffset));
4417 __ sw(a0, FieldMemOperand(v0, FixedArray::kLengthOffset));
4418 __ Addu(a3, v0, Operand(FixedArray::kHeaderSize));
4419 {
4420 Label loop, done_loop;
4421 __ sll(at, a0, kPointerSizeLog2 - 1);
4422 __ Addu(a1, a3, at);
4423 __ bind(&loop);
4424 __ Branch(&done_loop, eq, a1, Operand(a3));
4425 __ lw(at, MemOperand(a2, 0 * kPointerSize));
4426 __ sw(at, FieldMemOperand(a3, 0 * kPointerSize));
4427 __ Subu(a2, a2, Operand(1 * kPointerSize));
4428 __ Addu(a3, a3, Operand(1 * kPointerSize));
4429 __ Branch(&loop);
4430 __ bind(&done_loop);
4431 }

  // Set up the strict arguments object in a3.
  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, at);
  __ sw(at, FieldMemOperand(a3, JSStrictArgumentsObject::kMapOffset));
  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ sw(at, FieldMemOperand(a3, JSStrictArgumentsObject::kPropertiesOffset));
  __ sw(v0, FieldMemOperand(a3, JSStrictArgumentsObject::kElementsOffset));
  __ sw(a0, FieldMemOperand(a3, JSStrictArgumentsObject::kLengthOffset));
  STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a3);  // In delay slot.

  // Fall back to %AllocateInNewSpace (if not too big).
  Label too_big_for_new_space;
  __ bind(&allocate);
  __ Branch(&too_big_for_new_space, gt, t0, Operand(kMaxRegularHeapObjectSize));
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ SmiTag(t0);
    __ Push(a0, a2, t0);
    __ CallRuntime(Runtime::kAllocateInNewSpace);
    __ Pop(a0, a2);
  }
  __ jmp(&done_allocate);

  // Fall back to %NewStrictArguments.
  __ bind(&too_big_for_new_space);
  __ Push(a1);
  __ TailCallRuntime(Runtime::kNewStrictArguments);
}

static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return ref0.address() - ref1.address();
}


// Calls an API function. Allocates a HandleScope, extracts the returned value
// from the handle, and propagates exceptions. Restores the context.
// stack_space is the space to be unwound on exit; it includes the call JS
// arguments space and the additional space allocated for the fast call.
static void CallApiFunctionAndReturn(
    MacroAssembler* masm, Register function_address,
    ExternalReference thunk_ref, int stack_space, int32_t stack_space_offset,
    MemOperand return_value_operand, MemOperand* context_restore_operand) {
  Isolate* isolate = masm->isolate();
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate);
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(isolate), next_address);
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(isolate), next_address);
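  // The next, limit and level fields of the handle scope data all live at
  // small fixed offsets from next_address, so a single base register (s3
  // below) can reach all three of them.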

  DCHECK(function_address.is(a1) || function_address.is(a2));

  Label profiler_disabled;
  Label end_profiler_check;
  __ li(t9, Operand(ExternalReference::is_profiling_address(isolate)));
  __ lb(t9, MemOperand(t9, 0));
  __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));

  // The additional parameter is the address of the actual callback.
  __ li(t9, Operand(thunk_ref));
  __ jmp(&end_profiler_check);

  __ bind(&profiler_disabled);
  __ mov(t9, function_address);
  __ bind(&end_profiler_check);
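  // t9 now holds the address that will actually be called: the profiling
  // thunk (which wraps the callback) when the profiler is active, or the
  // callback itself otherwise.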

  // Allocate HandleScope in callee-save registers.
  __ li(s3, Operand(next_address));
  __ lw(s0, MemOperand(s3, kNextOffset));
  __ lw(s1, MemOperand(s3, kLimitOffset));
  __ lw(s2, MemOperand(s3, kLevelOffset));
  __ Addu(s2, s2, Operand(1));
  __ sw(s2, MemOperand(s3, kLevelOffset));

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, a0);
    __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  // Native call returns to the DirectCEntry stub which redirects to the
  // return address pushed on the stack (it could have moved after GC).
  // The DirectCEntry stub itself is generated early and never moves.
  DirectCEntryStub stub(isolate);
  stub.GenerateCall(masm, t9);

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, a0);
    __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;

  // Load value from ReturnValue.
  __ lw(v0, return_value_operand);
  __ bind(&return_value_loaded);

  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  __ sw(s0, MemOperand(s3, kNextOffset));
  if (__ emit_debug_code()) {
    __ lw(a1, MemOperand(s3, kLevelOffset));
    __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
  }
  __ Subu(s2, s2, Operand(1));
  __ sw(s2, MemOperand(s3, kLevelOffset));
  __ lw(at, MemOperand(s3, kLimitOffset));
  __ Branch(&delete_allocated_handles, ne, s1, Operand(at));
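  // A changed limit means the callback allocated handle scope extensions;
  // they are released in delete_allocated_handles below before the exit
  // frame is left.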

  // Leave the API exit frame.
  __ bind(&leave_exit_frame);

  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    __ lw(cp, *context_restore_operand);
  }
  if (stack_space_offset != kInvalidStackOffset) {
    // The ExitFrame contains four MIPS argument slots after the
    // DirectCEntryStub call, so this must be accounted for.
    __ lw(s0, MemOperand(sp, stack_space_offset + kCArgsSlotsSize));
  } else {
    __ li(s0, Operand(stack_space));
  }
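  // s0 now holds the amount of stack to drop: a byte length when it was
  // loaded from the frame above, or a slot count when the constant
  // stack_space was used. The final argument to LeaveExitFrame below tells
  // it which of the two it is.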
  __ LeaveExitFrame(false, s0, !restore_context, NO_EMIT_RETURN,
                    stack_space_offset != kInvalidStackOffset);

  // Check if the function scheduled an exception.
  __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
  __ li(at, Operand(ExternalReference::scheduled_exception_address(isolate)));
  __ lw(t1, MemOperand(at));
  __ Branch(&promote_scheduled_exception, ne, t0, Operand(t1));

  __ Ret();

  // Re-throw by promoting a scheduled exception.
  __ bind(&promote_scheduled_exception);
  __ TailCallRuntime(Runtime::kPromoteScheduledException);

  // HandleScope limit has changed. Delete allocated extensions.
  __ bind(&delete_allocated_handles);
  __ sw(s1, MemOperand(s3, kLimitOffset));
  __ mov(s0, v0);
  __ mov(a0, v0);
  __ PrepareCallCFunction(1, s1);
  __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
  __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
                   1);
  __ mov(v0, s0);
  __ jmp(&leave_exit_frame);
}

void CallApiCallbackStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0 : callee
  //  -- t0 : call_data
  //  -- a2 : holder
  //  -- a1 : api_function_address
  //  -- cp : context
  //  --
  //  -- sp[0] : last argument
  //  -- ...
  //  -- sp[(argc - 1) * 4] : first argument
  //  -- sp[argc * 4] : receiver
  // -----------------------------------

  Register callee = a0;
  Register call_data = t0;
  Register holder = a2;
  Register api_function_address = a1;
  Register context = cp;

  typedef FunctionCallbackArguments FCA;

  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
  STATIC_ASSERT(FCA::kCalleeIndex == 5);
  STATIC_ASSERT(FCA::kDataIndex == 4);
  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
  STATIC_ASSERT(FCA::kIsolateIndex == 1);
  STATIC_ASSERT(FCA::kHolderIndex == 0);
  STATIC_ASSERT(FCA::kNewTargetIndex == 7);
  STATIC_ASSERT(FCA::kArgsLength == 8);

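  // The pushes below lay out the implicit FunctionCallbackArguments array so
  // that, once they are done, sp[0] holds the holder (index 0) and
  // sp[7 * kPointerSize] holds new.target (index 7); the JS arguments and the
  // receiver sit above that.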
  // new target
  __ PushRoot(Heap::kUndefinedValueRootIndex);

  // Save context, callee and call data.
  __ Push(context, callee, call_data);
  if (!is_lazy()) {
    // Load context from callee.
    __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));
  }

  Register scratch = call_data;
  if (!call_data_undefined()) {
    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  }
  // Push return value and default return value.
  __ Push(scratch, scratch);
  __ li(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
  // Push isolate and holder.
  __ Push(scratch, holder);

  // Prepare arguments.
  __ mov(scratch, sp);

  // Allocate the v8::FunctionCallbackInfo structure in the arguments' space,
  // since it's not controlled by GC.
  const int kApiStackSpace = 3;
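  // The three slots reserved in the exit frame below hold the
  // FunctionCallbackInfo fields implicit_args_, values_ and length_, filled
  // in after EnterExitFrame.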

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  DCHECK(!api_function_address.is(a0) && !scratch.is(a0));
  // a0 = FunctionCallbackInfo&
  // The arguments are after the return address.
  __ Addu(a0, sp, Operand(1 * kPointerSize));
  // FunctionCallbackInfo::implicit_args_
  __ sw(scratch, MemOperand(a0, 0 * kPointerSize));
  // FunctionCallbackInfo::values_
  __ Addu(at, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
  __ sw(at, MemOperand(a0, 1 * kPointerSize));
  // FunctionCallbackInfo::length_ = argc
  __ li(at, Operand(argc()));
  __ sw(at, MemOperand(a0, 2 * kPointerSize));

  ExternalReference thunk_ref =
      ExternalReference::invoke_function_callback(masm->isolate());

  AllowExternalCallThatCantCauseGC scope(masm);
  MemOperand context_restore_operand(
      fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
  // Store callbacks return the first JS argument.
  int return_value_offset = 0;
  if (is_store()) {
    return_value_offset = 2 + FCA::kArgsLength;
  } else {
    return_value_offset = 2 + FCA::kReturnValueOffset;
  }
  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
  int stack_space = 0;
  int32_t stack_space_offset = 3 * kPointerSize;
  stack_space = argc() + FCA::kArgsLength + 1;
  // TODO(adamk): Why are we clobbering this immediately?
  stack_space_offset = kInvalidStackOffset;
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
                           stack_space_offset, return_value_operand,
                           &context_restore_operand);
}


void CallApiGetterStub::Generate(MacroAssembler* masm) {
  // Build the v8::PropertyCallbackInfo::args_ array on the stack and push the
  // property name below the exit frame to make the GC aware of them.
  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
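  // Together with the name() handle stored at sp[0] further down, the stores
  // below fill the seven PropertyCallbackArguments slots at
  // sp[1 * kPointerSize] .. sp[7 * kPointerSize].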

  Register receiver = ApiGetterDescriptor::ReceiverRegister();
  Register holder = ApiGetterDescriptor::HolderRegister();
  Register callback = ApiGetterDescriptor::CallbackRegister();
  Register scratch = t0;
  DCHECK(!AreAliased(receiver, holder, callback, scratch));

  Register api_function_address = a2;

  // Here and below +1 is for name() pushed after the args_ array.
  typedef PropertyCallbackArguments PCA;
  __ Subu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
  __ sw(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
  __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
  __ sw(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ sw(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
  __ sw(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
                                    kPointerSize));
  __ li(scratch, Operand(ExternalReference::isolate_address(isolate())));
  __ sw(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
  __ sw(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
  // should_throw_on_error -> false
  DCHECK(Smi::kZero == nullptr);
  __ sw(zero_reg,
        MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
  __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
  __ sw(scratch, MemOperand(sp, 0 * kPointerSize));

  // v8::PropertyCallbackInfo::args_ array and name handle.
  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
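  // kArgsLength + 1: the args_ array plus the name handle are dropped from
  // the stack when the API call returns.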

  // Load address of v8::PropertyCallbackInfo::args_ array and name handle.
  __ mov(a0, sp);                              // a0 = Handle<Name>
  __ Addu(a1, a0, Operand(1 * kPointerSize));  // a1 = v8::PCI::args_

  const int kApiStackSpace = 1;
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  // Create v8::PropertyCallbackInfo object on the stack and initialize
  // its args_ field.
  __ sw(a1, MemOperand(sp, 1 * kPointerSize));
  __ Addu(a1, sp, Operand(1 * kPointerSize));  // a1 = v8::PropertyCallbackInfo&

  ExternalReference thunk_ref =
      ExternalReference::invoke_accessor_getter_callback(isolate());

  __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
  __ lw(api_function_address,
        FieldMemOperand(scratch, Foreign::kForeignAddressOffset));

  // +3 is to skip prolog, return address and name handle.
  MemOperand return_value_operand(
      fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                           kStackUnwindSpace, kInvalidStackOffset,
                           return_value_operand, NULL);
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS