1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/v8.h"
6
7 #if V8_TARGET_ARCH_MIPS
8
9 #include "src/base/bits.h"
10 #include "src/bootstrapper.h"
11 #include "src/code-stubs.h"
12 #include "src/codegen.h"
13 #include "src/ic/handler-compiler.h"
14 #include "src/ic/ic.h"
15 #include "src/isolate.h"
16 #include "src/jsregexp.h"
17 #include "src/regexp-macro-assembler.h"
18 #include "src/runtime.h"
19
20 namespace v8 {
21 namespace internal {
22
23
24 static void InitializeArrayConstructorDescriptor(
25 Isolate* isolate, CodeStubDescriptor* descriptor,
26 int constant_stack_parameter_count) {
27 Address deopt_handler = Runtime::FunctionForId(
28 Runtime::kArrayConstructor)->entry;
29
30 if (constant_stack_parameter_count == 0) {
31 descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
32 JS_FUNCTION_STUB_MODE);
33 } else {
34 descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
35 JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
36 }
37 }
38
39
40 static void InitializeInternalArrayConstructorDescriptor(
41 Isolate* isolate, CodeStubDescriptor* descriptor,
42 int constant_stack_parameter_count) {
43 Address deopt_handler = Runtime::FunctionForId(
44 Runtime::kInternalArrayConstructor)->entry;
45
46 if (constant_stack_parameter_count == 0) {
47 descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
48 JS_FUNCTION_STUB_MODE);
49 } else {
50 descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
51 JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
52 }
53 }
54
55
56 void ArrayNoArgumentConstructorStub::InitializeDescriptor(
57 CodeStubDescriptor* descriptor) {
58 InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
59 }
60
61
62 void ArraySingleArgumentConstructorStub::InitializeDescriptor(
63 CodeStubDescriptor* descriptor) {
64 InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
65 }
66
67
68 void ArrayNArgumentsConstructorStub::InitializeDescriptor(
69 CodeStubDescriptor* descriptor) {
70 InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
71 }
72
73
74 void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
75 CodeStubDescriptor* descriptor) {
76 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
77 }
78
79
80 void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
81 CodeStubDescriptor* descriptor) {
82 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
83 }
84
85
86 void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
87 CodeStubDescriptor* descriptor) {
88 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
89 }
90
91
92 #define __ ACCESS_MASM(masm)
93
94
95 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
96 Label* slow,
97 Condition cc);
98 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
99 Register lhs,
100 Register rhs,
101 Label* rhs_not_nan,
102 Label* slow,
103 bool strict);
104 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
105 Register lhs,
106 Register rhs);
107
108
109 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
110 ExternalReference miss) {
111 // Update the static counter each time a new code stub is generated.
112 isolate()->counters()->code_stubs()->Increment();
113
114 CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
115 int param_count = descriptor.GetEnvironmentParameterCount();
116 {
117 // Call the runtime system in a fresh internal frame.
118 FrameScope scope(masm, StackFrame::INTERNAL);
119 DCHECK(param_count == 0 ||
120 a0.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
121 // Push arguments, adjust sp.
122 __ Subu(sp, sp, Operand(param_count * kPointerSize));
123 for (int i = 0; i < param_count; ++i) {
124 // Store argument to stack.
125 __ sw(descriptor.GetEnvironmentParameterRegister(i),
126 MemOperand(sp, (param_count - 1 - i) * kPointerSize));
127 }
128 __ CallExternalReference(miss, param_count);
129 }
130
131 __ Ret();
132 }
133
134
135 void DoubleToIStub::Generate(MacroAssembler* masm) {
136 Label out_of_range, only_low, negate, done;
137 Register input_reg = source();
138 Register result_reg = destination();
139
140 int double_offset = offset();
141 // Account for saved regs if input is sp.
142 if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
143
144 Register scratch =
145 GetRegisterThatIsNotOneOf(input_reg, result_reg);
146 Register scratch2 =
147 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
148 Register scratch3 =
149 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
150 DoubleRegister double_scratch = kLithiumScratchDouble;
151
152 __ Push(scratch, scratch2, scratch3);
153
154 if (!skip_fastpath()) {
155 // Load double input.
156 __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
157
158 // Clear cumulative exception flags and save the FCSR.
159 __ cfc1(scratch2, FCSR);
160 __ ctc1(zero_reg, FCSR);
161
162 // Try a conversion to a signed integer.
163 __ Trunc_w_d(double_scratch, double_scratch);
164 // Move the converted value into the result register.
165 __ mfc1(scratch3, double_scratch);
166
167 // Retrieve and restore the FCSR.
168 __ cfc1(scratch, FCSR);
169 __ ctc1(scratch2, FCSR);
170
171 // Check for overflow and NaNs.
172 __ And(
173 scratch, scratch,
174 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
175 | kFCSRInvalidOpFlagMask);
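// Any of these flags being set means Trunc_w_d could not deliver an exact
// 32-bit result, so we fall back to the manual truncation below.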
176 // If we had no exceptions then set result_reg and we are done.
177 Label error;
178 __ Branch(&error, ne, scratch, Operand(zero_reg));
179 __ Move(result_reg, scratch3);
180 __ Branch(&done);
181 __ bind(&error);
182 }
183
184 // Load the double value and perform a manual truncation.
185 Register input_high = scratch2;
186 Register input_low = scratch3;
187
188 __ lw(input_low,
189 MemOperand(input_reg, double_offset + Register::kMantissaOffset));
190 __ lw(input_high,
191 MemOperand(input_reg, double_offset + Register::kExponentOffset));
192
193 Label normal_exponent, restore_sign;
194 // Extract the biased exponent in result.
195 __ Ext(result_reg,
196 input_high,
197 HeapNumber::kExponentShift,
198 HeapNumber::kExponentBits);
199
200 // Check for Infinity and NaNs, which should return 0.
201 __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
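// Movz moves zero_reg into result_reg when scratch == 0, i.e. when the
// biased exponent is all ones (Infinity or NaN).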
202 __ Movz(result_reg, zero_reg, scratch);
203 __ Branch(&done, eq, scratch, Operand(zero_reg));
204
205 // Express exponent as delta to (number of mantissa bits + 31).
206 __ Subu(result_reg,
207 result_reg,
208 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
209
210 // If the delta is strictly positive, all bits would be shifted away,
211 // which means that we can return 0.
212 __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
213 __ mov(result_reg, zero_reg);
214 __ Branch(&done);
215
216 __ bind(&normal_exponent);
217 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
218 // Calculate shift.
219 __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
220
221 // Save the sign.
222 Register sign = result_reg;
223 result_reg = no_reg;
224 __ And(sign, input_high, Operand(HeapNumber::kSignMask));
225
226 // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
227 // to check for this specific case.
228 Label high_shift_needed, high_shift_done;
229 __ Branch(&high_shift_needed, lt, scratch, Operand(32));
230 __ mov(input_high, zero_reg);
231 __ Branch(&high_shift_done);
232 __ bind(&high_shift_needed);
233
234 // Set the implicit 1 before the mantissa part in input_high.
235 __ Or(input_high,
236 input_high,
237 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
238 // Shift the mantissa bits to the correct position.
239 // We don't need to clear non-mantissa bits as they will be shifted away.
240 // If they weren't, it would mean that the answer is in the 32-bit range.
241 __ sllv(input_high, input_high, scratch);
242
243 __ bind(&high_shift_done);
244
245 // Replace the shifted bits with bits from the lower mantissa word.
246 Label pos_shift, shift_done;
247 __ li(at, 32);
248 __ subu(scratch, at, scratch);
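// scratch now holds (32 - shift). A negative value means the low mantissa
// word still has to be shifted left by the remaining amount.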
249 __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
250
251 // Negate scratch.
252 __ Subu(scratch, zero_reg, scratch);
253 __ sllv(input_low, input_low, scratch);
254 __ Branch(&shift_done);
255
256 __ bind(&pos_shift);
257 __ srlv(input_low, input_low, scratch);
258
259 __ bind(&shift_done);
260 __ Or(input_high, input_high, Operand(input_low));
261 // Restore sign if necessary.
262 __ mov(scratch, sign);
263 result_reg = sign;
264 sign = no_reg;
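// Negate unconditionally, then keep the positive value (Movz) when the
// saved sign bit in scratch is zero.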
265 __ Subu(result_reg, zero_reg, input_high);
266 __ Movz(result_reg, input_high, scratch);
267
268 __ bind(&done);
269
270 __ Pop(scratch, scratch2, scratch3);
271 __ Ret();
272 }
273
274
275 void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
276 Isolate* isolate) {
277 WriteInt32ToHeapNumberStub stub1(isolate, a1, v0, a2, a3);
278 WriteInt32ToHeapNumberStub stub2(isolate, a2, v0, a3, a0);
279 stub1.GetCode();
280 stub2.GetCode();
281 }
282
283
284 // See comment for class, this does NOT work for int32's that are in Smi range.
285 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
286 Label max_negative_int;
287 // the_int_ has the answer which is a signed int32 but not a Smi.
288 // We test for the special value that has a different exponent.
289 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
290 // Test sign, and save for later conditionals.
291 __ And(sign(), the_int(), Operand(0x80000000u));
292 __ Branch(&max_negative_int, eq, the_int(), Operand(0x80000000u));
293
294 // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
295 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
296 uint32_t non_smi_exponent =
297 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
298 __ li(scratch(), Operand(non_smi_exponent));
299 // Set the sign bit in scratch_ if the value was negative.
300 __ or_(scratch(), scratch(), sign());
301 // Subtract from 0 if the value was negative.
302 __ subu(at, zero_reg, the_int());
303 __ Movn(the_int(), at, sign());
304 // We should be masking the implicit first digit of the mantissa away here,
305 // but it just ends up combining harmlessly with the last digit of the
306 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
307 // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
308 DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
309 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
310 __ srl(at, the_int(), shift_distance);
311 __ or_(scratch(), scratch(), at);
312 __ sw(scratch(), FieldMemOperand(the_heap_number(),
313 HeapNumber::kExponentOffset));
314 __ sll(scratch(), the_int(), 32 - shift_distance);
315 __ Ret(USE_DELAY_SLOT);
316 __ sw(scratch(), FieldMemOperand(the_heap_number(),
317 HeapNumber::kMantissaOffset));
318
319 __ bind(&max_negative_int);
320 // The max negative int32 is stored as a positive number in the mantissa of
321 // a double because it uses a sign bit instead of using two's complement.
322 // The actual mantissa bits stored are all 0 because the implicit most
323 // significant 1 bit is not stored.
324 non_smi_exponent += 1 << HeapNumber::kExponentShift;
325 __ li(scratch(), Operand(HeapNumber::kSignMask | non_smi_exponent));
326 __ sw(scratch(),
327 FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
328 __ mov(scratch(), zero_reg);
329 __ Ret(USE_DELAY_SLOT);
330 __ sw(scratch(),
331 FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
332 }
333
334
335 // Handle the case where the lhs and rhs are the same object.
336 // Equality is almost reflexive (everything but NaN), so this is a test
337 // for "identity and not NaN".
338 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
339 Label* slow,
340 Condition cc) {
341 Label not_identical;
342 Label heap_number, return_equal;
343 Register exp_mask_reg = t5;
344
345 __ Branch(&not_identical, ne, a0, Operand(a1));
346
347 __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
348
349 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
350 // so we do the second best thing - test it ourselves.
351 // They are both equal and they are not both Smis so both of them are not
352 // Smis. If it's not a heap number, then return equal.
353 if (cc == less || cc == greater) {
354 __ GetObjectType(a0, t4, t4);
355 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
356 } else {
357 __ GetObjectType(a0, t4, t4);
358 __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
359 // Comparing JS objects with <=, >= is complicated.
360 if (cc != eq) {
361 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
362 // Normally here we fall through to return_equal, but undefined is
363 // special: (undefined == undefined) == true, but
364 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
365 if (cc == less_equal || cc == greater_equal) {
366 __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
367 __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
368 __ Branch(&return_equal, ne, a0, Operand(t2));
369 DCHECK(is_int16(GREATER) && is_int16(LESS));
370 __ Ret(USE_DELAY_SLOT);
371 if (cc == le) {
372 // undefined <= undefined should fail.
373 __ li(v0, Operand(GREATER));
374 } else {
375 // undefined >= undefined should fail.
376 __ li(v0, Operand(LESS));
377 }
378 }
379 }
380 }
381
382 __ bind(&return_equal);
383 DCHECK(is_int16(GREATER) && is_int16(LESS));
384 __ Ret(USE_DELAY_SLOT);
385 if (cc == less) {
386 __ li(v0, Operand(GREATER)); // Things aren't less than themselves.
387 } else if (cc == greater) {
388 __ li(v0, Operand(LESS)); // Things aren't greater than themselves.
389 } else {
390 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
391 }
392
393 // For less and greater we don't have to check for NaN since the result of
394 // x < x is false regardless. For the others here is some code to check
395 // for NaN.
396 if (cc != lt && cc != gt) {
397 __ bind(&heap_number);
398 // It is a heap number, so return non-equal if it's NaN and equal if it's
399 // not NaN.
400
401 // The representation of NaN values has all exponent bits (52..62) set,
402 // and not all mantissa bits (0..51) clear.
403 // Read top bits of double representation (second word of value).
404 __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
405 // Test that exponent bits are all set.
406 __ And(t3, t2, Operand(exp_mask_reg));
407 // If all bits not set (ne cond), then not a NaN, objects are equal.
408 __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
409
410 // Shift out flag and all exponent bits, retaining only mantissa.
411 __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
412 // Or with all low-bits of mantissa.
413 __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
414 __ Or(v0, t3, Operand(t2));
415 // For equal we already have the right value in v0: Return zero (equal)
416 // if all bits in mantissa are zero (it's an Infinity) and non-zero if
417 // not (it's a NaN). For <= and >= we need to load v0 with the failing
418 // value if it's a NaN.
419 if (cc != eq) {
420 // All-zero means Infinity means equal.
421 __ Ret(eq, v0, Operand(zero_reg));
422 DCHECK(is_int16(GREATER) && is_int16(LESS));
423 __ Ret(USE_DELAY_SLOT);
424 if (cc == le) {
425 __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
426 } else {
427 __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
428 }
429 }
430 }
431 // No fall through here.
432
433 __ bind(&not_identical);
434 }
435
436
437 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
438 Register lhs,
439 Register rhs,
440 Label* both_loaded_as_doubles,
441 Label* slow,
442 bool strict) {
443 DCHECK((lhs.is(a0) && rhs.is(a1)) ||
444 (lhs.is(a1) && rhs.is(a0)));
445
446 Label lhs_is_smi;
447 __ JumpIfSmi(lhs, &lhs_is_smi);
448 // Rhs is a Smi.
449 // Check whether the non-smi is a heap number.
450 __ GetObjectType(lhs, t4, t4);
451 if (strict) {
452 // If lhs was not a number and rhs was a Smi then strict equality cannot
453 // succeed. Return non-equal (lhs is already not zero).
454 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
455 __ mov(v0, lhs);
456 } else {
457 // Smi compared non-strictly with a non-Smi non-heap-number. Call
458 // the runtime.
459 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
460 }
461
462 // Rhs is a smi, lhs is a number.
463 // Convert smi rhs to double.
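// The arithmetic shift right by kSmiTagSize untags the smi; mtc1 and
// cvt_d_w then widen the 32-bit integer to a double in f14.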
464 __ sra(at, rhs, kSmiTagSize);
465 __ mtc1(at, f14);
466 __ cvt_d_w(f14, f14);
467 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
468
469 // We now have both loaded as doubles.
470 __ jmp(both_loaded_as_doubles);
471
472 __ bind(&lhs_is_smi);
473 // Lhs is a Smi. Check whether the non-smi is a heap number.
474 __ GetObjectType(rhs, t4, t4);
475 if (strict) {
476 // If rhs was not a number and lhs was a Smi then strict equality cannot
477 // succeed. Return non-equal.
478 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
479 __ li(v0, Operand(1));
480 } else {
481 // Smi compared non-strictly with a non-Smi non-heap-number. Call
482 // the runtime.
483 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
484 }
485
486 // Lhs is a smi, rhs is a number.
487 // Convert smi lhs to double.
488 __ sra(at, lhs, kSmiTagSize);
489 __ mtc1(at, f12);
490 __ cvt_d_w(f12, f12);
491 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
492 // Fall through to both_loaded_as_doubles.
493 }
494
495
496 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
497 Register lhs,
498 Register rhs) {
499 // If either operand is a JS object or an oddball value, then they are
500 // not equal since their pointers are different.
501 // There is no test for undetectability in strict equality.
502 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
503 Label first_non_object;
504 // Get the type of the first operand into a2 and compare it with
505 // FIRST_SPEC_OBJECT_TYPE.
506 __ GetObjectType(lhs, a2, a2);
507 __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
508
509 // Return non-zero.
510 Label return_not_equal;
511 __ bind(&return_not_equal);
512 __ Ret(USE_DELAY_SLOT);
513 __ li(v0, Operand(1));
514
515 __ bind(&first_non_object);
516 // Check for oddballs: true, false, null, undefined.
517 __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
518
519 __ GetObjectType(rhs, a3, a3);
520 __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
521
522 // Check for oddballs: true, false, null, undefined.
523 __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
524
525 // Now that we have the types we might as well check for
526 // internalized-internalized.
527 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
528 __ Or(a2, a2, Operand(a3));
529 __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
530 __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
531 }
532
533
534 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
535 Register lhs,
536 Register rhs,
537 Label* both_loaded_as_doubles,
538 Label* not_heap_numbers,
539 Label* slow) {
540 __ GetObjectType(lhs, a3, a2);
541 __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
542 __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
543 // If first was a heap number & second wasn't, go to slow case.
544 __ Branch(slow, ne, a3, Operand(a2));
545
546 // Both are heap numbers. Load them up then jump to the code we have
547 // for that.
548 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
549 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
550
551 __ jmp(both_loaded_as_doubles);
552 }
553
554
555 // Fast negative check for internalized-to-internalized equality.
556 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
557 Register lhs,
558 Register rhs,
559 Label* possible_strings,
560 Label* not_both_strings) {
561 DCHECK((lhs.is(a0) && rhs.is(a1)) ||
562 (lhs.is(a1) && rhs.is(a0)));
563
564 // a2 is object type of rhs.
565 Label object_test;
566 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
567 __ And(at, a2, Operand(kIsNotStringMask));
568 __ Branch(&object_test, ne, at, Operand(zero_reg));
569 __ And(at, a2, Operand(kIsNotInternalizedMask));
570 __ Branch(possible_strings, ne, at, Operand(zero_reg));
571 __ GetObjectType(rhs, a3, a3);
572 __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
573 __ And(at, a3, Operand(kIsNotInternalizedMask));
574 __ Branch(possible_strings, ne, at, Operand(zero_reg));
575
576 // Both are internalized strings. We already checked they weren't the same
577 // pointer so they are not equal.
578 __ Ret(USE_DELAY_SLOT);
579 __ li(v0, Operand(1)); // Non-zero indicates not equal.
580
581 __ bind(&object_test);
582 __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
583 __ GetObjectType(rhs, a2, a3);
584 __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
585
586 // If both objects are undetectable, they are equal. Otherwise, they
587 // are not equal, since they are different objects and an object is not
588 // equal to undefined.
589 __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
590 __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
591 __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
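// AND the two bit fields so the undetectable bit survives only if it is
// set in both maps; the xori below then yields 0 (equal) exactly in that case.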
592 __ and_(a0, a2, a3);
593 __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
594 __ Ret(USE_DELAY_SLOT);
595 __ xori(v0, a0, 1 << Map::kIsUndetectable);
596 }
597
598
599 static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
600 Register scratch,
601 CompareICState::State expected,
602 Label* fail) {
603 Label ok;
604 if (expected == CompareICState::SMI) {
605 __ JumpIfNotSmi(input, fail);
606 } else if (expected == CompareICState::NUMBER) {
607 __ JumpIfSmi(input, &ok);
608 __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
609 DONT_DO_SMI_CHECK);
610 }
611 // We could be strict about internalized/string here, but as long as
612 // hydrogen doesn't care, the stub doesn't have to care either.
613 __ bind(&ok);
614 }
615
616
617 // On entry a1 (lhs) and a0 (rhs) are the values to be compared.
618 // On exit v0 is 0, positive or negative to indicate the result of
619 // the comparison.
620 void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
621 Register lhs = a1;
622 Register rhs = a0;
623 Condition cc = GetCondition();
624
625 Label miss;
626 CompareICStub_CheckInputType(masm, lhs, a2, left(), &miss);
627 CompareICStub_CheckInputType(masm, rhs, a3, right(), &miss);
628
629 Label slow; // Call builtin.
630 Label not_smis, both_loaded_as_doubles;
631
632 Label not_two_smis, smi_done;
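// Fast path: if both operands are smis their OR is also a smi, so a single
// tag check covers both; the untagged difference is then the result.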
633 __ Or(a2, a1, a0);
634 __ JumpIfNotSmi(a2, &not_two_smis);
635 __ sra(a1, a1, 1);
636 __ sra(a0, a0, 1);
637 __ Ret(USE_DELAY_SLOT);
638 __ subu(v0, a1, a0);
639 __ bind(&not_two_smis);
640
641 // NOTICE! This code is only reached after a smi-fast-case check, so
642 // it is certain that at least one operand isn't a smi.
643
644 // Handle the case where the objects are identical. Either returns the answer
645 // or goes to slow. Only falls through if the objects were not identical.
646 EmitIdenticalObjectComparison(masm, &slow, cc);
647
648 // If either is a Smi (we know that not both are), then they can only
649 // be strictly equal if the other is a HeapNumber.
650 STATIC_ASSERT(kSmiTag == 0);
651 DCHECK_EQ(0, Smi::FromInt(0));
652 __ And(t2, lhs, Operand(rhs));
653 __ JumpIfNotSmi(t2, &not_smis, t0);
654 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
655 // 1) Return the answer.
656 // 2) Go to slow.
657 // 3) Fall through to both_loaded_as_doubles.
658 // 4) Jump to rhs_not_nan.
659 // In cases 3 and 4 we have found out we were dealing with a number-number
660 // comparison and the numbers have been loaded into f12 and f14 as doubles,
661 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
662 EmitSmiNonsmiComparison(masm, lhs, rhs,
663 &both_loaded_as_doubles, &slow, strict());
664
665 __ bind(&both_loaded_as_doubles);
666 // f12, f14 are the double representations of the left hand side
667 // and the right hand side if we have FPU. Otherwise a2, a3 represent
668 // left hand side and a0, a1 represent right hand side.
669 Label nan;
670 __ li(t0, Operand(LESS));
671 __ li(t1, Operand(GREATER));
672 __ li(t2, Operand(EQUAL));
673
674 // Check if either rhs or lhs is NaN.
675 __ BranchF(NULL, &nan, eq, f12, f14);
676
677 // Check if LESS condition is satisfied. If true, move conditionally
678 // result to v0.
679 if (!IsMipsArchVariant(kMips32r6)) {
680 __ c(OLT, D, f12, f14);
681 __ Movt(v0, t0);
682 // Use the previous check to conditionally store the opposite condition
683 // (GREATER) to v0. If rhs is equal to lhs, this will be corrected by the
684 // next check.
685 __ Movf(v0, t1);
686 // Check if EQUAL condition is satisfied. If true, move conditionally
687 // result to v0.
688 __ c(EQ, D, f12, f14);
689 __ Movt(v0, t2);
690 } else {
691 Label skip;
692 __ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
693 __ mov(v0, t0); // Return LESS as result.
694
695 __ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
696 __ mov(v0, t2); // Return EQUAL as result.
697
698 __ mov(v0, t1); // Return GREATER as result.
699 __ bind(&skip);
700 }
701
702 __ Ret();
703
704 __ bind(&nan);
705 // NaN comparisons always fail.
706 // Load whatever we need in v0 to make the comparison fail.
707 DCHECK(is_int16(GREATER) && is_int16(LESS));
708 __ Ret(USE_DELAY_SLOT);
709 if (cc == lt || cc == le) {
710 __ li(v0, Operand(GREATER));
711 } else {
712 __ li(v0, Operand(LESS));
713 }
714
715
716 __ bind(&not_smis);
717 // At this point we know we are dealing with two different objects,
718 // and neither of them is a Smi. The objects are in lhs_ and rhs_.
719 if (strict()) {
720 // This returns non-equal for some object types, or falls through if it
721 // was not lucky.
722 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
723 }
724
725 Label check_for_internalized_strings;
726 Label flat_string_check;
727 // Check for heap-number-heap-number comparison. Can jump to slow case,
728 // or load both doubles and jump to the code that handles
729 // that case. If the inputs are not doubles then jumps to
730 // check_for_internalized_strings.
731 // In this case a2 will contain the type of lhs_.
732 EmitCheckForTwoHeapNumbers(masm,
733 lhs,
734 rhs,
735 &both_loaded_as_doubles,
736 &check_for_internalized_strings,
737 &flat_string_check);
738
739 __ bind(&check_for_internalized_strings);
740 if (cc == eq && !strict()) {
741 // Returns an answer for two internalized strings or two
742 // detectable objects.
743 // Otherwise jumps to string case or not both strings case.
744 // Assumes that a2 is the type of lhs_ on entry.
745 EmitCheckForInternalizedStringsOrObjects(
746 masm, lhs, rhs, &flat_string_check, &slow);
747 }
748
749 // Check for both being sequential one-byte strings,
750 // and inline if that is the case.
751 __ bind(&flat_string_check);
752
753 __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, a2, a3, &slow);
754
755 __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
756 a3);
757 if (cc == eq) {
758 StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, a2, a3, t0);
759 } else {
760 StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, a2, a3, t0,
761 t1);
762 }
763 // Never falls through to here.
764
765 __ bind(&slow);
766 // Prepare for call to builtin. Push object pointers, a1 (lhs) first,
767 // a0 (rhs) second.
768 __ Push(lhs, rhs);
769 // Figure out which native to call and setup the arguments.
770 Builtins::JavaScript native;
771 if (cc == eq) {
772 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
773 } else {
774 native = Builtins::COMPARE;
775 int ncr; // NaN compare result.
776 if (cc == lt || cc == le) {
777 ncr = GREATER;
778 } else {
779 DCHECK(cc == gt || cc == ge); // Remaining cases.
780 ncr = LESS;
781 }
782 __ li(a0, Operand(Smi::FromInt(ncr)));
783 __ push(a0);
784 }
785
786 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
787 // tagged as a small integer.
788 __ InvokeBuiltin(native, JUMP_FUNCTION);
789
790 __ bind(&miss);
791 GenerateMiss(masm);
792 }
793
794
795 void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
796 __ mov(t9, ra);
797 __ pop(ra);
798 __ PushSafepointRegisters();
799 __ Jump(t9);
800 }
801
802
803 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
804 __ mov(t9, ra);
805 __ pop(ra);
806 __ PopSafepointRegisters();
807 __ Jump(t9);
808 }
809
810
811 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
812 // We don't allow a GC during a store buffer overflow so there is no need to
813 // store the registers in any particular way, but we do have to store and
814 // restore them.
815 __ MultiPush(kJSCallerSaved | ra.bit());
816 if (save_doubles()) {
817 __ MultiPushFPU(kCallerSavedFPU);
818 }
819 const int argument_count = 1;
820 const int fp_argument_count = 0;
821 const Register scratch = a1;
822
823 AllowExternalCallThatCantCauseGC scope(masm);
824 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
825 __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
826 __ CallCFunction(
827 ExternalReference::store_buffer_overflow_function(isolate()),
828 argument_count);
829 if (save_doubles()) {
830 __ MultiPopFPU(kCallerSavedFPU);
831 }
832
833 __ MultiPop(kJSCallerSaved | ra.bit());
834 __ Ret();
835 }
836
837
838 void MathPowStub::Generate(MacroAssembler* masm) {
839 const Register base = a1;
840 const Register exponent = MathPowTaggedDescriptor::exponent();
841 DCHECK(exponent.is(a2));
842 const Register heapnumbermap = t1;
843 const Register heapnumber = v0;
844 const DoubleRegister double_base = f2;
845 const DoubleRegister double_exponent = f4;
846 const DoubleRegister double_result = f0;
847 const DoubleRegister double_scratch = f6;
848 const FPURegister single_scratch = f8;
849 const Register scratch = t5;
850 const Register scratch2 = t3;
851
852 Label call_runtime, done, int_exponent;
853 if (exponent_type() == ON_STACK) {
854 Label base_is_smi, unpack_exponent;
855 // The exponent and base are supplied as arguments on the stack.
856 // This can only happen if the stub is called from non-optimized code.
857 // Load input parameters from stack to double registers.
858 __ lw(base, MemOperand(sp, 1 * kPointerSize));
859 __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
860
861 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
862
863 __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
864 __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
865 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
866
867 __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
868 __ jmp(&unpack_exponent);
869
870 __ bind(&base_is_smi);
871 __ mtc1(scratch, single_scratch);
872 __ cvt_d_w(double_base, single_scratch);
873 __ bind(&unpack_exponent);
874
875 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
876
877 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
878 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
879 __ ldc1(double_exponent,
880 FieldMemOperand(exponent, HeapNumber::kValueOffset));
881 } else if (exponent_type() == TAGGED) {
882 // Base is already in double_base.
883 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
884
885 __ ldc1(double_exponent,
886 FieldMemOperand(exponent, HeapNumber::kValueOffset));
887 }
888
889 if (exponent_type() != INTEGER) {
890 Label int_exponent_convert;
891 // Detect integer exponents stored as double.
892 __ EmitFPUTruncate(kRoundToMinusInf,
893 scratch,
894 double_exponent,
895 at,
896 double_scratch,
897 scratch2,
898 kCheckForInexactConversion);
899 // scratch2 == 0 means there was no conversion error.
900 __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
901
902 if (exponent_type() == ON_STACK) {
903 // Detect square root case. Crankshaft detects constant +/-0.5 at
904 // compile time and uses DoMathPowHalf instead. We then skip this check
905 // for non-constant cases of +/-0.5 as these hardly occur.
906 Label not_plus_half;
907
908 // Test for 0.5.
909 __ Move(double_scratch, 0.5);
910 __ BranchF(USE_DELAY_SLOT,
911 &not_plus_half,
912 NULL,
913 ne,
914 double_exponent,
915 double_scratch);
916 // double_scratch can be overwritten in the delay slot.
917 // Calculates square root of base. Check for the special case of
918 // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
919 __ Move(double_scratch, -V8_INFINITY);
920 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
921 __ neg_d(double_result, double_scratch);
922
923 // Add +0 to convert -0 to +0.
924 __ add_d(double_scratch, double_base, kDoubleRegZero);
925 __ sqrt_d(double_result, double_scratch);
926 __ jmp(&done);
927
928 __ bind(&not_plus_half);
929 __ Move(double_scratch, -0.5);
930 __ BranchF(USE_DELAY_SLOT,
931 &call_runtime,
932 NULL,
933 ne,
934 double_exponent,
935 double_scratch);
936 // double_scratch can be overwritten in the delay slot.
937 // Calculates square root of base. Check for the special case of
938 // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
939 __ Move(double_scratch, -V8_INFINITY);
940 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
941 __ Move(double_result, kDoubleRegZero);
942
943 // Add +0 to convert -0 to +0.
944 __ add_d(double_scratch, double_base, kDoubleRegZero);
945 __ Move(double_result, 1);
946 __ sqrt_d(double_scratch, double_scratch);
947 __ div_d(double_result, double_result, double_scratch);
948 __ jmp(&done);
949 }
950
951 __ push(ra);
952 {
953 AllowExternalCallThatCantCauseGC scope(masm);
954 __ PrepareCallCFunction(0, 2, scratch2);
955 __ MovToFloatParameters(double_base, double_exponent);
956 __ CallCFunction(
957 ExternalReference::power_double_double_function(isolate()),
958 0, 2);
959 }
960 __ pop(ra);
961 __ MovFromFloatResult(double_result);
962 __ jmp(&done);
963
964 __ bind(&int_exponent_convert);
965 }
966
967 // Calculate power with integer exponent.
968 __ bind(&int_exponent);
969
970 // Get two copies of exponent in the registers scratch and exponent.
971 if (exponent_type() == INTEGER) {
972 __ mov(scratch, exponent);
973 } else {
974 // Exponent has previously been stored into scratch as untagged integer.
975 __ mov(exponent, scratch);
976 }
977
978 __ mov_d(double_scratch, double_base); // Back up base.
979 __ Move(double_result, 1.0);
980
981 // Get absolute value of exponent.
982 Label positive_exponent;
983 __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
984 __ Subu(scratch, zero_reg, scratch);
985 __ bind(&positive_exponent);
986
987 Label while_true, no_carry, loop_end;
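// Exponentiation by squaring: multiply the accumulated result by the base
// whenever the low bit of the exponent is set, then square the base and
// halve the exponent.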
988 __ bind(&while_true);
989
990 __ And(scratch2, scratch, 1);
991
992 __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
993 __ mul_d(double_result, double_result, double_scratch);
994 __ bind(&no_carry);
995
996 __ sra(scratch, scratch, 1);
997
998 __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
999 __ mul_d(double_scratch, double_scratch, double_scratch);
1000
1001 __ Branch(&while_true);
1002
1003 __ bind(&loop_end);
1004
1005 __ Branch(&done, ge, exponent, Operand(zero_reg));
1006 __ Move(double_scratch, 1.0);
1007 __ div_d(double_result, double_scratch, double_result);
1008 // Test whether result is zero. Bail out to check for subnormal result.
1009 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
1010 __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
1011
1012 // double_exponent may not contain the exponent value if the input was a
1013 // smi. We set it with exponent value before bailing out.
1014 __ mtc1(exponent, single_scratch);
1015 __ cvt_d_w(double_exponent, single_scratch);
1016
1017 // Returning or bailing out.
1018 Counters* counters = isolate()->counters();
1019 if (exponent_type() == ON_STACK) {
1020 // The arguments are still on the stack.
1021 __ bind(&call_runtime);
1022 __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
1023
1024 // The stub is called from non-optimized code, which expects the result
1025 // as heap number in exponent.
1026 __ bind(&done);
1027 __ AllocateHeapNumber(
1028 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
1029 __ sdc1(double_result,
1030 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
1031 DCHECK(heapnumber.is(v0));
1032 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1033 __ DropAndRet(2);
1034 } else {
1035 __ push(ra);
1036 {
1037 AllowExternalCallThatCantCauseGC scope(masm);
1038 __ PrepareCallCFunction(0, 2, scratch);
1039 __ MovToFloatParameters(double_base, double_exponent);
1040 __ CallCFunction(
1041 ExternalReference::power_double_double_function(isolate()),
1042 0, 2);
1043 }
1044 __ pop(ra);
1045 __ MovFromFloatResult(double_result);
1046
1047 __ bind(&done);
1048 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1049 __ Ret();
1050 }
1051 }
1052
1053
1054 bool CEntryStub::NeedsImmovableCode() {
1055 return true;
1056 }
1057
1058
1059 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
1060 CEntryStub::GenerateAheadOfTime(isolate);
1061 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
1062 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
1063 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
1064 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
1065 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
1066 BinaryOpICStub::GenerateAheadOfTime(isolate);
1067 StoreRegistersStateStub::GenerateAheadOfTime(isolate);
1068 RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
1069 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
1070 }
1071
1072
1073 void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
1074 StoreRegistersStateStub stub(isolate);
1075 stub.GetCode();
1076 }
1077
1078
1079 void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
1080 RestoreRegistersStateStub stub(isolate);
1081 stub.GetCode();
1082 }
1083
1084
1085 void CodeStub::GenerateFPStubs(Isolate* isolate) {
1086 // Generate if not already in cache.
1087 SaveFPRegsMode mode = kSaveFPRegs;
1088 CEntryStub(isolate, 1, mode).GetCode();
1089 StoreBufferOverflowStub(isolate, mode).GetCode();
1090 isolate->set_fp_stubs_generated(true);
1091 }
1092
1093
1094 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
1095 CEntryStub stub(isolate, 1, kDontSaveFPRegs);
1096 stub.GetCode();
1097 }
1098
1099
1100 void CEntryStub::Generate(MacroAssembler* masm) {
1101 // Called from JavaScript; parameters are on stack as if calling JS function
1102 // s0: number of arguments including receiver
1103 // s1: size of arguments excluding receiver
1104 // s2: pointer to builtin function
1105 // fp: frame pointer (restored after C call)
1106 // sp: stack pointer (restored as callee's sp after C call)
1107 // cp: current context (C callee-saved)
1108
1109 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1110
1111 // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
1112 // The reason for this is that these arguments would need to be saved anyway
1113 // so it's faster to set them up directly.
1114 // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
1115
1116 // Compute the argv pointer in a callee-saved register.
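// s1 holds the size of the arguments excluding the receiver, so adding sp
// yields the address of the first argument.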
1117 __ Addu(s1, sp, s1);
1118
1119 // Enter the exit frame that transitions from JavaScript to C++.
1120 FrameScope scope(masm, StackFrame::MANUAL);
1121 __ EnterExitFrame(save_doubles());
1122
1123 // s0: number of arguments including receiver (C callee-saved)
1124 // s1: pointer to first argument (C callee-saved)
1125 // s2: pointer to builtin function (C callee-saved)
1126
1127 // Prepare arguments for C routine.
1128 // a0 = argc
1129 __ mov(a0, s0);
1130 // a1 = argv (set in the delay slot after find_ra below).
1131
1132 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
1133 // also need to reserve the 4 argument slots on the stack.
1134
1135 __ AssertStackIsAligned();
1136
1137 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
1138
1139 // To let the GC traverse the return address of the exit frames, we need to
1140 // know where the return address is. The CEntryStub is unmovable, so
1141 // we can store the address on the stack to be able to find it again and
1142 // we never have to restore it, because it will not change.
1143 { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
1144 // This branch-and-link sequence is needed to find the current PC on mips,
1145 // saved to the ra register.
1146 // Use masm-> here instead of the double-underscore macro since extra
1147 // coverage code can interfere with the proper calculation of ra.
1148 Label find_ra;
1149 masm->bal(&find_ra); // bal exposes branch delay slot.
1150 masm->mov(a1, s1);
1151 masm->bind(&find_ra);
1152
1153 // Adjust the value in ra to point to the correct return location, 2nd
1154 // instruction past the real call into C code (the jalr(t9)), and push it.
1155 // This is the return address of the exit frame.
1156 const int kNumInstructionsToJump = 5;
1157 masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
1158 masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
1159 // Stack space reservation moved to the branch delay slot below.
1160 // Stack is still aligned.
1161
1162 // Call the C routine.
1163 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
1164 masm->jalr(t9);
1165 // Set up sp in the delay slot.
1166 masm->addiu(sp, sp, -kCArgsSlotsSize);
1167 // Make sure the stored 'ra' points to this position.
1168 DCHECK_EQ(kNumInstructionsToJump,
1169 masm->InstructionsGeneratedSince(&find_ra));
1170 }
1171
1172
1173 // Runtime functions should not return 'the hole'. Allowing it to escape may
1174 // lead to crashes in the IC code later.
1175 if (FLAG_debug_code) {
1176 Label okay;
1177 __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
1178 __ Branch(&okay, ne, v0, Operand(t0));
1179 __ stop("The hole escaped");
1180 __ bind(&okay);
1181 }
1182
1183 // Check result for exception sentinel.
1184 Label exception_returned;
1185 __ LoadRoot(t0, Heap::kExceptionRootIndex);
1186 __ Branch(&exception_returned, eq, t0, Operand(v0));
1187
1188 ExternalReference pending_exception_address(
1189 Isolate::kPendingExceptionAddress, isolate());
1190
1191 // Check that there is no pending exception, otherwise we
1192 // should have returned the exception sentinel.
1193 if (FLAG_debug_code) {
1194 Label okay;
1195 __ li(a2, Operand(pending_exception_address));
1196 __ lw(a2, MemOperand(a2));
1197 __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
1198 // Cannot use check here as it attempts to generate call into runtime.
1199 __ Branch(&okay, eq, t0, Operand(a2));
1200 __ stop("Unexpected pending exception");
1201 __ bind(&okay);
1202 }
1203
1204 // Exit C frame and return.
1205 // v0:v1: result
1206 // sp: stack pointer
1207 // fp: frame pointer
1208 // s0: still holds argc (callee-saved).
1209 __ LeaveExitFrame(save_doubles(), s0, true, EMIT_RETURN);
1210
1211 // Handling of exception.
1212 __ bind(&exception_returned);
1213
1214 // Retrieve the pending exception.
1215 __ li(a2, Operand(pending_exception_address));
1216 __ lw(v0, MemOperand(a2));
1217
1218 // Clear the pending exception.
1219 __ li(a3, Operand(isolate()->factory()->the_hole_value()));
1220 __ sw(a3, MemOperand(a2));
1221
1222 // Special handling of termination exceptions which are uncatchable
1223 // by javascript code.
1224 Label throw_termination_exception;
1225 __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
1226 __ Branch(&throw_termination_exception, eq, v0, Operand(t0));
1227
1228 // Handle normal exception.
1229 __ Throw(v0);
1230
1231 __ bind(&throw_termination_exception);
1232 __ ThrowUncatchable(v0);
1233 }
1234
1235
1236 void JSEntryStub::Generate(MacroAssembler* masm) {
1237 Label invoke, handler_entry, exit;
1238 Isolate* isolate = masm->isolate();
1239
1240 // Registers:
1241 // a0: entry address
1242 // a1: function
1243 // a2: receiver
1244 // a3: argc
1245 //
1246 // Stack:
1247 // 4 args slots
1248 // args
1249
1250 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1251
1252 // Save callee saved registers on the stack.
1253 __ MultiPush(kCalleeSaved | ra.bit());
1254
1255 // Save callee-saved FPU registers.
1256 __ MultiPushFPU(kCalleeSavedFPU);
1257 // Set up the reserved register for 0.0.
1258 __ Move(kDoubleRegZero, 0.0);
1259
1260
1261 // Load argv in s0 register.
1262 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
1263 offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
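// argv sits above the callee-saved GP registers (+ra) and FPU registers
// pushed above; the load below also skips the four C argument slots.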
1264
1265 __ InitializeRootRegister();
1266 __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
1267
1268 // We build an EntryFrame.
1269 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
1270 int marker = type();
1271 __ li(t2, Operand(Smi::FromInt(marker)));
1272 __ li(t1, Operand(Smi::FromInt(marker)));
1273 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
1274 isolate)));
1275 __ lw(t0, MemOperand(t0));
1276 __ Push(t3, t2, t1, t0);
1277 // Set up frame pointer for the frame to be pushed.
1278 __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
1279
1280 // Registers:
1281 // a0: entry_address
1282 // a1: function
1283 // a2: receiver_pointer
1284 // a3: argc
1285 // s0: argv
1286 //
1287 // Stack:
1288 // caller fp |
1289 // function slot | entry frame
1290 // context slot |
1291 // bad fp (0xff...f) |
1292 // callee saved registers + ra
1293 // 4 args slots
1294 // args
1295
1296 // If this is the outermost JS call, set js_entry_sp value.
1297 Label non_outermost_js;
1298 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
1299 __ li(t1, Operand(ExternalReference(js_entry_sp)));
1300 __ lw(t2, MemOperand(t1));
1301 __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
1302 __ sw(fp, MemOperand(t1));
1303 __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1304 Label cont;
1305 __ b(&cont);
1306 __ nop(); // Branch delay slot nop.
1307 __ bind(&non_outermost_js);
1308 __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
1309 __ bind(&cont);
1310 __ push(t0);
1311
1312 // Jump to a faked try block that does the invoke, with a faked catch
1313 // block that sets the pending exception.
1314 __ jmp(&invoke);
1315 __ bind(&handler_entry);
1316 handler_offset_ = handler_entry.pos();
1317 // Caught exception: Store result (exception) in the pending exception
1318 // field in the JSEnv and return a failure sentinel. Coming in here the
1319 // fp will be invalid because the PushTryHandler below sets it to 0 to
1320 // signal the existence of the JSEntry frame.
1321 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1322 isolate)));
1323 __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
1324 __ LoadRoot(v0, Heap::kExceptionRootIndex);
1325 __ b(&exit); // b exposes branch delay slot.
1326 __ nop(); // Branch delay slot nop.
1327
1328 // Invoke: Link this frame into the handler chain. There's only one
1329 // handler block in this code object, so its index is 0.
1330 __ bind(&invoke);
1331 __ PushTryHandler(StackHandler::JS_ENTRY, 0);
1332 // If an exception not caught by another handler occurs, this handler
1333 // returns control to the code after the bal(&invoke) above, which
1334 // restores all kCalleeSaved registers (including cp and fp) to their
1335 // saved values before returning a failure to C.
1336
1337 // Clear any pending exceptions.
1338 __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
1339 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1340 isolate)));
1341 __ sw(t1, MemOperand(t0));
1342
1343 // Invoke the function by calling through JS entry trampoline builtin.
1344 // Notice that we cannot store a reference to the trampoline code directly in
1345 // this stub, because runtime stubs are not traversed when doing GC.
1346
1347 // Registers:
1348 // a0: entry_address
1349 // a1: function
1350 // a2: receiver_pointer
1351 // a3: argc
1352 // s0: argv
1353 //
1354 // Stack:
1355 // handler frame
1356 // entry frame
1357 // callee saved registers + ra
1358 // 4 args slots
1359 // args
1360
1361 if (type() == StackFrame::ENTRY_CONSTRUCT) {
1362 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
1363 isolate);
1364 __ li(t0, Operand(construct_entry));
1365 } else {
1366 ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
1367 __ li(t0, Operand(entry));
1368 }
1369 __ lw(t9, MemOperand(t0)); // Deref address.
1370
1371 // Call JSEntryTrampoline.
1372 __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
1373 __ Call(t9);
1374
1375 // Unlink this frame from the handler chain.
1376 __ PopTryHandler();
1377
1378 __ bind(&exit); // v0 holds result
1379 // Check if the current stack frame is marked as the outermost JS frame.
1380 Label non_outermost_js_2;
1381 __ pop(t1);
1382 __ Branch(&non_outermost_js_2,
1383 ne,
1384 t1,
1385 Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1386 __ li(t1, Operand(ExternalReference(js_entry_sp)));
1387 __ sw(zero_reg, MemOperand(t1));
1388 __ bind(&non_outermost_js_2);
1389
1390 // Restore the top frame descriptors from the stack.
1391 __ pop(t1);
1392 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
1393 isolate)));
1394 __ sw(t1, MemOperand(t0));
1395
1396 // Reset the stack to the callee saved registers.
1397 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
1398
1399 // Restore callee-saved fpu registers.
1400 __ MultiPopFPU(kCalleeSavedFPU);
1401
1402 // Restore callee saved registers from the stack.
1403 __ MultiPop(kCalleeSaved | ra.bit());
1404 // Return.
1405 __ Jump(ra);
1406 }
1407
1408
1409 // Uses registers a0 to t0.
1410 // Expected input (depending on whether args are in registers or on the stack):
1411 // * object: a0 or at sp + 1 * kPointerSize.
1412 // * function: a1 or at sp.
1413 //
1414 // An inlined call site may have been generated before calling this stub.
1415 // In this case the offset to the inline site to patch is passed on the stack,
1416 // in the safepoint slot for register t0.
1417 void InstanceofStub::Generate(MacroAssembler* masm) {
1418 // Call site inlining and patching implies arguments in registers.
1419 DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
1420 // ReturnTrueFalse is only implemented for inlined call sites.
1421 DCHECK(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
1422
1423 // Fixed register usage throughout the stub:
1424 const Register object = a0; // Object (lhs).
1425 Register map = a3; // Map of the object.
1426 const Register function = a1; // Function (rhs).
1427 const Register prototype = t0; // Prototype of the function.
1428 const Register inline_site = t5;
1429 const Register scratch = a2;
1430
1431 const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
1432
1433 Label slow, loop, is_instance, is_not_instance, not_js_object;
1434
1435 if (!HasArgsInRegisters()) {
1436 __ lw(object, MemOperand(sp, 1 * kPointerSize));
1437 __ lw(function, MemOperand(sp, 0));
1438 }
1439
1440 // Check that the left hand is a JS object and load map.
1441 __ JumpIfSmi(object, &not_js_object);
1442 __ IsObjectJSObjectType(object, map, scratch, ¬_js_object);
1443
1444 // If there is a call site cache don't look in the global cache, but do the
1445 // real lookup and update the call site cache.
1446 if (!HasCallSiteInlineCheck()) {
1447 Label miss;
1448 __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
1449 __ Branch(&miss, ne, function, Operand(at));
1450 __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
1451 __ Branch(&miss, ne, map, Operand(at));
1452 __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1453 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1454
1455 __ bind(&miss);
1456 }
1457
1458 // Get the prototype of the function.
1459 __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
1460
1461 // Check that the function prototype is a JS object.
1462 __ JumpIfSmi(prototype, &slow);
1463 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
1464
1465 // Update the global instanceof or call site inlined cache with the current
1466 // map and function. The cached answer will be set when it is known below.
1467 if (!HasCallSiteInlineCheck()) {
1468 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1469 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
1470 } else {
1471 DCHECK(HasArgsInRegisters());
1472 // Patch the (relocated) inlined map check.
1473
1474 // The offset was stored in t0 safepoint slot.
1475 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
1476 __ LoadFromSafepointRegisterSlot(scratch, t0);
1477 __ Subu(inline_site, ra, scratch);
1478 // Get the map location in scratch and patch it.
1479 __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
1480 __ sw(map, FieldMemOperand(scratch, Cell::kValueOffset));
1481 }
1482
1483 // Register mapping: a3 is object map and t0 is function prototype.
1484 // Get prototype of object into a2.
1485 __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
1486
1487 // We don't need map any more. Use it as a scratch register.
1488 Register scratch2 = map;
1489 map = no_reg;
1490
1491 // Loop through the prototype chain looking for the function prototype.
1492 __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
1493 __ bind(&loop);
1494 __ Branch(&is_instance, eq, scratch, Operand(prototype));
1495 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
1496 __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
1497 __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
1498 __ Branch(&loop);
1499
1500 __ bind(&is_instance);
1501 DCHECK(Smi::FromInt(0) == 0);
1502 if (!HasCallSiteInlineCheck()) {
1503 __ mov(v0, zero_reg);
1504 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1505 } else {
1506 // Patch the call site to return true.
1507 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
1508 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
1509 // Get the boolean result location in scratch and patch it.
1510 __ PatchRelocatedValue(inline_site, scratch, v0);
1511
1512 if (!ReturnTrueFalseObject()) {
1513 DCHECK_EQ(Smi::FromInt(0), 0);
1514 __ mov(v0, zero_reg);
1515 }
1516 }
1517 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1518
1519 __ bind(&is_not_instance);
1520 if (!HasCallSiteInlineCheck()) {
1521 __ li(v0, Operand(Smi::FromInt(1)));
1522 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1523 } else {
1524 // Patch the call site to return false.
1525 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1526 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
1527 // Get the boolean result location in scratch and patch it.
1528 __ PatchRelocatedValue(inline_site, scratch, v0);
1529
1530 if (!ReturnTrueFalseObject()) {
1531 __ li(v0, Operand(Smi::FromInt(1)));
1532 }
1533 }
1534
1535 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1536
1537 Label object_not_null, object_not_null_or_smi;
1538 __ bind(&not_js_object);
1539 // Before the null, smi and string value checks, check that the rhs is a
1540 // function, as an exception needs to be thrown for a non-function rhs.
1541 __ JumpIfSmi(function, &slow);
1542 __ GetObjectType(function, scratch2, scratch);
1543 __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
1544
1545 // Null is not instance of anything.
1546 __ Branch(&object_not_null,
1547 ne,
1548 scratch,
1549 Operand(isolate()->factory()->null_value()));
1550 __ li(v0, Operand(Smi::FromInt(1)));
1551 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1552
1553 __ bind(&object_not_null);
1554 // Smi values are not instances of anything.
1555 __ JumpIfNotSmi(object, &object_not_null_or_smi);
1556 __ li(v0, Operand(Smi::FromInt(1)));
1557 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1558
1559 __ bind(&object_not_null_or_smi);
1560 // String values are not instances of anything.
1561 __ IsObjectJSStringType(object, scratch, &slow);
1562 __ li(v0, Operand(Smi::FromInt(1)));
1563 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1564
1565 // Slow-case. Tail call builtin.
1566 __ bind(&slow);
1567 if (!ReturnTrueFalseObject()) {
1568 if (HasArgsInRegisters()) {
1569 __ Push(a0, a1);
1570 }
1571 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
1572 } else {
1573 {
1574 FrameScope scope(masm, StackFrame::INTERNAL);
1575 __ Push(a0, a1);
1576 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
1577 }
1578 __ mov(a0, v0);
1579 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
1580 __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
1581 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1582 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1583 }
1584 }
1585
1586
1587 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
1588 Label miss;
1589 Register receiver = LoadDescriptor::ReceiverRegister();
1590 NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3,
1591 t0, &miss);
1592 __ bind(&miss);
1593 PropertyAccessCompiler::TailCallBuiltin(
1594 masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
1595 }
1596
1597
1598 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
1599 // The displacement is the offset of the last parameter (if any)
1600 // relative to the frame pointer.
1601 const int kDisplacement =
1602 StandardFrameConstants::kCallerSPOffset - kPointerSize;
1603 DCHECK(a1.is(ArgumentsAccessReadDescriptor::index()));
1604 DCHECK(a0.is(ArgumentsAccessReadDescriptor::parameter_count()));
1605
1606 // Check that the key is a smi.
1607 Label slow;
1608 __ JumpIfNotSmi(a1, &slow);
1609
1610 // Check if the calling frame is an arguments adaptor frame.
1611 Label adaptor;
1612 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1613 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
1614 __ Branch(&adaptor,
1615 eq,
1616 a3,
1617 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1618
1619 // Check index (a1) against formal parameters count limit passed in
1620 // through register a0. Use unsigned comparison to get negative
1621 // check for free.
1622 __ Branch(&slow, hs, a1, Operand(a0));
1623
1624 // Read the argument from the stack and return it.
1625 __ subu(a3, a0, a1);
1626 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
1627 __ Addu(a3, fp, Operand(t3));
1628 __ Ret(USE_DELAY_SLOT);
1629 __ lw(v0, MemOperand(a3, kDisplacement));
1630
1631 // Arguments adaptor case: Check index (a1) against actual arguments
1632 // limit found in the arguments adaptor frame. Use unsigned
1633 // comparison to get negative check for free.
1634 __ bind(&adaptor);
1635 __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1636 __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
1637
1638 // Read the argument from the adaptor frame and return it.
1639 __ subu(a3, a0, a1);
1640 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
1641 __ Addu(a3, a2, Operand(t3));
1642 __ Ret(USE_DELAY_SLOT);
1643 __ lw(v0, MemOperand(a3, kDisplacement));
1644
1645 // Slow-case: Handle non-smi or out-of-bounds access to arguments
1646 // by calling the runtime system.
1647 __ bind(&slow);
1648 __ push(a1);
1649 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
1650 }
1651
1652
1653 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
1654 // sp[0] : number of parameters
1655 // sp[4] : receiver displacement
1656 // sp[8] : function
1657 // Check if the calling frame is an arguments adaptor frame.
1658 Label runtime;
1659 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1660 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
1661 __ Branch(&runtime,
1662 ne,
1663 a2,
1664 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1665
1666 // Patch the arguments.length and the parameters pointer in the current frame.
1667 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
1668 __ sw(a2, MemOperand(sp, 0 * kPointerSize));
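// a2 holds the argument count as a smi (value << 1), so one more left shift
// scales it to count * kPointerSize; a3 then points at the parameters of the
// adaptor frame.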
1669 __ sll(t3, a2, 1);
1670 __ Addu(a3, a3, Operand(t3));
1671 __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
1672 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
1673
1674 __ bind(&runtime);
1675 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
1676 }
1677
1678
1679 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
1680 // Stack layout:
1681 // sp[0] : number of parameters (tagged)
1682 // sp[4] : address of receiver argument
1683 // sp[8] : function
1684 // Registers used over whole function:
1685 // t2 : allocated object (tagged)
1686 // t5 : mapped parameter count (tagged)
1687
1688 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
1689 // a1 = parameter count (tagged)
1690
1691 // Check if the calling frame is an arguments adaptor frame.
1692 Label runtime;
1693 Label adaptor_frame, try_allocate;
1694 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1695 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
1696 __ Branch(&adaptor_frame,
1697 eq,
1698 a2,
1699 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1700
1701 // No adaptor, parameter count = argument count.
1702 __ mov(a2, a1);
1703 __ b(&try_allocate);
1704 __ nop(); // Branch delay slot nop.
1705
1706 // We have an adaptor frame. Patch the parameters pointer.
1707 __ bind(&adaptor_frame);
1708 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
1709 __ sll(t6, a2, 1);
1710 __ Addu(a3, a3, Operand(t6));
1711 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
1712 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
1713
1714 // a1 = parameter count (tagged)
1715 // a2 = argument count (tagged)
1716 // Compute the mapped parameter count = min(a1, a2) in a1.
1717 Label skip_min;
1718 __ Branch(&skip_min, lt, a1, Operand(a2));
1719 __ mov(a1, a2);
1720 __ bind(&skip_min);
1721
1722 __ bind(&try_allocate);
1723
1724 // Compute the sizes of backing store, parameter map, and arguments object.
1725 // 1. Parameter map, has 2 extra words containing context and backing store.
1726 const int kParameterMapHeaderSize =
1727 FixedArray::kHeaderSize + 2 * kPointerSize;
1728 // If there are no mapped parameters, we do not need the parameter_map.
1729 Label param_map_size;
1730 DCHECK_EQ(0, Smi::FromInt(0));
1731 __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
1732 __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
1733 __ sll(t5, a1, 1);
1734 __ addiu(t5, t5, kParameterMapHeaderSize);
1735 __ bind(&param_map_size);
1736
1737 // 2. Backing store.
1738 __ sll(t6, a2, 1);
1739 __ Addu(t5, t5, Operand(t6));
1740 __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
1741
1742 // 3. Arguments object.
1743 __ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize));
1744
1745 // Do the allocation of all three objects in one go.
1746 __ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT);
1747
1748 // v0 = address of new object(s) (tagged)
1749 // a2 = argument count (smi-tagged)
1750 // Get the arguments boilerplate from the current native context into t0.
1751 const int kNormalOffset =
1752 Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
1753 const int kAliasedOffset =
1754 Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
1755
1756 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
1757 __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
1758 Label skip2_ne, skip2_eq;
1759 __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
1760 __ lw(t0, MemOperand(t0, kNormalOffset));
1761 __ bind(&skip2_ne);
1762
1763 __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
1764 __ lw(t0, MemOperand(t0, kAliasedOffset));
1765 __ bind(&skip2_eq);
1766
1767 // v0 = address of new object (tagged)
1768 // a1 = mapped parameter count (tagged)
1769 // a2 = argument count (smi-tagged)
1770 // t0 = address of arguments map (tagged)
1771 __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
1772 __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
1773 __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
1774 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
1775
1776 // Set up the callee in-object property.
1777 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
1778 __ lw(a3, MemOperand(sp, 2 * kPointerSize));
1779 __ AssertNotSmi(a3);
1780 const int kCalleeOffset = JSObject::kHeaderSize +
1781 Heap::kArgumentsCalleeIndex * kPointerSize;
1782 __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
1783
1784 // Use the length (smi tagged) and set that as an in-object property too.
1785 __ AssertSmi(a2);
1786 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
1787 const int kLengthOffset = JSObject::kHeaderSize +
1788 Heap::kArgumentsLengthIndex * kPointerSize;
1789 __ sw(a2, FieldMemOperand(v0, kLengthOffset));
1790
1791 // Set up the elements pointer in the allocated arguments object.
1792 // If we allocated a parameter map, t0 will point there, otherwise
1793 // it will point to the backing store.
1794 __ Addu(t0, v0, Operand(Heap::kSloppyArgumentsObjectSize));
1795 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
1796
1797 // v0 = address of new object (tagged)
1798 // a1 = mapped parameter count (tagged)
1799 // a2 = argument count (tagged)
1800 // t0 = address of parameter map or backing store (tagged)
1801 // Initialize parameter map. If there are no mapped arguments, we're done.
1802 Label skip_parameter_map;
1803 Label skip3;
1804 __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
1805 // Move backing store address to a3, because it is
1806 // expected there when filling in the unmapped arguments.
1807 __ mov(a3, t0);
1808 __ bind(&skip3);
1809
1810 __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
1811
1812 __ LoadRoot(t2, Heap::kSloppyArgumentsElementsMapRootIndex);
1813 __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
1814 __ Addu(t2, a1, Operand(Smi::FromInt(2)));
1815 __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
1816 __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
1817 __ sll(t6, a1, 1);
1818 __ Addu(t2, t0, Operand(t6));
1819 __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
1820 __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
1821
1822 // Copy the parameter slots and the holes in the arguments.
1823 // We need to fill in mapped_parameter_count slots. They index the context,
1824 // where parameters are stored in reverse order, at
1825 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
1826 // The mapped parameters thus need to get indices
1827 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
1828 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
1829 // We loop from right to left.
1830 Label parameters_loop, parameters_test;
1831 __ mov(t2, a1);
1832 __ lw(t5, MemOperand(sp, 0 * kPointerSize));
1833 __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
1834 __ Subu(t5, t5, Operand(a1));
1835 __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
1836 __ sll(t6, t2, 1);
1837 __ Addu(a3, t0, Operand(t6));
1838 __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
1839
1840 // t2 = loop variable (tagged)
1841 // a1 = mapping index (tagged)
1842 // a3 = address of backing store (tagged)
1843 // t0 = address of parameter map (tagged)
1844 // t1 = temporary scratch (a.o., for address calculation)
1845 // t3 = the hole value
1846 __ jmp(&parameters_test);
1847
1848 __ bind(&parameters_loop);
1849 __ Subu(t2, t2, Operand(Smi::FromInt(1)));
1850 __ sll(t1, t2, 1);
1851 __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
1852 __ Addu(t6, t0, t1);
1853 __ sw(t5, MemOperand(t6));
1854 __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
1855 __ Addu(t6, a3, t1);
1856 __ sw(t3, MemOperand(t6));
1857 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
1858 __ bind(&parameters_test);
1859 __ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0)));
1860
1861 __ bind(&skip_parameter_map);
1862 // a2 = argument count (tagged)
1863 // a3 = address of backing store (tagged)
1864 // t1 = scratch
1865 // Copy arguments header and remaining slots (if there are any).
1866 __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
1867 __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
1868 __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
1869
1870 Label arguments_loop, arguments_test;
1871 __ mov(t5, a1);
1872 __ lw(t0, MemOperand(sp, 1 * kPointerSize));
1873 __ sll(t6, t5, 1);
1874 __ Subu(t0, t0, Operand(t6));
1875 __ jmp(&arguments_test);
1876
1877 __ bind(&arguments_loop);
1878 __ Subu(t0, t0, Operand(kPointerSize));
1879 __ lw(t2, MemOperand(t0, 0));
1880 __ sll(t6, t5, 1);
1881 __ Addu(t1, a3, Operand(t6));
1882 __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
1883 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
1884
1885 __ bind(&arguments_test);
1886 __ Branch(&arguments_loop, lt, t5, Operand(a2));
1887
1888 // Return and remove the on-stack parameters.
1889 __ DropAndRet(3);
1890
1891 // Do the runtime call to allocate the arguments object.
1892 // a2 = argument count (tagged)
1893 __ bind(&runtime);
1894 __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
1895 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
1896 }
1897
1898
1899 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
1900 // Return address is in ra.
1901 Label slow;
1902
1903 Register receiver = LoadDescriptor::ReceiverRegister();
1904 Register key = LoadDescriptor::NameRegister();
1905
1906 // Check that the key is an array index, that is, a Uint32.
1907 __ And(t0, key, Operand(kSmiTagMask | kSmiSignMask));
1908 __ Branch(&slow, ne, t0, Operand(zero_reg));
1909
1910 // Everything is fine, call runtime.
1911 __ Push(receiver, key); // Receiver, key.
1912
1913 // Perform tail call to the entry.
1914 __ TailCallExternalReference(
1915 ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
1916 masm->isolate()),
1917 2, 1);
1918
1919 __ bind(&slow);
1920 PropertyAccessCompiler::TailCallBuiltin(
1921 masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1922 }
1923
1924
1925 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
1926 // sp[0] : number of parameters
1927 // sp[4] : receiver displacement
1928 // sp[8] : function
1929 // Check if the calling frame is an arguments adaptor frame.
1930 Label adaptor_frame, try_allocate, runtime;
1931 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1932 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
1933 __ Branch(&adaptor_frame,
1934 eq,
1935 a3,
1936 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1937
1938 // Get the length from the frame.
1939 __ lw(a1, MemOperand(sp, 0));
1940 __ Branch(&try_allocate);
1941
1942 // Patch the arguments.length and the parameters pointer.
1943 __ bind(&adaptor_frame);
1944 __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1945 __ sw(a1, MemOperand(sp, 0));
1946 __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
1947 __ Addu(a3, a2, Operand(at));
1948
1949 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
1950 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
1951
1952 // Try the new space allocation. Start out with computing the size
1953 // of the arguments object and the elements array in words.
1954 Label add_arguments_object;
1955 __ bind(&try_allocate);
1956 __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
1957 __ srl(a1, a1, kSmiTagSize);
1958
1959 __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
1960 __ bind(&add_arguments_object);
1961 __ Addu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
1962
1963 // Do the allocation of both objects in one go.
1964 __ Allocate(a1, v0, a2, a3, &runtime,
1965 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
1966
1967 // Get the arguments boilerplate from the current native context.
1968 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
1969 __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
1970 __ lw(t0, MemOperand(
1971 t0, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
1972
1973 __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
1974 __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
1975 __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
1976 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
1977
1978 // Get the length (smi tagged) and set that as an in-object property too.
1979 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
1980 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
1981 __ AssertSmi(a1);
1982 __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
1983 Heap::kArgumentsLengthIndex * kPointerSize));
1984
1985 Label done;
1986 __ Branch(&done, eq, a1, Operand(zero_reg));
1987
1988 // Get the parameters pointer from the stack.
1989 __ lw(a2, MemOperand(sp, 1 * kPointerSize));
1990
1991 // Set up the elements pointer in the allocated arguments object and
1992 // initialize the header in the elements fixed array.
1993 __ Addu(t0, v0, Operand(Heap::kStrictArgumentsObjectSize));
1994 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
1995 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
1996 __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
1997 __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
1998 // Untag the length for the loop.
1999 __ srl(a1, a1, kSmiTagSize);
2000
2001 // Copy the fixed array slots.
2002 Label loop;
2003 // Set up t0 to point to the first array slot.
2004 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2005 __ bind(&loop);
2006 // Pre-decrement a2 with kPointerSize on each iteration.
2007 // Pre-decrement in order to skip receiver.
2008 __ Addu(a2, a2, Operand(-kPointerSize));
2009 __ lw(a3, MemOperand(a2));
2010 // Post-increment t0 with kPointerSize on each iteration.
2011 __ sw(a3, MemOperand(t0));
2012 __ Addu(t0, t0, Operand(kPointerSize));
2013 __ Subu(a1, a1, Operand(1));
2014 __ Branch(&loop, ne, a1, Operand(zero_reg));
2015
2016 // Return and remove the on-stack parameters.
2017 __ bind(&done);
2018 __ DropAndRet(3);
2019
2020 // Do the runtime call to allocate the arguments object.
2021 __ bind(&runtime);
2022 __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
2023 }
2024
2025
2026 void RegExpExecStub::Generate(MacroAssembler* masm) {
2027 // Just jump directly to runtime if native RegExp is not selected at compile
2028 // time, or if regexp entry in generated code is turned off by a runtime
2029 // switch or at compilation.
2030 #ifdef V8_INTERPRETED_REGEXP
2031 __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2032 #else // V8_INTERPRETED_REGEXP
2033
2034 // Stack frame on entry.
2035 // sp[0]: last_match_info (expected JSArray)
2036 // sp[4]: previous index
2037 // sp[8]: subject string
2038 // sp[12]: JSRegExp object
2039
2040 const int kLastMatchInfoOffset = 0 * kPointerSize;
2041 const int kPreviousIndexOffset = 1 * kPointerSize;
2042 const int kSubjectOffset = 2 * kPointerSize;
2043 const int kJSRegExpOffset = 3 * kPointerSize;
2044
2045 Label runtime;
2046 // Allocation of registers for this function. These are in callee save
2047 // registers and will be preserved by the call to the native RegExp code, as
2048 // this code is called using the normal C calling convention. When calling
2049 // directly from generated code the native RegExp code will not do a GC and
2050 // therefore the contents of these registers are safe to use after the call.
2051 // MIPS - using s0..s2, since we are not using CEntry Stub.
2052 Register subject = s0;
2053 Register regexp_data = s1;
2054 Register last_match_info_elements = s2;
2055
2056 // Ensure that a RegExp stack is allocated.
2057 ExternalReference address_of_regexp_stack_memory_address =
2058 ExternalReference::address_of_regexp_stack_memory_address(
2059 isolate());
2060 ExternalReference address_of_regexp_stack_memory_size =
2061 ExternalReference::address_of_regexp_stack_memory_size(isolate());
2062 __ li(a0, Operand(address_of_regexp_stack_memory_size));
2063 __ lw(a0, MemOperand(a0, 0));
2064 __ Branch(&runtime, eq, a0, Operand(zero_reg));
2065
2066 // Check that the first argument is a JSRegExp object.
2067 __ lw(a0, MemOperand(sp, kJSRegExpOffset));
2068 STATIC_ASSERT(kSmiTag == 0);
2069 __ JumpIfSmi(a0, &runtime);
2070 __ GetObjectType(a0, a1, a1);
2071 __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
2072
2073 // Check that the RegExp has been compiled (data contains a fixed array).
2074 __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
2075 if (FLAG_debug_code) {
2076 __ SmiTst(regexp_data, t0);
2077 __ Check(nz,
2078 kUnexpectedTypeForRegExpDataFixedArrayExpected,
2079 t0,
2080 Operand(zero_reg));
2081 __ GetObjectType(regexp_data, a0, a0);
2082 __ Check(eq,
2083 kUnexpectedTypeForRegExpDataFixedArrayExpected,
2084 a0,
2085 Operand(FIXED_ARRAY_TYPE));
2086 }
2087
2088 // regexp_data: RegExp data (FixedArray)
2089 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2090 __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2091 __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
2092
2093 // regexp_data: RegExp data (FixedArray)
2094 // Check that the number of captures fit in the static offsets vector buffer.
2095 __ lw(a2,
2096 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2097 // Check (number_of_captures + 1) * 2 <= offsets vector size
2098 // Or number_of_captures * 2 <= offsets vector size - 2
2099 // Multiplying by 2 comes for free since a2 is smi-tagged.
2100 STATIC_ASSERT(kSmiTag == 0);
2101 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2102 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
2103 __ Branch(
2104 &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
2105
2106 // Reset offset for possibly sliced string.
2107 __ mov(t0, zero_reg);
2108 __ lw(subject, MemOperand(sp, kSubjectOffset));
2109 __ JumpIfSmi(subject, &runtime);
2110 __ mov(a3, subject); // Make a copy of the original subject string.
2111 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2112 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2113 // subject: subject string
2114 // a3: subject string
2115 // a0: subject string instance type
2116 // regexp_data: RegExp data (FixedArray)
2117 // Handle subject string according to its encoding and representation:
2118 // (1) Sequential string? If yes, go to (5).
2119 // (2) Anything but sequential or cons? If yes, go to (6).
2120 // (3) Cons string. If the string is flat, replace subject with first string.
2121 // Otherwise bailout.
2122 // (4) Is subject external? If yes, go to (7).
2123 // (5) Sequential string. Load regexp code according to encoding.
2124 // (E) Carry on.
2125 /// [...]
2126
2127 // Deferred code at the end of the stub:
2128 // (6) Not a long external string? If yes, go to (8).
2129 // (7) External string. Make it, offset-wise, look like a sequential string.
2130 // Go to (5).
2131 // (8) Short external string or not a string? If yes, bail out to runtime.
2132 // (9) Sliced string. Replace subject with parent. Go to (4).
2133
2134 Label seq_string /* 5 */, external_string /* 7 */,
2135 check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
2136 not_long_external /* 8 */;
2137
2138 // (1) Sequential string? If yes, go to (5).
2139 __ And(a1,
2140 a0,
2141 Operand(kIsNotStringMask |
2142 kStringRepresentationMask |
2143 kShortExternalStringMask));
2144 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
2145 __ Branch(&seq_string, eq, a1, Operand(zero_reg)); // Go to (5).
2146
2147 // (2) Anything but sequential or cons? If yes, go to (6).
2148 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
2149 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
2150 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
2151 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
2152 // Go to (6).
2153 __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
2154
2155 // (3) Cons string. Check that it's flat.
2156 // Replace subject with first string and reload instance type.
2157 __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
2158 __ LoadRoot(a1, Heap::kempty_stringRootIndex);
2159 __ Branch(&runtime, ne, a0, Operand(a1));
2160 __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2161
2162 // (4) Is subject external? If yes, go to (7).
2163 __ bind(&check_underlying);
2164 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2165 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2166 STATIC_ASSERT(kSeqStringTag == 0);
2167 __ And(at, a0, Operand(kStringRepresentationMask));
2168 // The underlying external string is never a short external string.
2169 STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
2170 STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
2171 __ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7).
2172
2173 // (5) Sequential string. Load regexp code according to encoding.
2174 __ bind(&seq_string);
2175 // subject: sequential subject string (or look-alike, external string)
2176 // a3: original subject string
2177 // Load previous index and check range before a3 is overwritten. We have to
2178 // use a3 instead of subject here because subject might have been only made
2179 // to look like a sequential string when it actually is an external string.
2180 __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
2181 __ JumpIfNotSmi(a1, &runtime);
2182 __ lw(a3, FieldMemOperand(a3, String::kLengthOffset));
2183 __ Branch(&runtime, ls, a3, Operand(a1));
2184 __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
2185
2186 STATIC_ASSERT(kStringEncodingMask == 4);
2187 STATIC_ASSERT(kOneByteStringTag == 4);
2188 STATIC_ASSERT(kTwoByteStringTag == 0);
2189 __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for one-byte.
2190 __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
2191 __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
2192 __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
2193 __ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
2194
2195 // (E) Carry on. String handling is done.
2196 // t9: irregexp code
2197 // Check that the irregexp code has been generated for the actual string
2198 // encoding. If it has, the field contains a code object; otherwise it contains
2199 // a smi (code flushing support).
2200 __ JumpIfSmi(t9, &runtime);
2201
2202 // a1: previous index
2203 // a3: encoding of subject string (1 if one_byte, 0 if two_byte);
2204 // t9: code
2205 // subject: Subject string
2206 // regexp_data: RegExp data (FixedArray)
2207 // All checks done. Now push arguments for native regexp code.
2208 __ IncrementCounter(isolate()->counters()->regexp_entry_native(),
2209 1, a0, a2);
2210
2211 // Isolates: note we add an additional parameter here (isolate pointer).
2212 const int kRegExpExecuteArguments = 9;
2213 const int kParameterRegisters = 4;
2214 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
2215
2216 // Stack pointer now points to cell where return address is to be written.
2217 // Arguments are before that on the stack or in registers, meaning we
2218 // treat the return address as argument 5. Thus every argument after that
2219 // needs to be shifted back by 1. Since DirectCEntryStub will handle
2220 // allocating space for the c argument slots, we don't need to calculate
2221 // that into the argument positions on the stack. This is how the stack will
2222 // look (sp meaning the value of sp at this moment):
2223 // [sp + 5] - Argument 9
2224 // [sp + 4] - Argument 8
2225 // [sp + 3] - Argument 7
2226 // [sp + 2] - Argument 6
2227 // [sp + 1] - Argument 5
2228 // [sp + 0] - saved ra
2229
2230 // Argument 9: Pass current isolate address.
2231 // CFunctionArgumentOperand handles MIPS stack argument slots.
2232 __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
2233 __ sw(a0, MemOperand(sp, 5 * kPointerSize));
2234
2235 // Argument 8: Indicate that this is a direct call from JavaScript.
2236 __ li(a0, Operand(1));
2237 __ sw(a0, MemOperand(sp, 4 * kPointerSize));
2238
2239 // Argument 7: Start (high end) of backtracking stack memory area.
2240 __ li(a0, Operand(address_of_regexp_stack_memory_address));
2241 __ lw(a0, MemOperand(a0, 0));
2242 __ li(a2, Operand(address_of_regexp_stack_memory_size));
2243 __ lw(a2, MemOperand(a2, 0));
2244 __ addu(a0, a0, a2);
2245 __ sw(a0, MemOperand(sp, 3 * kPointerSize));
2246
2247 // Argument 6: Set the number of capture registers to zero to force global
2248 // regexps to behave as non-global. This does not affect non-global regexps.
2249 __ mov(a0, zero_reg);
2250 __ sw(a0, MemOperand(sp, 2 * kPointerSize));
2251
2252 // Argument 5: static offsets vector buffer.
2253 __ li(a0, Operand(
2254 ExternalReference::address_of_static_offsets_vector(isolate())));
2255 __ sw(a0, MemOperand(sp, 1 * kPointerSize));
2256
2257 // For arguments 4 and 3 get string length, calculate start of string data
2258 // calculate the shift of the index (0 for one-byte and 1 for two-byte).
2259 __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
2260 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
2261 // Load the length from the original subject string from the previous stack
2262 // frame. Therefore we have to use fp, which points exactly to two pointer
2263 // sizes below the previous sp. (Because creating a new stack frame pushes
2264 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
2265 __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2266 // If slice offset is not 0, load the length from the original sliced string.
2267 // Argument 4, a3: End of string data
2268 // Argument 3, a2: Start of string data
2269 // Prepare start and end index of the input.
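// t0 is the slice offset in characters (zero unless the subject was a sliced
// string); a3 is the character-size shift (0 for one-byte, 1 for two-byte), so
// sllv converts character counts into byte offsets.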
2270 __ sllv(t1, t0, a3);
2271 __ addu(t0, t2, t1);
2272 __ sllv(t1, a1, a3);
2273 __ addu(a2, t0, t1);
2274
2275 __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
2276 __ sra(t2, t2, kSmiTagSize);
2277 __ sllv(t1, t2, a3);
2278 __ addu(a3, t0, t1);
2279 // Argument 2 (a1): Previous index.
2280 // Already there
2281
2282 // Argument 1 (a0): Subject string.
2283 __ mov(a0, subject);
2284
2285 // Locate the code entry and call it.
2286 __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
2287 DirectCEntryStub stub(isolate());
2288 stub.GenerateCall(masm, t9);
2289
2290 __ LeaveExitFrame(false, no_reg, true);
2291
2292 // v0: result
2293 // subject: subject string (callee saved)
2294 // regexp_data: RegExp data (callee saved)
2295 // last_match_info_elements: Last match info elements (callee saved)
2296 // Check the result.
2297 Label success;
2298 __ Branch(&success, eq, v0, Operand(1));
2299 // We expect exactly one result since we force the called regexp to behave
2300 // as non-global.
2301 Label failure;
2302 __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
2303 // If not exception it can only be retry. Handle that in the runtime system.
2304 __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
2305 // Result must now be exception. If there is no pending exception already a
2306 // stack overflow (on the backtrack stack) was detected in RegExp code, but
2307 // the exception has not been created yet. Handle that in the runtime system.
2308 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
2309 __ li(a1, Operand(isolate()->factory()->the_hole_value()));
2310 __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2311 isolate())));
2312 __ lw(v0, MemOperand(a2, 0));
2313 __ Branch(&runtime, eq, v0, Operand(a1));
2314
2315 __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
2316
2317 // Check if the exception is a termination. If so, throw as uncatchable.
2318 __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
2319 Label termination_exception;
2320 __ Branch(&termination_exception, eq, v0, Operand(a0));
2321
2322 __ Throw(v0);
2323
2324 __ bind(&termination_exception);
2325 __ ThrowUncatchable(v0);
2326
2327 __ bind(&failure);
2328 // For failure and exception return null.
2329 __ li(v0, Operand(isolate()->factory()->null_value()));
2330 __ DropAndRet(4);
2331
2332 // Process the result from the native regexp code.
2333 __ bind(&success);
2334 __ lw(a1,
2335 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2336 // Calculate number of capture registers (number_of_captures + 1) * 2.
2337 // Multiplying by 2 comes for free since a1 is smi-tagged.
2338 STATIC_ASSERT(kSmiTag == 0);
2339 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2340 __ Addu(a1, a1, Operand(2)); // a1 was a smi.
2341
2342 __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
2343 __ JumpIfSmi(a0, &runtime);
2344 __ GetObjectType(a0, a2, a2);
2345 __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
2346 // Check that the JSArray is in fast case.
2347 __ lw(last_match_info_elements,
2348 FieldMemOperand(a0, JSArray::kElementsOffset));
2349 __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
2350 __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
2351 __ Branch(&runtime, ne, a0, Operand(at));
2352 // Check that the last match info has space for the capture registers and the
2353 // additional information.
2354 __ lw(a0,
2355 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
2356 __ Addu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
2357 __ sra(at, a0, kSmiTagSize);
2358 __ Branch(&runtime, gt, a2, Operand(at));
2359
2360 // a1: number of capture registers
2361 // subject: subject string
2362 // Store the capture count.
2363 __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
2364 __ sw(a2, FieldMemOperand(last_match_info_elements,
2365 RegExpImpl::kLastCaptureCountOffset));
2366 // Store last subject and last input.
2367 __ sw(subject,
2368 FieldMemOperand(last_match_info_elements,
2369 RegExpImpl::kLastSubjectOffset));
2370 __ mov(a2, subject);
2371 __ RecordWriteField(last_match_info_elements,
2372 RegExpImpl::kLastSubjectOffset,
2373 subject,
2374 t3,
2375 kRAHasNotBeenSaved,
2376 kDontSaveFPRegs);
2377 __ mov(subject, a2);
2378 __ sw(subject,
2379 FieldMemOperand(last_match_info_elements,
2380 RegExpImpl::kLastInputOffset));
2381 __ RecordWriteField(last_match_info_elements,
2382 RegExpImpl::kLastInputOffset,
2383 subject,
2384 t3,
2385 kRAHasNotBeenSaved,
2386 kDontSaveFPRegs);
2387
2388 // Get the static offsets vector filled by the native regexp code.
2389 ExternalReference address_of_static_offsets_vector =
2390 ExternalReference::address_of_static_offsets_vector(isolate());
2391 __ li(a2, Operand(address_of_static_offsets_vector));
2392
2393 // a1: number of capture registers
2394 // a2: offsets vector
2395 Label next_capture, done;
2396 // Capture register counter starts from number of capture registers and
2397 // counts down until wrapping after zero.
2398 __ Addu(a0,
2399 last_match_info_elements,
2400 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
2401 __ bind(&next_capture);
2402 __ Subu(a1, a1, Operand(1));
2403 __ Branch(&done, lt, a1, Operand(zero_reg));
2404 // Read the value from the static offsets vector buffer.
2405 __ lw(a3, MemOperand(a2, 0));
2406 __ addiu(a2, a2, kPointerSize);
2407 // Store the smi value in the last match info.
2408 __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
2409 __ sw(a3, MemOperand(a0, 0));
2410 __ Branch(&next_capture, USE_DELAY_SLOT);
2411 __ addiu(a0, a0, kPointerSize); // In branch delay slot.
2412
2413 __ bind(&done);
2414
2415 // Return last match info.
2416 __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
2417 __ DropAndRet(4);
2418
2419 // Do the runtime call to execute the regexp.
2420 __ bind(&runtime);
2421 __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2422
2423 // Deferred code for string handling.
2424 // (6) Not a long external string? If yes, go to (8).
2425 __ bind(&not_seq_nor_cons);
2426 // Go to (8).
2427 __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));
2428
2429 // (7) External string. Make it, offset-wise, look like a sequential string.
2430 __ bind(&external_string);
2431 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2432 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2433 if (FLAG_debug_code) {
2434 // Assert that we do not have a cons or slice (indirect strings) here.
2435 // Sequential strings have already been ruled out.
2436 __ And(at, a0, Operand(kIsIndirectStringMask));
2437 __ Assert(eq,
2438 kExternalStringExpectedButNotFound,
2439 at,
2440 Operand(zero_reg));
2441 }
2442 __ lw(subject,
2443 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
2444 // Move the pointer so that offset-wise, it looks like a sequential string.
2445 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
2446 __ Subu(subject,
2447 subject,
2448 SeqTwoByteString::kHeaderSize - kHeapObjectTag);
2449 __ jmp(&seq_string); // Go to (5).
2450
2451 // (8) Short external string or not a string? If yes, bail out to runtime.
2452 __ bind(&not_long_external);
2453 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
2454 __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
2455 __ Branch(&runtime, ne, at, Operand(zero_reg));
2456
2457 // (9) Sliced string. Replace subject with parent. Go to (4).
2458 // Load offset into t0 and replace subject string with parent.
2459 __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
2460 __ sra(t0, t0, kSmiTagSize);
2461 __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
2462 __ jmp(&check_underlying); // Go to (4).
2463 #endif // V8_INTERPRETED_REGEXP
2464 }
2465
2466
2467 static void GenerateRecordCallTarget(MacroAssembler* masm) {
2468 // Cache the called function in a feedback vector slot. Cache states
2469 // are uninitialized, monomorphic (indicated by a JSFunction), and
2470 // megamorphic.
2471 // a0 : number of arguments to the construct function
2472 // a1 : the function to call
2473 // a2 : Feedback vector
2474 // a3 : slot in feedback vector (Smi)
2475 Label initialize, done, miss, megamorphic, not_array_function;
2476
2477 DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
2478 masm->isolate()->heap()->megamorphic_symbol());
2479 DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
2480 masm->isolate()->heap()->uninitialized_symbol());
2481
2482 // Load the cache state into t0.
2483 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2484 __ Addu(t0, a2, Operand(t0));
2485 __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
2486
2487 // A monomorphic cache hit or an already megamorphic state: invoke the
2488 // function without changing the state.
2489 __ Branch(&done, eq, t0, Operand(a1));
2490
2491 if (!FLAG_pretenuring_call_new) {
2492 // If we came here, we need to see if we are the array function.
2493 // If we didn't have a matching function, and we didn't find the megamorphic
2494 // sentinel, then we have in the slot either some other function or an
2495 // AllocationSite. Do a map check on the object in a3.
2496 __ lw(t1, FieldMemOperand(t0, 0));
2497 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2498 __ Branch(&miss, ne, t1, Operand(at));
2499
2500 // Make sure the function is the Array() function
2501 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
2502 __ Branch(&megamorphic, ne, a1, Operand(t0));
2503 __ jmp(&done);
2504 }
2505
2506 __ bind(&miss);
2507
2508 // A monomorphic miss (i.e., here the cache is not uninitialized) goes
2509 // megamorphic.
2510 __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
2511 __ Branch(&initialize, eq, t0, Operand(at));
2512 // MegamorphicSentinel is an immortal immovable object (undefined) so no
2513 // write-barrier is needed.
2514 __ bind(&megamorphic);
2515 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2516 __ Addu(t0, a2, Operand(t0));
2517 __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
2518 __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
2519 __ jmp(&done);
2520
2521 // An uninitialized cache is patched with the function.
2522 __ bind(&initialize);
2523 if (!FLAG_pretenuring_call_new) {
2524 // Make sure the function is the Array() function.
2525 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
2526 __ Branch(&not_array_function, ne, a1, Operand(t0));
2527
2528 // The target function is the Array constructor.
2529 // Create an AllocationSite if we don't already have it, and store it in
2530 // the slot.
2531 {
2532 FrameScope scope(masm, StackFrame::INTERNAL);
2533 const RegList kSavedRegs =
2534 1 << 4 | // a0
2535 1 << 5 | // a1
2536 1 << 6 | // a2
2537 1 << 7; // a3
2538
2539 // Arguments register must be smi-tagged to call out.
2540 __ SmiTag(a0);
2541 __ MultiPush(kSavedRegs);
2542
2543 CreateAllocationSiteStub create_stub(masm->isolate());
2544 __ CallStub(&create_stub);
2545
2546 __ MultiPop(kSavedRegs);
2547 __ SmiUntag(a0);
2548 }
2549 __ Branch(&done);
2550
2551 __ bind(&not_array_function);
2552 }
2553
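// Cache the called function itself in the feedback vector slot (monomorphic
// state).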
2554 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2555 __ Addu(t0, a2, Operand(t0));
2556 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2557 __ sw(a1, MemOperand(t0, 0));
2558
2559 __ Push(t0, a2, a1);
2560 __ RecordWrite(a2, t0, a1, kRAHasNotBeenSaved, kDontSaveFPRegs,
2561 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
2562 __ Pop(t0, a2, a1);
2563
2564 __ bind(&done);
2565 }
2566
2567
2568 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
2569 __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2570 __ lw(t0, FieldMemOperand(a3, SharedFunctionInfo::kCompilerHintsOffset));
2571
2572 // Do not transform the receiver for strict mode functions.
2573 int32_t strict_mode_function_mask =
2574 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
2575 // Do not transform the receiver for native (compiler hints already in t0).
2576 int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
2577 __ And(at, t0, Operand(strict_mode_function_mask | native_mask));
2578 __ Branch(cont, ne, at, Operand(zero_reg));
2579 }
2580
2581
2582 static void EmitSlowCase(MacroAssembler* masm,
2583 int argc,
2584 Label* non_function) {
2585 // Check for function proxy.
2586 __ Branch(non_function, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
2587 __ push(a1); // put proxy as additional argument
2588 __ li(a0, Operand(argc + 1, RelocInfo::NONE32));
2589 __ mov(a2, zero_reg);
2590 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
2591 {
2592 Handle<Code> adaptor =
2593 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
2594 __ Jump(adaptor, RelocInfo::CODE_TARGET);
2595 }
2596
2597 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
2598 // of the original receiver from the call site).
2599 __ bind(non_function);
2600 __ sw(a1, MemOperand(sp, argc * kPointerSize));
2601 __ li(a0, Operand(argc)); // Set up the number of arguments.
2602 __ mov(a2, zero_reg);
2603 __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
2604 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2605 RelocInfo::CODE_TARGET);
2606 }
2607
2608
2609 static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
2610 // Wrap the receiver and patch it back onto the stack.
2611 { FrameScope frame_scope(masm, StackFrame::INTERNAL);
2612 __ Push(a1, a3);
2613 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
2614 __ pop(a1);
2615 }
2616 __ Branch(USE_DELAY_SLOT, cont);
2617 __ sw(v0, MemOperand(sp, argc * kPointerSize)); // In branch delay slot.
2618 }
2619
2620
2621 static void CallFunctionNoFeedback(MacroAssembler* masm,
2622 int argc, bool needs_checks,
2623 bool call_as_method) {
2624 // a1 : the function to call
2625 Label slow, non_function, wrap, cont;
2626
2627 if (needs_checks) {
2628 // Check that the function is really a JavaScript function.
2629 // a1: pushed function (to be verified)
2630 __ JumpIfSmi(a1, &non_function);
2631
2632 // Go to the slow case if we do not have a function.
2633 __ GetObjectType(a1, t0, t0);
2634 __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
2635 }
2636
2637 // Fast-case: Invoke the function now.
2638 // a1: pushed function
2639 ParameterCount actual(argc);
2640
2641 if (call_as_method) {
2642 if (needs_checks) {
2643 EmitContinueIfStrictOrNative(masm, &cont);
2644 }
2645
2646 // Compute the receiver in sloppy mode.
2647 __ lw(a3, MemOperand(sp, argc * kPointerSize));
2648
2649 if (needs_checks) {
2650 __ JumpIfSmi(a3, &wrap);
2651 __ GetObjectType(a3, t0, t0);
2652 __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
2653 } else {
2654 __ jmp(&wrap);
2655 }
2656
2657 __ bind(&cont);
2658 }
2659
2660 __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
2661
2662 if (needs_checks) {
2663 // Slow-case: Non-function called.
2664 __ bind(&slow);
2665 EmitSlowCase(masm, argc, &non_function);
2666 }
2667
2668 if (call_as_method) {
2669 __ bind(&wrap);
2670 // Wrap the receiver and patch it back onto the stack.
2671 EmitWrapCase(masm, argc, &cont);
2672 }
2673 }
2674
2675
2676 void CallFunctionStub::Generate(MacroAssembler* masm) {
2677 CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
2678 }
2679
2680
2681 void CallConstructStub::Generate(MacroAssembler* masm) {
2682 // a0 : number of arguments
2683 // a1 : the function to call
2684 // a2 : feedback vector
2685 // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
2686 Label slow, non_function_call;
2687
2688 // Check that the function is not a smi.
2689 __ JumpIfSmi(a1, &non_function_call);
2690 // Check that the function is a JSFunction.
2691 __ GetObjectType(a1, t0, t0);
2692 __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
2693
2694 if (RecordCallTarget()) {
2695 GenerateRecordCallTarget(masm);
2696
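// a3 holds the slot index as a smi; shifting by kPointerSizeLog2 - kSmiTagSize
// scales it directly to a byte offset into the feedback vector.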
2697 __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
2698 __ Addu(t1, a2, at);
2699 if (FLAG_pretenuring_call_new) {
2700 // Put the AllocationSite from the feedback vector into a2.
2701 // By adding kPointerSize we encode that we know the AllocationSite
2702 // entry is at the feedback vector slot given by a3 + 1.
2703 __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize + kPointerSize));
2704 } else {
2705 Label feedback_register_initialized;
2706 // Put the AllocationSite from the feedback vector into a2, or undefined.
2707 __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
2708 __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
2709 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2710 __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
2711 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
2712 __ bind(&feedback_register_initialized);
2713 }
2714
2715 __ AssertUndefinedOrAllocationSite(a2, t1);
2716 }
2717
2718 // Jump to the function-specific construct stub.
2719 Register jmp_reg = t0;
2720 __ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2721 __ lw(jmp_reg, FieldMemOperand(jmp_reg,
2722 SharedFunctionInfo::kConstructStubOffset));
2723 __ Addu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
2724 __ Jump(at);
2725
2726 // a0: number of arguments
2727 // a1: called object
2728 // t0: object type
2729 Label do_call;
2730 __ bind(&slow);
2731 __ Branch(&non_function_call, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
2732 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
2733 __ jmp(&do_call);
2734
2735 __ bind(&non_function_call);
2736 __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
2737 __ bind(&do_call);
2738 // Set expected number of arguments to zero (not changing a0).
2739 __ li(a2, Operand(0, RelocInfo::NONE32));
2740 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2741 RelocInfo::CODE_TARGET);
2742 }
2743
2744
2745 static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
2746 __ lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
2747 __ lw(vector, FieldMemOperand(vector,
2748 JSFunction::kSharedFunctionInfoOffset));
2749 __ lw(vector, FieldMemOperand(vector,
2750 SharedFunctionInfo::kFeedbackVectorOffset));
2751 }
2752
2753
2754 void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
2755 // a1 - function
2756 // a3 - slot id
2757 Label miss;
2758
2759 EmitLoadTypeFeedbackVector(masm, a2);
2760
2761 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
2762 __ Branch(&miss, ne, a1, Operand(at));
2763
2764 __ li(a0, Operand(arg_count()));
2765 __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
2766 __ Addu(at, a2, Operand(at));
2767 __ lw(t0, FieldMemOperand(at, FixedArray::kHeaderSize));
2768
2769 // Verify that t0 contains an AllocationSite
2770 __ lw(t1, FieldMemOperand(t0, HeapObject::kMapOffset));
2771 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2772 __ Branch(&miss, ne, t1, Operand(at));
2773
2774 __ mov(a2, t0);
2775 ArrayConstructorStub stub(masm->isolate(), arg_count());
2776 __ TailCallStub(&stub);
2777
2778 __ bind(&miss);
2779 GenerateMiss(masm);
2780
2781 // The slow case; we need this no matter what to complete a call after a miss.
2782 CallFunctionNoFeedback(masm,
2783 arg_count(),
2784 true,
2785 CallAsMethod());
2786
2787 // Unreachable.
2788 __ stop("Unexpected code address");
2789 }
2790
2791
2792 void CallICStub::Generate(MacroAssembler* masm) {
2793 // a1 - function
2794 // a3 - slot id (Smi)
2795 Label extra_checks_or_miss, slow_start;
2796 Label slow, non_function, wrap, cont;
2797 Label have_js_function;
2798 int argc = arg_count();
2799 ParameterCount actual(argc);
2800
2801 EmitLoadTypeFeedbackVector(masm, a2);
2802
2803 // The checks. First, does a1 match the recorded monomorphic target?
2804 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2805 __ Addu(t0, a2, Operand(t0));
2806 __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
2807 __ Branch(&extra_checks_or_miss, ne, a1, Operand(t0));
2808
2809 __ bind(&have_js_function);
2810 if (CallAsMethod()) {
2811 EmitContinueIfStrictOrNative(masm, &cont);
2812 // Compute the receiver in sloppy mode.
2813 __ lw(a3, MemOperand(sp, argc * kPointerSize));
2814
2815 __ JumpIfSmi(a3, &wrap);
2816 __ GetObjectType(a3, t0, t0);
2817 __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
2818
2819 __ bind(&cont);
2820 }
2821
2822 __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
2823
2824 __ bind(&slow);
2825 EmitSlowCase(masm, argc, &non_function);
2826
2827 if (CallAsMethod()) {
2828 __ bind(&wrap);
2829 EmitWrapCase(masm, argc, &cont);
2830 }
2831
2832 __ bind(&extra_checks_or_miss);
2833 Label miss;
2834
2835 __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
2836 __ Branch(&slow_start, eq, t0, Operand(at));
2837 __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
2838 __ Branch(&miss, eq, t0, Operand(at));
2839
2840 if (!FLAG_trace_ic) {
2841 // We are going megamorphic. If the feedback is a JSFunction, it is fine
2842 // to handle it here. More complex cases are dealt with in the runtime.
2843 __ AssertNotSmi(t0);
2844 __ GetObjectType(t0, t1, t1);
2845 __ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE));
2846 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2847 __ Addu(t0, a2, Operand(t0));
2848 __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
2849 __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
2850 __ Branch(&slow_start);
2851 }
2852
2853 // We are here because tracing is on or we are going monomorphic.
2854 __ bind(&miss);
2855 GenerateMiss(masm);
2856
2857 // The slow case.
2858 __ bind(&slow_start);
2859 // Check that the function is really a JavaScript function.
2860 // a1: pushed function (to be verified)
2861 __ JumpIfSmi(a1, &non_function);
2862
2863 // Go to the slow case if we do not have a function.
2864 __ GetObjectType(a1, t0, t0);
2865 __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
2866 __ Branch(&have_js_function);
2867 }
2868
2869
2870 void CallICStub::GenerateMiss(MacroAssembler* masm) {
2871 // Get the receiver of the function from the stack; 1 ~ return address.
2872 __ lw(t0, MemOperand(sp, (arg_count() + 1) * kPointerSize));
2873
2874 {
2875 FrameScope scope(masm, StackFrame::INTERNAL);
2876
2877 // Push the receiver and the function and feedback info.
2878 __ Push(t0, a1, a2, a3);
2879
2880 // Call the entry.
2881 IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
2882 : IC::kCallIC_Customization_Miss;
2883
2884 ExternalReference miss = ExternalReference(IC_Utility(id),
2885 masm->isolate());
2886 __ CallExternalReference(miss, 4);
2887
2888 // Move result to a1 and exit the internal frame.
2889 __ mov(a1, v0);
2890 }
2891 }
2892
2893
2894 // StringCharCodeAtGenerator.
2895 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
2896 DCHECK(!t0.is(index_));
2897 DCHECK(!t0.is(result_));
2898 DCHECK(!t0.is(object_));
2899
2900 // If the receiver is a smi trigger the non-string case.
2901 __ JumpIfSmi(object_, receiver_not_string_);
2902
2903 // Fetch the instance type of the receiver into result register.
2904 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2905 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2906 // If the receiver is not a string trigger the non-string case.
2907 __ And(t0, result_, Operand(kIsNotStringMask));
2908 __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
2909
2910 // If the index is non-smi trigger the non-smi case.
2911 __ JumpIfNotSmi(index_, &index_not_smi_);
2912
2913 __ bind(&got_smi_index_);
2914
2915 // Check for index out of range.
2916 __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
2917 __ Branch(index_out_of_range_, ls, t0, Operand(index_));
2918
2919 __ sra(index_, index_, kSmiTagSize);
2920
2921 StringCharLoadGenerator::Generate(masm,
2922 object_,
2923 index_,
2924 result_,
2925 &call_runtime_);
2926
2927 __ sll(result_, result_, kSmiTagSize);
2928 __ bind(&exit_);
2929 }
2930
2931
2932 void StringCharCodeAtGenerator::GenerateSlow(
2933 MacroAssembler* masm,
2934 const RuntimeCallHelper& call_helper) {
2935 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
2936
2937 // Index is not a smi.
2938 __ bind(&index_not_smi_);
2939 // If index is a heap number, try converting it to an integer.
2940 __ CheckMap(index_,
2941 result_,
2942 Heap::kHeapNumberMapRootIndex,
2943 index_not_number_,
2944 DONT_DO_SMI_CHECK);
2945 call_helper.BeforeCall(masm);
2946 // Consumed by runtime conversion function:
2947 __ Push(object_, index_);
2948 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
2949 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
2950 } else {
2951 DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
2952 // NumberToSmi discards numbers that are not exact integers.
2953 __ CallRuntime(Runtime::kNumberToSmi, 1);
2954 }
2955
2956 // Save the conversion result before the pop instructions below
2957 // have a chance to overwrite it.
2958
2959 __ Move(index_, v0);
2960 __ pop(object_);
2961 // Reload the instance type.
2962 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2963 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2964 call_helper.AfterCall(masm);
2965 // If index is still not a smi, it must be out of range.
2966 __ JumpIfNotSmi(index_, index_out_of_range_);
2967 // Otherwise, return to the fast path.
2968 __ Branch(&got_smi_index_);
2969
2970 // Call runtime. We get here when the receiver is a string and the
2971 // index is a number, but the code of getting the actual character
2972 // is too complex (e.g., when the string needs to be flattened).
2973 __ bind(&call_runtime_);
2974 call_helper.BeforeCall(masm);
2975 __ sll(index_, index_, kSmiTagSize);
2976 __ Push(object_, index_);
2977 __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
2978
2979 __ Move(result_, v0);
2980
2981 call_helper.AfterCall(masm);
2982 __ jmp(&exit_);
2983
2984 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
2985 }
2986
2987
2988 // -------------------------------------------------------------------------
2989 // StringCharFromCodeGenerator
2990
2991 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
2992 // Fast case of Heap::LookupSingleCharacterStringFromCode.
2993
2994 DCHECK(!t0.is(result_));
2995 DCHECK(!t0.is(code_));
2996
2997 STATIC_ASSERT(kSmiTag == 0);
2998 STATIC_ASSERT(kSmiShiftSize == 0);
2999 DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
3000 __ And(t0,
3001 code_,
3002 Operand(kSmiTagMask |
3003 ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
3004 __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
3005
3006 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3007 // At this point code register contains smi tagged one-byte char code.
3008 STATIC_ASSERT(kSmiTag == 0);
3009 __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
3010 __ Addu(result_, result_, t0);
3011 __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
3012 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3013 __ Branch(&slow_case_, eq, result_, Operand(t0));
3014 __ bind(&exit_);
3015 }
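
// The single And/Branch pair above checks "is a smi" and "fits in one byte"
// at once. Below is an illustrative sketch (not used by the stubs) of the
// same predicate on a raw 32-bit tagged value, assuming a 1-bit smi tag and
// String::kMaxOneByteCharCode == 0xFF.
static inline bool IsOneByteCharCodeSmiSketch(uint32_t tagged_code) {
  const uint32_t kLocalSmiTagMask = 1;             // assumption: tag is bit 0
  const int kLocalSmiTagSize = 1;                  // assumption
  const uint32_t kLocalMaxOneByteCharCode = 0xFF;  // assumption
  uint32_t mask =
      kLocalSmiTagMask | (~kLocalMaxOneByteCharCode << kLocalSmiTagSize);
  // Zero iff the tag bit is clear and no value bit above bit 7 is set.
  return (tagged_code & mask) == 0;
}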
3016
3017
3018 void StringCharFromCodeGenerator::GenerateSlow(
3019 MacroAssembler* masm,
3020 const RuntimeCallHelper& call_helper) {
3021 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3022
3023 __ bind(&slow_case_);
3024 call_helper.BeforeCall(masm);
3025 __ push(code_);
3026 __ CallRuntime(Runtime::kCharFromCode, 1);
3027 __ Move(result_, v0);
3028
3029 call_helper.AfterCall(masm);
3030 __ Branch(&exit_);
3031
3032 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
3033 }
3034
3035
3036 enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
3037
3038
3039 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
3040 Register dest,
3041 Register src,
3042 Register count,
3043 Register scratch,
3044 String::Encoding encoding) {
3045 if (FLAG_debug_code) {
3046 // Check that destination is word aligned.
3047 __ And(scratch, dest, Operand(kPointerAlignmentMask));
3048 __ Check(eq,
3049 kDestinationOfCopyNotAligned,
3050 scratch,
3051 Operand(zero_reg));
3052 }
3053
3054 // Assumes word reads and writes are little endian.
3055 // Nothing to do for zero characters.
3056 Label done;
3057
3058 if (encoding == String::TWO_BYTE_ENCODING) {
3059 __ Addu(count, count, count);
3060 }
3061
3062 Register limit = count; // Read until dest equals this.
3063 __ Addu(limit, dest, Operand(count));
3064
3065 Label loop_entry, loop;
3066 // Copy bytes from src to dest until dest hits limit.
3067 __ Branch(&loop_entry);
3068 __ bind(&loop);
3069 __ lbu(scratch, MemOperand(src));
3070 __ Addu(src, src, Operand(1));
3071 __ sb(scratch, MemOperand(dest));
3072 __ Addu(dest, dest, Operand(1));
3073 __ bind(&loop_entry);
3074 __ Branch(&loop, lt, dest, Operand(limit));
3075
3076 __ bind(&done);
3077 }
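
// GenerateCopyCharacters above copies byte-by-byte until the destination
// pointer reaches its limit; for two-byte strings the character count is
// simply doubled first. A minimal C++ sketch of the same loop (illustrative
// only, not used by the stubs):
static inline void CopyCharactersSketch(uint8_t* dest, const uint8_t* src,
                                        uint32_t count, bool two_byte) {
  if (two_byte) count += count;  // bytes to copy = 2 * characters
  uint8_t* limit = dest + count;
  while (dest < limit) {
    *dest++ = *src++;
  }
}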
3078
3079
3080 void SubStringStub::Generate(MacroAssembler* masm) {
3081 Label runtime;
3082 // Stack frame on entry.
3083 // ra: return address
3084 // sp[0]: to
3085 // sp[4]: from
3086 // sp[8]: string
3087
3088 // This stub is called from the native-call %_SubString(...), so
3089 // nothing can be assumed about the arguments. It is tested that:
3090 // "string" is a sequential string,
3091 // both "from" and "to" are smis, and
3092 // 0 <= from <= to <= string.length.
3093 // If any of these assumptions fail, we call the runtime system.
3094
3095 const int kToOffset = 0 * kPointerSize;
3096 const int kFromOffset = 1 * kPointerSize;
3097 const int kStringOffset = 2 * kPointerSize;
3098
3099 __ lw(a2, MemOperand(sp, kToOffset));
3100 __ lw(a3, MemOperand(sp, kFromOffset));
3101 STATIC_ASSERT(kFromOffset == kToOffset + 4);
3102 STATIC_ASSERT(kSmiTag == 0);
3103 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3104
3105 // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
3106 // safe in this case.
3107 __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
3108 __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
3109 // Both a2 and a3 are untagged integers.
3110
3111 __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0.
3112
3113 __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to.
3114 __ Subu(a2, a2, a3);
3115
3116 // Make sure first argument is a string.
3117 __ lw(v0, MemOperand(sp, kStringOffset));
3118 __ JumpIfSmi(v0, &runtime);
3119 __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
3120 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3121 __ And(t0, a1, Operand(kIsNotStringMask));
3122
3123 __ Branch(&runtime, ne, t0, Operand(zero_reg));
3124
3125 Label single_char;
3126 __ Branch(&single_char, eq, a2, Operand(1));
3127
3128 // Short-cut for the case of trivial substring.
3129 Label return_v0;
3130 // v0: original string
3131 // a2: result string length
3132 __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
3133 __ sra(t0, t0, 1);
3134 // Return original string.
3135 __ Branch(&return_v0, eq, a2, Operand(t0));
3136 // Longer than original string's length or negative: unsafe arguments.
3137 __ Branch(&runtime, hi, a2, Operand(t0));
3138 // Shorter than original string's length: an actual substring.
3139
3140 // Deal with different string types: update the index if necessary
3141 // and put the underlying string into t1.
3142 // v0: original string
3143 // a1: instance type
3144 // a2: length
3145 // a3: from index (untagged)
3146 Label underlying_unpacked, sliced_string, seq_or_external_string;
3147 // If the string is not indirect, it can only be sequential or external.
3148 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
3149 STATIC_ASSERT(kIsIndirectStringMask != 0);
3150 __ And(t0, a1, Operand(kIsIndirectStringMask));
3151 __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
3152 // t0 is used as a scratch register and can be overwritten in either case.
3153 __ And(t0, a1, Operand(kSlicedNotConsMask));
3154 __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
3155 // Cons string. Check whether it is flat, then fetch first part.
3156 __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
3157 __ LoadRoot(t0, Heap::kempty_stringRootIndex);
3158 __ Branch(&runtime, ne, t1, Operand(t0));
3159 __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
3160 // Update instance type.
3161 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
3162 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3163 __ jmp(&underlying_unpacked);
3164
3165 __ bind(&sliced_string);
3166 // Sliced string. Fetch parent and correct start index by offset.
3167 __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
3168 __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
3169 __ sra(t0, t0, 1); // Add offset to index.
3170 __ Addu(a3, a3, t0);
3171 // Update instance type.
3172 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
3173 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3174 __ jmp(&underlying_unpacked);
3175
3176 __ bind(&seq_or_external_string);
3177 // Sequential or external string. Just move string to the expected register.
3178 __ mov(t1, v0);
3179
3180 __ bind(&underlying_unpacked);
3181
3182 if (FLAG_string_slices) {
3183 Label copy_routine;
3184 // t1: underlying subject string
3185 // a1: instance type of underlying subject string
3186 // a2: length
3187 // a3: adjusted start index (untagged)
3188 // Short slice. Copy instead of slicing.
3189     __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
3190 // Allocate new sliced string. At this point we do not reload the instance
3191 // type including the string encoding because we simply rely on the info
3192 // provided by the original string. It does not matter if the original
3193 // string's encoding is wrong because we always have to recheck encoding of
3194     // the newly created string's parent anyway due to externalized strings.
3195 Label two_byte_slice, set_slice_header;
3196 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
3197 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
3198 __ And(t0, a1, Operand(kStringEncodingMask));
3199 __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
3200 __ AllocateOneByteSlicedString(v0, a2, t2, t3, &runtime);
3201 __ jmp(&set_slice_header);
3202 __ bind(&two_byte_slice);
3203 __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
3204 __ bind(&set_slice_header);
3205 __ sll(a3, a3, 1);
3206 __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
3207 __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
3208 __ jmp(&return_v0);
3209
3210     __ bind(&copy_routine);
3211 }
3212
3213 // t1: underlying subject string
3214 // a1: instance type of underlying subject string
3215 // a2: length
3216 // a3: adjusted start index (untagged)
3217 Label two_byte_sequential, sequential_string, allocate_result;
3218 STATIC_ASSERT(kExternalStringTag != 0);
3219 STATIC_ASSERT(kSeqStringTag == 0);
3220 __ And(t0, a1, Operand(kExternalStringTag));
3221 __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
3222
3223 // Handle external string.
3224 // Rule out short external strings.
3225 STATIC_ASSERT(kShortExternalStringTag != 0);
3226 __ And(t0, a1, Operand(kShortExternalStringTag));
3227 __ Branch(&runtime, ne, t0, Operand(zero_reg));
3228 __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
3229 // t1 already points to the first character of underlying string.
3230 __ jmp(&allocate_result);
3231
3232 __ bind(&sequential_string);
3233 // Locate first character of underlying subject string.
3234 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3235 __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3236
3237 __ bind(&allocate_result);
3238   // Sequential string. Allocate the result.
3239 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
3240 __ And(t0, a1, Operand(kStringEncodingMask));
3241 __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
3242
3243   // Allocate and copy the resulting one-byte string.
3244 __ AllocateOneByteString(v0, a2, t0, t2, t3, &runtime);
3245
3246 // Locate first character of substring to copy.
3247 __ Addu(t1, t1, a3);
3248
3249 // Locate first character of result.
3250 __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3251
3252 // v0: result string
3253 // a1: first character of result string
3254 // a2: result string length
3255 // t1: first character of substring to copy
3256 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3257 StringHelper::GenerateCopyCharacters(
3258 masm, a1, t1, a2, a3, String::ONE_BYTE_ENCODING);
3259 __ jmp(&return_v0);
3260
3261 // Allocate and copy the resulting two-byte string.
3262 __ bind(&two_byte_sequential);
3263 __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
3264
3265 // Locate first character of substring to copy.
3266 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
3267 __ sll(t0, a3, 1);
3268 __ Addu(t1, t1, t0);
3269 // Locate first character of result.
3270 __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3271
3272 // v0: result string.
3273 // a1: first character of result.
3274 // a2: result length.
3275 // t1: first character of substring to copy.
3276 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3277 StringHelper::GenerateCopyCharacters(
3278 masm, a1, t1, a2, a3, String::TWO_BYTE_ENCODING);
3279
3280 __ bind(&return_v0);
3281 Counters* counters = isolate()->counters();
3282 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
3283 __ DropAndRet(3);
3284
3285 // Just jump to runtime to create the sub string.
3286 __ bind(&runtime);
3287 __ TailCallRuntime(Runtime::kSubString, 3, 1);
3288
3289 __ bind(&single_char);
3290 // v0: original string
3291 // a1: instance type
3292 // a2: length
3293 // a3: from index (untagged)
3294 __ SmiTag(a3, a3);
3295 StringCharAtGenerator generator(
3296 v0, a3, a2, v0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
3297 generator.GenerateFast(masm);
3298 __ DropAndRet(3);
3299 generator.SkipSlow(masm, &runtime);
3300 }
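
// SubStringStub::Generate above picks one of four strategies. The helper
// below is an illustrative decision sketch only; the names are local to this
// example (min_slice_length stands in for SlicedString::kMinLength) and it is
// not used by the stubs.
enum SubStringStrategySketch {
  kReturnOriginalSketch,  // result length equals the original length
  kSingleCharSketch,      // result length is exactly 1
  kMakeSliceSketch,       // long enough: allocate a SlicedString
  kCopyCharsSketch        // otherwise: allocate and copy the characters
};
static inline SubStringStrategySketch ChooseSubStringStrategySketch(
    uint32_t result_length, uint32_t original_length, bool string_slices,
    uint32_t min_slice_length) {
  if (result_length == 1) return kSingleCharSketch;
  if (result_length == original_length) return kReturnOriginalSketch;
  if (string_slices && result_length >= min_slice_length) {
    return kMakeSliceSketch;
  }
  return kCopyCharsSketch;
}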
3301
3302
3303 void StringHelper::GenerateFlatOneByteStringEquals(
3304 MacroAssembler* masm, Register left, Register right, Register scratch1,
3305 Register scratch2, Register scratch3) {
3306 Register length = scratch1;
3307
3308 // Compare lengths.
3309 Label strings_not_equal, check_zero_length;
3310 __ lw(length, FieldMemOperand(left, String::kLengthOffset));
3311 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
3312 __ Branch(&check_zero_length, eq, length, Operand(scratch2));
3313 __ bind(&strings_not_equal);
3314 DCHECK(is_int16(NOT_EQUAL));
3315 __ Ret(USE_DELAY_SLOT);
3316 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
3317
3318 // Check if the length is zero.
3319 Label compare_chars;
3320 __ bind(&check_zero_length);
3321 STATIC_ASSERT(kSmiTag == 0);
3322 __ Branch(&compare_chars, ne, length, Operand(zero_reg));
3323 DCHECK(is_int16(EQUAL));
3324 __ Ret(USE_DELAY_SLOT);
3325 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3326
3327 // Compare characters.
3328 __ bind(&compare_chars);
3329
3330 GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
3331 v0, &strings_not_equal);
3332
3333 // Characters are equal.
3334 __ Ret(USE_DELAY_SLOT);
3335 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3336 }
3337
3338
3339 void StringHelper::GenerateCompareFlatOneByteStrings(
3340 MacroAssembler* masm, Register left, Register right, Register scratch1,
3341 Register scratch2, Register scratch3, Register scratch4) {
3342 Label result_not_equal, compare_lengths;
3343 // Find minimum length and length difference.
3344 __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
3345 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
3346 __ Subu(scratch3, scratch1, Operand(scratch2));
3347 Register length_delta = scratch3;
3348 __ slt(scratch4, scratch2, scratch1);
3349 __ Movn(scratch1, scratch2, scratch4);
3350 Register min_length = scratch1;
3351 STATIC_ASSERT(kSmiTag == 0);
3352 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
3353
3354 // Compare loop.
3355 GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
3356 scratch4, v0, &result_not_equal);
3357
3358 // Compare lengths - strings up to min-length are equal.
3359 __ bind(&compare_lengths);
3360 DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
3361 // Use length_delta as result if it's zero.
3362 __ mov(scratch2, length_delta);
3363 __ mov(scratch4, zero_reg);
3364 __ mov(v0, zero_reg);
3365
3366 __ bind(&result_not_equal);
3367 // Conditionally update the result based either on length_delta or
3368   // the last comparison performed in the loop above.
3369 Label ret;
3370 __ Branch(&ret, eq, scratch2, Operand(scratch4));
3371 __ li(v0, Operand(Smi::FromInt(GREATER)));
3372 __ Branch(&ret, gt, scratch2, Operand(scratch4));
3373 __ li(v0, Operand(Smi::FromInt(LESS)));
3374 __ bind(&ret);
3375 __ Ret();
3376 }
3377
3378
3379 void StringHelper::GenerateOneByteCharsCompareLoop(
3380 MacroAssembler* masm, Register left, Register right, Register length,
3381 Register scratch1, Register scratch2, Register scratch3,
3382 Label* chars_not_equal) {
3383 // Change index to run from -length to -1 by adding length to string
3384 // start. This means that loop ends when index reaches zero, which
3385 // doesn't need an additional compare.
3386 __ SmiUntag(length);
3387 __ Addu(scratch1, length,
3388 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3389 __ Addu(left, left, Operand(scratch1));
3390 __ Addu(right, right, Operand(scratch1));
3391 __ Subu(length, zero_reg, length);
3392 Register index = length; // index = -length;
3393
3394
3395 // Compare loop.
3396 Label loop;
3397 __ bind(&loop);
3398 __ Addu(scratch3, left, index);
3399 __ lbu(scratch1, MemOperand(scratch3));
3400 __ Addu(scratch3, right, index);
3401 __ lbu(scratch2, MemOperand(scratch3));
3402 __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
3403 __ Addu(index, index, 1);
3404 __ Branch(&loop, ne, index, Operand(zero_reg));
3405 }
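
// The loop above runs the index from -length up to 0 after bumping both
// string pointers past their last character, so reaching zero ends the loop
// without a separate bounds compare. Equivalent C++ sketch (illustrative
// only, not used by the stubs):
static inline bool OneByteCharsEqualSketch(const uint8_t* left,
                                           const uint8_t* right, int length) {
  left += length;
  right += length;
  for (int index = -length; index != 0; index++) {
    if (left[index] != right[index]) return false;
  }
  return true;
}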
3406
3407
3408 void StringCompareStub::Generate(MacroAssembler* masm) {
3409 Label runtime;
3410
3411 Counters* counters = isolate()->counters();
3412
3413 // Stack frame on entry.
3414 // sp[0]: right string
3415 // sp[4]: left string
3416 __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
3417 __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
3418
3419 Label not_same;
3420   __ Branch(&not_same, ne, a0, Operand(a1));
3421 STATIC_ASSERT(EQUAL == 0);
3422 STATIC_ASSERT(kSmiTag == 0);
3423 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3424 __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
3425 __ DropAndRet(2);
3426
3427   __ bind(&not_same);
3428
3429 // Check that both objects are sequential one-byte strings.
3430 __ JumpIfNotBothSequentialOneByteStrings(a1, a0, a2, a3, &runtime);
3431
3432   // Compare flat one-byte strings natively. Remove arguments from stack first.
3433 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
3434 __ Addu(sp, sp, Operand(2 * kPointerSize));
3435 StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, t0, t1);
3436
3437 __ bind(&runtime);
3438 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3439 }
3440
3441
3442 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
3443 // ----------- S t a t e -------------
3444 // -- a1 : left
3445 // -- a0 : right
3446 // -- ra : return address
3447 // -----------------------------------
3448
3449 // Load a2 with the allocation site. We stick an undefined dummy value here
3450 // and replace it with the real allocation site later when we instantiate this
3451 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
3452 __ li(a2, handle(isolate()->heap()->undefined_value()));
3453
3454 // Make sure that we actually patched the allocation site.
3455 if (FLAG_debug_code) {
3456 __ And(at, a2, Operand(kSmiTagMask));
3457 __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
3458 __ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
3459 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
3460 __ Assert(eq, kExpectedAllocationSite, t0, Operand(at));
3461 }
3462
3463 // Tail call into the stub that handles binary operations with allocation
3464 // sites.
3465 BinaryOpWithAllocationSiteStub stub(isolate(), state());
3466 __ TailCallStub(&stub);
3467 }
3468
3469
3470 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
3471 DCHECK(state() == CompareICState::SMI);
3472 Label miss;
3473 __ Or(a2, a1, a0);
3474 __ JumpIfNotSmi(a2, &miss);
3475
3476 if (GetCondition() == eq) {
3477 // For equality we do not care about the sign of the result.
3478 __ Ret(USE_DELAY_SLOT);
3479 __ Subu(v0, a0, a1);
3480 } else {
3481 // Untag before subtracting to avoid handling overflow.
3482 __ SmiUntag(a1);
3483 __ SmiUntag(a0);
3484 __ Ret(USE_DELAY_SLOT);
3485 __ Subu(v0, a1, a0);
3486 }
3487
3488 __ bind(&miss);
3489 GenerateMiss(masm);
3490 }
3491
3492
3493 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
3494 DCHECK(state() == CompareICState::NUMBER);
3495
3496 Label generic_stub;
3497 Label unordered, maybe_undefined1, maybe_undefined2;
3498 Label miss;
3499
3500 if (left() == CompareICState::SMI) {
3501 __ JumpIfNotSmi(a1, &miss);
3502 }
3503 if (right() == CompareICState::SMI) {
3504 __ JumpIfNotSmi(a0, &miss);
3505 }
3506
3507 // Inlining the double comparison and falling back to the general compare
3508 // stub if NaN is involved.
3509 // Load left and right operand.
3510 Label done, left, left_smi, right_smi;
3511 __ JumpIfSmi(a0, &right_smi);
3512 __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
3513 DONT_DO_SMI_CHECK);
3514 __ Subu(a2, a0, Operand(kHeapObjectTag));
3515 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
3516 __ Branch(&left);
3517 __ bind(&right_smi);
3518 __ SmiUntag(a2, a0); // Can't clobber a0 yet.
3519 FPURegister single_scratch = f6;
3520 __ mtc1(a2, single_scratch);
3521 __ cvt_d_w(f2, single_scratch);
3522
3523 __ bind(&left);
3524 __ JumpIfSmi(a1, &left_smi);
3525 __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
3526 DONT_DO_SMI_CHECK);
3527 __ Subu(a2, a1, Operand(kHeapObjectTag));
3528 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
3529 __ Branch(&done);
3530 __ bind(&left_smi);
3531 __ SmiUntag(a2, a1); // Can't clobber a1 yet.
3532 single_scratch = f8;
3533 __ mtc1(a2, single_scratch);
3534 __ cvt_d_w(f0, single_scratch);
3535
3536 __ bind(&done);
3537
3538 // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
3539 Label fpu_eq, fpu_lt;
3540 // Test if equal, and also handle the unordered/NaN case.
3541 __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
3542
3543 // Test if less (unordered case is already handled).
3544 __ BranchF(&fpu_lt, NULL, lt, f0, f2);
3545
3546 // Otherwise it's greater, so just fall thru, and return.
3547 DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
3548 __ Ret(USE_DELAY_SLOT);
3549 __ li(v0, Operand(GREATER));
3550
3551 __ bind(&fpu_eq);
3552 __ Ret(USE_DELAY_SLOT);
3553 __ li(v0, Operand(EQUAL));
3554
3555 __ bind(&fpu_lt);
3556 __ Ret(USE_DELAY_SLOT);
3557 __ li(v0, Operand(LESS));
3558
3559 __ bind(&unordered);
3560 __ bind(&generic_stub);
3561 CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
3562 CompareICState::GENERIC, CompareICState::GENERIC);
3563 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
3564
3565 __ bind(&maybe_undefined1);
3566 if (Token::IsOrderedRelationalCompareOp(op())) {
3567 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3568 __ Branch(&miss, ne, a0, Operand(at));
3569 __ JumpIfSmi(a1, &unordered);
3570 __ GetObjectType(a1, a2, a2);
3571 __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
3572 __ jmp(&unordered);
3573 }
3574
3575 __ bind(&maybe_undefined2);
3576 if (Token::IsOrderedRelationalCompareOp(op())) {
3577 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3578 __ Branch(&unordered, eq, a1, Operand(at));
3579 }
3580
3581 __ bind(&miss);
3582 GenerateMiss(masm);
3583 }
3584
3585
3586 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
3587 DCHECK(state() == CompareICState::INTERNALIZED_STRING);
3588 Label miss;
3589
3590 // Registers containing left and right operands respectively.
3591 Register left = a1;
3592 Register right = a0;
3593 Register tmp1 = a2;
3594 Register tmp2 = a3;
3595
3596 // Check that both operands are heap objects.
3597 __ JumpIfEitherSmi(left, right, &miss);
3598
3599 // Check that both operands are internalized strings.
3600 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3601 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3602 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3603 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3604 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3605 __ Or(tmp1, tmp1, Operand(tmp2));
3606 __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3607 __ Branch(&miss, ne, at, Operand(zero_reg));
3608
3609 // Make sure a0 is non-zero. At this point input operands are
3610 // guaranteed to be non-zero.
3611 DCHECK(right.is(a0));
3612 STATIC_ASSERT(EQUAL == 0);
3613 STATIC_ASSERT(kSmiTag == 0);
3614 __ mov(v0, right);
3615 // Internalized strings are compared by identity.
3616 __ Ret(ne, left, Operand(right));
3617 DCHECK(is_int16(EQUAL));
3618 __ Ret(USE_DELAY_SLOT);
3619 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3620
3621 __ bind(&miss);
3622 GenerateMiss(masm);
3623 }
3624
3625
3626 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
3627 DCHECK(state() == CompareICState::UNIQUE_NAME);
3628 DCHECK(GetCondition() == eq);
3629 Label miss;
3630
3631 // Registers containing left and right operands respectively.
3632 Register left = a1;
3633 Register right = a0;
3634 Register tmp1 = a2;
3635 Register tmp2 = a3;
3636
3637 // Check that both operands are heap objects.
3638 __ JumpIfEitherSmi(left, right, &miss);
3639
3640 // Check that both operands are unique names. This leaves the instance
3641 // types loaded in tmp1 and tmp2.
3642 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3643 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3644 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3645 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3646
3647 __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
3648 __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
3649
3650 // Use a0 as result
3651 __ mov(v0, a0);
3652
3653 // Unique names are compared by identity.
3654 Label done;
3655 __ Branch(&done, ne, left, Operand(right));
3656 // Make sure a0 is non-zero. At this point input operands are
3657 // guaranteed to be non-zero.
3658 DCHECK(right.is(a0));
3659 STATIC_ASSERT(EQUAL == 0);
3660 STATIC_ASSERT(kSmiTag == 0);
3661 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3662 __ bind(&done);
3663 __ Ret();
3664
3665 __ bind(&miss);
3666 GenerateMiss(masm);
3667 }
3668
3669
3670 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
3671 DCHECK(state() == CompareICState::STRING);
3672 Label miss;
3673
3674 bool equality = Token::IsEqualityOp(op());
3675
3676 // Registers containing left and right operands respectively.
3677 Register left = a1;
3678 Register right = a0;
3679 Register tmp1 = a2;
3680 Register tmp2 = a3;
3681 Register tmp3 = t0;
3682 Register tmp4 = t1;
3683 Register tmp5 = t2;
3684
3685 // Check that both operands are heap objects.
3686 __ JumpIfEitherSmi(left, right, &miss);
3687
3688 // Check that both operands are strings. This leaves the instance
3689 // types loaded in tmp1 and tmp2.
3690 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3691 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3692 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3693 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3694 STATIC_ASSERT(kNotStringTag != 0);
3695 __ Or(tmp3, tmp1, tmp2);
3696 __ And(tmp5, tmp3, Operand(kIsNotStringMask));
3697 __ Branch(&miss, ne, tmp5, Operand(zero_reg));
3698
3699 // Fast check for identical strings.
3700 Label left_ne_right;
3701 STATIC_ASSERT(EQUAL == 0);
3702 STATIC_ASSERT(kSmiTag == 0);
3703 __ Branch(&left_ne_right, ne, left, Operand(right));
3704 __ Ret(USE_DELAY_SLOT);
3705 __ mov(v0, zero_reg); // In the delay slot.
3706 __ bind(&left_ne_right);
3707
3708 // Handle not identical strings.
3709
3710 // Check that both strings are internalized strings. If they are, we're done
3711 // because we already know they are not identical. We know they are both
3712 // strings.
3713 if (equality) {
3714 DCHECK(GetCondition() == eq);
3715 STATIC_ASSERT(kInternalizedTag == 0);
3716 __ Or(tmp3, tmp1, Operand(tmp2));
3717 __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
3718 Label is_symbol;
3719 __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
3720 // Make sure a0 is non-zero. At this point input operands are
3721 // guaranteed to be non-zero.
3722 DCHECK(right.is(a0));
3723 __ Ret(USE_DELAY_SLOT);
3724 __ mov(v0, a0); // In the delay slot.
3725 __ bind(&is_symbol);
3726 }
3727
3728 // Check that both strings are sequential one-byte.
3729 Label runtime;
3730 __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
3731 &runtime);
3732
3733 // Compare flat one-byte strings. Returns when done.
3734 if (equality) {
3735 StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
3736 tmp3);
3737 } else {
3738 StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
3739 tmp2, tmp3, tmp4);
3740 }
3741
3742 // Handle more complex cases in runtime.
3743 __ bind(&runtime);
3744 __ Push(left, right);
3745 if (equality) {
3746 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
3747 } else {
3748 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3749 }
3750
3751 __ bind(&miss);
3752 GenerateMiss(masm);
3753 }
3754
3755
3756 void CompareICStub::GenerateObjects(MacroAssembler* masm) {
3757 DCHECK(state() == CompareICState::OBJECT);
3758 Label miss;
3759 __ And(a2, a1, Operand(a0));
3760 __ JumpIfSmi(a2, &miss);
3761
3762 __ GetObjectType(a0, a2, a2);
3763 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
3764 __ GetObjectType(a1, a2, a2);
3765 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
3766
3767 DCHECK(GetCondition() == eq);
3768 __ Ret(USE_DELAY_SLOT);
3769 __ subu(v0, a0, a1);
3770
3771 __ bind(&miss);
3772 GenerateMiss(masm);
3773 }
3774
3775
3776 void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
3777 Label miss;
3778 __ And(a2, a1, a0);
3779 __ JumpIfSmi(a2, &miss);
3780 __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
3781 __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
3782 __ Branch(&miss, ne, a2, Operand(known_map_));
3783 __ Branch(&miss, ne, a3, Operand(known_map_));
3784
3785 __ Ret(USE_DELAY_SLOT);
3786 __ subu(v0, a0, a1);
3787
3788 __ bind(&miss);
3789 GenerateMiss(masm);
3790 }
3791
3792
3793 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
3794 {
3795 // Call the runtime system in a fresh internal frame.
3796 ExternalReference miss =
3797 ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
3798 FrameScope scope(masm, StackFrame::INTERNAL);
3799 __ Push(a1, a0);
3800 __ Push(ra, a1, a0);
3801 __ li(t0, Operand(Smi::FromInt(op())));
3802 __ addiu(sp, sp, -kPointerSize);
3803 __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
3804 __ sw(t0, MemOperand(sp)); // In the delay slot.
3805 // Compute the entry point of the rewritten stub.
3806 __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
3807 // Restore registers.
3808 __ Pop(a1, a0, ra);
3809 }
3810 __ Jump(a2);
3811 }
3812
3813
3814 void DirectCEntryStub::Generate(MacroAssembler* masm) {
3815   // Make room for the arguments of the C calling convention. Most of the callers
3816 // of DirectCEntryStub::GenerateCall are using EnterExitFrame/LeaveExitFrame
3817 // so they handle stack restoring and we don't have to do that here.
3818 // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
3819 // kCArgsSlotsSize stack space after the call.
3820 __ Subu(sp, sp, Operand(kCArgsSlotsSize));
3821 // Place the return address on the stack, making the call
3822 // GC safe. The RegExp backend also relies on this.
3823 __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
3824 __ Call(t9); // Call the C++ function.
3825 __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
3826
3827 if (FLAG_debug_code && FLAG_enable_slow_asserts) {
3828 // In case of an error the return address may point to a memory area
3829 // filled with kZapValue by the GC.
3830 // Dereference the address and check for this.
3831 __ lw(t0, MemOperand(t9));
3832 __ Assert(ne, kReceivedInvalidReturnAddress, t0,
3833 Operand(reinterpret_cast<uint32_t>(kZapValue)));
3834 }
3835 __ Jump(t9);
3836 }
3837
3838
3839 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
3840 Register target) {
3841 intptr_t loc =
3842 reinterpret_cast<intptr_t>(GetCode().location());
3843 __ Move(t9, target);
3844 __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
3845 __ Call(ra);
3846 }
3847
3848
3849 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
3850 Label* miss,
3851 Label* done,
3852 Register receiver,
3853 Register properties,
3854 Handle<Name> name,
3855 Register scratch0) {
3856 DCHECK(name->IsUniqueName());
3857 // If names of slots in range from 1 to kProbes - 1 for the hash value are
3858 // not equal to the name and kProbes-th slot is not used (its name is the
3859 // undefined value), it guarantees the hash table doesn't contain the
3860 // property. It's true even if some slots represent deleted properties
3861 // (their names are the hole value).
3862 for (int i = 0; i < kInlinedProbes; i++) {
3863 // scratch0 points to properties hash.
3864 // Compute the masked index: (hash + i + i * i) & mask.
3865 Register index = scratch0;
3866 // Capacity is smi 2^n.
3867 __ lw(index, FieldMemOperand(properties, kCapacityOffset));
3868 __ Subu(index, index, Operand(1));
3869 __ And(index, index, Operand(
3870 Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
3871
3872 // Scale the index by multiplying by the entry size.
3873 DCHECK(NameDictionary::kEntrySize == 3);
3874 __ sll(at, index, 1);
3875 __ Addu(index, index, at);
3876
3877 Register entity_name = scratch0;
3878 // Having undefined at this place means the name is not contained.
3879 DCHECK_EQ(kSmiTagSize, 1);
3880 Register tmp = properties;
3881 __ sll(scratch0, index, 1);
3882 __ Addu(tmp, properties, scratch0);
3883 __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
3884
3885 DCHECK(!tmp.is(entity_name));
3886 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
3887 __ Branch(done, eq, entity_name, Operand(tmp));
3888
3889 // Load the hole ready for use below:
3890 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
3891
3892 // Stop if found the property.
3893 __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
3894
3895 Label good;
3896 __ Branch(&good, eq, entity_name, Operand(tmp));
3897
3898 // Check if the entry name is not a unique name.
3899 __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
3900 __ lbu(entity_name,
3901 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
3902 __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
3903 __ bind(&good);
3904
3905 // Restore the properties.
3906 __ lw(properties,
3907 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
3908 }
3909
3910 const int spill_mask =
3911 (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
3912 a2.bit() | a1.bit() | a0.bit() | v0.bit());
3913
3914 __ MultiPush(spill_mask);
3915 __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
3916 __ li(a1, Operand(Handle<Name>(name)));
3917 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
3918 __ CallStub(&stub);
3919 __ mov(at, v0);
3920 __ MultiPop(spill_mask);
3921
3922 __ Branch(done, eq, at, Operand(zero_reg));
3923 __ Branch(miss, ne, at, Operand(zero_reg));
3924 }
3925
3926
3927 // Probe the name dictionary in the |elements| register. Jump to the
3928 // |done| label if a property with the given name is found. Jump to
3929 // the |miss| label otherwise.
3930 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
3931 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
3932 Label* miss,
3933 Label* done,
3934 Register elements,
3935 Register name,
3936 Register scratch1,
3937 Register scratch2) {
3938 DCHECK(!elements.is(scratch1));
3939 DCHECK(!elements.is(scratch2));
3940 DCHECK(!name.is(scratch1));
3941 DCHECK(!name.is(scratch2));
3942
3943 __ AssertName(name);
3944
3945 // Compute the capacity mask.
3946 __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
3947 __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
3948 __ Subu(scratch1, scratch1, Operand(1));
3949
3950 // Generate an unrolled loop that performs a few probes before
3951 // giving up. Measurements done on Gmail indicate that 2 probes
3952 // cover ~93% of loads from dictionaries.
3953 for (int i = 0; i < kInlinedProbes; i++) {
3954 // Compute the masked index: (hash + i + i * i) & mask.
3955 __ lw(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
3956 if (i > 0) {
3957 // Add the probe offset (i + i * i) left shifted to avoid right shifting
3958 // the hash in a separate instruction. The value hash + i + i * i is right
3959       // shifted in the following And instruction.
3960 DCHECK(NameDictionary::GetProbeOffset(i) <
3961 1 << (32 - Name::kHashFieldOffset));
3962 __ Addu(scratch2, scratch2, Operand(
3963 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
3964 }
3965 __ srl(scratch2, scratch2, Name::kHashShift);
3966 __ And(scratch2, scratch1, scratch2);
3967
3968 // Scale the index by multiplying by the element size.
3969 DCHECK(NameDictionary::kEntrySize == 3);
3970 // scratch2 = scratch2 * 3.
3971
3972 __ sll(at, scratch2, 1);
3973 __ Addu(scratch2, scratch2, at);
3974
3975 // Check if the key is identical to the name.
3976 __ sll(at, scratch2, 2);
3977 __ Addu(scratch2, elements, at);
3978 __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
3979 __ Branch(done, eq, name, Operand(at));
3980 }
3981
3982 const int spill_mask =
3983 (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
3984 a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
3985 ~(scratch1.bit() | scratch2.bit());
3986
3987 __ MultiPush(spill_mask);
3988 if (name.is(a0)) {
3989 DCHECK(!elements.is(a1));
3990 __ Move(a1, name);
3991 __ Move(a0, elements);
3992 } else {
3993 __ Move(a0, elements);
3994 __ Move(a1, name);
3995 }
3996 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
3997 __ CallStub(&stub);
3998 __ mov(scratch2, a2);
3999 __ mov(at, v0);
4000 __ MultiPop(spill_mask);
4001
4002 __ Branch(done, ne, at, Operand(zero_reg));
4003 __ Branch(miss, eq, at, Operand(zero_reg));
4004 }
4005
4006
4007 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
4008 // This stub overrides SometimesSetsUpAFrame() to return false. That means
4009 // we cannot call anything that could cause a GC from this stub.
4010   // Registers:
4011   //  result (v0): holds the lookup result.
4012   //  dictionary (a0): NameDictionary to probe.
4013   //  key (a1): the name being looked up.
4014   //  index (a2): will hold the index of the entry if the lookup is
4015   //              successful.
4016   // Returns:
4017   //  result is zero if the lookup failed, non-zero otherwise.
4018
4019 Register result = v0;
4020 Register dictionary = a0;
4021 Register key = a1;
4022 Register index = a2;
4023 Register mask = a3;
4024 Register hash = t0;
4025 Register undefined = t1;
4026 Register entry_key = t2;
4027
4028 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
4029
4030 __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
4031 __ sra(mask, mask, kSmiTagSize);
4032 __ Subu(mask, mask, Operand(1));
4033
4034 __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
4035
4036 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
4037
4038 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
4039 // Compute the masked index: (hash + i + i * i) & mask.
4040 // Capacity is smi 2^n.
4041 if (i > 0) {
4042 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4043 // the hash in a separate instruction. The value hash + i + i * i is right
4044       // shifted in the following And instruction.
4045 DCHECK(NameDictionary::GetProbeOffset(i) <
4046 1 << (32 - Name::kHashFieldOffset));
4047 __ Addu(index, hash, Operand(
4048 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4049 } else {
4050 __ mov(index, hash);
4051 }
4052 __ srl(index, index, Name::kHashShift);
4053 __ And(index, mask, index);
4054
4055 // Scale the index by multiplying by the entry size.
4056 DCHECK(NameDictionary::kEntrySize == 3);
4057 // index *= 3.
4058 __ mov(at, index);
4059 __ sll(index, index, 1);
4060 __ Addu(index, index, at);
4061
4062
4063 DCHECK_EQ(kSmiTagSize, 1);
4064 __ sll(index, index, 2);
4065 __ Addu(index, index, dictionary);
4066 __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
4067
4068 // Having undefined at this place means the name is not contained.
4069     __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
4070
4071 // Stop if found the property.
4072 __ Branch(&in_dictionary, eq, entry_key, Operand(key));
4073
4074 if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
4075 // Check if the entry name is not a unique name.
4076 __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
4077 __ lbu(entry_key,
4078 FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
4079 __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
4080 }
4081 }
4082
4083 __ bind(&maybe_in_dictionary);
4084 // If we are doing negative lookup then probing failure should be
4085 // treated as a lookup success. For positive lookup probing failure
4086 // should be treated as lookup failure.
4087 if (mode() == POSITIVE_LOOKUP) {
4088 __ Ret(USE_DELAY_SLOT);
4089 __ mov(result, zero_reg);
4090 }
4091
4092 __ bind(&in_dictionary);
4093 __ Ret(USE_DELAY_SLOT);
4094 __ li(result, 1);
4095
4096   __ bind(&not_in_dictionary);
4097 __ Ret(USE_DELAY_SLOT);
4098 __ mov(result, zero_reg);
4099 }
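
// Each probe above masks (hash + probe offset) down to an entry index and
// then scales it by the entry size (asserted to be 3) to reach the key slot.
// Illustrative sketch of that index arithmetic (the probe offset itself comes
// from NameDictionary::GetProbeOffset(i); names here are local to the example
// and the helper is not used by the stubs):
static inline uint32_t DictionaryKeyWordIndexSketch(uint32_t hash,
                                                    uint32_t probe_offset,
                                                    uint32_t capacity_mask) {
  const uint32_t kLocalEntrySize = 3;  // three words per entry, as asserted
  uint32_t entry = (hash + probe_offset) & capacity_mask;
  // Word index of the entry's key within the dictionary's element area.
  return entry * kLocalEntrySize;
}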
4100
4101
4102 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
4103 Isolate* isolate) {
4104 StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
4105 stub1.GetCode();
4106 // Hydrogen code stubs need stub2 at snapshot time.
4107 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
4108 stub2.GetCode();
4109 }
4110
4111
4112 // Takes the input in 3 registers: address_, value_ and object_. A pointer to
4113 // the value has just been written into the object, now this stub makes sure
4114 // we keep the GC informed. The word in the object where the value has been
4115 // written is in the address register.
4116 void RecordWriteStub::Generate(MacroAssembler* masm) {
4117 Label skip_to_incremental_noncompacting;
4118 Label skip_to_incremental_compacting;
4119
4120 // The first two branch+nop instructions are generated with labels so as to
4121 // get the offset fixed up correctly by the bind(Label*) call. We patch it
4122 // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
4123 // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
4124 // incremental heap marking.
4125 // See RecordWriteStub::Patch for details.
4126 __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
4127 __ nop();
4128 __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
4129 __ nop();
4130
4131 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
4132 __ RememberedSetHelper(object(),
4133 address(),
4134 value(),
4135 save_fp_regs_mode(),
4136 MacroAssembler::kReturnAtEnd);
4137 }
4138 __ Ret();
4139
4140 __ bind(&skip_to_incremental_noncompacting);
4141 GenerateIncremental(masm, INCREMENTAL);
4142
4143 __ bind(&skip_to_incremental_compacting);
4144 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
4145
4146 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
4147 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
4148
4149 PatchBranchIntoNop(masm, 0);
4150 PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
4151 }
4152
4153
4154 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
4155 regs_.Save(masm);
4156
4157 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
4158 Label dont_need_remembered_set;
4159
4160 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
4161 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
4162 regs_.scratch0(),
4163 &dont_need_remembered_set);
4164
4165 __ CheckPageFlag(regs_.object(),
4166 regs_.scratch0(),
4167 1 << MemoryChunk::SCAN_ON_SCAVENGE,
4168 ne,
4169 &dont_need_remembered_set);
4170
4171 // First notify the incremental marker if necessary, then update the
4172 // remembered set.
4173 CheckNeedsToInformIncrementalMarker(
4174 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
4175 InformIncrementalMarker(masm);
4176 regs_.Restore(masm);
4177 __ RememberedSetHelper(object(),
4178 address(),
4179 value(),
4180 save_fp_regs_mode(),
4181 MacroAssembler::kReturnAtEnd);
4182
4183 __ bind(&dont_need_remembered_set);
4184 }
4185
4186 CheckNeedsToInformIncrementalMarker(
4187 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
4188 InformIncrementalMarker(masm);
4189 regs_.Restore(masm);
4190 __ Ret();
4191 }
4192
4193
4194 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
4195 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
4196 int argument_count = 3;
4197 __ PrepareCallCFunction(argument_count, regs_.scratch0());
4198 Register address =
4199 a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
4200 DCHECK(!address.is(regs_.object()));
4201 DCHECK(!address.is(a0));
4202 __ Move(address, regs_.address());
4203 __ Move(a0, regs_.object());
4204 __ Move(a1, address);
4205 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
4206
4207 AllowExternalCallThatCantCauseGC scope(masm);
4208 __ CallCFunction(
4209 ExternalReference::incremental_marking_record_write_function(isolate()),
4210 argument_count);
4211 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
4212 }
4213
4214
4215 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4216 MacroAssembler* masm,
4217 OnNoNeedToInformIncrementalMarker on_no_need,
4218 Mode mode) {
4219 Label on_black;
4220 Label need_incremental;
4221 Label need_incremental_pop_scratch;
4222
4223 __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
4224 __ lw(regs_.scratch1(),
4225 MemOperand(regs_.scratch0(),
4226 MemoryChunk::kWriteBarrierCounterOffset));
4227 __ Subu(regs_.scratch1(), regs_.scratch1(), Operand(1));
4228 __ sw(regs_.scratch1(),
4229 MemOperand(regs_.scratch0(),
4230 MemoryChunk::kWriteBarrierCounterOffset));
4231 __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
4232
4233 // Let's look at the color of the object: If it is not black we don't have
4234 // to inform the incremental marker.
4235 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
4236
4237 regs_.Restore(masm);
4238 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4239 __ RememberedSetHelper(object(),
4240 address(),
4241 value(),
4242 save_fp_regs_mode(),
4243 MacroAssembler::kReturnAtEnd);
4244 } else {
4245 __ Ret();
4246 }
4247
4248 __ bind(&on_black);
4249
4250 // Get the value from the slot.
4251 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
4252
4253 if (mode == INCREMENTAL_COMPACTION) {
4254 Label ensure_not_white;
4255
4256 __ CheckPageFlag(regs_.scratch0(), // Contains value.
4257 regs_.scratch1(), // Scratch.
4258 MemoryChunk::kEvacuationCandidateMask,
4259 eq,
4260 &ensure_not_white);
4261
4262 __ CheckPageFlag(regs_.object(),
4263 regs_.scratch1(), // Scratch.
4264 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
4265 eq,
4266 &need_incremental);
4267
4268 __ bind(&ensure_not_white);
4269 }
4270
4271 // We need extra registers for this, so we push the object and the address
4272 // register temporarily.
4273 __ Push(regs_.object(), regs_.address());
4274 __ EnsureNotWhite(regs_.scratch0(), // The value.
4275 regs_.scratch1(), // Scratch.
4276 regs_.object(), // Scratch.
4277 regs_.address(), // Scratch.
4278 &need_incremental_pop_scratch);
4279 __ Pop(regs_.object(), regs_.address());
4280
4281 regs_.Restore(masm);
4282 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4283 __ RememberedSetHelper(object(),
4284 address(),
4285 value(),
4286 save_fp_regs_mode(),
4287 MacroAssembler::kReturnAtEnd);
4288 } else {
4289 __ Ret();
4290 }
4291
4292 __ bind(&need_incremental_pop_scratch);
4293 __ Pop(regs_.object(), regs_.address());
4294
4295 __ bind(&need_incremental);
4296
4297 // Fall through when we need to inform the incremental marker.
4298 }
4299
4300
4301 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
4302 // ----------- S t a t e -------------
4303 // -- a0 : element value to store
4304 // -- a3 : element index as smi
4305 // -- sp[0] : array literal index in function as smi
4306 // -- sp[4] : array literal
4307 // clobbers a1, a2, t0
4308 // -----------------------------------
4309
4310 Label element_done;
4311 Label double_elements;
4312 Label smi_element;
4313 Label slow_elements;
4314 Label fast_elements;
4315
4316 // Get array literal index, array literal and its map.
4317 __ lw(t0, MemOperand(sp, 0 * kPointerSize));
4318 __ lw(a1, MemOperand(sp, 1 * kPointerSize));
4319 __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));
4320
4321 __ CheckFastElements(a2, t1, &double_elements);
4322 // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
4323 __ JumpIfSmi(a0, &smi_element);
4324 __ CheckFastSmiElements(a2, t1, &fast_elements);
4325
4326   // Storing into the array literal requires an elements transition. Call into
4327 // the runtime.
4328 __ bind(&slow_elements);
4329 // call.
4330 __ Push(a1, a3, a0);
4331 __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4332 __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
4333 __ Push(t1, t0);
4334 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
4335
4336 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
4337 __ bind(&fast_elements);
4338 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
4339 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
4340 __ Addu(t2, t1, t2);
4341 __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4342 __ sw(a0, MemOperand(t2, 0));
4343 // Update the write barrier for the array store.
4344 __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
4345 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
4346 __ Ret(USE_DELAY_SLOT);
4347 __ mov(v0, a0);
4348
4349 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
4350 // and value is Smi.
4351 __ bind(&smi_element);
4352 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
4353 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
4354 __ Addu(t2, t1, t2);
4355 __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
4356 __ Ret(USE_DELAY_SLOT);
4357 __ mov(v0, a0);
4358
4359 // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
4360 __ bind(&double_elements);
4361 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
4362 __ StoreNumberToDoubleElements(a0, a3, t1, t3, t5, a2, &slow_elements);
4363 __ Ret(USE_DELAY_SLOT);
4364 __ mov(v0, a0);
4365 }
4366
4367
4368 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4369 CEntryStub ces(isolate(), 1, kSaveFPRegs);
4370 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
4371 int parameter_count_offset =
4372 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4373 __ lw(a1, MemOperand(fp, parameter_count_offset));
4374 if (function_mode() == JS_FUNCTION_STUB_MODE) {
4375 __ Addu(a1, a1, Operand(1));
4376 }
4377 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4378 __ sll(a1, a1, kPointerSizeLog2);
4379 __ Ret(USE_DELAY_SLOT);
4380 __ Addu(sp, sp, a1);
4381 }
4382
4383
4384 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
4385 EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4386 VectorLoadStub stub(isolate(), state());
4387 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4388 }
4389
4390
4391 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
4392 EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4393 VectorKeyedLoadStub stub(isolate());
4394 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4395 }
4396
4397
4398 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4399 if (masm->isolate()->function_entry_hook() != NULL) {
4400 ProfileEntryHookStub stub(masm->isolate());
4401 __ push(ra);
4402 __ CallStub(&stub);
4403 __ pop(ra);
4404 }
4405 }
4406
4407
4408 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4409 // The entry hook is a "push ra" instruction, followed by a call.
4410   // Note: on MIPS "push" is 2 instructions.
4411 const int32_t kReturnAddressDistanceFromFunctionStart =
4412 Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
4413
4414 // This should contain all kJSCallerSaved registers.
4415 const RegList kSavedRegs =
4416 kJSCallerSaved | // Caller saved registers.
4417 s5.bit(); // Saved stack pointer.
4418
4419 // We also save ra, so the count here is one higher than the mask indicates.
4420 const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
4421
4422 // Save all caller-save registers as this may be called from anywhere.
4423 __ MultiPush(kSavedRegs | ra.bit());
4424
4425 // Compute the function's address for the first argument.
4426 __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
4427
4428 // The caller's return address is above the saved temporaries.
4429 // Grab that for the second argument to the hook.
4430 __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
4431
4432 // Align the stack if necessary.
4433 int frame_alignment = masm->ActivationFrameAlignment();
4434 if (frame_alignment > kPointerSize) {
4435 __ mov(s5, sp);
4436 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4437 __ And(sp, sp, Operand(-frame_alignment));
4438 }
4439 __ Subu(sp, sp, kCArgsSlotsSize);
4440 #if defined(V8_HOST_ARCH_MIPS)
4441 int32_t entry_hook =
4442 reinterpret_cast<int32_t>(isolate()->function_entry_hook());
4443 __ li(t9, Operand(entry_hook));
4444 #else
4445 // Under the simulator we need to indirect the entry hook through a
4446 // trampoline function at a known address.
4447 // It additionally takes an isolate as a third parameter.
4448 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
4449
4450 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
4451 __ li(t9, Operand(ExternalReference(&dispatcher,
4452 ExternalReference::BUILTIN_CALL,
4453 isolate())));
4454 #endif
4455 // Call the C function through t9 to conform to the ABI for PIC code.
4456 __ Call(t9);
4457
4458 // Restore the stack pointer if needed.
4459 if (frame_alignment > kPointerSize) {
4460 __ mov(sp, s5);
4461 } else {
4462 __ Addu(sp, sp, kCArgsSlotsSize);
4463 }
4464
4465 // Also pop ra to get Ret(0).
4466 __ MultiPop(kSavedRegs | ra.bit());
4467 __ Ret();
4468 }
4469
4470
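// Tail-call the constructor stub T that matches the elements kind in a3,
// or the initial fast kind when allocation sites are disabled.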
4471 template<class T>
4472 static void CreateArrayDispatch(MacroAssembler* masm,
4473 AllocationSiteOverrideMode mode) {
4474 if (mode == DISABLE_ALLOCATION_SITES) {
4475 T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
4476 __ TailCallStub(&stub);
4477 } else if (mode == DONT_OVERRIDE) {
4478 int last_index = GetSequenceIndexFromFastElementsKind(
4479 TERMINAL_FAST_ELEMENTS_KIND);
4480 for (int i = 0; i <= last_index; ++i) {
4481 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4482 T stub(masm->isolate(), kind);
4483 __ TailCallStub(&stub, eq, a3, Operand(kind));
4484 }
4485
4486 // If we reached this point there is a problem.
4487 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4488 } else {
4489 UNREACHABLE();
4490 }
4491 }
4492
4493
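// Single-argument dispatch: if the requested length is non-zero we may need
// the holey variant of the elements kind; the AllocationSite's transition
// info is updated accordingly before tail-calling the matching stub.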
4494 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
4495 AllocationSiteOverrideMode mode) {
4496 // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
4497 // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
4498 // a0 - number of arguments
4499 // a1 - constructor?
4500 // sp[0] - last argument
4501 Label normal_sequence;
4502 if (mode == DONT_OVERRIDE) {
4503 DCHECK(FAST_SMI_ELEMENTS == 0);
4504 DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
4505 DCHECK(FAST_ELEMENTS == 2);
4506 DCHECK(FAST_HOLEY_ELEMENTS == 3);
4507 DCHECK(FAST_DOUBLE_ELEMENTS == 4);
4508 DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
4509
4510 // Is the low bit set? If so, we are holey and that is good.
4511 __ And(at, a3, Operand(1));
4512 __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
4513 }
4514
4515 // look at the first argument
4516 __ lw(t1, MemOperand(sp, 0));
4517 __ Branch(&normal_sequence, eq, t1, Operand(zero_reg));
4518
4519 if (mode == DISABLE_ALLOCATION_SITES) {
4520 ElementsKind initial = GetInitialFastElementsKind();
4521 ElementsKind holey_initial = GetHoleyElementsKind(initial);
4522
4523 ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
4524 holey_initial,
4525 DISABLE_ALLOCATION_SITES);
4526 __ TailCallStub(&stub_holey);
4527
4528 __ bind(&normal_sequence);
4529 ArraySingleArgumentConstructorStub stub(masm->isolate(),
4530 initial,
4531 DISABLE_ALLOCATION_SITES);
4532 __ TailCallStub(&stub);
4533 } else if (mode == DONT_OVERRIDE) {
4534 // We are going to create a holey array, but our kind is non-holey.
4535 // Fix kind and retry (only if we have an allocation site in the slot).
4536 __ Addu(a3, a3, Operand(1));
4537
4538 if (FLAG_debug_code) {
4539 __ lw(t1, FieldMemOperand(a2, 0));
4540 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
4541 __ Assert(eq, kExpectedAllocationSite, t1, Operand(at));
4542 }
4543
4544 // Save the resulting elements kind in type info. We can't just store a3
4545 // in the AllocationSite::transition_info field because elements kind is
4546 // restricted to a portion of the field...upper bits need to be left alone.
4547 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4548 __ lw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
4549 __ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
4550 __ sw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
4551
4552
4553 __ bind(&normal_sequence);
4554 int last_index = GetSequenceIndexFromFastElementsKind(
4555 TERMINAL_FAST_ELEMENTS_KIND);
4556 for (int i = 0; i <= last_index; ++i) {
4557 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4558 ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
4559 __ TailCallStub(&stub, eq, a3, Operand(kind));
4560 }
4561
4562 // If we reached this point there is a problem.
4563 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4564 } else {
4565 UNREACHABLE();
4566 }
4567 }
4568
4569
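// Pre-generate stub code for every fast elements kind, plus the
// DISABLE_ALLOCATION_SITES variant for kinds that track allocation sites.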
4570 template<class T>
4571 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
4572 int to_index = GetSequenceIndexFromFastElementsKind(
4573 TERMINAL_FAST_ELEMENTS_KIND);
4574 for (int i = 0; i <= to_index; ++i) {
4575 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4576 T stub(isolate, kind);
4577 stub.GetCode();
4578 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
4579 T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
4580 stub1.GetCode();
4581 }
4582 }
4583 }
4584
4585
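// Ahead-of-time generation for the no-argument, single-argument and
// N-argument Array constructor stubs.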
4586 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
4587 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
4588 isolate);
4589 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
4590 isolate);
4591 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
4592 isolate);
4593 }
4594
4595
4596 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
4597 Isolate* isolate) {
4598 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
4599 for (int i = 0; i < 2; i++) {
4600 // For internal arrays we only need a few things.
4601 InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
4602 stubh1.GetCode();
4603 InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
4604 stubh2.GetCode();
4605 InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
4606 stubh3.GetCode();
4607 }
4608 }
4609
4610
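// Dispatch on the argument count (which may only be known at runtime, in a0)
// to the matching Array constructor stub.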
4611 void ArrayConstructorStub::GenerateDispatchToArrayStub(
4612 MacroAssembler* masm,
4613 AllocationSiteOverrideMode mode) {
4614 if (argument_count() == ANY) {
4615 Label not_zero_case, not_one_case;
4616 __ And(at, a0, a0);
4617 __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
4618 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4619
4620 __ bind(&not_zero_case);
4621 __ Branch(&not_one_case, gt, a0, Operand(1));
4622 CreateArrayDispatchOneArgument(masm, mode);
4623
4624 __ bind(&not_one_case);
4625 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4626 } else if (argument_count() == NONE) {
4627 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4628 } else if (argument_count() == ONE) {
4629 CreateArrayDispatchOneArgument(masm, mode);
4630 } else if (argument_count() == MORE_THAN_ONE) {
4631 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4632 } else {
4633 UNREACHABLE();
4634 }
4635 }
4636
4637
4638 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
4639 // ----------- S t a t e -------------
4640 // -- a0 : argc (only if argument_count() == ANY)
4641 // -- a1 : constructor
4642 // -- a2 : AllocationSite or undefined
4643 // -- sp[0] : return address
4644 // -- sp[4] : last argument
4645 // -----------------------------------
4646
4647 if (FLAG_debug_code) {
4648 // The array construct code is only set for the global and natives
4649 // builtin Array functions, which always have maps.
4650
4651 // Initial map for the builtin Array function should be a map.
4652 __ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
4653 // The SmiTst below flags both a NULL pointer and a Smi.
4654 __ SmiTst(t0, at);
4655 __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
4656 at, Operand(zero_reg));
4657 __ GetObjectType(t0, t0, t1);
4658 __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
4659 t1, Operand(MAP_TYPE));
4660
4661 // We should either have undefined in a2 or a valid AllocationSite
4662 __ AssertUndefinedOrAllocationSite(a2, t0);
4663 }
4664
4665 Label no_info;
4666 // Get the elements kind and case on that.
4667 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4668 __ Branch(&no_info, eq, a2, Operand(at));
4669
4670 __ lw(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
4671 __ SmiUntag(a3);
4672 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4673 __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
4674 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
4675
4676 __ bind(&no_info);
4677 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
4678 }
4679
4680
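// Dispatch on argc for one elements kind: the no-argument stub for argc < 1,
// the N-argument stub for argc > 1, and the single-argument stub otherwise
// (falling back to the holey variant when a packed array would get a
// non-zero length).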
4681 void InternalArrayConstructorStub::GenerateCase(
4682 MacroAssembler* masm, ElementsKind kind) {
4683
4684 InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
4685 __ TailCallStub(&stub0, lo, a0, Operand(1));
4686
4687 InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
4688 __ TailCallStub(&stubN, hi, a0, Operand(1));
4689
4690 if (IsFastPackedElementsKind(kind)) {
4691 // We might need to create a holey array;
4692 // look at the first argument.
4693 __ lw(at, MemOperand(sp, 0));
4694
4695 InternalArraySingleArgumentConstructorStub
4696 stub1_holey(isolate(), GetHoleyElementsKind(kind));
4697 __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
4698 }
4699
4700 InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
4701 __ TailCallStub(&stub1);
4702 }
4703
4704
4705 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
4706 // ----------- S t a t e -------------
4707 // -- a0 : argc
4708 // -- a1 : constructor
4709 // -- sp[0] : return address
4710 // -- sp[4] : last argument
4711 // -----------------------------------
4712
4713 if (FLAG_debug_code) {
4714 // The array construct code is only set for the global and natives
4715 // builtin Array functions, which always have maps.
4716
4717 // Initial map for the builtin Array function should be a map.
4718 __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
4719 // The SmiTst below flags both a NULL pointer and a Smi.
4720 __ SmiTst(a3, at);
4721 __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
4722 at, Operand(zero_reg));
4723 __ GetObjectType(a3, a3, t0);
4724 __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
4725 t0, Operand(MAP_TYPE));
4726 }
4727
4728 // Figure out the right elements kind.
4729 __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
4730
4731 // Load the map's "bit field 2" into a3. We only need the first byte,
4732 // but the following bit field extraction takes care of that anyway.
4733 __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
4734 // Retrieve elements_kind from bit field 2.
4735 __ DecodeField<Map::ElementsKindBits>(a3);
4736
4737 if (FLAG_debug_code) {
4738 Label done;
4739 __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
4740 __ Assert(
4741 eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
4742 a3, Operand(FAST_HOLEY_ELEMENTS));
4743 __ bind(&done);
4744 }
4745
4746 Label fast_elements_case;
4747 __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
4748 GenerateCase(masm, FAST_HOLEY_ELEMENTS);
4749
4750 __ bind(&fast_elements_case);
4751 GenerateCase(masm, FAST_ELEMENTS);
4752 }
4753
4754
4755 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
4756 // ----------- S t a t e -------------
4757 // -- a0 : callee
4758 // -- t0 : call_data
4759 // -- a2 : holder
4760 // -- a1 : api_function_address
4761 // -- cp : context
4762 // --
4763 // -- sp[0] : last argument
4764 // -- ...
4765 // -- sp[(argc - 1)* 4] : first argument
4766 // -- sp[argc * 4] : receiver
4767 // -----------------------------------
4768
4769 Register callee = a0;
4770 Register call_data = t0;
4771 Register holder = a2;
4772 Register api_function_address = a1;
4773 Register context = cp;
4774
4775 int argc = this->argc();
4776 bool is_store = this->is_store();
4777 bool call_data_undefined = this->call_data_undefined();
4778
4779 typedef FunctionCallbackArguments FCA;
4780
4781 STATIC_ASSERT(FCA::kContextSaveIndex == 6);
4782 STATIC_ASSERT(FCA::kCalleeIndex == 5);
4783 STATIC_ASSERT(FCA::kDataIndex == 4);
4784 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
4785 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
4786 STATIC_ASSERT(FCA::kIsolateIndex == 1);
4787 STATIC_ASSERT(FCA::kHolderIndex == 0);
4788 STATIC_ASSERT(FCA::kArgsLength == 7);
4789
4790 // Save context, callee and call data.
4791 __ Push(context, callee, call_data);
4792 // Load context from callee.
4793 __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));
4794
4795 Register scratch = call_data;
4796 if (!call_data_undefined) {
4797 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4798 }
4799 // Push return value and default return value.
4800 __ Push(scratch, scratch);
4801 __ li(scratch,
4802 Operand(ExternalReference::isolate_address(isolate())));
4803 // Push isolate and holder.
4804 __ Push(scratch, holder);
4805
4806 // Prepare arguments.
4807 __ mov(scratch, sp);
4808
4809 // Allocate the v8::Arguments structure in the arguments' space since
4810 // it's not controlled by GC.
4811 const int kApiStackSpace = 4;
4812
4813 FrameScope frame_scope(masm, StackFrame::MANUAL);
4814 __ EnterExitFrame(false, kApiStackSpace);
4815
4816 DCHECK(!api_function_address.is(a0) && !scratch.is(a0));
4817 // a0 = FunctionCallbackInfo&
4818 // Arguments is after the return address.
4819 __ Addu(a0, sp, Operand(1 * kPointerSize));
4820 // FunctionCallbackInfo::implicit_args_
4821 __ sw(scratch, MemOperand(a0, 0 * kPointerSize));
4822 // FunctionCallbackInfo::values_
4823 __ Addu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
4824 __ sw(at, MemOperand(a0, 1 * kPointerSize));
4825 // FunctionCallbackInfo::length_ = argc
4826 __ li(at, Operand(argc));
4827 __ sw(at, MemOperand(a0, 2 * kPointerSize));
4828 // FunctionCallbackInfo::is_construct_call = 0
4829 __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
4830
4831 const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
4832 ExternalReference thunk_ref =
4833 ExternalReference::invoke_function_callback(isolate());
4834
4835 AllowExternalCallThatCantCauseGC scope(masm);
4836 MemOperand context_restore_operand(
4837 fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
4838 // Stores return their first JS argument.
4839 int return_value_offset = 0;
4840 if (is_store) {
4841 return_value_offset = 2 + FCA::kArgsLength;
4842 } else {
4843 return_value_offset = 2 + FCA::kReturnValueOffset;
4844 }
4845 MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
4846
4847 __ CallApiFunctionAndReturn(api_function_address,
4848 thunk_ref,
4849 kStackUnwindSpace,
4850 return_value_operand,
4851 &context_restore_operand);
4852 }
4853
4854
4855 void CallApiGetterStub::Generate(MacroAssembler* masm) {
4856 // ----------- S t a t e -------------
4857 // -- sp[0] : name
4858 // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
4859 // -- ...
4860 // -- a2 : api_function_address
4861 // -----------------------------------
4862
4863 Register api_function_address = ApiGetterDescriptor::function_address();
4864 DCHECK(api_function_address.is(a2));
4865
4866 __ mov(a0, sp); // a0 = Handle<Name>
4867 __ Addu(a1, a0, Operand(1 * kPointerSize)); // a1 = PCA
4868
4869 const int kApiStackSpace = 1;
4870 FrameScope frame_scope(masm, StackFrame::MANUAL);
4871 __ EnterExitFrame(false, kApiStackSpace);
4872
4873 // Create PropertyAccessorInfo instance on the stack above the exit frame with
4874 // a1 (internal::Object** args_) as the data.
4875 __ sw(a1, MemOperand(sp, 1 * kPointerSize));
4876 __ Addu(a1, sp, Operand(1 * kPointerSize)); // a1 = AccessorInfo&
4877
4878 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
4879
4880 ExternalReference thunk_ref =
4881 ExternalReference::invoke_accessor_getter_callback(isolate());
4882 __ CallApiFunctionAndReturn(api_function_address,
4883 thunk_ref,
4884 kStackUnwindSpace,
4885 MemOperand(fp, 6 * kPointerSize),
4886 NULL);
4887 }
4888
4889
4890 #undef __
4891
4892 } } // namespace v8::internal
4893
4894 #endif // V8_TARGET_ARCH_MIPS
4895