// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM

#include "src/code-stubs.h"
#include "src/api-arguments.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"

#include "src/arm/code-stubs-arm.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
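  // Sketch of the calling convention assumed here (defined by the stub's
  // interface descriptor, not restated in this file): r0 holds the argument
  // count, r1 the constructor function and r2 the allocation site. Storing
  // r1 past the last argument fills the receiver slot; the function and
  // allocation site are then pushed, so argc grows by 3 for the runtime call.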
  __ lsl(r5, r0, Operand(kPointerSizeLog2));
  __ str(r1, MemOperand(sp, r5));
  __ Push(r1);
  __ Push(r2);
  __ add(r0, r0, Operand(3));
  __ TailCallRuntime(Runtime::kNewArray);
}

void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
  descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
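  // Presumably r0 carries the dynamic stack parameter count and -1 leaves the
  // stack-parameter hint unset, while JS_FUNCTION_STUB_MODE marks the deopt
  // handler as a JS-function-style entry; FastFunctionBindStub below uses the
  // same shape.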
}

void FastFunctionBindStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
  descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}

static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* lhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs);


void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                               ExternalReference miss) {
  // Update the static counter each time a new code stub is generated.
  isolate()->counters()->code_stubs()->Increment();

  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
  int param_count = descriptor.GetRegisterParameterCount();
  {
    // Call the runtime system in a fresh internal frame.
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    DCHECK(param_count == 0 ||
           r0.is(descriptor.GetRegisterParameter(param_count - 1)));
    // Push arguments
    for (int i = 0; i < param_count; ++i) {
      __ push(descriptor.GetRegisterParameter(i));
    }
    __ CallExternalReference(miss, param_count);
  }

  __ Ret();
}


void DoubleToIStub::Generate(MacroAssembler* masm) {
  Label out_of_range, only_low, negate, done;
  Register input_reg = source();
  Register result_reg = destination();
  DCHECK(is_truncating());

  int double_offset = offset();
  // Account for saved regs if input is sp.
  if (input_reg.is(sp)) double_offset += 3 * kPointerSize;

  Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
  Register scratch_low =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
  Register scratch_high =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
  LowDwVfpRegister double_scratch = kScratchDoubleReg;

  __ Push(scratch_high, scratch_low, scratch);

  if (!skip_fastpath()) {
    // Load double input.
    __ vldr(double_scratch, MemOperand(input_reg, double_offset));
    __ vmov(scratch_low, scratch_high, double_scratch);

    // Do fast-path convert from double to int.
    __ vcvt_s32_f64(double_scratch.low(), double_scratch);
    __ vmov(result_reg, double_scratch.low());

    // If result is not saturated (0x7fffffff or 0x80000000), we are done.
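    // The sub/cmp pair folds both saturation sentinels into one signed test:
    // 0x7fffffff - 1 = 0x7ffffffe and 0x80000000 - 1 = 0x7fffffff are the
    // only two values that are not signed-less-than 0x7ffffffe, so every
    // non-saturated result takes the branch to done.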
    __ sub(scratch, result_reg, Operand(1));
    __ cmp(scratch, Operand(0x7ffffffe));
    __ b(lt, &done);
  } else {
    // We've already done MacroAssembler::TryFastTruncatedDoubleToILoad, so we
    // know exponent > 31, so we can skip the vcvt_s32_f64 which will saturate.
    if (double_offset == 0) {
      __ ldm(ia, input_reg, scratch_low.bit() | scratch_high.bit());
    } else {
      __ ldr(scratch_low, MemOperand(input_reg, double_offset));
      __ ldr(scratch_high, MemOperand(input_reg, double_offset + kIntSize));
    }
  }

  __ Ubfx(scratch, scratch_high,
          HeapNumber::kExponentShift, HeapNumber::kExponentBits);
  // Load scratch with exponent - 1. This is faster than loading
  // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
  STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
  __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
  // If the exponent is greater than or equal to 84, the 32 least significant
  // bits of the result are zeros (84 = 52 mantissa bits + 32 result bits),
  // so the result is 0.
  // Compare exponent with 84 (compare exponent - 1 with 83).
  __ cmp(scratch, Operand(83));
  __ b(ge, &out_of_range);

  // If we reach this code, 31 <= exponent <= 83.
  // So, we don't have to handle cases where 0 <= exponent <= 20 for
  // which we would need to shift right the high part of the mantissa.
  // Scratch contains exponent - 1.
  // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
  __ rsb(scratch, scratch, Operand(51), SetCC);
  __ b(ls, &only_low);
  // 21 <= exponent <= 51, shift scratch_low and scratch_high
  // to generate the result.
  __ mov(scratch_low, Operand(scratch_low, LSR, scratch));
  // Scratch contains: 52 - exponent.
  // We need: exponent - 20.
  // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
  __ rsb(scratch, scratch, Operand(32));
  __ Ubfx(result_reg, scratch_high,
          0, HeapNumber::kMantissaBitsInTopWord);
  // Set the implicit 1 before the mantissa part in scratch_high.
  __ orr(result_reg, result_reg,
         Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  __ orr(result_reg, scratch_low, Operand(result_reg, LSL, scratch));
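  // At this point result_reg = (high_mantissa_with_implicit_1
  //                             << (exponent - 20))
  //                          | (mantissa_low >> (52 - exponent)),
  // i.e. the integer part of the mantissa shifted into place.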
  __ b(&negate);

  __ bind(&out_of_range);
  __ mov(result_reg, Operand::Zero());
  __ b(&done);

  __ bind(&only_low);
  // 52 <= exponent <= 83, shift only scratch_low.
  // On entry, scratch contains: 52 - exponent.
  __ rsb(scratch, scratch, Operand::Zero());
  __ mov(result_reg, Operand(scratch_low, LSL, scratch));

  __ bind(&negate);
  // If the input was positive, scratch_high ASR 31 equals 0 and
  // scratch_high LSR 31 equals zero.
  // New result = (result eor 0) + 0 = result.
  // If the input was negative, we have to negate the result.
  // scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
  // New result = (result eor 0xffffffff) + 1 = 0 - result.
  __ eor(result_reg, result_reg, Operand(scratch_high, ASR, 31));
  __ add(result_reg, result_reg, Operand(scratch_high, LSR, 31));
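  // Worked example: for result 5 and a negative input, (5 eor 0xffffffff) + 1
  // = 0xfffffffb, which is -5 in two's complement.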

  __ bind(&done);

  __ Pop(scratch_high, scratch_low, scratch);
  __ Ret();
}


// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cond) {
  Label not_identical;
  Label heap_number, return_equal;
  __ cmp(r0, r1);
  __ b(ne, &not_identical);
  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
  // so we do the second best thing - test it ourselves.
  // The operands are equal and they are not both Smis, so neither of them
  // is a Smi. If the operand is not a heap number, then return equal.
  if (cond == lt || cond == gt) {
    // Call runtime on identical JSObjects.
    __ CompareObjectType(r0, r4, r4, FIRST_JS_RECEIVER_TYPE);
    __ b(ge, slow);
    // Call runtime on identical symbols since we need to throw a TypeError.
    __ cmp(r4, Operand(SYMBOL_TYPE));
    __ b(eq, slow);
    // Call runtime on identical SIMD values since we must throw a TypeError.
    __ cmp(r4, Operand(SIMD128_VALUE_TYPE));
    __ b(eq, slow);
  } else {
    __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
    __ b(eq, &heap_number);
    // Comparing JS objects with <=, >= is complicated.
    if (cond != eq) {
      __ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE));
      __ b(ge, slow);
      // Call runtime on identical symbols since we need to throw a TypeError.
      __ cmp(r4, Operand(SYMBOL_TYPE));
      __ b(eq, slow);
      // Call runtime on identical SIMD values since we must throw a TypeError.
      __ cmp(r4, Operand(SIMD128_VALUE_TYPE));
      __ b(eq, slow);
      // Normally here we fall through to return_equal, but undefined is
      // special: (undefined == undefined) == true, but
      // (undefined <= undefined) == false! See ECMAScript 11.8.5.
      if (cond == le || cond == ge) {
        __ cmp(r4, Operand(ODDBALL_TYPE));
        __ b(ne, &return_equal);
        __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
        __ cmp(r0, r2);
        __ b(ne, &return_equal);
        if (cond == le) {
          // undefined <= undefined should fail.
          __ mov(r0, Operand(GREATER));
        } else {
          // undefined >= undefined should fail.
          __ mov(r0, Operand(LESS));
        }
        __ Ret();
      }
    }
  }

  __ bind(&return_equal);
  if (cond == lt) {
    __ mov(r0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cond == gt) {
    __ mov(r0, Operand(LESS));  // Things aren't greater than themselves.
  } else {
    __ mov(r0, Operand(EQUAL));  // Things are <=, >=, ==, === themselves.
  }
  __ Ret();

  // For less and greater we don't have to check for NaN since the result of
  // x < x is false regardless. For the others here is some code to check
  // for NaN.
  if (cond != lt && cond != gt) {
    __ bind(&heap_number);
    // It is a heap number, so return non-equal if it's NaN and equal if it's
    // not NaN.

    // The representation of NaN values has all exponent bits (52..62) set,
    // and not all mantissa bits (0..51) clear.
    // Read top bits of double representation (second word of value).
    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
    // Test that exponent bits are all set.
    __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
    // NaNs have all-one exponents so they sign extend to -1.
    __ cmp(r3, Operand(-1));
    __ b(ne, &return_equal);

    // Shift out flag and all exponent bits, retaining only mantissa.
    __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
    // Or with all low-bits of mantissa.
    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
    __ orr(r0, r3, Operand(r2), SetCC);
    // For equal we already have the right value in r0: Return zero (equal)
    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
    // not (it's a NaN). For <= and >= we need to load r0 with the failing
    // value if it's a NaN.
    if (cond != eq) {
      // All-zero means Infinity means equal.
      __ Ret(eq);
      if (cond == le) {
        __ mov(r0, Operand(GREATER));  // NaN <= NaN should fail.
      } else {
        __ mov(r0, Operand(LESS));  // NaN >= NaN should fail.
      }
    }
    __ Ret();
  }
  // No fall through here.

  __ bind(&not_identical);
}


// See comment at call site.
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* lhs_not_nan,
                                    Label* slow,
                                    bool strict) {
  DCHECK((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  Label rhs_is_smi;
  __ JumpIfSmi(rhs, &rhs_is_smi);

  // Lhs is a Smi. Check whether the rhs is a heap number.
  __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
  if (strict) {
    // If rhs is not a number and lhs is a Smi then strict equality cannot
    // succeed. Return non-equal.
    // If rhs is r0 then there is already a non-zero value in it.
    if (!rhs.is(r0)) {
      __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
    }
    __ Ret(ne);
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ b(ne, slow);
  }

  // Lhs is a smi, rhs is a number.
  // Convert lhs to a double in d7.
  __ SmiToDouble(d7, lhs);
  // Load the double from rhs, tagged HeapNumber r0, to d6.
  __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);

  // We now have both loaded as doubles but we can skip the lhs nan check
  // since it's a smi.
  __ jmp(lhs_not_nan);

  __ bind(&rhs_is_smi);
  // Rhs is a smi. Check whether the non-smi lhs is a heap number.
  __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
  if (strict) {
    // If lhs is not a number and rhs is a smi then strict equality cannot
    // succeed. Return non-equal.
    // If lhs is r0 then there is already a non-zero value in it.
    if (!lhs.is(r0)) {
      __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
    }
    __ Ret(ne);
  } else {
    // Smi compared non-strictly with a non-smi non-heap-number. Call
    // the runtime.
    __ b(ne, slow);
  }

  // Rhs is a smi, lhs is a heap number.
  // Load the double from lhs, tagged HeapNumber r1, to d7.
  __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
  // Convert rhs to a double in d6.
  __ SmiToDouble(d6, rhs);
  // Fall through to both_loaded_as_doubles.
}


// See comment at call site.
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs) {
  DCHECK((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  // If either operand is a JS object or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
  Label first_non_object;
  // Get the type of the first operand into r2 and compare it with
  // FIRST_JS_RECEIVER_TYPE.
  __ CompareObjectType(rhs, r2, r2, FIRST_JS_RECEIVER_TYPE);
  __ b(lt, &first_non_object);

  // Return non-zero (r0 is not zero)
  Label return_not_equal;
  __ bind(&return_not_equal);
  __ Ret();

  __ bind(&first_non_object);
  // Check for oddballs: true, false, null, undefined.
  __ cmp(r2, Operand(ODDBALL_TYPE));
  __ b(eq, &return_not_equal);

  __ CompareObjectType(lhs, r3, r3, FIRST_JS_RECEIVER_TYPE);
  __ b(ge, &return_not_equal);

  // Check for oddballs: true, false, null, undefined.
  __ cmp(r3, Operand(ODDBALL_TYPE));
  __ b(eq, &return_not_equal);

  // Now that we have the types we might as well check for
  // internalized-internalized.
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
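  // Since both tags are zero, or-ing the two instance types and testing the
  // combined mask answers "are both operands internalized strings?" with a
  // single tst.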
  __ orr(r2, r2, Operand(r3));
  __ tst(r2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  __ b(eq, &return_not_equal);
}


// See comment at call site.
static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
                                       Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers,
                                       Label* slow) {
  DCHECK((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
  __ b(ne, not_heap_numbers);
  __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ cmp(r2, r3);
  __ b(ne, slow);  // First was a heap number, second wasn't. Go slow case.

  // Both are heap numbers. Load them up then jump to the code we have
  // for that.
  __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
  __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
  __ jmp(both_loaded_as_doubles);
}


// Fast negative check for internalized-to-internalized equality or receiver
// equality. Also handles the undetectable receiver to null/undefined
// comparison.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                     Register lhs, Register rhs,
                                                     Label* possible_strings,
                                                     Label* runtime_call) {
  DCHECK((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  // r2 is object type of rhs.
  Label object_test, return_equal, return_unequal, undetectable;
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ tst(r2, Operand(kIsNotStringMask));
  __ b(ne, &object_test);
  __ tst(r2, Operand(kIsNotInternalizedMask));
  __ b(ne, possible_strings);
  __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
  __ b(ge, runtime_call);
  __ tst(r3, Operand(kIsNotInternalizedMask));
  __ b(ne, possible_strings);

  // Both are internalized. We already checked they weren't the same pointer so
  // they are not equal. Return non-equal by returning the non-zero object
  // pointer in r0.
  __ Ret();

  __ bind(&object_test);
  __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ ldrb(r4, FieldMemOperand(r2, Map::kBitFieldOffset));
  __ ldrb(r5, FieldMemOperand(r3, Map::kBitFieldOffset));
  __ tst(r4, Operand(1 << Map::kIsUndetectable));
  __ b(ne, &undetectable);
  __ tst(r5, Operand(1 << Map::kIsUndetectable));
  __ b(ne, &return_unequal);

  __ CompareInstanceType(r2, r2, FIRST_JS_RECEIVER_TYPE);
  __ b(lt, runtime_call);
  __ CompareInstanceType(r3, r3, FIRST_JS_RECEIVER_TYPE);
  __ b(lt, runtime_call);

  __ bind(&return_unequal);
  // Return non-equal by returning the non-zero object pointer in r0.
  __ Ret();

  __ bind(&undetectable);
  __ tst(r5, Operand(1 << Map::kIsUndetectable));
  __ b(eq, &return_unequal);

  // If both sides are JSReceivers, then the result is false according to
  // the HTML specification, which says that only comparisons with null or
  // undefined are affected by special casing for document.all.
  __ CompareInstanceType(r2, r2, ODDBALL_TYPE);
  __ b(eq, &return_equal);
  __ CompareInstanceType(r3, r3, ODDBALL_TYPE);
  __ b(ne, &return_unequal);

  __ bind(&return_equal);
  __ mov(r0, Operand(EQUAL));
  __ Ret();
}


static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
                                         Register scratch,
                                         CompareICState::State expected,
                                         Label* fail) {
  Label ok;
  if (expected == CompareICState::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareICState::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
                DONT_DO_SMI_CHECK);
  }
  // We could be strict about internalized/non-internalized here, but as long
  // as hydrogen doesn't care, the stub doesn't have to care either.
  __ bind(&ok);
}


// On entry r1 (lhs) and r0 (rhs) are the values to be compared.
// On exit r0 is 0, positive or negative to indicate the result of
// the comparison.
void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
  Register lhs = r1;
  Register rhs = r0;
  Condition cc = GetCondition();

  Label miss;
  CompareICStub_CheckInputType(masm, lhs, r2, left(), &miss);
  CompareICStub_CheckInputType(masm, rhs, r3, right(), &miss);

  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles, lhs_not_nan;

  Label not_two_smis, smi_done;
  __ orr(r2, r1, r0);
  __ JumpIfNotSmi(r2, &not_two_smis);
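  // Both operands are smis: ASR 1 untags, and the difference of two 31-bit
  // smi values cannot overflow 32 bits, so the sign of r0 directly encodes
  // less/equal/greater.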
  __ mov(r1, Operand(r1, ASR, 1));
  __ sub(r0, r1, Operand(r0, ASR, 1));
  __ Ret();
  __ bind(&not_two_smis);

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical. Either returns the answer
  // or goes to slow. Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, &slow, cc);

  // If either is a Smi (we know that not both are), then they can only
  // be strictly equal if the other is a HeapNumber.
  STATIC_ASSERT(kSmiTag == 0);
  DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
  __ and_(r2, lhs, Operand(rhs));
  __ JumpIfNotSmi(r2, &not_smis);
  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
  // 2) Go to slow.
  // 3) Fall through to both_loaded_as_doubles.
  // 4) Jump to lhs_not_nan.
  // In cases 3 and 4 we have found out we were dealing with a number-number
  // comparison. The double values of the numbers have been loaded into d7 (lhs)
  // and d6 (rhs).
  EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());

  __ bind(&both_loaded_as_doubles);
  // The arguments have been converted to doubles and stored in d6 and d7.
  __ bind(&lhs_not_nan);
  Label no_nan;
  __ VFPCompareAndSetFlags(d7, d6);
  Label nan;
  __ b(vs, &nan);
  __ mov(r0, Operand(EQUAL), LeaveCC, eq);
  __ mov(r0, Operand(LESS), LeaveCC, lt);
  __ mov(r0, Operand(GREATER), LeaveCC, gt);
  __ Ret();

  __ bind(&nan);
  // If one of the sides was a NaN then the v flag is set. Load r0 with
  // whatever it takes to make the comparison fail, since comparisons with NaN
  // always fail.
  if (cc == lt || cc == le) {
    __ mov(r0, Operand(GREATER));
  } else {
    __ mov(r0, Operand(LESS));
  }
  __ Ret();

  __ bind(&not_smis);
  // At this point we know we are dealing with two different objects,
  // and neither of them is a Smi. The objects are in rhs_ and lhs_.
  if (strict()) {
    // This returns non-equal for some object types, or falls through if it
    // was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
  }

  Label check_for_internalized_strings;
  Label flat_string_check;
  // Check for heap-number-heap-number comparison. Can jump to slow case,
  // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
  // that case. If the inputs are not doubles then jumps to
  // check_for_internalized_strings.
  // In this case r2 will contain the type of rhs_. Never falls through.
  EmitCheckForTwoHeapNumbers(masm,
                             lhs,
                             rhs,
                             &both_loaded_as_doubles,
                             &check_for_internalized_strings,
                             &flat_string_check);

  __ bind(&check_for_internalized_strings);
  // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
  // internalized strings.
  if (cc == eq && !strict()) {
    // Returns an answer for two internalized strings or two detectable objects.
    // Otherwise jumps to string case or not both strings case.
    // Assumes that r2 is the type of rhs_ on entry.
    EmitCheckForInternalizedStringsOrObjects(
        masm, lhs, rhs, &flat_string_check, &slow);
  }

  // Check for both being sequential one-byte strings,
  // and inline if that is the case.
  __ bind(&flat_string_check);

  __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, r2, r3, &slow);

  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r2,
                      r3);
  if (cc == eq) {
    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, r2, r3, r4);
  } else {
    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, r2, r3, r4,
                                                    r5);
  }
  // Never falls through to here.

  __ bind(&slow);

  if (cc == eq) {
    {
      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
      __ Push(lhs, rhs);
      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
    }
    // Turn true into 0 and false into some non-zero value.
    STATIC_ASSERT(EQUAL == 0);
    __ LoadRoot(r1, Heap::kTrueValueRootIndex);
    __ sub(r0, r0, r1);
    __ Ret();
  } else {
    __ Push(lhs, rhs);
    int ncr;  // NaN compare result
    if (cc == lt || cc == le) {
      ncr = GREATER;
    } else {
      DCHECK(cc == gt || cc == ge);  // remaining cases
      ncr = LESS;
    }
    __ mov(r0, Operand(Smi::FromInt(ncr)));
    __ push(r0);

    // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
    // tagged as a small integer.
    __ TailCallRuntime(Runtime::kCompare);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  __ stm(db_w, sp, kCallerSaved | lr.bit());

  const Register scratch = r1;

  if (save_doubles()) {
    __ SaveFPRegs(sp, scratch);
  }
  const int argument_count = 1;
  const int fp_argument_count = 0;

  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
  __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(isolate()),
      argument_count);
  if (save_doubles()) {
    __ RestoreFPRegs(sp, scratch);
  }
  __ ldm(ia_w, sp, kCallerSaved | pc.bit());  // Also pop pc to get Ret(0).
}

void MathPowStub::Generate(MacroAssembler* masm) {
  const Register exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent.is(r2));
  const LowDwVfpRegister double_base = d0;
  const LowDwVfpRegister double_exponent = d1;
  const LowDwVfpRegister double_result = d2;
  const LowDwVfpRegister double_scratch = d3;
  const SwVfpRegister single_scratch = s6;
  const Register scratch = r9;
  const Register scratch2 = r4;

  Label call_runtime, done, int_exponent;
  if (exponent_type() == TAGGED) {
    // Base is already in double_base.
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ vldr(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type() != INTEGER) {
    // Detect integer exponents stored as double.
    __ TryDoubleToInt32Exact(scratch, double_exponent, double_scratch);
    __ b(eq, &int_exponent);

    __ push(lr);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch);
      __ MovToFloatParameters(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()), 0, 2);
    }
    __ pop(lr);
    __ MovFromFloatResult(double_result);
    __ b(&done);
  }

  // Calculate power with integer exponent.
  __ bind(&int_exponent);

  // Get two copies of exponent in the registers scratch and exponent.
  if (exponent_type() == INTEGER) {
    __ mov(scratch, exponent);
  } else {
    // Exponent has previously been stored into scratch as untagged integer.
    __ mov(exponent, scratch);
  }
  __ vmov(double_scratch, double_base);  // Back up base.
  __ vmov(double_result, 1.0, scratch2);

  // Get absolute value of exponent.
  __ cmp(scratch, Operand::Zero());
  __ rsb(scratch, scratch, Operand::Zero(), LeaveCC, mi);

  Label while_true;
  __ bind(&while_true);
  __ mov(scratch, Operand(scratch, LSR, 1), SetCC);
  __ vmul(double_result, double_result, double_scratch, cs);
  __ vmul(double_scratch, double_scratch, double_scratch, ne);
  __ b(ne, &while_true);
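  // The loop above is square-and-multiply: each iteration shifts the exponent
  // right and the carry flag receives the bit shifted out, so double_result
  // accumulates a factor of double_scratch for every set bit while
  // double_scratch is repeatedly squared.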

  __ cmp(exponent, Operand::Zero());
  __ b(ge, &done);
  __ vmov(double_scratch, 1.0, scratch);
  __ vdiv(double_result, double_scratch, double_result);
  // Test whether result is zero. Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ VFPCompareAndSetFlags(double_result, 0.0);
  __ b(ne, &done);
  // double_exponent may not contain the exponent value if the input was a
  // smi. We set it to the exponent value before bailing out.
  __ vmov(single_scratch, exponent);
  __ vcvt_f64_s32(double_exponent, single_scratch);

  // Returning or bailing out.
  __ push(lr);
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ PrepareCallCFunction(0, 2, scratch);
    __ MovToFloatParameters(double_base, double_exponent);
    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
                     0, 2);
  }
  __ pop(lr);
  __ MovFromFloatResult(double_result);

  __ bind(&done);
  __ Ret();
}

bool CEntryStub::NeedsImmovableCode() {
  return true;
}


void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  CEntryStub::GenerateAheadOfTime(isolate);
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
  CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
  CreateWeakCellStub::GenerateAheadOfTime(isolate);
  BinaryOpICStub::GenerateAheadOfTime(isolate);
  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
  StoreFastElementStub::GenerateAheadOfTime(isolate);
}


void CodeStub::GenerateFPStubs(Isolate* isolate) {
  // Generate if not already in cache.
  SaveFPRegsMode mode = kSaveFPRegs;
  CEntryStub(isolate, 1, mode).GetCode();
  StoreBufferOverflowStub(isolate, mode).GetCode();
  isolate->set_fp_stubs_generated(true);
}


void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
  stub.GetCode();
}


void CEntryStub::Generate(MacroAssembler* masm) {
  // Called from JavaScript; parameters are on stack as if calling JS function.
  // r0: number of arguments including receiver
  // r1: pointer to builtin function
  // fp: frame pointer (restored after C call)
  // sp: stack pointer (restored as callee's sp after C call)
  // cp: current context (C callee-saved)
  //
  // If argv_in_register():
  // r2: pointer to the first argument
  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  __ mov(r5, Operand(r1));

  if (argv_in_register()) {
    // Move argv into the correct register.
    __ mov(r1, Operand(r2));
  } else {
    // Compute the argv pointer in a callee-saved register.
    __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
    __ sub(r1, r1, Operand(kPointerSize));
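    // argv = sp + argc * kPointerSize - kPointerSize, i.e. the address of the
    // first argument; the remaining arguments sit at lower addresses.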
  }

  // Enter the exit frame that transitions from JavaScript to C++.
  FrameScope scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(save_doubles(), 0, is_builtin_exit()
                                           ? StackFrame::BUILTIN_EXIT
                                           : StackFrame::EXIT);

  // Store a copy of argc in callee-saved registers for later.
  __ mov(r4, Operand(r0));

  // r0, r4: number of arguments including receiver (C callee-saved)
  // r1: pointer to the first argument (C callee-saved)
  // r5: pointer to builtin function (C callee-saved)

  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
#if V8_HOST_ARCH_ARM
  if (FLAG_debug_code) {
    if (frame_alignment > kPointerSize) {
      Label alignment_as_expected;
      DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
      __ tst(sp, Operand(frame_alignment_mask));
      __ b(eq, &alignment_as_expected);
      // Don't use Check here, as it will call Runtime_Abort re-entering here.
      __ stop("Unexpected alignment");
      __ bind(&alignment_as_expected);
    }
  }
#endif

  // Call C built-in.
  int result_stack_size;
  if (result_size() <= 2) {
    // r0 = argc, r1 = argv, r2 = isolate
    __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
    result_stack_size = 0;
  } else {
    DCHECK_EQ(3, result_size());
    // Allocate additional space for the result.
    result_stack_size =
        ((result_size() * kPointerSize) + frame_alignment_mask) &
        ~frame_alignment_mask;
    __ sub(sp, sp, Operand(result_stack_size));

    // r0 = hidden result argument, r1 = argc, r2 = argv, r3 = isolate.
    __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
    __ mov(r2, Operand(r1));
    __ mov(r1, Operand(r0));
    __ mov(r0, Operand(sp));
  }

  // To let the GC traverse the return address of the exit frames, we need to
  // know where the return address is. The CEntryStub is unmovable, so
  // we can store the address on the stack to be able to find it again and
  // we never have to restore it, because it will not change.
  // Compute the return address in lr to return to after the jump below. Pc is
  // already at '+ 8' from the current instruction but return is after three
  // instructions so add another 4 to pc to get the return address.
  {
    // Prevent literal pool emission before return address.
    Assembler::BlockConstPoolScope block_const_pool(masm);
    __ add(lr, pc, Operand(4));
    __ str(lr, MemOperand(sp, result_stack_size));
    __ Call(r5);
  }
  if (result_size() > 2) {
    DCHECK_EQ(3, result_size());
    // Read result values stored on stack.
    __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
    __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
    __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
  }
  // Result returned in r0, r1:r0 or r2:r1:r0 - do not destroy these registers!

  // Check result for exception sentinel.
  Label exception_returned;
  __ CompareRoot(r0, Heap::kExceptionRootIndex);
  __ b(eq, &exception_returned);

  // Check that there is no pending exception, otherwise we
  // should have returned the exception sentinel.
  if (FLAG_debug_code) {
    Label okay;
    ExternalReference pending_exception_address(
        Isolate::kPendingExceptionAddress, isolate());
    __ mov(r3, Operand(pending_exception_address));
    __ ldr(r3, MemOperand(r3));
    __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
    // Cannot use check here as it attempts to generate call into runtime.
    __ b(eq, &okay);
    __ stop("Unexpected pending exception");
    __ bind(&okay);
  }

  // Exit C frame and return.
  // r0:r1: result
  // sp: stack pointer
  // fp: frame pointer
  Register argc;
  if (argv_in_register()) {
    // We don't want to pop arguments so set argc to no_reg.
    argc = no_reg;
  } else {
    // Callee-saved register r4 still holds argc.
    argc = r4;
  }
  __ LeaveExitFrame(save_doubles(), argc, true);
  __ mov(pc, lr);

  // Handling of exception.
  __ bind(&exception_returned);

  ExternalReference pending_handler_context_address(
      Isolate::kPendingHandlerContextAddress, isolate());
  ExternalReference pending_handler_code_address(
      Isolate::kPendingHandlerCodeAddress, isolate());
  ExternalReference pending_handler_offset_address(
      Isolate::kPendingHandlerOffsetAddress, isolate());
  ExternalReference pending_handler_fp_address(
      Isolate::kPendingHandlerFPAddress, isolate());
  ExternalReference pending_handler_sp_address(
      Isolate::kPendingHandlerSPAddress, isolate());

  // Ask the runtime for help to determine the handler. This will set r0 to
  // contain the current pending exception, don't clobber it.
  ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
                                 isolate());
  {
    FrameScope scope(masm, StackFrame::MANUAL);
    __ PrepareCallCFunction(3, 0, r0);
    __ mov(r0, Operand(0));
    __ mov(r1, Operand(0));
    __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
    __ CallCFunction(find_handler, 3);
  }

  // Retrieve the handler context, SP and FP.
  __ mov(cp, Operand(pending_handler_context_address));
  __ ldr(cp, MemOperand(cp));
  __ mov(sp, Operand(pending_handler_sp_address));
  __ ldr(sp, MemOperand(sp));
  __ mov(fp, Operand(pending_handler_fp_address));
  __ ldr(fp, MemOperand(fp));

  // If the handler is a JS frame, restore the context to the frame. Note that
  // the context will be set to (cp == 0) for non-JS frames.
  __ cmp(cp, Operand(0));
  __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);

  // Compute the handler entry address and jump to it.
  ConstantPoolUnavailableScope constant_pool_unavailable(masm);
  __ mov(r1, Operand(pending_handler_code_address));
  __ ldr(r1, MemOperand(r1));
  __ mov(r2, Operand(pending_handler_offset_address));
  __ ldr(r2, MemOperand(r2));
  __ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start
  if (FLAG_enable_embedded_constant_pool) {
    __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r1);
  }
  __ add(pc, r1, r2);
}


void JSEntryStub::Generate(MacroAssembler* masm) {
  // r0: code entry
  // r1: function
  // r2: receiver
  // r3: argc
  // [sp+0]: argv

  Label invoke, handler_entry, exit;

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  // Called from C, so do not pop argc and args on exit (preserve sp)
  // No need to save register-passed args
  // Save callee-saved registers (incl. cp and fp), sp, and lr
  __ stm(db_w, sp, kCalleeSaved | lr.bit());

  // Save callee-saved vfp registers.
  __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
  // Set up the reserved register for 0.0.
  __ vmov(kDoubleRegZero, 0.0);

  // Get address of argv, see stm above.
  // r0: code entry
  // r1: function
  // r2: receiver
  // r3: argc

  // Set up argv in r4.
  int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
  offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
  __ ldr(r4, MemOperand(sp, offset_to_argv));

  // Push a frame with special values setup to mark it as an entry frame.
  // r0: code entry
  // r1: function
  // r2: receiver
  // r3: argc
  // r4: argv
  int marker = type();
  if (FLAG_enable_embedded_constant_pool) {
    __ mov(r8, Operand::Zero());
  }
  __ mov(r7, Operand(Smi::FromInt(marker)));
  __ mov(r6, Operand(Smi::FromInt(marker)));
  __ mov(r5,
         Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  __ ldr(r5, MemOperand(r5));
  __ mov(ip, Operand(-1));  // Push a bad frame pointer to fail if it is used.
  __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() |
                       (FLAG_enable_embedded_constant_pool ? r8.bit() : 0) |
                       ip.bit());

  // Set up frame pointer for the frame to be pushed.
  __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));

  // If this is the outermost JS call, set js_entry_sp value.
  Label non_outermost_js;
  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
  __ ldr(r6, MemOperand(r5));
  __ cmp(r6, Operand::Zero());
  __ b(ne, &non_outermost_js);
  __ str(fp, MemOperand(r5));
  __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
  Label cont;
  __ b(&cont);
  __ bind(&non_outermost_js);
  __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
  __ bind(&cont);
  __ push(ip);

  // Jump to a faked try block that does the invoke, with a faked catch
  // block that sets the pending exception.
  __ jmp(&invoke);

  // Block literal pool emission whilst taking the position of the handler
  // entry. This avoids making the assumption that literal pools are always
  // emitted after an instruction is emitted, rather than before.
  {
    Assembler::BlockConstPoolScope block_const_pool(masm);
    __ bind(&handler_entry);
    handler_offset_ = handler_entry.pos();
    // Caught exception: Store result (exception) in the pending exception
    // field in the JSEnv and return a failure sentinel. Coming in here the
    // fp will be invalid because the PushStackHandler below sets it to 0 to
    // signal the existence of the JSEntry frame.
    __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                         isolate())));
  }
  __ str(r0, MemOperand(ip));
  __ LoadRoot(r0, Heap::kExceptionRootIndex);
  __ b(&exit);

  // Invoke: Link this frame into the handler chain.
  __ bind(&invoke);
  // Must preserve r0-r4, r5-r6 are available.
  __ PushStackHandler();
  // If an exception not caught by another handler occurs, this handler
  // returns control to the code after the bl(&invoke) above, which
  // restores all kCalleeSaved registers (including cp and fp) to their
  // saved values before returning a failure to C.

  // Invoke the function by calling through JS entry trampoline builtin.
  // Notice that we cannot store a reference to the trampoline code directly in
  // this stub, because runtime stubs are not traversed when doing GC.

  // Expected registers by Builtins::JSEntryTrampoline
  // r0: code entry
  // r1: function
  // r2: receiver
  // r3: argc
  // r4: argv
  if (type() == StackFrame::ENTRY_CONSTRUCT) {
    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
                                      isolate());
    __ mov(ip, Operand(construct_entry));
  } else {
    ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
    __ mov(ip, Operand(entry));
  }
  __ ldr(ip, MemOperand(ip));  // deref address
  __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));

  // Branch and link to JSEntryTrampoline.
  __ Call(ip);

  // Unlink this frame from the handler chain.
  __ PopStackHandler();

  __ bind(&exit);  // r0 holds result
  // Check if the current stack frame is marked as the outermost JS frame.
  Label non_outermost_js_2;
  __ pop(r5);
  __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
  __ b(ne, &non_outermost_js_2);
  __ mov(r6, Operand::Zero());
  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
  __ str(r6, MemOperand(r5));
  __ bind(&non_outermost_js_2);

  // Restore the top frame descriptors from the stack.
  __ pop(r3);
  __ mov(ip,
         Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  __ str(r3, MemOperand(ip));

  // Reset the stack to the callee saved registers.
  __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));

  // Restore callee-saved registers and return.
#ifdef DEBUG
  if (FLAG_debug_code) {
    __ mov(lr, Operand(pc));
  }
#endif

  // Restore callee-saved vfp registers.
  __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);

  __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
}


void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
  Label miss;
  Register receiver = LoadDescriptor::ReceiverRegister();
  // Ensure that the vector and slot registers won't be clobbered before
  // calling the miss handler.
  DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::VectorRegister(),
                     LoadWithVectorDescriptor::SlotRegister()));

  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r4,
                                                          r5, &miss);
  __ bind(&miss);
  PropertyAccessCompiler::TailCallBuiltin(
      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
}


void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
  // Return address is in lr.
  Label miss;

  Register receiver = LoadDescriptor::ReceiverRegister();
  Register index = LoadDescriptor::NameRegister();
  Register scratch = r5;
  Register result = r0;
  DCHECK(!scratch.is(receiver) && !scratch.is(index));
  DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
         result.is(LoadWithVectorDescriptor::SlotRegister()));

  // StringCharAtGenerator doesn't use the result register until it's passed
  // the different miss possibilities. If it did, we would have a conflict
  // when FLAG_vector_ics is true.
  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          RECEIVER_IS_STRING);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);

  __ bind(&miss);
  PropertyAccessCompiler::TailCallBuiltin(
      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
}


void RegExpExecStub::Generate(MacroAssembler* masm) {
  // Jump straight to the runtime if native RegExp is not selected at compile
  // time, or if the regexp entry in generated code has been turned off by the
  // runtime switch.
#ifdef V8_INTERPRETED_REGEXP
  __ TailCallRuntime(Runtime::kRegExpExec);
#else  // V8_INTERPRETED_REGEXP

  // Stack frame on entry.
  //  sp[0]: last_match_info (expected JSArray)
  //  sp[4]: previous index
  //  sp[8]: subject string
  //  sp[12]: JSRegExp object

  const int kLastMatchInfoOffset = 0 * kPointerSize;
  const int kPreviousIndexOffset = 1 * kPointerSize;
  const int kSubjectOffset = 2 * kPointerSize;
  const int kJSRegExpOffset = 3 * kPointerSize;

  Label runtime;
  // Allocation of registers for this function. These are in callee save
  // registers and will be preserved by the call to the native RegExp code, as
  // this code is called using the normal C calling convention. When calling
  // directly from generated code the native RegExp code will not do a GC and
  // therefore the contents of these registers are safe to use after the call.
  Register subject = r4;
  Register regexp_data = r5;
  Register last_match_info_elements = no_reg;  // will be r6;

  // Ensure that a RegExp stack is allocated.
  ExternalReference address_of_regexp_stack_memory_address =
      ExternalReference::address_of_regexp_stack_memory_address(isolate());
  ExternalReference address_of_regexp_stack_memory_size =
      ExternalReference::address_of_regexp_stack_memory_size(isolate());
  __ mov(r0, Operand(address_of_regexp_stack_memory_size));
  __ ldr(r0, MemOperand(r0, 0));
  __ cmp(r0, Operand::Zero());
  __ b(eq, &runtime);

  // Check that the first argument is a JSRegExp object.
  __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
  __ JumpIfSmi(r0, &runtime);
  __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
  __ b(ne, &runtime);

  // Check that the RegExp has been compiled (data contains a fixed array).
  __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
  if (FLAG_debug_code) {
    __ SmiTst(regexp_data);
    __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
    __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
    __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
  }

  // regexp_data: RegExp data (FixedArray)
  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
  __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
  __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
  __ b(ne, &runtime);

  // regexp_data: RegExp data (FixedArray)
  // Check that the number of captures fit in the static offsets vector buffer.
  __ ldr(r2,
         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
  // Check (number_of_captures + 1) * 2 <= offsets vector size
  // Or    number_of_captures * 2 <= offsets vector size - 2
  // Multiplying by 2 comes for free since r2 is smi-tagged.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
  __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
  __ b(hi, &runtime);

  // Reset offset for possibly sliced string.
  __ mov(r9, Operand::Zero());
  __ ldr(subject, MemOperand(sp, kSubjectOffset));
  __ JumpIfSmi(subject, &runtime);
  __ mov(r3, subject);  // Make a copy of the original subject string.
  // subject: subject string
  // r3: subject string
  // regexp_data: RegExp data (FixedArray)
  // Handle subject string according to its encoding and representation:
  // (1) Sequential string?  If yes, go to (4).
  // (2) Sequential or cons?  If not, go to (5).
  // (3) Cons string.  If the string is flat, replace subject with first string
  //     and go to (1). Otherwise bail out to runtime.
  // (4) Sequential string.  Load regexp code according to encoding.
  // (E) Carry on.
  /// [...]

  // Deferred code at the end of the stub:
  // (5) Long external string?  If not, go to (7).
  // (6) External string.  Make it, offset-wise, look like a sequential string.
  //     Go to (4).
  // (7) Short external string or not a string?  If yes, bail out to runtime.
  // (8) Sliced string.  Replace subject with parent.  Go to (1).

  Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
      not_seq_nor_cons /* 5 */, not_long_external /* 7 */;

  __ bind(&check_underlying);
  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));

  // (1) Sequential string?  If yes, go to (4).
  __ and_(r1,
          r0,
          Operand(kIsNotStringMask |
                  kStringRepresentationMask |
                  kShortExternalStringMask),
          SetCC);
  STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
  __ b(eq, &seq_string);  // Go to (4).

  // (2) Sequential or cons?  If not, go to (5).
  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
  __ cmp(r1, Operand(kExternalStringTag));
  __ b(ge, &not_seq_nor_cons);  // Go to (5).

  // (3) Cons string.  Check that it's flat.
  // Replace subject with first string and reload instance type.
  __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
  __ CompareRoot(r0, Heap::kempty_stringRootIndex);
  __ b(ne, &runtime);
  __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
  __ jmp(&check_underlying);

  // (4) Sequential string.  Load regexp code according to encoding.
  __ bind(&seq_string);
  // subject: sequential subject string (or look-alike, external string)
  // r3: original subject string
  // Load previous index and check range before r3 is overwritten.  We have to
  // use r3 instead of subject here because subject might have been only made
  // to look like a sequential string when it actually is an external string.
  __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
  __ JumpIfNotSmi(r1, &runtime);
  __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset));
  __ cmp(r3, Operand(r1));
  __ b(ls, &runtime);
  __ SmiUntag(r1);

  STATIC_ASSERT(4 == kOneByteStringTag);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ and_(r0, r0, Operand(kStringEncodingMask));
  __ mov(r3, Operand(r0, ASR, 2), SetCC);
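  // kStringEncodingMask isolates the encoding bit (4 for one-byte), so ASR 2
  // leaves 1 for one-byte and 0 for two-byte strings; the SetCC flags then
  // select the matching code object in the two conditional loads below.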
  __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset),
         ne);
  __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);

  // (E) Carry on.  String handling is done.
  // r6: irregexp code
  // Check that the irregexp code has been generated for the actual string
  // encoding. If it has, the field contains a code object; otherwise it
  // contains a smi (code flushing support).
  __ JumpIfSmi(r6, &runtime);

  // r1: previous index
  // r3: encoding of subject string (1 if one_byte, 0 if two_byte);
  // r6: code
  // subject: Subject string
  // regexp_data: RegExp data (FixedArray)
  // All checks done. Now push arguments for native regexp code.
  __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r0, r2);

  // Isolates: note we add an additional parameter here (isolate pointer).
  const int kRegExpExecuteArguments = 9;
  const int kParameterRegisters = 4;
  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);

  // Stack pointer now points to cell where return address is to be written.
  // Arguments are before that on the stack or in registers.

  // Argument 9 (sp[20]): Pass current isolate address.
  __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
  __ str(r0, MemOperand(sp, 5 * kPointerSize));

  // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
  __ mov(r0, Operand(1));
  __ str(r0, MemOperand(sp, 4 * kPointerSize));

  // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
  __ mov(r0, Operand(address_of_regexp_stack_memory_address));
  __ ldr(r0, MemOperand(r0, 0));
  __ mov(r2, Operand(address_of_regexp_stack_memory_size));
  __ ldr(r2, MemOperand(r2, 0));
  __ add(r0, r0, Operand(r2));
  __ str(r0, MemOperand(sp, 3 * kPointerSize));

  // Argument 6: Set the number of capture registers to zero to force global
  // regexps to behave as non-global. This does not affect non-global regexps.
  __ mov(r0, Operand::Zero());
  __ str(r0, MemOperand(sp, 2 * kPointerSize));

  // Argument 5 (sp[4]): static offsets vector buffer.
  __ mov(r0,
         Operand(ExternalReference::address_of_static_offsets_vector(
             isolate())));
  __ str(r0, MemOperand(sp, 1 * kPointerSize));
  // For arguments 4 and 3, get the string length, calculate the start of the
  // string data, and calculate the shift of the index (0 for one-byte and 1
  // for two-byte).
  __ add(r7, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
  __ eor(r3, r3, Operand(1));
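  // r3 now holds the character-size shift: the encoding flag was flipped, so
  // r3 is 0 for one-byte and 1 for two-byte, and (index << r3) converts
  // character indices into byte offsets.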
  // Load the length from the original subject string from the previous stack
  // frame. Therefore we have to use fp, which points exactly to two pointer
  // sizes below the previous sp. (Because creating a new stack frame pushes
  // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
  __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
  // If slice offset is not 0, load the length from the original sliced string.
  // Argument 4, r3: End of string data
  // Argument 3, r2: Start of string data
  // Prepare start and end index of the input.
  __ add(r9, r7, Operand(r9, LSL, r3));
  __ add(r2, r9, Operand(r1, LSL, r3));

  __ ldr(r7, FieldMemOperand(subject, String::kLengthOffset));
  __ SmiUntag(r7);
  __ add(r3, r9, Operand(r7, LSL, r3));

  // Argument 2 (r1): Previous index.
  // Already there

  // Argument 1 (r0): Subject string.
  __ mov(r0, subject);

  // Locate the code entry and call it.
  __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
  DirectCEntryStub stub(isolate());
  stub.GenerateCall(masm, r6);

  __ LeaveExitFrame(false, no_reg, true);

  last_match_info_elements = r6;

  // r0: result
  // subject: subject string (callee saved)
  // regexp_data: RegExp data (callee saved)
  // last_match_info_elements: Last match info elements (callee saved)
  // Check the result.
  Label success;
  __ cmp(r0, Operand(1));
  // We expect exactly one result since we force the called regexp to behave
  // as non-global.
  __ b(eq, &success);
  Label failure;
  __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
  __ b(eq, &failure);
  __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
  // If not exception it can only be retry. Handle that in the runtime system.
  __ b(ne, &runtime);
1467 // The result must now be exception. If there is no pending exception
1468 // already, a stack overflow (on the backtrack stack) was detected in RegExp
1469 // code but the exception has not been created yet. Handle that in the runtime system.
1470 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
1471 __ mov(r1, Operand(isolate()->factory()->the_hole_value()));
1472 __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1473 isolate())));
1474 __ ldr(r0, MemOperand(r2, 0));
1475 __ cmp(r0, r1);
1476 __ b(eq, &runtime);
1477
1478 // For exception, throw the exception again.
1479 __ TailCallRuntime(Runtime::kRegExpExecReThrow);
1480
1481 __ bind(&failure);
1482 // For failure and exception return null.
1483 __ mov(r0, Operand(isolate()->factory()->null_value()));
1484 __ add(sp, sp, Operand(4 * kPointerSize));
1485 __ Ret();
1486
1487 // Process the result from the native regexp code.
1488 __ bind(&success);
1489 __ ldr(r1,
1490 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
1491 // Calculate number of capture registers (number_of_captures + 1) * 2.
1492 // Multiplying by 2 comes for free since r1 is smi-tagged.
1493 STATIC_ASSERT(kSmiTag == 0);
1494 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
1495 __ add(r1, r1, Operand(2)); // r1 was a smi.
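// Editor's worked example: r1 arrived smi-tagged, i.e. already holding
// number_of_captures * 2. With 3 captures, r1 holds 6, and the add above
// yields 8 = (3 + 1) * 2 capture registers.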
1496
1497 // Check that the last match info is a FixedArray.
1498 __ ldr(last_match_info_elements, MemOperand(sp, kLastMatchInfoOffset));
1499 __ JumpIfSmi(last_match_info_elements, &runtime);
1500 // Check that the object has fast elements.
1501 __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
1502 __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
1503 __ b(ne, &runtime);
1504 // Check that the last match info has space for the capture registers and the
1505 // additional information.
1506 __ ldr(r0,
1507 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
1508 __ add(r2, r1, Operand(RegExpMatchInfo::kLastMatchOverhead));
1509 __ cmp(r2, Operand::SmiUntag(r0));
1510 __ b(gt, &runtime);
1511
1512 // r1: number of capture registers
1513 // r4: subject string
1514 // Store the capture count.
1515 __ SmiTag(r2, r1);
1516 __ str(r2, FieldMemOperand(last_match_info_elements,
1517 RegExpMatchInfo::kNumberOfCapturesOffset));
1518 // Store last subject and last input.
1519 __ str(subject, FieldMemOperand(last_match_info_elements,
1520 RegExpMatchInfo::kLastSubjectOffset));
1521 __ mov(r2, subject);
1522 __ RecordWriteField(last_match_info_elements,
1523 RegExpMatchInfo::kLastSubjectOffset, subject, r3,
1524 kLRHasNotBeenSaved, kDontSaveFPRegs);
1525 __ mov(subject, r2);
1526 __ str(subject, FieldMemOperand(last_match_info_elements,
1527 RegExpMatchInfo::kLastInputOffset));
1528 __ RecordWriteField(last_match_info_elements,
1529 RegExpMatchInfo::kLastInputOffset, subject, r3,
1530 kLRHasNotBeenSaved, kDontSaveFPRegs);
1531
1532 // Get the static offsets vector filled by the native regexp code.
1533 ExternalReference address_of_static_offsets_vector =
1534 ExternalReference::address_of_static_offsets_vector(isolate());
1535 __ mov(r2, Operand(address_of_static_offsets_vector));
1536
1537 // r1: number of capture registers
1538 // r2: offsets vector
1539 Label next_capture, done;
1540 // Capture register counter starts from number of capture registers and
1541 // counts down until wrapping after zero.
1542 __ add(r0, last_match_info_elements,
1543 Operand(RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag));
1544 __ bind(&next_capture);
1545 __ sub(r1, r1, Operand(1), SetCC);
1546 __ b(mi, &done);
1547 // Read the value from the static offsets vector buffer.
1548 __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
1549 // Store the smi value in the last match info.
1550 __ SmiTag(r3);
1551 __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
1552 __ jmp(&next_capture);
1553 __ bind(&done);
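// A rough C sketch of the copy loop above (editor's illustration; the names
// are made up):
//
//   while (remaining_registers-- > 0) {
//     *match_info_slot++ = Smi::FromInt(*offsets_vector_entry++);
//   }
//
// Both pointers advance by one word per iteration via the PostIndex modes.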
1554
1555 // Return last match info.
1556 __ mov(r0, last_match_info_elements);
1557 __ add(sp, sp, Operand(4 * kPointerSize));
1558 __ Ret();
1559
1560 // Do the runtime call to execute the regexp.
1561 __ bind(&runtime);
1562 __ TailCallRuntime(Runtime::kRegExpExec);
1563
1564 // Deferred code for string handling.
1565 // (5) Long external string? If not, go to (7).
1566 __ bind(&not_seq_nor_cons);
1567 // Compare flags are still set.
1568 __ b(gt, &not_long_external); // Go to (7).
1569
1570 // (6) External string. Make it, offset-wise, look like a sequential string.
1571 __ bind(&external_string);
1572 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
1573 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
1574 if (FLAG_debug_code) {
1575 // Assert that we do not have a cons or slice (indirect strings) here.
1576 // Sequential strings have already been ruled out.
1577 __ tst(r0, Operand(kIsIndirectStringMask));
1578 __ Assert(eq, kExternalStringExpectedButNotFound);
1579 }
1580 __ ldr(subject,
1581 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
1582 // Move the pointer so that offset-wise, it looks like a sequential string.
1583 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
1584 __ sub(subject,
1585 subject,
1586 Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
1587 __ jmp(&seq_string); // Go to (4).
1588
1589 // (7) Short external string or not a string? If yes, bail out to runtime.
1590 __ bind(&not_long_external);
1591 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
1592 __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
1593 __ b(ne, &runtime);
1594
1595 // (8) Sliced string. Replace subject with parent. Go to (4).
1596 // Load offset into r9 and replace subject string with parent.
1597 __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
1598 __ SmiUntag(r9);
1599 __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
1600 __ jmp(&check_underlying); // Go to (4).
1601 #endif // V8_INTERPRETED_REGEXP
1602 }
1603
1604
1605 static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
1606 // r0 : number of arguments to the construct function
1607 // r1 : the function to call
1608 // r2 : feedback vector
1609 // r3 : slot in feedback vector (Smi)
1610 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
1611
1612 // Number-of-arguments register must be smi-tagged to call out.
1613 __ SmiTag(r0);
1614 __ Push(r3, r2, r1, r0);
1615 __ Push(cp);
1616
1617 __ CallStub(stub);
1618
1619 __ Pop(cp);
1620 __ Pop(r3, r2, r1, r0);
1621 __ SmiUntag(r0);
1622 }
1623
1624
1625 static void GenerateRecordCallTarget(MacroAssembler* masm) {
1626 // Cache the called function in a feedback vector slot. Cache states
1627 // are uninitialized, monomorphic (indicated by a JSFunction), and
1628 // megamorphic.
1629 // r0 : number of arguments to the construct function
1630 // r1 : the function to call
1631 // r2 : feedback vector
1632 // r3 : slot in feedback vector (Smi)
1633 Label initialize, done, miss, megamorphic, not_array_function;
1634
1635 DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
1636 masm->isolate()->heap()->megamorphic_symbol());
1637 DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
1638 masm->isolate()->heap()->uninitialized_symbol());
1639
1640 // Load the cache state into r5.
1641 __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
1642 __ ldr(r5, FieldMemOperand(r5, FixedArray::kHeaderSize));
1643
1644 // A monomorphic cache hit or an already megamorphic state: invoke the
1645 // function without changing the state.
1646 // We don't know if r5 is a WeakCell or a Symbol, but it's harmless to read at
1647 // this position in a symbol (see static asserts in type-feedback-vector.h).
1648 Label check_allocation_site;
1649 Register feedback_map = r6;
1650 Register weak_value = r9;
1651 __ ldr(weak_value, FieldMemOperand(r5, WeakCell::kValueOffset));
1652 __ cmp(r1, weak_value);
1653 __ b(eq, &done);
1654 __ CompareRoot(r5, Heap::kmegamorphic_symbolRootIndex);
1655 __ b(eq, &done);
1656 __ ldr(feedback_map, FieldMemOperand(r5, HeapObject::kMapOffset));
1657 __ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
1658 __ b(ne, &check_allocation_site);
1659
1660 // If the weak cell is cleared, we have a new chance to become monomorphic.
1661 __ JumpIfSmi(weak_value, &initialize);
1662 __ jmp(&megamorphic);
1663
1664 __ bind(&check_allocation_site);
1665 // If we came here, we need to see if we are the array function.
1666 // If we didn't have a matching function and we didn't find the megamorphic
1667 // sentinel, then the slot holds either some other function or an
1668 // AllocationSite.
1669 __ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
1670 __ b(ne, &miss);
1671
1672 // Make sure the function is the Array() function
1673 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
1674 __ cmp(r1, r5);
1675 __ b(ne, &megamorphic);
1676 __ jmp(&done);
1677
1678 __ bind(&miss);
1679
1680 // A monomorphic miss (i.e., here the cache is not uninitialized) goes
1681 // megamorphic.
1682 __ CompareRoot(r5, Heap::kuninitialized_symbolRootIndex);
1683 __ b(eq, &initialize);
1684 // MegamorphicSentinel is an immortal immovable object (undefined) so no
1685 // write-barrier is needed.
1686 __ bind(&megamorphic);
1687 __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
1688 __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
1689 __ str(ip, FieldMemOperand(r5, FixedArray::kHeaderSize));
1690 __ jmp(&done);
1691
1692 // An uninitialized cache is patched with the function.
1693 __ bind(&initialize);
1694
1695 // Make sure the function is the Array() function
1696 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
1697 __ cmp(r1, r5);
1698 __ b(ne, &not_array_function);
1699
1700 // The target function is the Array constructor.
1701 // Create an AllocationSite if we don't already have one, and store it in
1702 // the slot.
1703 CreateAllocationSiteStub create_stub(masm->isolate());
1704 CallStubInRecordCallTarget(masm, &create_stub);
1705 __ b(&done);
1706
1707 __ bind(&not_array_function);
1708 CreateWeakCellStub weak_cell_stub(masm->isolate());
1709 CallStubInRecordCallTarget(masm, &weak_cell_stub);
1710
1711 __ bind(&done);
1712
1713 // Increment the call count for all function calls.
1714 __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
1715 __ add(r5, r5, Operand(FixedArray::kHeaderSize + kPointerSize));
1716 __ ldr(r4, FieldMemOperand(r5, 0));
1717 __ add(r4, r4, Operand(Smi::FromInt(1)));
1718 __ str(r4, FieldMemOperand(r5, 0));
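// Editor's note on the layout assumed here: vector[slot] holds the cache
// state and vector[slot + 1] holds the call count as a smi. That is why the
// extra kPointerSize is added to the offset and why Smi::FromInt(1) can be
// added without untagging.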
1719 }
1720
1721 void CallConstructStub::Generate(MacroAssembler* masm) {
1722 // r0 : number of arguments
1723 // r1 : the function to call
1724 // r2 : feedback vector
1725 // r3 : slot in feedback vector (Smi, for RecordCallTarget)
1726
1727 Label non_function;
1728 // Check that the function is not a smi.
1729 __ JumpIfSmi(r1, &non_function);
1730 // Check that the function is a JSFunction.
1731 __ CompareObjectType(r1, r5, r5, JS_FUNCTION_TYPE);
1732 __ b(ne, &non_function);
1733
1734 GenerateRecordCallTarget(masm);
1735
1736 __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
1737 Label feedback_register_initialized;
1738 // Put the AllocationSite from the feedback vector into r2, or undefined.
1739 __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize));
1740 __ ldr(r5, FieldMemOperand(r2, AllocationSite::kMapOffset));
1741 __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
1742 __ b(eq, &feedback_register_initialized);
1743 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
1744 __ bind(&feedback_register_initialized);
1745
1746 __ AssertUndefinedOrAllocationSite(r2, r5);
1747
1748 // Pass function as new target.
1749 __ mov(r3, r1);
1750
1751 // Tail call to the function-specific construct stub (still in the caller
1752 // context at this point).
1753 __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
1754 __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kConstructStubOffset));
1755 __ add(pc, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
1756
1757 __ bind(&non_function);
1758 __ mov(r3, r1);
1759 __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
1760 }
1761
1762 // Note: feedback_vector and slot are clobbered after the call.
1763 static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
1764 Register slot) {
1765 __ add(feedback_vector, feedback_vector,
1766 Operand::PointerOffsetFromSmiKey(slot));
1767 __ add(feedback_vector, feedback_vector,
1768 Operand(FixedArray::kHeaderSize + kPointerSize));
1769 __ ldr(slot, FieldMemOperand(feedback_vector, 0));
1770 __ add(slot, slot, Operand(Smi::FromInt(1)));
1771 __ str(slot, FieldMemOperand(feedback_vector, 0));
1772 }
1773
1774 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
1775 // r0 - number of arguments
1776 // r1 - function
1777 // r3 - slot id
1778 // r2 - vector
1779 // r4 - allocation site (loaded from vector[slot])
1780 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
1781 __ cmp(r1, r5);
1782 __ b(ne, miss);
1783
1784 // Increment the call count for monomorphic function calls.
1785 IncrementCallCount(masm, r2, r3);
1786
1787 __ mov(r2, r4);
1788 __ mov(r3, r1);
1789 ArrayConstructorStub stub(masm->isolate());
1790 __ TailCallStub(&stub);
1791 }
1792
1793
1794 void CallICStub::Generate(MacroAssembler* masm) {
1795 // r0 - number of arguments
1796 // r1 - function
1797 // r3 - slot id (Smi)
1798 // r2 - vector
1799 Label extra_checks_or_miss, call, call_function, call_count_incremented;
1800
1801 // The checks. First, does r1 match the recorded monomorphic target?
1802 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
1803 __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
1804
1805 // We don't know that we have a weak cell. We might have a private symbol
1806 // or an AllocationSite, but the memory is safe to examine.
1807 // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
1808 // FixedArray.
1809 // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
1810 // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
1811 // computed, meaning that it can't appear to be a pointer. If the low bit is
1812 // 0, then hash is computed, but the 0 bit prevents the field from appearing
1813 // to be a pointer.
1814 STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
1815 STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
1816 WeakCell::kValueOffset &&
1817 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
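// Editor's illustration of why the single load below is safe: by the
// asserts above, the three candidate types overlap at one offset, so the
// load reads
//
//   WeakCell       -> its value (a JSFunction, or Smi(0) when cleared),
//   AllocationSite -> its transition info (a smi or a FixedArray),
//   Symbol         -> its hash field (never a valid object pointer),
//
// and only a genuine WeakCell holding r1 can compare equal below.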
1818
1819 __ ldr(r5, FieldMemOperand(r4, WeakCell::kValueOffset));
1820 __ cmp(r1, r5);
1821 __ b(ne, &extra_checks_or_miss);
1822
1823 // The compare above could have been a SMI/SMI comparison. Guard against this
1824 // convincing us that we have a monomorphic JSFunction.
1825 __ JumpIfSmi(r1, &extra_checks_or_miss);
1826
1827 __ bind(&call_function);
1828
1829 // Increment the call count for monomorphic function calls.
1830 IncrementCallCount(masm, r2, r3);
1831
1832 __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
1833 tail_call_mode()),
1834 RelocInfo::CODE_TARGET);
1835
1836 __ bind(&extra_checks_or_miss);
1837 Label uninitialized, miss, not_allocation_site;
1838
1839 __ CompareRoot(r4, Heap::kmegamorphic_symbolRootIndex);
1840 __ b(eq, &call);
1841
1842 // Verify that r4 contains an AllocationSite
1843 __ ldr(r5, FieldMemOperand(r4, HeapObject::kMapOffset));
1844 __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
1845 __ b(ne, &not_allocation_site);
1846
1847 // We have an allocation site.
1848 HandleArrayCase(masm, &miss);
1849
1850 __ bind(&not_allocation_site);
1851
1852 // The following cases attempt to handle MISS cases without going to the
1853 // runtime.
1854 if (FLAG_trace_ic) {
1855 __ jmp(&miss);
1856 }
1857
1858 __ CompareRoot(r4, Heap::kuninitialized_symbolRootIndex);
1859 __ b(eq, &uninitialized);
1860
1861 // We are going megamorphic. If the feedback is a JSFunction, it is fine
1862 // to handle it here. More complex cases are dealt with in the runtime.
1863 __ AssertNotSmi(r4);
1864 __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
1865 __ b(ne, &miss);
1866 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
1867 __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
1868 __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
1869
1870 __ bind(&call);
1871
1872 // Increment the call count for megamorphic function calls.
1873 IncrementCallCount(masm, r2, r3);
1874
1875 __ bind(&call_count_incremented);
1876 __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
1877 RelocInfo::CODE_TARGET);
1878
1879 __ bind(&uninitialized);
1880
1881 // We are going monomorphic, provided we actually have a JSFunction.
1882 __ JumpIfSmi(r1, &miss);
1883
1884 // Go to the miss case if we do not have a function.
1885 __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
1886 __ b(ne, &miss);
1887
1888 // Make sure the function is not the Array() function, which requires special
1889 // behavior on MISS.
1890 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r4);
1891 __ cmp(r1, r4);
1892 __ b(eq, &miss);
1893
1894 // Make sure the function belongs to the same native context.
1895 __ ldr(r4, FieldMemOperand(r1, JSFunction::kContextOffset));
1896 __ ldr(r4, ContextMemOperand(r4, Context::NATIVE_CONTEXT_INDEX));
1897 __ ldr(ip, NativeContextMemOperand());
1898 __ cmp(r4, ip);
1899 __ b(ne, &miss);
1900
1901 // Store the function. Use a stub since we need a frame for allocation.
1902 // r2 - vector
1903 // r3 - slot
1904 // r1 - function
1905 {
1906 FrameScope scope(masm, StackFrame::INTERNAL);
1907 CreateWeakCellStub create_stub(masm->isolate());
1908 __ SmiTag(r0);
1909 __ Push(r0, r2, r3, cp, r1);
1910 __ CallStub(&create_stub);
1911 __ Pop(r2, r3, cp, r1);
1912 __ Pop(r0);
1913 __ SmiUntag(r0);
1914 }
1915
1916 __ jmp(&call_function);
1917
1918 // We are here because tracing is on or we encountered a MISS case we can't
1919 // handle here.
1920 __ bind(&miss);
1921 GenerateMiss(masm);
1922
1923 __ jmp(&call_count_incremented);
1924 }
1925
1926
1927 void CallICStub::GenerateMiss(MacroAssembler* masm) {
1928 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
1929
1930 // Preserve the number of arguments as Smi.
1931 __ SmiTag(r0);
1932
1933 // Push the number of arguments, the function, and the feedback info.
1934 __ Push(r0, r1, r2, r3);
1935
1936 // Call the entry.
1937 __ CallRuntime(Runtime::kCallIC_Miss);
1938
1939 // Move the result to r1 and exit the internal frame.
1940 __ mov(r1, r0);
1941
1942 // Restore number of arguments.
1943 __ Pop(r0);
1944 __ SmiUntag(r0);
1945 }
1946
1947
1948 // StringCharCodeAtGenerator
1949 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
1950 // If the receiver is a smi trigger the non-string case.
1951 if (check_mode_ == RECEIVER_IS_UNKNOWN) {
1952 __ JumpIfSmi(object_, receiver_not_string_);
1953
1954 // Fetch the instance type of the receiver into result register.
1955 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
1956 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
1957 // If the receiver is not a string trigger the non-string case.
1958 __ tst(result_, Operand(kIsNotStringMask));
1959 __ b(ne, receiver_not_string_);
1960 }
1961
1962 // If the index is non-smi trigger the non-smi case.
1963 __ JumpIfNotSmi(index_, &index_not_smi_);
1964 __ bind(&got_smi_index_);
1965
1966 // Check for index out of range.
1967 __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
1968 __ cmp(ip, Operand(index_));
1969 __ b(ls, index_out_of_range_);
1970
1971 __ SmiUntag(index_);
1972
1973 StringCharLoadGenerator::Generate(masm,
1974 object_,
1975 index_,
1976 result_,
1977 &call_runtime_);
1978
1979 __ SmiTag(result_);
1980 __ bind(&exit_);
1981 }
1982
1983
1984 void StringCharCodeAtGenerator::GenerateSlow(
1985 MacroAssembler* masm, EmbedMode embed_mode,
1986 const RuntimeCallHelper& call_helper) {
1987 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
1988
1989 // Index is not a smi.
1990 __ bind(&index_not_smi_);
1991 // If index is a heap number, try converting it to an integer.
1992 __ CheckMap(index_,
1993 result_,
1994 Heap::kHeapNumberMapRootIndex,
1995 index_not_number_,
1996 DONT_DO_SMI_CHECK);
1997 call_helper.BeforeCall(masm);
1998 if (embed_mode == PART_OF_IC_HANDLER) {
1999 __ Push(LoadWithVectorDescriptor::VectorRegister(),
2000 LoadWithVectorDescriptor::SlotRegister(), object_, index_);
2001 } else {
2002 // index_ is consumed by runtime conversion function.
2003 __ Push(object_, index_);
2004 }
2005 __ CallRuntime(Runtime::kNumberToSmi);
2006 // Save the conversion result before the pop instructions below
2007 // have a chance to overwrite it.
2008 __ Move(index_, r0);
2009 if (embed_mode == PART_OF_IC_HANDLER) {
2010 __ Pop(LoadWithVectorDescriptor::VectorRegister(),
2011 LoadWithVectorDescriptor::SlotRegister(), object_);
2012 } else {
2013 __ pop(object_);
2014 }
2015 // Reload the instance type.
2016 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2017 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2018 call_helper.AfterCall(masm);
2019 // If index is still not a smi, it must be out of range.
2020 __ JumpIfNotSmi(index_, index_out_of_range_);
2021 // Otherwise, return to the fast path.
2022 __ jmp(&got_smi_index_);
2023
2024 // Call runtime. We get here when the receiver is a string and the
2025 // index is a number, but the code of getting the actual character
2026 // is too complex (e.g., when the string needs to be flattened).
2027 __ bind(&call_runtime_);
2028 call_helper.BeforeCall(masm);
2029 __ SmiTag(index_);
2030 __ Push(object_, index_);
2031 __ CallRuntime(Runtime::kStringCharCodeAtRT);
2032 __ Move(result_, r0);
2033 call_helper.AfterCall(masm);
2034 __ jmp(&exit_);
2035
2036 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
2037 }
2038
2039
2040 // -------------------------------------------------------------------------
2041 // StringCharFromCodeGenerator
2042
2043 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
2044 // Fast case of Heap::LookupSingleCharacterStringFromCode.
2045 STATIC_ASSERT(kSmiTag == 0);
2046 STATIC_ASSERT(kSmiShiftSize == 0);
2047 DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
2048 __ tst(code_, Operand(kSmiTagMask |
2049 ((~String::kMaxOneByteCharCodeU) << kSmiTagSize)));
2050 __ b(ne, &slow_case_);
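// Editor's worked example (assuming kSmiTagMask == 1 and
// String::kMaxOneByteCharCodeU == 0xFF): the mask tested above is
// 1 | (~0xFF << 1) == 0xFFFFFE01. A smi-tagged one-byte code c is encoded
// as c << 1, which is at most 0x1FE and shares no bits with the mask; any
// non-smi or out-of-range value sets at least one masked bit.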
2051
2052 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
2053 // At this point the code register contains a smi-tagged one-byte char code.
2054 __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_));
2055 __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
2056 __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
2057 __ b(eq, &slow_case_);
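// Editor's note: the cache is a FixedArray indexed by character code; an
// undefined entry means the one-character string has not been materialized
// yet, hence the slow-case fallback above.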
2058 __ bind(&exit_);
2059 }
2060
2061
2062 void StringCharFromCodeGenerator::GenerateSlow(
2063 MacroAssembler* masm,
2064 const RuntimeCallHelper& call_helper) {
2065 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
2066
2067 __ bind(&slow_case_);
2068 call_helper.BeforeCall(masm);
2069 __ push(code_);
2070 __ CallRuntime(Runtime::kStringCharFromCode);
2071 __ Move(result_, r0);
2072 call_helper.AfterCall(masm);
2073 __ jmp(&exit_);
2074
2075 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
2076 }
2077
2078
2079 enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
2080
2081
2082 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
2083 Register dest,
2084 Register src,
2085 Register count,
2086 Register scratch,
2087 String::Encoding encoding) {
2088 if (FLAG_debug_code) {
2089 // Check that destination is word aligned.
2090 __ tst(dest, Operand(kPointerAlignmentMask));
2091 __ Check(eq, kDestinationOfCopyNotAligned);
2092 }
2093
2094 // Assumes word reads and writes are little endian.
2095 // Nothing to do for zero characters.
2096 Label done;
2097 if (encoding == String::TWO_BYTE_ENCODING) {
2098 __ add(count, count, Operand(count), SetCC);
2099 }
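// Editor's note: the add above (emitted only for two-byte strings) doubles
// count, turning a character count into a byte count (2 bytes per
// character), so the byte-wise copy loop below serves both encodings.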
2100
2101 Register limit = count; // Read until dest equals this.
2102 __ add(limit, dest, Operand(count));
2103
2104 Label loop_entry, loop;
2105 // Copy bytes from src to dest until dest hits limit.
2106 __ b(&loop_entry);
2107 __ bind(&loop);
2108 __ ldrb(scratch, MemOperand(src, 1, PostIndex), lt);
2109 __ strb(scratch, MemOperand(dest, 1, PostIndex));
2110 __ bind(&loop_entry);
2111 __ cmp(dest, Operand(limit));
2112 __ b(lt, &loop);
2113
2114 __ bind(&done);
2115 }
2116
2117
2118 void StringHelper::GenerateFlatOneByteStringEquals(
2119 MacroAssembler* masm, Register left, Register right, Register scratch1,
2120 Register scratch2, Register scratch3) {
2121 Register length = scratch1;
2122
2123 // Compare lengths.
2124 Label strings_not_equal, check_zero_length;
2125 __ ldr(length, FieldMemOperand(left, String::kLengthOffset));
2126 __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
2127 __ cmp(length, scratch2);
2128 __ b(eq, &check_zero_length);
2129 __ bind(&strings_not_equal);
2130 __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
2131 __ Ret();
2132
2133 // Check if the length is zero.
2134 Label compare_chars;
2135 __ bind(&check_zero_length);
2136 STATIC_ASSERT(kSmiTag == 0);
2137 __ cmp(length, Operand::Zero());
2138 __ b(ne, &compare_chars);
2139 __ mov(r0, Operand(Smi::FromInt(EQUAL)));
2140 __ Ret();
2141
2142 // Compare characters.
2143 __ bind(&compare_chars);
2144 GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
2145 &strings_not_equal);
2146
2147 // Characters are equal.
2148 __ mov(r0, Operand(Smi::FromInt(EQUAL)));
2149 __ Ret();
2150 }
2151
2152
2153 void StringHelper::GenerateCompareFlatOneByteStrings(
2154 MacroAssembler* masm, Register left, Register right, Register scratch1,
2155 Register scratch2, Register scratch3, Register scratch4) {
2156 Label result_not_equal, compare_lengths;
2157 // Find minimum length and length difference.
2158 __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
2159 __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
2160 __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
2161 Register length_delta = scratch3;
2162 __ mov(scratch1, scratch2, LeaveCC, gt);
2163 Register min_length = scratch1;
2164 STATIC_ASSERT(kSmiTag == 0);
2165 __ cmp(min_length, Operand::Zero());
2166 __ b(eq, &compare_lengths);
2167
2168 // Compare loop.
2169 GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
2170 scratch4, &result_not_equal);
2171
2172 // Compare lengths - strings up to min-length are equal.
2173 __ bind(&compare_lengths);
2174 DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
2175 // Use length_delta as result if it's zero.
2176 __ mov(r0, Operand(length_delta), SetCC);
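// Editor's note: Smi::FromInt(EQUAL) is 0 (see the DCHECK above), and the
// difference of two smis is itself a valid smi, so length_delta can serve
// directly as the result; SetCC also establishes the flags consumed by the
// conditional moves below for the equal-prefix case.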
2177 __ bind(&result_not_equal);
2178 // Conditionally update the result based either on length_delta or on the
2179 // last comparison performed in the loop above.
2180 __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
2181 __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
2182 __ Ret();
2183 }
2184
2185
2186 void StringHelper::GenerateOneByteCharsCompareLoop(
2187 MacroAssembler* masm, Register left, Register right, Register length,
2188 Register scratch1, Register scratch2, Label* chars_not_equal) {
2189 // Change index to run from -length to -1 by adding length to string
2190 // start. This means that loop ends when index reaches zero, which
2191 // doesn't need an additional compare.
2192 __ SmiUntag(length);
2193 __ add(scratch1, length,
2194 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
2195 __ add(left, left, Operand(scratch1));
2196 __ add(right, right, Operand(scratch1));
2197 __ rsb(length, length, Operand::Zero());
2198 Register index = length; // index = -length;
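// A C sketch of the negative-index trick above (editor's illustration; the
// names are made up):
//
//   const uint8_t* left_end = left_chars + length;
//   const uint8_t* right_end = right_chars + length;
//   for (int i = -length; i != 0; i++) {
//     if (left_end[i] != right_end[i]) goto chars_not_equal;
//   }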
2199
2200 // Compare loop.
2201 Label loop;
2202 __ bind(&loop);
2203 __ ldrb(scratch1, MemOperand(left, index));
2204 __ ldrb(scratch2, MemOperand(right, index));
2205 __ cmp(scratch1, scratch2);
2206 __ b(ne, chars_not_equal);
2207 __ add(index, index, Operand(1), SetCC);
2208 __ b(ne, &loop);
2209 }
2210
2211
2212 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
2213 // ----------- S t a t e -------------
2214 // -- r1 : left
2215 // -- r0 : right
2216 // -- lr : return address
2217 // -----------------------------------
2218
2219 // Load r2 with the allocation site. We stick an undefined dummy value here
2220 // and replace it with the real allocation site later when we instantiate this
2221 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
2222 __ Move(r2, isolate()->factory()->undefined_value());
2223
2224 // Make sure that we actually patched the allocation site.
2225 if (FLAG_debug_code) {
2226 __ tst(r2, Operand(kSmiTagMask));
2227 __ Assert(ne, kExpectedAllocationSite);
2228 __ push(r2);
2229 __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
2230 __ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex);
2231 __ cmp(r2, ip);
2232 __ pop(r2);
2233 __ Assert(eq, kExpectedAllocationSite);
2234 }
2235
2236 // Tail call into the stub that handles binary operations with allocation
2237 // sites.
2238 BinaryOpWithAllocationSiteStub stub(isolate(), state());
2239 __ TailCallStub(&stub);
2240 }
2241
2242
2243 void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
2244 DCHECK_EQ(CompareICState::BOOLEAN, state());
2245 Label miss;
2246
2247 __ CheckMap(r1, r2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
2248 __ CheckMap(r0, r3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
2249 if (!Token::IsEqualityOp(op())) {
2250 __ ldr(r1, FieldMemOperand(r1, Oddball::kToNumberOffset));
2251 __ AssertSmi(r1);
2252 __ ldr(r0, FieldMemOperand(r0, Oddball::kToNumberOffset));
2253 __ AssertSmi(r0);
2254 }
2255 __ sub(r0, r1, r0);
2256 __ Ret();
2257
2258 __ bind(&miss);
2259 GenerateMiss(masm);
2260 }
2261
2262
2263 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
2264 DCHECK(state() == CompareICState::SMI);
2265 Label miss;
2266 __ orr(r2, r1, r0);
2267 __ JumpIfNotSmi(r2, &miss);
2268
2269 if (GetCondition() == eq) {
2270 // For equality we do not care about the sign of the result.
2271 __ sub(r0, r0, r1, SetCC);
2272 } else {
2273 // Untag before subtracting to avoid handling overflow.
2274 __ SmiUntag(r1);
2275 __ sub(r0, r1, Operand::SmiUntag(r0));
2276 }
2277 __ Ret();
2278
2279 __ bind(&miss);
2280 GenerateMiss(masm);
2281 }
2282
2283
2284 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
2285 DCHECK(state() == CompareICState::NUMBER);
2286
2287 Label generic_stub;
2288 Label unordered, maybe_undefined1, maybe_undefined2;
2289 Label miss;
2290
2291 if (left() == CompareICState::SMI) {
2292 __ JumpIfNotSmi(r1, &miss);
2293 }
2294 if (right() == CompareICState::SMI) {
2295 __ JumpIfNotSmi(r0, &miss);
2296 }
2297
2298 // Inlining the double comparison and falling back to the general compare
2299 // stub if NaN is involved.
2300 // Load left and right operand.
2301 Label done, left, left_smi, right_smi;
2302 __ JumpIfSmi(r0, &right_smi);
2303 __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
2304 DONT_DO_SMI_CHECK);
2305 __ sub(r2, r0, Operand(kHeapObjectTag));
2306 __ vldr(d1, r2, HeapNumber::kValueOffset);
2307 __ b(&left);
2308 __ bind(&right_smi);
2309 __ SmiToDouble(d1, r0);
2310
2311 __ bind(&left);
2312 __ JumpIfSmi(r1, &left_smi);
2313 __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
2314 DONT_DO_SMI_CHECK);
2315 __ sub(r2, r1, Operand(kHeapObjectTag));
2316 __ vldr(d0, r2, HeapNumber::kValueOffset);
2317 __ b(&done);
2318 __ bind(&left_smi);
2319 __ SmiToDouble(d0, r1);
2320
2321 __ bind(&done);
2322 // Compare operands.
2323 __ VFPCompareAndSetFlags(d0, d1);
2324
2325 // Don't base result on status bits when a NaN is involved.
2326 __ b(vs, &unordered);
2327
2328 // Return a result of -1, 0, or 1, based on status bits.
2329 __ mov(r0, Operand(EQUAL), LeaveCC, eq);
2330 __ mov(r0, Operand(LESS), LeaveCC, lt);
2331 __ mov(r0, Operand(GREATER), LeaveCC, gt);
2332 __ Ret();
2333
2334 __ bind(&unordered);
2335 __ bind(&generic_stub);
2336 CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
2337 CompareICState::GENERIC, CompareICState::GENERIC);
2338 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2339
2340 __ bind(&maybe_undefined1);
2341 if (Token::IsOrderedRelationalCompareOp(op())) {
2342 __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
2343 __ b(ne, &miss);
2344 __ JumpIfSmi(r1, &unordered);
2345 __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
2346 __ b(ne, &maybe_undefined2);
2347 __ jmp(&unordered);
2348 }
2349
2350 __ bind(&maybe_undefined2);
2351 if (Token::IsOrderedRelationalCompareOp(op())) {
2352 __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
2353 __ b(eq, &unordered);
2354 }
2355
2356 __ bind(&miss);
2357 GenerateMiss(masm);
2358 }
2359
2360
2361 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
2362 DCHECK(state() == CompareICState::INTERNALIZED_STRING);
2363 Label miss;
2364
2365 // Registers containing left and right operands respectively.
2366 Register left = r1;
2367 Register right = r0;
2368 Register tmp1 = r2;
2369 Register tmp2 = r3;
2370
2371 // Check that both operands are heap objects.
2372 __ JumpIfEitherSmi(left, right, &miss);
2373
2374 // Check that both operands are internalized strings.
2375 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2376 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2377 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2378 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2379 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2380 __ orr(tmp1, tmp1, Operand(tmp2));
2381 __ tst(tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
2382 __ b(ne, &miss);
2383
2384 // Internalized strings are compared by identity.
2385 __ cmp(left, right);
2386 // Make sure r0 is non-zero. At this point input operands are
2387 // guaranteed to be non-zero.
2388 DCHECK(right.is(r0));
2389 STATIC_ASSERT(EQUAL == 0);
2390 STATIC_ASSERT(kSmiTag == 0);
2391 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
2392 __ Ret();
2393
2394 __ bind(&miss);
2395 GenerateMiss(masm);
2396 }
2397
2398
2399 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
2400 DCHECK(state() == CompareICState::UNIQUE_NAME);
2401 DCHECK(GetCondition() == eq);
2402 Label miss;
2403
2404 // Registers containing left and right operands respectively.
2405 Register left = r1;
2406 Register right = r0;
2407 Register tmp1 = r2;
2408 Register tmp2 = r3;
2409
2410 // Check that both operands are heap objects.
2411 __ JumpIfEitherSmi(left, right, &miss);
2412
2413 // Check that both operands are unique names. This leaves the instance
2414 // types loaded in tmp1 and tmp2.
2415 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2416 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2417 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2418 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2419
2420 __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
2421 __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
2422
2423 // Unique names are compared by identity.
2424 __ cmp(left, right);
2425 // Make sure r0 is non-zero. At this point input operands are
2426 // guaranteed to be non-zero.
2427 DCHECK(right.is(r0));
2428 STATIC_ASSERT(EQUAL == 0);
2429 STATIC_ASSERT(kSmiTag == 0);
2430 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
2431 __ Ret();
2432
2433 __ bind(&miss);
2434 GenerateMiss(masm);
2435 }
2436
2437
2438 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
2439 DCHECK(state() == CompareICState::STRING);
2440 Label miss;
2441
2442 bool equality = Token::IsEqualityOp(op());
2443
2444 // Registers containing left and right operands respectively.
2445 Register left = r1;
2446 Register right = r0;
2447 Register tmp1 = r2;
2448 Register tmp2 = r3;
2449 Register tmp3 = r4;
2450 Register tmp4 = r5;
2451
2452 // Check that both operands are heap objects.
2453 __ JumpIfEitherSmi(left, right, &miss);
2454
2455 // Check that both operands are strings. This leaves the instance
2456 // types loaded in tmp1 and tmp2.
2457 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2458 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2459 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2460 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2461 STATIC_ASSERT(kNotStringTag != 0);
2462 __ orr(tmp3, tmp1, tmp2);
2463 __ tst(tmp3, Operand(kIsNotStringMask));
2464 __ b(ne, &miss);
2465
2466 // Fast check for identical strings.
2467 __ cmp(left, right);
2468 STATIC_ASSERT(EQUAL == 0);
2469 STATIC_ASSERT(kSmiTag == 0);
2470 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
2471 __ Ret(eq);
2472
2473 // Handle not identical strings.
2474
2475 // Check that both strings are internalized strings. If they are, we're done
2476 // because we already know they are not identical. We know they are both
2477 // strings.
2478 if (equality) {
2479 DCHECK(GetCondition() == eq);
2480 STATIC_ASSERT(kInternalizedTag == 0);
2481 __ orr(tmp3, tmp1, Operand(tmp2));
2482 __ tst(tmp3, Operand(kIsNotInternalizedMask));
2483 // Make sure r0 is non-zero. At this point input operands are
2484 // guaranteed to be non-zero.
2485 DCHECK(right.is(r0));
2486 __ Ret(eq);
2487 }
2488
2489 // Check that both strings are sequential one-byte.
2490 Label runtime;
2491 __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
2492 &runtime);
2493
2494 // Compare flat one-byte strings. Returns when done.
2495 if (equality) {
2496 StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
2497 tmp3);
2498 } else {
2499 StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
2500 tmp2, tmp3, tmp4);
2501 }
2502
2503 // Handle more complex cases in runtime.
2504 __ bind(&runtime);
2505 if (equality) {
2506 {
2507 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2508 __ Push(left, right);
2509 __ CallRuntime(Runtime::kStringEqual);
2510 }
2511 __ LoadRoot(r1, Heap::kTrueValueRootIndex);
2512 __ sub(r0, r0, r1);
2513 __ Ret();
2514 } else {
2515 __ Push(left, right);
2516 __ TailCallRuntime(Runtime::kStringCompare);
2517 }
2518
2519 __ bind(&miss);
2520 GenerateMiss(masm);
2521 }
2522
2523
2524 void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
2525 DCHECK_EQ(CompareICState::RECEIVER, state());
2526 Label miss;
2527 __ and_(r2, r1, Operand(r0));
2528 __ JumpIfSmi(r2, &miss);
2529
2530 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2531 __ CompareObjectType(r0, r2, r2, FIRST_JS_RECEIVER_TYPE);
2532 __ b(lt, &miss);
2533 __ CompareObjectType(r1, r2, r2, FIRST_JS_RECEIVER_TYPE);
2534 __ b(lt, &miss);
2535
2536 DCHECK(GetCondition() == eq);
2537 __ sub(r0, r0, Operand(r1));
2538 __ Ret();
2539
2540 __ bind(&miss);
2541 GenerateMiss(masm);
2542 }
2543
2544
2545 void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
2546 Label miss;
2547 Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
2548 __ and_(r2, r1, Operand(r0));
2549 __ JumpIfSmi(r2, &miss);
2550 __ GetWeakValue(r4, cell);
2551 __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
2552 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
2553 __ cmp(r2, r4);
2554 __ b(ne, &miss);
2555 __ cmp(r3, r4);
2556 __ b(ne, &miss);
2557
2558 if (Token::IsEqualityOp(op())) {
2559 __ sub(r0, r0, Operand(r1));
2560 __ Ret();
2561 } else {
2562 if (op() == Token::LT || op() == Token::LTE) {
2563 __ mov(r2, Operand(Smi::FromInt(GREATER)));
2564 } else {
2565 __ mov(r2, Operand(Smi::FromInt(LESS)));
2566 }
2567 __ Push(r1, r0, r2);
2568 __ TailCallRuntime(Runtime::kCompare);
2569 }
2570
2571 __ bind(&miss);
2572 GenerateMiss(masm);
2573 }
2574
2575
2576 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
2577 {
2578 // Call the runtime system in a fresh internal frame.
2579 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2580 __ Push(r1, r0);
2581 __ Push(lr, r1, r0);
2582 __ mov(ip, Operand(Smi::FromInt(op())));
2583 __ push(ip);
2584 __ CallRuntime(Runtime::kCompareIC_Miss);
2585 // Compute the entry point of the rewritten stub.
2586 __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
2587 // Restore registers.
2588 __ pop(lr);
2589 __ Pop(r1, r0);
2590 }
2591
2592 __ Jump(r2);
2593 }
2594
2595
2596 void DirectCEntryStub::Generate(MacroAssembler* masm) {
2597 // Place the return address on the stack, making the call
2598 // GC safe. The RegExp backend also relies on this.
2599 __ str(lr, MemOperand(sp, 0));
2600 __ blx(ip); // Call the C++ function.
2601 __ ldr(pc, MemOperand(sp, 0));
2602 }
2603
2604
2605 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
2606 Register target) {
2607 intptr_t code =
2608 reinterpret_cast<intptr_t>(GetCode().location());
2609 __ Move(ip, target);
2610 __ mov(lr, Operand(code, RelocInfo::CODE_TARGET));
2611 __ blx(lr); // Call the stub.
2612 }
2613
2614
2615 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
2616 Label* miss,
2617 Label* done,
2618 Register receiver,
2619 Register properties,
2620 Handle<Name> name,
2621 Register scratch0) {
2622 DCHECK(name->IsUniqueName());
2623 // If none of the slots probed for the hash value (probes 1 to kProbes - 1)
2624 // holds the name, and the kProbes-th slot is unused (its name is the
2625 // undefined value), the hash table is guaranteed not to contain the
2626 // property. This holds even if some slots hold deleted properties (their
2627 // names are the hole value).
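// Editor's sketch of the probe sequence used below (per the comments in
// this function): probe i inspects slot (hash + i + i*i) & mask, so hitting
// undefined proves the name is absent, while hitting the name itself proves
// it is present.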
2628 for (int i = 0; i < kInlinedProbes; i++) {
2629 // scratch0 points to properties hash.
2630 // Compute the masked index: (hash + i + i * i) & mask.
2631 Register index = scratch0;
2632 // Capacity is smi 2^n.
2633 __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
2634 __ sub(index, index, Operand(1));
2635 __ and_(index, index, Operand(
2636 Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
2637
2638 // Scale the index by multiplying by the entry size.
2639 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
2640 __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
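// Editor's note: index + (index << 1) == index * 3, matching the
// three-word entry size asserted above (key, value, property details).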
2641
2642 Register entity_name = scratch0;
2643 // Finding undefined here means the name is not in the table.
2644 STATIC_ASSERT(kSmiTagSize == 1);
2645 Register tmp = properties;
2646 __ add(tmp, properties, Operand(index, LSL, 1));
2647 __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
2648
2649 DCHECK(!tmp.is(entity_name));
2650 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
2651 __ cmp(entity_name, tmp);
2652 __ b(eq, done);
2653
2654 // Load the hole ready for use below:
2655 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
2656
2657 // Stop if we found the property.
2658 __ cmp(entity_name, Operand(Handle<Name>(name)));
2659 __ b(eq, miss);
2660
2661 Label good;
2662 __ cmp(entity_name, tmp);
2663 __ b(eq, &good);
2664
2665 // Check if the entry name is not a unique name.
2666 __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
2667 __ ldrb(entity_name,
2668 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
2669 __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
2670 __ bind(&good);
2671
2672 // Restore the properties.
2673 __ ldr(properties,
2674 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
2675 }
2676
2677 const int spill_mask =
2678 (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
2679 r2.bit() | r1.bit() | r0.bit());
2680
2681 __ stm(db_w, sp, spill_mask);
2682 __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
2683 __ mov(r1, Operand(Handle<Name>(name)));
2684 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
2685 __ CallStub(&stub);
2686 __ cmp(r0, Operand::Zero());
2687 __ ldm(ia_w, sp, spill_mask);
2688
2689 __ b(eq, done);
2690 __ b(ne, miss);
2691 }
2692
2693
2694 // Probe the name dictionary in the |elements| register. Jump to the
2695 // |done| label if a property with the given name is found. Jump to
2696 // the |miss| label otherwise.
2697 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
2698 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
2699 Label* miss,
2700 Label* done,
2701 Register elements,
2702 Register name,
2703 Register scratch1,
2704 Register scratch2) {
2705 DCHECK(!elements.is(scratch1));
2706 DCHECK(!elements.is(scratch2));
2707 DCHECK(!name.is(scratch1));
2708 DCHECK(!name.is(scratch2));
2709
2710 __ AssertName(name);
2711
2712 // Compute the capacity mask.
2713 __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
2714 __ SmiUntag(scratch1);
2715 __ sub(scratch1, scratch1, Operand(1));
2716
2717 // Generate an unrolled loop that performs a few probes before
2718 // giving up. Measurements done on Gmail indicate that 2 probes
2719 // cover ~93% of loads from dictionaries.
2720 for (int i = 0; i < kInlinedProbes; i++) {
2721 // Compute the masked index: (hash + i + i * i) & mask.
2722 __ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
2723 if (i > 0) {
2724 // Add the probe offset (i + i * i) left-shifted to avoid right-shifting
2725 // the hash in a separate instruction. The value hash + i + i * i is
2726 // right-shifted by the following 'and' instruction.
2727 DCHECK(NameDictionary::GetProbeOffset(i) <
2728 1 << (32 - Name::kHashFieldOffset));
2729 __ add(scratch2, scratch2, Operand(
2730 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
2731 }
2732 __ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
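// Editor's worked example: for the later probes (i > 0), the add above
// placed probe offset p << Name::kHashShift into scratch2, so the LSR by
// Name::kHashShift in the and above produces (hash + p), which the capacity
// mask then reduces to a slot index -- with no separate shift of the hash.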
2733
2734 // Scale the index by multiplying by the entry size.
2735 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
2736 // scratch2 = scratch2 * 3.
2737 __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
2738
2739 // Check if the key is identical to the name.
2740 __ add(scratch2, elements, Operand(scratch2, LSL, 2));
2741 __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
2742 __ cmp(name, Operand(ip));
2743 __ b(eq, done);
2744 }
2745
2746 const int spill_mask =
2747 (lr.bit() | r6.bit() | r5.bit() | r4.bit() |
2748 r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
2749 ~(scratch1.bit() | scratch2.bit());
2750
2751 __ stm(db_w, sp, spill_mask);
2752 if (name.is(r0)) {
2753 DCHECK(!elements.is(r1));
2754 __ Move(r1, name);
2755 __ Move(r0, elements);
2756 } else {
2757 __ Move(r0, elements);
2758 __ Move(r1, name);
2759 }
2760 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
2761 __ CallStub(&stub);
2762 __ cmp(r0, Operand::Zero());
2763 __ mov(scratch2, Operand(r2));
2764 __ ldm(ia_w, sp, spill_mask);
2765
2766 __ b(ne, done);
2767 __ b(eq, miss);
2768 }
2769
2770
2771 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
2772 // This stub overrides SometimesSetsUpAFrame() to return false. That means
2773 // we cannot call anything that could cause a GC from this stub.
2774 // Registers:
2775 //  result: holds the lookup result; aliases dictionary (r0).
2776 //  dictionary: NameDictionary to probe.
2777 //  key: the name to look up (r1).
2778 //  index: will hold the index of the entry if the lookup is successful;
2779 //         may alias with result.
2780 // Returns:
2781 //  result is zero if the lookup failed, non-zero otherwise.
2782
2783 Register result = r0;
2784 Register dictionary = r0;
2785 Register key = r1;
2786 Register index = r2;
2787 Register mask = r3;
2788 Register hash = r4;
2789 Register undefined = r5;
2790 Register entry_key = r6;
2791
2792 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
2793
2794 __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
2795 __ SmiUntag(mask);
2796 __ sub(mask, mask, Operand(1));
2797
2798 __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
2799
2800 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
2801
2802 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
2803 // Compute the masked index: (hash + i + i * i) & mask.
2804 // Capacity is smi 2^n.
2805 if (i > 0) {
2806 // Add the probe offset (i + i * i) left-shifted to avoid right-shifting
2807 // the hash in a separate instruction. The value hash + i + i * i is
2808 // right-shifted by the following 'and' instruction.
2809 DCHECK(NameDictionary::GetProbeOffset(i) <
2810 1 << (32 - Name::kHashFieldOffset));
2811 __ add(index, hash, Operand(
2812 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
2813 } else {
2814 __ mov(index, Operand(hash));
2815 }
2816 __ and_(index, mask, Operand(index, LSR, Name::kHashShift));
2817
2818 // Scale the index by multiplying by the entry size.
2819 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
2820 __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
2821
2822 STATIC_ASSERT(kSmiTagSize == 1);
2823 __ add(index, dictionary, Operand(index, LSL, 2));
2824 __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
2825
2826 // Finding undefined here means the name is not in the table.
2827 __ cmp(entry_key, Operand(undefined));
2828 __ b(eq, &not_in_dictionary);
2829
2830 // Stop if we found the property.
2831 __ cmp(entry_key, Operand(key));
2832 __ b(eq, &in_dictionary);
2833
2834 if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
2835 // Check if the entry name is not a unique name.
2836 __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
2837 __ ldrb(entry_key,
2838 FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
2839 __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
2840 }
2841 }
2842
2843 __ bind(&maybe_in_dictionary);
2844 // If we are doing a negative lookup, then probing failure should be
2845 // treated as a lookup success. For a positive lookup, probing failure
2846 // should be treated as a lookup failure.
2847 if (mode() == POSITIVE_LOOKUP) {
2848 __ mov(result, Operand::Zero());
2849 __ Ret();
2850 }
2851
2852 __ bind(&in_dictionary);
2853 __ mov(result, Operand(1));
2854 __ Ret();
2855
2856 __ bind(&not_in_dictionary);
2857 __ mov(result, Operand::Zero());
2858 __ Ret();
2859 }
2860
2861
2862 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
2863 Isolate* isolate) {
2864 StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
2865 stub1.GetCode();
2866 // Hydrogen code stubs need stub2 at snapshot time.
2867 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
2868 stub2.GetCode();
2869 }
2870
2871
2872 // Takes the input in 3 registers: address_, value_ and object_. A pointer
2873 // to the value has just been written into the object; now this stub makes
2874 // sure we keep the GC informed. The word in the object where the value has
2875 // been written is in the address register.
2876 void RecordWriteStub::Generate(MacroAssembler* masm) {
2877 Label skip_to_incremental_noncompacting;
2878 Label skip_to_incremental_compacting;
2879
2880 // The first two instructions are generated with labels so as to get the
2881 // offset fixed up correctly by the bind(Label*) call. We patch them back
2882 // and forth between compare instructions (nops in this position) and the
2883 // real branches when we start and stop incremental heap marking.
2884 // See RecordWriteStub::Patch for details.
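// Editor's sketch of the three patchable states (see the PatchBranchIntoNop
// calls at the end of this function):
//
//   STORE_BUFFER_ONLY:       nop ; nop   (initial state, fast path below)
//   INCREMENTAL:             b skip_to_incremental_noncompacting ; nop
//   INCREMENTAL_COMPACTION:  nop ; b skip_to_incremental_compacting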
2885 {
2886 // Block literal pool emission, as the position of these two instructions
2887 // is assumed by the patching code.
2888 Assembler::BlockConstPoolScope block_const_pool(masm);
2889 __ b(&skip_to_incremental_noncompacting);
2890 __ b(&skip_to_incremental_compacting);
2891 }
2892
2893 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
2894 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
2895 MacroAssembler::kReturnAtEnd);
2896 }
2897 __ Ret();
2898
2899 __ bind(&skip_to_incremental_noncompacting);
2900 GenerateIncremental(masm, INCREMENTAL);
2901
2902 __ bind(&skip_to_incremental_compacting);
2903 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
2904
2905 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
2906 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
2907 DCHECK(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
2908 DCHECK(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
2909 PatchBranchIntoNop(masm, 0);
2910 PatchBranchIntoNop(masm, Assembler::kInstrSize);
2911 }
2912
2913
2914 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
2915 regs_.Save(masm);
2916
2917 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
2918 Label dont_need_remembered_set;
2919
2920 __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
2921 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
2922 regs_.scratch0(),
2923 &dont_need_remembered_set);
2924
2925 __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
2926 &dont_need_remembered_set);
2927
2928 // First notify the incremental marker if necessary, then update the
2929 // remembered set.
2930 CheckNeedsToInformIncrementalMarker(
2931 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
2932 InformIncrementalMarker(masm);
2933 regs_.Restore(masm);
2934 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
2935 MacroAssembler::kReturnAtEnd);
2936
2937 __ bind(&dont_need_remembered_set);
2938 }
2939
2940 CheckNeedsToInformIncrementalMarker(
2941 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
2942 InformIncrementalMarker(masm);
2943 regs_.Restore(masm);
2944 __ Ret();
2945 }
2946
2947
2948 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
2949 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
2950 int argument_count = 3;
2951 __ PrepareCallCFunction(argument_count, regs_.scratch0());
2952 Register address =
2953 r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
2954 DCHECK(!address.is(regs_.object()));
2955 DCHECK(!address.is(r0));
2956 __ Move(address, regs_.address());
2957 __ Move(r0, regs_.object());
2958 __ Move(r1, address);
2959 __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
2960
2961 AllowExternalCallThatCantCauseGC scope(masm);
2962 __ CallCFunction(
2963 ExternalReference::incremental_marking_record_write_function(isolate()),
2964 argument_count);
2965 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
2966 }
2967
2968
2969 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
2970 MacroAssembler* masm,
2971 OnNoNeedToInformIncrementalMarker on_no_need,
2972 Mode mode) {
2973 Label on_black;
2974 Label need_incremental;
2975 Label need_incremental_pop_scratch;
2976
2977 // Let's look at the color of the object: If it is not black we don't have
2978 // to inform the incremental marker.
2979 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
2980
2981 regs_.Restore(masm);
2982 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
2983 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
2984 MacroAssembler::kReturnAtEnd);
2985 } else {
2986 __ Ret();
2987 }
2988
2989 __ bind(&on_black);
2990
2991 // Get the value from the slot.
2992 __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
2993
2994 if (mode == INCREMENTAL_COMPACTION) {
2995 Label ensure_not_white;
2996
2997 __ CheckPageFlag(regs_.scratch0(), // Contains value.
2998 regs_.scratch1(), // Scratch.
2999 MemoryChunk::kEvacuationCandidateMask,
3000 eq,
3001 &ensure_not_white);
3002
3003 __ CheckPageFlag(regs_.object(),
3004 regs_.scratch1(), // Scratch.
3005 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
3006 eq,
3007 &need_incremental);
3008
3009 __ bind(&ensure_not_white);
3010 }
3011
3012 // We need extra registers for this, so we push the object and the address
3013 // register temporarily.
3014 __ Push(regs_.object(), regs_.address());
3015 __ JumpIfWhite(regs_.scratch0(), // The value.
3016 regs_.scratch1(), // Scratch.
3017 regs_.object(), // Scratch.
3018 regs_.address(), // Scratch.
3019 &need_incremental_pop_scratch);
3020 __ Pop(regs_.object(), regs_.address());
3021
3022 regs_.Restore(masm);
3023 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
3024 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
3025 MacroAssembler::kReturnAtEnd);
3026 } else {
3027 __ Ret();
3028 }
3029
3030 __ bind(&need_incremental_pop_scratch);
3031 __ Pop(regs_.object(), regs_.address());
3032
3033 __ bind(&need_incremental);
3034
3035 // Fall through when we need to inform the incremental marker.
3036 }
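
// Marking colors, as relied on above (a hedged summary of the collector's
// invariants at this vintage): black objects have been fully scanned, white
// ones not yet visited. Writing a white value into a black object would
// hide it from the marker, so only black objects take the slow path. Under
// compaction we additionally fall through to the marker when the value
// lives on an evacuation candidate page, unless the object's page skips
// slot recording.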


void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
  CEntryStub ces(isolate(), 1, kSaveFPRegs);
  __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
  int parameter_count_offset =
      StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
  __ ldr(r1, MemOperand(fp, parameter_count_offset));
  if (function_mode() == JS_FUNCTION_STUB_MODE) {
    __ add(r1, r1, Operand(1));
  }
  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
  __ mov(r1, Operand(r1, LSL, kPointerSizeLog2));
  __ add(sp, sp, r1);
  __ Ret();
}

void CallICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(r2);
  CallICStub stub(isolate(), state());
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}


static void HandleArrayCases(MacroAssembler* masm, Register feedback,
                             Register receiver_map, Register scratch1,
                             Register scratch2, bool is_polymorphic,
                             Label* miss) {
  // feedback initially contains the feedback array.
  Label next_loop, prepare_next;
  Label start_polymorphic;

  Register cached_map = scratch1;

  __ ldr(cached_map,
         FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
  __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ cmp(receiver_map, cached_map);
  __ b(ne, &start_polymorphic);
  // Found, now call the handler.
  Register handler = feedback;
  __ ldr(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
  __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));


  Register length = scratch2;
  __ bind(&start_polymorphic);
  __ ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
  if (!is_polymorphic) {
    // If the IC could be monomorphic we have to make sure we don't go past the
    // end of the feedback array.
    __ cmp(length, Operand(Smi::FromInt(2)));
    __ b(eq, miss);
  }

  Register too_far = length;
  Register pointer_reg = feedback;

  // +-----+------+------+-----+-----+ ... ----+
  // | map | len  | wm0  | h0  | wm1 |      hN |
  // +-----+------+------+-----+-----+ ... ----+
  //                 0      1     2        len-1
  //                              ^              ^
  //                              |              |
  //                         pointer_reg      too_far
  //                         aka feedback     scratch2
  // also need receiver_map
  // use cached_map (scratch1) to look in the weak map values.
  __ add(too_far, feedback, Operand::PointerOffsetFromSmiKey(length));
  __ add(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(pointer_reg, feedback,
         Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));

  __ bind(&next_loop);
  __ ldr(cached_map, MemOperand(pointer_reg));
  __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ cmp(receiver_map, cached_map);
  __ b(ne, &prepare_next);
  __ ldr(handler, MemOperand(pointer_reg, kPointerSize));
  __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));

  __ bind(&prepare_next);
  __ add(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
  __ cmp(pointer_reg, too_far);
  __ b(lt, &next_loop);

  // We exhausted our array of map handler pairs.
  __ jmp(miss);
}
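
// A worked probe of the loop above, assuming a feedback array of length 4
// (the monomorphic pair plus one extra map/handler pair):
//
//   pointer_reg = feedback + OffsetOfElementAt(2) - kHeapObjectTag
//   too_far     = feedback + OffsetOfElementAt(4) - kHeapObjectTag
//
// Each iteration compares the WeakCell value at [pointer_reg] against
// receiver_map; a hit tail-jumps into the handler code object, a miss
// advances pointer_reg by 2 * kPointerSize until it reaches too_far.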


static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
                                  Register receiver_map, Register feedback,
                                  Register vector, Register slot,
                                  Register scratch, Label* compare_map,
                                  Label* load_smi_map, Label* try_array) {
  __ JumpIfSmi(receiver, load_smi_map);
  __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ bind(compare_map);
  Register cached_map = scratch;
  // Move the weak map into the weak_cell register.
  __ ldr(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
  __ cmp(cached_map, receiver_map);
  __ b(ne, try_array);
  Register handler = feedback;
  __ add(handler, vector, Operand::PointerOffsetFromSmiKey(slot));
  __ ldr(handler,
         FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
  __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
}
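
// Feedback vector layout assumed above: slot i holds a WeakCell with the
// monomorphic receiver map and slot i + 1 holds the handler, so the handler
// load is simply vector[slot + 1] (one kPointerSize past the slot's header
// offset).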

void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
  KeyedStoreICStub stub(isolate(), state());
  stub.GenerateForTrampoline(masm);
}

void KeyedStoreICStub::Generate(MacroAssembler* masm) {
  GenerateImpl(masm, false);
}

void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}


static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
                                       Register receiver_map, Register scratch1,
                                       Register scratch2, Label* miss) {
  // feedback initially contains the feedback array.
  Label next_loop, prepare_next;
  Label start_polymorphic;
  Label transition_call;

  Register cached_map = scratch1;
  Register too_far = scratch2;
  Register pointer_reg = feedback;
  __ ldr(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));

  // +-----+------+------+-----+-----+-----+ ... ----+
  // | map | len  | wm0  | wt0 | h0  | wm1 |      hN |
  // +-----+------+------+-----+-----+-----+ ... ----+
  //                 0      1     2              len-1
  //                 ^                               ^
  //                 |                               |
  //            pointer_reg                       too_far
  //            aka feedback                      scratch2
  // also need receiver_map
  // use cached_map (scratch1) to look in the weak map values.
  __ add(too_far, feedback, Operand::PointerOffsetFromSmiKey(too_far));
  __ add(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(pointer_reg, feedback,
         Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));

  __ bind(&next_loop);
  __ ldr(cached_map, MemOperand(pointer_reg));
  __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ cmp(receiver_map, cached_map);
  __ b(ne, &prepare_next);
  // Is it a transitioning store?
  __ ldr(too_far, MemOperand(pointer_reg, kPointerSize));
  __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
  __ b(ne, &transition_call);
  __ ldr(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
  __ add(pc, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));

  __ bind(&transition_call);
  __ ldr(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
  __ JumpIfSmi(too_far, miss);

  __ ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));

  // Load the map into the correct register.
  DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
  __ mov(feedback, too_far);

  __ add(pc, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));

  __ bind(&prepare_next);
  __ add(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
  __ cmp(pointer_reg, too_far);
  __ b(lt, &next_loop);

  // We exhausted our array of map handler pairs.
  __ jmp(miss);
}

void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // r1
  Register key = StoreWithVectorDescriptor::NameRegister();           // r2
  Register vector = StoreWithVectorDescriptor::VectorRegister();      // r3
  Register slot = StoreWithVectorDescriptor::SlotRegister();          // r4
  DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r0));          // r0
  Register feedback = r5;
  Register receiver_map = r6;
  Register scratch1 = r9;

  __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
  __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));

  // Try to quickly handle the monomorphic case without knowing for sure
  // if we have a weak cell in feedback. We do know it's safe to look
  // at WeakCell::kValueOffset.
  Label try_array, load_smi_map, compare_map;
  Label not_array, miss;
  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
                        scratch1, &compare_map, &load_smi_map, &try_array);

  __ bind(&try_array);
  // Is it a fixed array?
  __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
  __ b(ne, &not_array);

  // We have a polymorphic element handler.
  Label polymorphic, try_poly_name;
  __ bind(&polymorphic);

  // We are using register r8, which is used for the embedded constant pool
  // when FLAG_enable_embedded_constant_pool is true.
  DCHECK(!FLAG_enable_embedded_constant_pool);
  Register scratch2 = r8;

  HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
                             &miss);

  __ bind(&not_array);
  // Is it generic?
  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
  __ b(ne, &try_poly_name);
  Handle<Code> megamorphic_stub =
      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);

  __ bind(&try_poly_name);
  // We might have a name in feedback, and a fixed array in the next slot.
  __ cmp(key, feedback);
  __ b(ne, &miss);
  // If the name comparison succeeded, we know we have a fixed array with
  // at least one map/handler pair.
  __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
  __ ldr(feedback,
         FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
                   &miss);

  __ bind(&miss);
  KeyedStoreIC::GenerateMiss(masm);

  __ bind(&load_smi_map);
  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
  __ jmp(&compare_map);
}


void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
  if (masm->isolate()->function_entry_hook() != NULL) {
    ProfileEntryHookStub stub(masm->isolate());
    PredictableCodeSizeScope predictable(masm);
    predictable.ExpectSize(masm->CallStubSize(&stub) +
                           2 * Assembler::kInstrSize);
    __ push(lr);
    __ CallStub(&stub);
    __ pop(lr);
  }
}


void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
  // The entry hook is a "push lr" instruction, followed by a call.
  const int32_t kReturnAddressDistanceFromFunctionStart =
      3 * Assembler::kInstrSize;

  // This should contain all kCallerSaved registers.
  const RegList kSavedRegs =
      1 << 0 |  // r0
      1 << 1 |  // r1
      1 << 2 |  // r2
      1 << 3 |  // r3
      1 << 5 |  // r5
      1 << 9;   // r9
  // We also save lr, so the count here is one higher than the mask indicates.
  const int32_t kNumSavedRegs = 7;

  DCHECK((kCallerSaved & kSavedRegs) == kCallerSaved);

  // Save all caller-save registers as this may be called from anywhere.
  __ stm(db_w, sp, kSavedRegs | lr.bit());

  // Compute the function's address for the first argument.
  __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));

  // The caller's return address is above the saved temporaries.
  // Grab that for the second argument to the hook.
  __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize));

  // Align the stack if necessary.
  int frame_alignment = masm->ActivationFrameAlignment();
  if (frame_alignment > kPointerSize) {
    __ mov(r5, sp);
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    __ and_(sp, sp, Operand(-frame_alignment));
  }

#if V8_HOST_ARCH_ARM
  int32_t entry_hook =
      reinterpret_cast<int32_t>(isolate()->function_entry_hook());
  __ mov(ip, Operand(entry_hook));
#else
  // Under the simulator we need to indirect the entry hook through a
  // trampoline function at a known address.
  // It additionally takes an isolate as a third parameter.
  __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));

  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
  __ mov(ip, Operand(ExternalReference(&dispatcher,
                                       ExternalReference::BUILTIN_CALL,
                                       isolate())));
#endif
  __ Call(ip);

  // Restore the stack pointer if needed.
  if (frame_alignment > kPointerSize) {
    __ mov(sp, r5);
  }

  // Also pop pc to get Ret(0).
  __ ldm(ia_w, sp, kSavedRegs | pc.bit());
}
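
// Stack while the hook runs (a sketch; higher addresses at the top):
//
//   ... caller's stack ...   <- r1 (second argument to the hook)
//   lr
//   r9 r5 r3 r2 r1 r0        <- sp after the stm above
//
// When the ABI requires more than pointer-size alignment, sp is rounded
// down further and its original value parked in r5, which is why r5 must
// be part of kSavedRegs.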


template<class T>
static void CreateArrayDispatch(MacroAssembler* masm,
                                AllocationSiteOverrideMode mode) {
  if (mode == DISABLE_ALLOCATION_SITES) {
    T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    int last_index = GetSequenceIndexFromFastElementsKind(
        TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      __ cmp(r3, Operand(kind));
      T stub(masm->isolate(), kind);
      __ TailCallStub(&stub, eq);
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}
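
// The DONT_OVERRIDE arm above unrolls the dispatch into one cmp plus one
// conditional tail call per fast elements kind; roughly:
//
//   for each fast kind k, in sequence order:
//     if (r3 == k) tail-call the stub specialized for k;
//   abort on fall-through.
//
// Only the matching tail call is taken, so a miss costs one compare and one
// untaken branch per kind before the next candidate is tried.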


static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
                                           AllocationSiteOverrideMode mode) {
  // r2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
  // r3 - kind (if mode != DISABLE_ALLOCATION_SITES)
  // r0 - number of arguments
  // r1 - constructor?
  // sp[0] - last argument
  Label normal_sequence;
  if (mode == DONT_OVERRIDE) {
    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
    STATIC_ASSERT(FAST_ELEMENTS == 2);
    STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
    STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
    STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);

    // Is the low bit set? If so, we are holey and that is good.
    __ tst(r3, Operand(1));
    __ b(ne, &normal_sequence);
  }

  // Look at the first argument.
  __ ldr(r5, MemOperand(sp, 0));
  __ cmp(r5, Operand::Zero());
  __ b(eq, &normal_sequence);

  if (mode == DISABLE_ALLOCATION_SITES) {
    ElementsKind initial = GetInitialFastElementsKind();
    ElementsKind holey_initial = GetHoleyElementsKind(initial);

    ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
                                                  holey_initial,
                                                  DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub_holey);

    __ bind(&normal_sequence);
    ArraySingleArgumentConstructorStub stub(masm->isolate(),
                                            initial,
                                            DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    // We are going to create a holey array, but our kind is non-holey.
    // Fix kind and retry (only if we have an allocation site in the slot).
    __ add(r3, r3, Operand(1));

    if (FLAG_debug_code) {
      __ ldr(r5, FieldMemOperand(r2, 0));
      __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
      __ Assert(eq, kExpectedAllocationSite);
    }

    // Save the resulting elements kind in type info. We can't just store r3
    // in the AllocationSite::transition_info field because elements kind is
    // restricted to a portion of the field; upper bits need to be left alone.
    STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
    __ ldr(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
    __ add(r4, r4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
    __ str(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));

    __ bind(&normal_sequence);
    int last_index = GetSequenceIndexFromFastElementsKind(
        TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      __ cmp(r3, Operand(kind));
      ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
      __ TailCallStub(&stub, eq);
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}
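
// Note on the packed-to-holey transition above: the STATIC_ASSERTs pin the
// fast elements kinds so that each holey kind is its packed kind plus one.
// Adding 1 to r3 therefore converts packed to holey, and adding the
// smi-encoded kFastElementsKindPackedToHoley applies the same shift to the
// kind stored in the AllocationSite's transition info without disturbing
// the upper bits of that field.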


template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
  int to_index = GetSequenceIndexFromFastElementsKind(
      TERMINAL_FAST_ELEMENTS_KIND);
  for (int i = 0; i <= to_index; ++i) {
    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
    T stub(isolate, kind);
    stub.GetCode();
    if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
      T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
      stub1.GetCode();
    }
  }
}

void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
      isolate);
  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
      isolate);
  ArrayNArgumentsConstructorStub stub(isolate);
  stub.GetCode();
  ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
  for (int i = 0; i < 2; i++) {
    // For internal arrays we only need a few things.
    InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
    stubh1.GetCode();
    InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
    stubh2.GetCode();
  }
}


void ArrayConstructorStub::GenerateDispatchToArrayStub(
    MacroAssembler* masm,
    AllocationSiteOverrideMode mode) {
  Label not_zero_case, not_one_case;
  __ tst(r0, r0);
  __ b(ne, &not_zero_case);
  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);

  __ bind(&not_zero_case);
  __ cmp(r0, Operand(1));
  __ b(gt, &not_one_case);
  CreateArrayDispatchOneArgument(masm, mode);

  __ bind(&not_one_case);
  ArrayNArgumentsConstructorStub stub(masm->isolate());
  __ TailCallStub(&stub);
}


void ArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0 : argc (only if argument_count() == ANY)
  //  -- r1 : constructor
  //  -- r2 : AllocationSite or undefined
  //  -- r3 : new target
  //  -- sp[0] : return address
  //  -- sp[4] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
    // This test will fail for both a NULL and a Smi.
    __ tst(r4, Operand(kSmiTagMask));
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
    __ CompareObjectType(r4, r4, r5, MAP_TYPE);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);

    // We should either have undefined in r2 or a valid AllocationSite.
    __ AssertUndefinedOrAllocationSite(r2, r4);
  }

  // Enter the context of the Array function.
  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

  Label subclassing;
  __ cmp(r3, r1);
  __ b(ne, &subclassing);

  Label no_info;
  // Get the elements kind and case on that.
  __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
  __ b(eq, &no_info);

  __ ldr(r3, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
  __ SmiUntag(r3);
  STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
  __ and_(r3, r3, Operand(AllocationSite::ElementsKindBits::kMask));
  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);

  __ bind(&no_info);
  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);

  __ bind(&subclassing);
  __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
  __ add(r0, r0, Operand(3));
  __ Push(r3, r2);
  __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}


void InternalArrayConstructorStub::GenerateCase(
    MacroAssembler* masm, ElementsKind kind) {
  __ cmp(r0, Operand(1));

  InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
  __ TailCallStub(&stub0, lo);

  ArrayNArgumentsConstructorStub stubN(isolate());
  __ TailCallStub(&stubN, hi);

  if (IsFastPackedElementsKind(kind)) {
    // We might need to create a holey array:
    // look at the first argument.
    __ ldr(r3, MemOperand(sp, 0));
    __ cmp(r3, Operand::Zero());

    InternalArraySingleArgumentConstructorStub
        stub1_holey(isolate(), GetHoleyElementsKind(kind));
    __ TailCallStub(&stub1_holey, ne);
  }

  InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
  __ TailCallStub(&stub1);
}
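
// All three tail calls above hang off the single compare against 1:
//
//   argc < 1 (lo) -> no-argument stub
//   argc > 1 (hi) -> N-argument stub
//   argc == 1     -> falls through; a packed kind is demoted to its holey
//                    variant when the lone argument (the length) is non-zero.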


void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0 : argc
  //  -- r1 : constructor
  //  -- sp[0] : return address
  //  -- sp[4] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
    // This test will fail for both a NULL and a Smi.
    __ tst(r3, Operand(kSmiTagMask));
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
    __ CompareObjectType(r3, r3, r4, MAP_TYPE);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
  }

  // Figure out the right elements kind.
  __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
  // Load the map's "bit field 2" into r3. We only need the first byte,
  // but the following bit field extraction takes care of that anyway.
  __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ DecodeField<Map::ElementsKindBits>(r3);

  if (FLAG_debug_code) {
    Label done;
    __ cmp(r3, Operand(FAST_ELEMENTS));
    __ b(eq, &done);
    __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
    __ Assert(eq,
              kInvalidElementsKindForInternalArrayOrInternalPackedArray);
    __ bind(&done);
  }

  Label fast_elements_case;
  __ cmp(r3, Operand(FAST_ELEMENTS));
  __ b(eq, &fast_elements_case);
  GenerateCase(masm, FAST_HOLEY_ELEMENTS);

  __ bind(&fast_elements_case);
  GenerateCase(masm, FAST_ELEMENTS);
}


void FastNewObjectStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r1 : target
  //  -- r3 : new target
  //  -- cp : context
  //  -- lr : return address
  // -----------------------------------
  __ AssertFunction(r1);
  __ AssertReceiver(r3);

  // Verify that the new target is a JSFunction.
  Label new_object;
  __ CompareObjectType(r3, r2, r2, JS_FUNCTION_TYPE);
  __ b(ne, &new_object);

  // Load the initial map and verify that it's in fact a map.
  __ ldr(r2, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
  __ JumpIfSmi(r2, &new_object);
  __ CompareObjectType(r2, r0, r0, MAP_TYPE);
  __ b(ne, &new_object);

  // Fall back to runtime if the target differs from the new target's
  // initial map constructor.
  __ ldr(r0, FieldMemOperand(r2, Map::kConstructorOrBackPointerOffset));
  __ cmp(r0, r1);
  __ b(ne, &new_object);

  // Allocate the JSObject on the heap.
  Label allocate, done_allocate;
  __ ldrb(r4, FieldMemOperand(r2, Map::kInstanceSizeOffset));
  __ Allocate(r4, r0, r5, r6, &allocate, SIZE_IN_WORDS);
  __ bind(&done_allocate);

  // Initialize the JSObject fields.
  __ str(r2, FieldMemOperand(r0, JSObject::kMapOffset));
  __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
  __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
  __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
  __ add(r1, r0, Operand(JSObject::kHeaderSize - kHeapObjectTag));

  // ----------- S t a t e -------------
  //  -- r0 : result (tagged)
  //  -- r1 : result fields (untagged)
  //  -- r5 : result end (untagged)
  //  -- r2 : initial map
  //  -- cp : context
  //  -- lr : return address
  // -----------------------------------

  // Perform in-object slack tracking if requested.
  Label slack_tracking;
  STATIC_ASSERT(Map::kNoSlackTracking == 0);
  __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
  __ ldr(r3, FieldMemOperand(r2, Map::kBitField3Offset));
  __ tst(r3, Operand(Map::ConstructionCounter::kMask));
  __ b(ne, &slack_tracking);
  {
    // Initialize all in-object fields with undefined.
    __ InitializeFieldsWithFiller(r1, r5, r6);
    __ Ret();
  }
  __ bind(&slack_tracking);
  {
    // Decrease generous allocation count.
    STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
    __ sub(r3, r3, Operand(1 << Map::ConstructionCounter::kShift));
    __ str(r3, FieldMemOperand(r2, Map::kBitField3Offset));

    // Initialize the in-object fields with undefined.
    __ ldrb(r4, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
    __ sub(r4, r5, Operand(r4, LSL, kPointerSizeLog2));
    __ InitializeFieldsWithFiller(r1, r4, r6);

    // Initialize the remaining (reserved) fields with one pointer filler map.
    __ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
    __ InitializeFieldsWithFiller(r1, r5, r6);

    // Check if we can finalize the instance size.
    STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
    __ tst(r3, Operand(Map::ConstructionCounter::kMask));
    __ Ret(ne);

    // Finalize the instance size.
    {
      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
      __ Push(r0, r2);
      __ CallRuntime(Runtime::kFinalizeInstanceSize);
      __ Pop(r0);
    }
    __ Ret();
  }

  // Fall back to %AllocateInNewSpace.
  __ bind(&allocate);
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize == 1);
    __ mov(r4, Operand(r4, LSL, kPointerSizeLog2 + 1));
    __ Push(r2, r4);
    __ CallRuntime(Runtime::kAllocateInNewSpace);
    __ Pop(r2);
  }
  __ ldrb(r5, FieldMemOperand(r2, Map::kInstanceSizeOffset));
  __ add(r5, r0, Operand(r5, LSL, kPointerSizeLog2));
  STATIC_ASSERT(kHeapObjectTag == 1);
  __ sub(r5, r5, Operand(kHeapObjectTag));
  __ b(&done_allocate);

  // Fall back to %NewObject.
  __ bind(&new_object);
  __ Push(r1, r3);
  __ TailCallRuntime(Runtime::kNewObject);
}
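
// In-object slack tracking, as used above (a hedged summary): a young map
// carries a construction counter in bit field 3. Each allocation decrements
// it; while tracking is still running, the reserved tail of the object is
// filled with one-pointer filler maps (keeping the heap iterable) instead
// of undefined, and once the counter runs out Runtime::kFinalizeInstanceSize
// shrinks the map's instance size to what instances actually use.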


void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r1 : function
  //  -- cp : context
  //  -- fp : frame pointer
  //  -- lr : return address
  // -----------------------------------
  __ AssertFunction(r1);

  // Make r2 point to the JavaScript frame.
  __ mov(r2, fp);
  if (skip_stub_frame()) {
    // For Ignition we need to skip the handler/stub frame to reach the
    // JavaScript frame for the function.
    __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
  }
  if (FLAG_debug_code) {
    Label ok;
    __ ldr(ip, MemOperand(r2, StandardFrameConstants::kFunctionOffset));
    __ cmp(ip, r1);
    __ b(eq, &ok);
    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
    __ bind(&ok);
  }

  // Check if we have rest parameters (only possible if we have an
  // arguments adaptor frame below the function frame).
  Label no_rest_parameters;
  __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
  __ ldr(ip, MemOperand(r2, CommonFrameConstants::kContextOrFrameTypeOffset));
  __ cmp(ip, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(ne, &no_rest_parameters);

  // Check if the arguments adaptor frame contains more arguments than
  // specified by the function's internal formal parameter count.
  Label rest_parameters;
  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(r3,
         FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
  __ sub(r0, r0, r3, SetCC);
  __ b(gt, &rest_parameters);

  // Return an empty rest parameter array.
  __ bind(&no_rest_parameters);
  {
    // ----------- S t a t e -------------
    //  -- cp : context
    //  -- lr : return address
    // -----------------------------------

    // Allocate an empty rest parameter array.
    Label allocate, done_allocate;
    __ Allocate(JSArray::kSize, r0, r1, r2, &allocate, NO_ALLOCATION_FLAGS);
    __ bind(&done_allocate);

    // Setup the rest parameter array in r0.
    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r1);
    __ str(r1, FieldMemOperand(r0, JSArray::kMapOffset));
    __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
    __ str(r1, FieldMemOperand(r0, JSArray::kPropertiesOffset));
    __ str(r1, FieldMemOperand(r0, JSArray::kElementsOffset));
    __ mov(r1, Operand(0));
    __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
    __ Ret();

    // Fall back to %AllocateInNewSpace.
    __ bind(&allocate);
    {
      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
      __ Push(Smi::FromInt(JSArray::kSize));
      __ CallRuntime(Runtime::kAllocateInNewSpace);
    }
    __ jmp(&done_allocate);
  }

  __ bind(&rest_parameters);
  {
    // Compute the pointer to the first rest parameter (skipping the receiver).
    __ add(r2, r2, Operand(r0, LSL, kPointerSizeLog2 - 1));
    __ add(r2, r2,
           Operand(StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));

    // ----------- S t a t e -------------
    //  -- cp : context
    //  -- r0 : number of rest parameters (tagged)
    //  -- r1 : function
    //  -- r2 : pointer to first rest parameters
    //  -- lr : return address
    // -----------------------------------

    // Allocate space for the rest parameter array plus the backing store.
    Label allocate, done_allocate;
    __ mov(r6, Operand(JSArray::kSize + FixedArray::kHeaderSize));
    __ add(r6, r6, Operand(r0, LSL, kPointerSizeLog2 - 1));
    __ Allocate(r6, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
    __ bind(&done_allocate);

    // Setup the elements array in r3.
    __ LoadRoot(r1, Heap::kFixedArrayMapRootIndex);
    __ str(r1, FieldMemOperand(r3, FixedArray::kMapOffset));
    __ str(r0, FieldMemOperand(r3, FixedArray::kLengthOffset));
    __ add(r4, r3, Operand(FixedArray::kHeaderSize));
    {
      Label loop, done_loop;
      __ add(r1, r4, Operand(r0, LSL, kPointerSizeLog2 - 1));
      __ bind(&loop);
      __ cmp(r4, r1);
      __ b(eq, &done_loop);
      __ ldr(ip, MemOperand(r2, 1 * kPointerSize, NegPostIndex));
      __ str(ip, FieldMemOperand(r4, 0 * kPointerSize));
      __ add(r4, r4, Operand(1 * kPointerSize));
      __ b(&loop);
      __ bind(&done_loop);
    }

    // Setup the rest parameter array in r4.
    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r1);
    __ str(r1, FieldMemOperand(r4, JSArray::kMapOffset));
    __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
    __ str(r1, FieldMemOperand(r4, JSArray::kPropertiesOffset));
    __ str(r3, FieldMemOperand(r4, JSArray::kElementsOffset));
    __ str(r0, FieldMemOperand(r4, JSArray::kLengthOffset));
    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
    __ mov(r0, r4);
    __ Ret();

    // Fall back to %AllocateInNewSpace (if not too big).
    Label too_big_for_new_space;
    __ bind(&allocate);
    __ cmp(r6, Operand(kMaxRegularHeapObjectSize));
    __ b(gt, &too_big_for_new_space);
    {
      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
      __ SmiTag(r6);
      __ Push(r0, r2, r6);
      __ CallRuntime(Runtime::kAllocateInNewSpace);
      __ mov(r3, r0);
      __ Pop(r0, r2);
    }
    __ jmp(&done_allocate);

    // Fall back to %NewRestParameter.
    __ bind(&too_big_for_new_space);
    __ push(r1);
    __ TailCallRuntime(Runtime::kNewRestParameter);
  }
}
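
// The copy loop above moves one pointer per iteration: the NegPostIndex
// addressing mode loads from [r2] and then decrements r2 by one pointer
// size, while r4 advances through the FixedArray until it reaches r1, the
// precomputed end of the elements.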


void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r1 : function
  //  -- cp : context
  //  -- fp : frame pointer
  //  -- lr : return address
  // -----------------------------------
  __ AssertFunction(r1);

  // Make r9 point to the JavaScript frame.
  __ mov(r9, fp);
  if (skip_stub_frame()) {
    // For Ignition we need to skip the handler/stub frame to reach the
    // JavaScript frame for the function.
    __ ldr(r9, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
  }
  if (FLAG_debug_code) {
    Label ok;
    __ ldr(ip, MemOperand(r9, StandardFrameConstants::kFunctionOffset));
    __ cmp(ip, r1);
    __ b(eq, &ok);
    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
    __ bind(&ok);
  }

  // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(r2,
         FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
  __ add(r3, r9, Operand(r2, LSL, kPointerSizeLog2 - 1));
  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));

  // r1 : function
  // r2 : number of parameters (tagged)
  // r3 : parameters pointer
  // r9 : JavaScript frame pointer
  // Registers used over whole function:
  //  r5 : arguments count (tagged)
  //  r6 : mapped parameter count (tagged)

  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor_frame, try_allocate, runtime;
  __ ldr(r4, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
  __ ldr(r0, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
  __ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(eq, &adaptor_frame);

  // No adaptor, parameter count = argument count.
  __ mov(r5, r2);
  __ mov(r6, r2);
  __ b(&try_allocate);

  // We have an adaptor frame. Patch the parameters pointer.
  __ bind(&adaptor_frame);
  __ ldr(r5, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ add(r4, r4, Operand(r5, LSL, 1));
  __ add(r3, r4, Operand(StandardFrameConstants::kCallerSPOffset));

  // r5 = argument count (tagged)
  // r6 = parameter count (tagged)
  // Compute the mapped parameter count = min(r6, r5) in r6.
  __ mov(r6, r2);
  __ cmp(r6, Operand(r5));
  __ mov(r6, Operand(r5), LeaveCC, gt);

  __ bind(&try_allocate);

  // Compute the sizes of backing store, parameter map, and arguments object.
  // 1. Parameter map, has 2 extra words containing context and backing store.
  const int kParameterMapHeaderSize =
      FixedArray::kHeaderSize + 2 * kPointerSize;
  // If there are no mapped parameters, we do not need the parameter_map.
  __ cmp(r6, Operand(Smi::kZero));
  __ mov(r9, Operand::Zero(), LeaveCC, eq);
  __ mov(r9, Operand(r6, LSL, 1), LeaveCC, ne);
  __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);

  // 2. Backing store.
  __ add(r9, r9, Operand(r5, LSL, 1));
  __ add(r9, r9, Operand(FixedArray::kHeaderSize));

  // 3. Arguments object.
  __ add(r9, r9, Operand(JSSloppyArgumentsObject::kSize));

  // Do the allocation of all three objects in one go.
  __ Allocate(r9, r0, r9, r4, &runtime, NO_ALLOCATION_FLAGS);

  // r0 = address of new object(s) (tagged)
  // r2 = argument count (smi-tagged)
  // Get the arguments boilerplate from the current native context into r4.
  const int kNormalOffset =
      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
  const int kAliasedOffset =
      Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);

  __ ldr(r4, NativeContextMemOperand());
  __ cmp(r6, Operand::Zero());
  __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
  __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);

  // r0 = address of new object (tagged)
  // r2 = argument count (smi-tagged)
  // r4 = address of arguments map (tagged)
  // r6 = mapped parameter count (tagged)
  __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
  __ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
  __ str(r9, FieldMemOperand(r0, JSObject::kPropertiesOffset));
  __ str(r9, FieldMemOperand(r0, JSObject::kElementsOffset));

  // Set up the callee in-object property.
  __ AssertNotSmi(r1);
  __ str(r1, FieldMemOperand(r0, JSSloppyArgumentsObject::kCalleeOffset));

  // Use the length (smi tagged) and set that as an in-object property too.
  __ AssertSmi(r5);
  __ str(r5, FieldMemOperand(r0, JSSloppyArgumentsObject::kLengthOffset));

  // Set up the elements pointer in the allocated arguments object.
  // If we allocated a parameter map, r4 will point there, otherwise
  // it will point to the backing store.
  __ add(r4, r0, Operand(JSSloppyArgumentsObject::kSize));
  __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));

  // r0 = address of new object (tagged)
  // r2 = argument count (tagged)
  // r4 = address of parameter map or backing store (tagged)
  // r6 = mapped parameter count (tagged)
  // Initialize parameter map. If there are no mapped arguments, we're done.
  Label skip_parameter_map;
  __ cmp(r6, Operand(Smi::kZero));
  // Move backing store address to r1, because it is
  // expected there when filling in the unmapped arguments.
  __ mov(r1, r4, LeaveCC, eq);
  __ b(eq, &skip_parameter_map);

  __ LoadRoot(r5, Heap::kSloppyArgumentsElementsMapRootIndex);
  __ str(r5, FieldMemOperand(r4, FixedArray::kMapOffset));
  __ add(r5, r6, Operand(Smi::FromInt(2)));
  __ str(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
  __ str(cp, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
  __ add(r5, r4, Operand(r6, LSL, 1));
  __ add(r5, r5, Operand(kParameterMapHeaderSize));
  __ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));

  // Copy the parameter slots and the holes in the arguments.
  // We need to fill in mapped_parameter_count slots. They index the context,
  // where parameters are stored in reverse order, at
  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
  // The mapped parameters thus need to get indices
  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
  // We loop from right to left.
  Label parameters_loop, parameters_test;
  __ mov(r5, r6);
  __ add(r9, r2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
  __ sub(r9, r9, Operand(r6));
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ add(r1, r4, Operand(r5, LSL, 1));
  __ add(r1, r1, Operand(kParameterMapHeaderSize));

  // r1 = address of backing store (tagged)
  // r4 = address of parameter map (tagged), which is also the address of new
  //      object + Heap::kSloppyArgumentsObjectSize (tagged)
  // r0 = temporary scratch (a.o., for address calculation)
  // r5 = loop variable (tagged)
  // ip = the hole value
  __ jmp(&parameters_test);

  __ bind(&parameters_loop);
  __ sub(r5, r5, Operand(Smi::FromInt(1)));
  __ mov(r0, Operand(r5, LSL, 1));
  __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
  __ str(r9, MemOperand(r4, r0));
  __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
  __ str(ip, MemOperand(r1, r0));
  __ add(r9, r9, Operand(Smi::FromInt(1)));
  __ bind(&parameters_test);
  __ cmp(r5, Operand(Smi::kZero));
  __ b(ne, &parameters_loop);

  // Restore r0 = new object (tagged) and r5 = argument count (tagged).
  __ sub(r0, r4, Operand(JSSloppyArgumentsObject::kSize));
  __ ldr(r5, FieldMemOperand(r0, JSSloppyArgumentsObject::kLengthOffset));

  __ bind(&skip_parameter_map);
  // r0 = address of new object (tagged)
  // r1 = address of backing store (tagged)
  // r5 = argument count (tagged)
  // r6 = mapped parameter count (tagged)
  // r9 = scratch
  // Copy arguments header and remaining slots (if there are any).
  __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
  __ str(r9, FieldMemOperand(r1, FixedArray::kMapOffset));
  __ str(r5, FieldMemOperand(r1, FixedArray::kLengthOffset));

  Label arguments_loop, arguments_test;
  __ sub(r3, r3, Operand(r6, LSL, 1));
  __ jmp(&arguments_test);

  __ bind(&arguments_loop);
  __ sub(r3, r3, Operand(kPointerSize));
  __ ldr(r4, MemOperand(r3, 0));
  __ add(r9, r1, Operand(r6, LSL, 1));
  __ str(r4, FieldMemOperand(r9, FixedArray::kHeaderSize));
  __ add(r6, r6, Operand(Smi::FromInt(1)));

  __ bind(&arguments_test);
  __ cmp(r6, Operand(r5));
  __ b(lt, &arguments_loop);

  // Return.
  __ Ret();

  // Do the runtime call to allocate the arguments object.
  // r0 = address of new object (tagged)
  // r5 = argument count (tagged)
  __ bind(&runtime);
  __ Push(r1, r3, r5);
  __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
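
// Memory picture of the single allocation above (a sketch):
//
//   r0 -> JSSloppyArgumentsObject   (JSSloppyArgumentsObject::kSize bytes)
//   r4 -> parameter map             (FixedArray header + 2 slots + one slot
//                                    per mapped parameter; omitted entirely
//                                    when nothing is mapped)
//   r1 -> backing store             (FixedArray header + argc slots)
//
// The parameter map's two extra slots hold the context and the backing
// store; each mapped slot holds a context index, with the hole stored at
// the corresponding position of the backing store.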


void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r1 : function
  //  -- cp : context
  //  -- fp : frame pointer
  //  -- lr : return address
  // -----------------------------------
  __ AssertFunction(r1);

  // Make r2 point to the JavaScript frame.
  __ mov(r2, fp);
  if (skip_stub_frame()) {
    // For Ignition we need to skip the handler/stub frame to reach the
    // JavaScript frame for the function.
    __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
  }
  if (FLAG_debug_code) {
    Label ok;
    __ ldr(ip, MemOperand(r2, StandardFrameConstants::kFunctionOffset));
    __ cmp(ip, r1);
    __ b(eq, &ok);
    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
    __ bind(&ok);
  }

  // Check if we have an arguments adaptor frame below the function frame.
  Label arguments_adaptor, arguments_done;
  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
  __ ldr(ip, MemOperand(r3, CommonFrameConstants::kContextOrFrameTypeOffset));
  __ cmp(ip, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(eq, &arguments_adaptor);
  {
    __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
    __ ldr(r0, FieldMemOperand(
                   r4, SharedFunctionInfo::kFormalParameterCountOffset));
    __ add(r2, r2, Operand(r0, LSL, kPointerSizeLog2 - 1));
    __ add(r2, r2,
           Operand(StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
  }
  __ b(&arguments_done);
  __ bind(&arguments_adaptor);
  {
    __ ldr(r0, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
    __ add(r2, r3, Operand(r0, LSL, kPointerSizeLog2 - 1));
    __ add(r2, r2,
           Operand(StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
  }
  __ bind(&arguments_done);

  // ----------- S t a t e -------------
  //  -- cp : context
  //  -- r0 : number of rest parameters (tagged)
  //  -- r1 : function
  //  -- r2 : pointer to first rest parameters
  //  -- lr : return address
  // -----------------------------------

  // Allocate space for the strict arguments object plus the backing store.
  Label allocate, done_allocate;
  __ mov(r6, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
  __ add(r6, r6, Operand(r0, LSL, kPointerSizeLog2 - 1));
  __ Allocate(r6, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
  __ bind(&done_allocate);

  // Setup the elements array in r3.
  __ LoadRoot(r1, Heap::kFixedArrayMapRootIndex);
  __ str(r1, FieldMemOperand(r3, FixedArray::kMapOffset));
  __ str(r0, FieldMemOperand(r3, FixedArray::kLengthOffset));
  __ add(r4, r3, Operand(FixedArray::kHeaderSize));
  {
    Label loop, done_loop;
    __ add(r1, r4, Operand(r0, LSL, kPointerSizeLog2 - 1));
    __ bind(&loop);
    __ cmp(r4, r1);
    __ b(eq, &done_loop);
    __ ldr(ip, MemOperand(r2, 1 * kPointerSize, NegPostIndex));
    __ str(ip, FieldMemOperand(r4, 0 * kPointerSize));
    __ add(r4, r4, Operand(1 * kPointerSize));
    __ b(&loop);
    __ bind(&done_loop);
  }

  // Setup the strict arguments object in r4.
  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r1);
  __ str(r1, FieldMemOperand(r4, JSStrictArgumentsObject::kMapOffset));
  __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
  __ str(r1, FieldMemOperand(r4, JSStrictArgumentsObject::kPropertiesOffset));
  __ str(r3, FieldMemOperand(r4, JSStrictArgumentsObject::kElementsOffset));
  __ str(r0, FieldMemOperand(r4, JSStrictArgumentsObject::kLengthOffset));
  STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
  __ mov(r0, r4);
  __ Ret();

  // Fall back to %AllocateInNewSpace (if not too big).
  Label too_big_for_new_space;
  __ bind(&allocate);
  __ cmp(r6, Operand(kMaxRegularHeapObjectSize));
  __ b(gt, &too_big_for_new_space);
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    __ SmiTag(r6);
    __ Push(r0, r2, r6);
    __ CallRuntime(Runtime::kAllocateInNewSpace);
    __ mov(r3, r0);
    __ Pop(r0, r2);
  }
  __ b(&done_allocate);

  // Fall back to %NewStrictArguments.
  __ bind(&too_big_for_new_space);
  __ push(r1);
  __ TailCallRuntime(Runtime::kNewStrictArguments);
}


static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return ref0.address() - ref1.address();
}


// Calls an API function. Allocates a HandleScope, extracts the returned
// value from the handle, and propagates exceptions. Restores the context.
// |stack_space| is the space to be unwound on exit (it includes the call JS
// arguments space and the additional space allocated for the fast call).
static void CallApiFunctionAndReturn(MacroAssembler* masm,
                                     Register function_address,
                                     ExternalReference thunk_ref,
                                     int stack_space,
                                     MemOperand* stack_space_operand,
                                     MemOperand return_value_operand,
                                     MemOperand* context_restore_operand) {
  Isolate* isolate = masm->isolate();
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate);
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(isolate), next_address);
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(isolate), next_address);

  DCHECK(function_address.is(r1) || function_address.is(r2));

  Label profiler_disabled;
  Label end_profiler_check;
  __ mov(r9, Operand(ExternalReference::is_profiling_address(isolate)));
  __ ldrb(r9, MemOperand(r9, 0));
  __ cmp(r9, Operand(0));
  __ b(eq, &profiler_disabled);

  // Additional parameter is the address of the actual callback.
  __ mov(r3, Operand(thunk_ref));
  __ jmp(&end_profiler_check);

  __ bind(&profiler_disabled);
  __ Move(r3, function_address);
  __ bind(&end_profiler_check);

  // Allocate HandleScope in callee-save registers.
  __ mov(r9, Operand(next_address));
  __ ldr(r4, MemOperand(r9, kNextOffset));
  __ ldr(r5, MemOperand(r9, kLimitOffset));
  __ ldr(r6, MemOperand(r9, kLevelOffset));
  __ add(r6, r6, Operand(1));
  __ str(r6, MemOperand(r9, kLevelOffset));

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, r0);
    __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  // Native call returns to the DirectCEntry stub which redirects to the
  // return address pushed on stack (could have moved after GC).
  // DirectCEntry stub itself is generated early and never moves.
  DirectCEntryStub stub(isolate);
  stub.GenerateCall(masm, r3);

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, r0);
    __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;

  // Load value from ReturnValue.
  __ ldr(r0, return_value_operand);
  __ bind(&return_value_loaded);
  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  __ str(r4, MemOperand(r9, kNextOffset));
  if (__ emit_debug_code()) {
    __ ldr(r1, MemOperand(r9, kLevelOffset));
    __ cmp(r1, r6);
    __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
  }
  __ sub(r6, r6, Operand(1));
  __ str(r6, MemOperand(r9, kLevelOffset));
  __ ldr(ip, MemOperand(r9, kLimitOffset));
  __ cmp(r5, ip);
  __ b(ne, &delete_allocated_handles);

  // Leave the API exit frame.
  __ bind(&leave_exit_frame);
  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    __ ldr(cp, *context_restore_operand);
  }
  // LeaveExitFrame expects unwind space to be in a register.
  if (stack_space_operand != NULL) {
    __ ldr(r4, *stack_space_operand);
  } else {
    __ mov(r4, Operand(stack_space));
  }
  __ LeaveExitFrame(false, r4, !restore_context, stack_space_operand != NULL);

  // Check if the function scheduled an exception.
  __ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
  __ mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate)));
  __ ldr(r5, MemOperand(ip));
  __ cmp(r4, r5);
  __ b(ne, &promote_scheduled_exception);

  __ mov(pc, lr);

  // Re-throw by promoting a scheduled exception.
  __ bind(&promote_scheduled_exception);
  __ TailCallRuntime(Runtime::kPromoteScheduledException);

  // HandleScope limit has changed. Delete allocated extensions.
  __ bind(&delete_allocated_handles);
  __ str(r5, MemOperand(r9, kLimitOffset));
  __ mov(r4, r0);
  __ PrepareCallCFunction(1, r5);
  __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
  __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
                   1);
  __ mov(r0, r4);
  __ jmp(&leave_exit_frame);
}
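
// HandleScope bookkeeping used above (a summary): the next, limit and level
// fields live at fixed offsets from handle_scope_next_address, so all three
// are addressed relative to r9. The fast path just restores next and
// decrements level; only when the callback grew the scope past the saved
// limit (kept in r5) does delete_handle_scope_extensions run to free the
// extra blocks.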
4392
Generate(MacroAssembler * masm)4393 void CallApiCallbackStub::Generate(MacroAssembler* masm) {
4394 // ----------- S t a t e -------------
4395 // -- r0 : callee
4396 // -- r4 : call_data
4397 // -- r2 : holder
4398 // -- r1 : api_function_address
4399 // -- cp : context
4400 // --
4401 // -- sp[0] : last argument
4402 // -- ...
4403 // -- sp[(argc - 1)* 4] : first argument
4404 // -- sp[argc * 4] : receiver
4405 // -----------------------------------
4406
4407 Register callee = r0;
4408 Register call_data = r4;
4409 Register holder = r2;
4410 Register api_function_address = r1;
4411 Register context = cp;
4412
4413 typedef FunctionCallbackArguments FCA;
4414
4415 STATIC_ASSERT(FCA::kContextSaveIndex == 6);
4416 STATIC_ASSERT(FCA::kCalleeIndex == 5);
4417 STATIC_ASSERT(FCA::kDataIndex == 4);
4418 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
4419 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
4420 STATIC_ASSERT(FCA::kIsolateIndex == 1);
4421 STATIC_ASSERT(FCA::kHolderIndex == 0);
4422 STATIC_ASSERT(FCA::kNewTargetIndex == 7);
4423 STATIC_ASSERT(FCA::kArgsLength == 8);

  // new target
  __ PushRoot(Heap::kUndefinedValueRootIndex);

  // context save
  __ push(context);
  if (!is_lazy()) {
    // load context from callee
    __ ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
  }

  // callee
  __ push(callee);

  // call data
  __ push(call_data);

  // Reuse call_data's register as scratch; make sure it holds undefined for
  // the two ReturnValue slots (the load is skipped when call_data is already
  // known to be undefined).
  Register scratch = call_data;
  if (!call_data_undefined()) {
    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  }
  // return value
  __ push(scratch);
  // return value default
  __ push(scratch);
  // isolate
  __ mov(scratch,
         Operand(ExternalReference::isolate_address(masm->isolate())));
  __ push(scratch);
  // holder
  __ push(holder);

  // Prepare arguments.
  __ mov(scratch, sp);

  // Allocate space for the v8::FunctionCallbackInfo structure in the exit
  // frame, since it's not controlled by GC.
  const int kApiStackSpace = 3;

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  DCHECK(!api_function_address.is(r0) && !scratch.is(r0));
  // r0 = FunctionCallbackInfo&
  // Arguments is after the return address.
  __ add(r0, sp, Operand(1 * kPointerSize));
  // FunctionCallbackInfo::implicit_args_
  __ str(scratch, MemOperand(r0, 0 * kPointerSize));
  // FunctionCallbackInfo::values_
  __ add(ip, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
  __ str(ip, MemOperand(r0, 1 * kPointerSize));
  // FunctionCallbackInfo::length_ = argc
  __ mov(ip, Operand(argc()));
  __ str(ip, MemOperand(r0, 2 * kPointerSize));
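
  // The three words just stored mirror the leading fields of
  // v8::FunctionCallbackInfo (names as in the public API headers):
  //   implicit_args_ : base of the FCA slots pushed above (holder at index 0)
  //   values_        : address of the first JS argument; arguments are read
  //                    downwards in memory from here
  //   length_        : argc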

  ExternalReference thunk_ref =
      ExternalReference::invoke_function_callback(masm->isolate());
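
  // Note: invoke_function_callback is a thunk wrapped around the actual
  // callback so the runtime can observe API calls (e.g. while the CPU
  // profiler is active); CallApiFunctionAndReturn is expected to route the
  // call through the thunk only when such observation is needed and to call
  // the callback directly otherwise.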

  AllowExternalCallThatCantCauseGC scope(masm);
  MemOperand context_restore_operand(
      fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
  // Store callbacks return the value being stored, i.e. the first JS
  // argument.
  int return_value_offset = 0;
  if (is_store()) {
    return_value_offset = 2 + FCA::kArgsLength;
  } else {
    return_value_offset = 2 + FCA::kReturnValueOffset;
  }
  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
  // The stack space to unwind is known statically here: the receiver, the
  // argc() JS arguments and the FCA slots.
  const int stack_space = argc() + FCA::kArgsLength + 1;
  MemOperand* const stack_space_operand = NULL;

  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
                           stack_space_operand, return_value_operand,
                           &context_restore_operand);
}


void CallApiGetterStub::Generate(MacroAssembler* masm) {
  // Build the v8::PropertyCallbackInfo::args_ array on the stack and push the
  // property name below the exit frame to make the GC aware of them.
  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
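
  // Sketch of the stack after the pushes below: the name handle ends up at
  // sp[0] and args_ slot i at sp[(i + 1) * kPointerSize], matching the
  // STATIC_ASSERTs above:
  //   sp[7 * kPointerSize] : receiver
  //   sp[6 * kPointerSize] : data
  //   sp[5 * kPointerSize] : return value (undefined)
  //   sp[4 * kPointerSize] : return value default (undefined)
  //   sp[3 * kPointerSize] : isolate
  //   sp[2 * kPointerSize] : holder
  //   sp[1 * kPointerSize] : should_throw_on_error (Smi zero = false)
  //   sp[0 * kPointerSize] : property name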

  Register receiver = ApiGetterDescriptor::ReceiverRegister();
  Register holder = ApiGetterDescriptor::HolderRegister();
  Register callback = ApiGetterDescriptor::CallbackRegister();
  Register scratch = r4;
  DCHECK(!AreAliased(receiver, holder, callback, scratch));

  Register api_function_address = r2;

  __ push(receiver);
  // Push data from AccessorInfo.
  __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
  __ push(scratch);
  // Return value and return value default (both undefined).
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Push(scratch, scratch);
  __ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
  __ Push(scratch, holder);
  __ Push(Smi::kZero);  // should_throw_on_error -> false
  __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
  __ push(scratch);

  // v8::PropertyCallbackInfo::args_ array and name handle.
  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;

  // Load address of v8::PropertyCallbackInfo::args_ array and name handle.
  __ mov(r0, sp);                             // r0 = Handle<Name>
  __ add(r1, r0, Operand(1 * kPointerSize));  // r1 = v8::PCI::args_
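
  // Note: a handle is simply a pointer to a slot holding the object pointer,
  // so sp, which points at the just-pushed name, can be passed directly as
  // Handle<Name>.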

  const int kApiStackSpace = 1;
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  // Create v8::PropertyCallbackInfo object on the stack and initialize
  // its args_ field.
  __ str(r1, MemOperand(sp, 1 * kPointerSize));
  __ add(r1, sp, Operand(1 * kPointerSize));  // r1 = v8::PropertyCallbackInfo&

  ExternalReference thunk_ref =
      ExternalReference::invoke_accessor_getter_callback(isolate());

  __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
  __ ldr(api_function_address,
         FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
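
  // The getter's C++ entry point is stored as a Foreign object on the
  // AccessorInfo; the two loads above unwrap it into the raw address.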

  // +3 skips the prologue slots, the return address and the name handle.
  MemOperand return_value_operand(
      fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                           kStackUnwindSpace, NULL, return_value_operand, NULL);
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM