1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #if V8_TARGET_ARCH_ARM64
6
7 #include "src/code-stubs.h"
8 #include "src/api-arguments.h"
9 #include "src/bootstrapper.h"
10 #include "src/codegen.h"
11 #include "src/ic/handler-compiler.h"
12 #include "src/ic/ic.h"
13 #include "src/ic/stub-cache.h"
14 #include "src/isolate.h"
15 #include "src/regexp/jsregexp.h"
16 #include "src/regexp/regexp-macro-assembler.h"
17 #include "src/runtime/runtime.h"
18
19 #include "src/arm64/code-stubs-arm64.h"
20 #include "src/arm64/frames-arm64.h"
21
22 namespace v8 {
23 namespace internal {
24
25 #define __ ACCESS_MASM(masm)
26
27 void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
28 __ Mov(x5, Operand(x0, LSL, kPointerSizeLog2));
29 __ Str(x1, MemOperand(jssp, x5));
30 __ Push(x1);
31 __ Push(x2);
32 __ Add(x0, x0, Operand(3));
33 __ TailCallRuntime(Runtime::kNewArray);
34 }
35
36 void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
37 Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
38 descriptor->Initialize(x0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
39 }
40
41 void FastFunctionBindStub::InitializeDescriptor(
42 CodeStubDescriptor* descriptor) {
43 Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
44 descriptor->Initialize(x0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
45 }
46
47 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
48 ExternalReference miss) {
49 // Update the static counter each time a new code stub is generated.
50 isolate()->counters()->code_stubs()->Increment();
51
52 CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
53 int param_count = descriptor.GetRegisterParameterCount();
54 {
55 // Call the runtime system in a fresh internal frame.
56 FrameScope scope(masm, StackFrame::INTERNAL);
57 DCHECK((param_count == 0) ||
58 x0.Is(descriptor.GetRegisterParameter(param_count - 1)));
59
60 // Push arguments
61 MacroAssembler::PushPopQueue queue(masm);
62 for (int i = 0; i < param_count; ++i) {
63 queue.Queue(descriptor.GetRegisterParameter(i));
64 }
65 queue.PushQueued();
66
67 __ CallExternalReference(miss, param_count);
68 }
69
70 __ Ret();
71 }
72
73
74 void DoubleToIStub::Generate(MacroAssembler* masm) {
75 Label done;
76 Register input = source();
77 Register result = destination();
78 DCHECK(is_truncating());
79
80 DCHECK(result.Is64Bits());
81 DCHECK(jssp.Is(masm->StackPointer()));
82
83 int double_offset = offset();
84
85 DoubleRegister double_scratch = d0; // only used if !skip_fastpath()
86 Register scratch1 = GetAllocatableRegisterThatIsNotOneOf(input, result);
87 Register scratch2 =
88 GetAllocatableRegisterThatIsNotOneOf(input, result, scratch1);
89
90 __ Push(scratch1, scratch2);
91 // Account for saved regs if input is jssp.
92 if (input.is(jssp)) double_offset += 2 * kPointerSize;
93
94 if (!skip_fastpath()) {
95 __ Push(double_scratch);
96 if (input.is(jssp)) double_offset += 1 * kDoubleSize;
97 __ Ldr(double_scratch, MemOperand(input, double_offset));
98 // Try to convert with a FPU convert instruction. This handles all
99 // non-saturating cases.
100 __ TryConvertDoubleToInt64(result, double_scratch, &done);
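// The fast path above branches to 'done' on success, so reaching this point
// means the conversion saturated. Copy the raw bits of the double into the
// result register so the manual conversion below can work on them.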
101 __ Fmov(result, double_scratch);
102 } else {
103 __ Ldr(result, MemOperand(input, double_offset));
104 }
105
106 // If we reach here we need to manually convert the input to an int32.
107
108 // Extract the exponent.
109 Register exponent = scratch1;
110 __ Ubfx(exponent, result, HeapNumber::kMantissaBits,
111 HeapNumber::kExponentBits);
112
113 // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since
114 // the mantissa gets shifted completely out of the int32_t result.
115 __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
116 __ CzeroX(result, ge);
117 __ B(ge, &done);
118
119 // The Fcvtzs sequence handles all cases except where the conversion causes
120 // signed overflow in the int64_t target. Since we've already handled
121 // exponents >= 84, we can guarantee that 63 <= exponent < 84.
122
123 if (masm->emit_debug_code()) {
124 __ Cmp(exponent, HeapNumber::kExponentBias + 63);
125 // Exponents less than this should have been handled by the Fcvt case.
126 __ Check(ge, kUnexpectedValue);
127 }
128
129 // Isolate the mantissa bits, and set the implicit '1'.
130 Register mantissa = scratch2;
131 __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
132 __ Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);
133
134 // Negate the mantissa if necessary.
135 __ Tst(result, kXSignMask);
136 __ Cneg(mantissa, mantissa, ne);
137
138 // Shift the mantissa bits in the correct place. We know that we have to shift
139 // it left here, because exponent >= 63 >= kMantissaBits.
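// For example, an unbiased exponent of 70 results in a left shift of
// 70 - kMantissaBits = 18 bits.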
140 __ Sub(exponent, exponent,
141 HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
142 __ Lsl(result, mantissa, exponent);
143
144 __ Bind(&done);
145 if (!skip_fastpath()) {
146 __ Pop(double_scratch);
147 }
148 __ Pop(scratch2, scratch1);
149 __ Ret();
150 }
151
152
153 // See call site for description.
154 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
155 Register right, Register scratch,
156 FPRegister double_scratch,
157 Label* slow, Condition cond) {
158 DCHECK(!AreAliased(left, right, scratch));
159 Label not_identical, return_equal, heap_number;
160 Register result = x0;
161
162 __ Cmp(right, left);
163 __ B(ne, &not_identical);
164
165 // Test for NaN. Sadly, we can't just compare to factory::nan_value(),
166 // so we do the second best thing - test it ourselves.
167 // They are both equal and they are not both Smis so both of them are not
168 // Smis. If it's not a heap number, then return equal.
169 Register right_type = scratch;
170 if ((cond == lt) || (cond == gt)) {
171 // Call runtime on identical JSObjects. Otherwise return equal.
172 __ JumpIfObjectType(right, right_type, right_type, FIRST_JS_RECEIVER_TYPE,
173 slow, ge);
174 // Call runtime on identical symbols since we need to throw a TypeError.
175 __ Cmp(right_type, SYMBOL_TYPE);
176 __ B(eq, slow);
177 // Call runtime on identical SIMD values since we must throw a TypeError.
178 __ Cmp(right_type, SIMD128_VALUE_TYPE);
179 __ B(eq, slow);
180 } else if (cond == eq) {
181 __ JumpIfHeapNumber(right, &heap_number);
182 } else {
183 __ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
184 &heap_number);
185 // Comparing JS objects with <=, >= is complicated.
186 __ Cmp(right_type, FIRST_JS_RECEIVER_TYPE);
187 __ B(ge, slow);
188 // Call runtime on identical symbols since we need to throw a TypeError.
189 __ Cmp(right_type, SYMBOL_TYPE);
190 __ B(eq, slow);
191 // Call runtime on identical SIMD values since we must throw a TypeError.
192 __ Cmp(right_type, SIMD128_VALUE_TYPE);
193 __ B(eq, slow);
194 // Normally here we fall through to return_equal, but undefined is
195 // special: (undefined == undefined) == true, but
196 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
197 if ((cond == le) || (cond == ge)) {
198 __ Cmp(right_type, ODDBALL_TYPE);
199 __ B(ne, &return_equal);
200 __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
201 if (cond == le) {
202 // undefined <= undefined should fail.
203 __ Mov(result, GREATER);
204 } else {
205 // undefined >= undefined should fail.
206 __ Mov(result, LESS);
207 }
208 __ Ret();
209 }
210 }
211
212 __ Bind(&return_equal);
213 if (cond == lt) {
214 __ Mov(result, GREATER); // Things aren't less than themselves.
215 } else if (cond == gt) {
216 __ Mov(result, LESS); // Things aren't greater than themselves.
217 } else {
218 __ Mov(result, EQUAL); // Things are <=, >=, ==, === themselves.
219 }
220 __ Ret();
221
222 // Cases lt and gt have been handled earlier, and case ne is never seen, as
223 // it is handled in the parser (see Parser::ParseBinaryExpression). We are
224 // only concerned with cases ge, le and eq here.
225 if ((cond != lt) && (cond != gt)) {
226 DCHECK((cond == ge) || (cond == le) || (cond == eq));
227 __ Bind(&heap_number);
228 // Left and right are identical pointers to a heap number object. Return
229 // non-equal if the heap number is a NaN, and equal otherwise. Comparing
230 // the number to itself will set the overflow flag iff the number is NaN.
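// (Fcmp against an unordered (NaN) operand sets the V flag, which the 'vc'
// condition below tests.)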
231 __ Ldr(double_scratch, FieldMemOperand(right, HeapNumber::kValueOffset));
232 __ Fcmp(double_scratch, double_scratch);
233 __ B(vc, &return_equal); // Not NaN, so treat as normal heap number.
234
235 if (cond == le) {
236 __ Mov(result, GREATER);
237 } else {
238 __ Mov(result, LESS);
239 }
240 __ Ret();
241 }
242
243 // No fall through here.
244 if (FLAG_debug_code) {
245 __ Unreachable();
246 }
247
248 __ Bind(&not_identical);
249 }
250
251
252 // See call site for description.
253 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
254 Register left,
255 Register right,
256 Register left_type,
257 Register right_type,
258 Register scratch) {
259 DCHECK(!AreAliased(left, right, left_type, right_type, scratch));
260
261 if (masm->emit_debug_code()) {
262 // We assume that the arguments are not identical.
263 __ Cmp(left, right);
264 __ Assert(ne, kExpectedNonIdenticalObjects);
265 }
266
267 // If either operand is a JS object or an oddball value, then they are not
268 // equal since their pointers are different.
269 // There is no test for undetectability in strict equality.
270 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
271 Label right_non_object;
272
273 __ Cmp(right_type, FIRST_JS_RECEIVER_TYPE);
274 __ B(lt, &right_non_object);
275
276 // Return non-zero - x0 already contains a non-zero pointer.
277 DCHECK(left.is(x0) || right.is(x0));
278 Label return_not_equal;
279 __ Bind(&return_not_equal);
280 __ Ret();
281
282 __ Bind(&right_non_object);
283
284 // Check for oddballs: true, false, null, undefined.
285 __ Cmp(right_type, ODDBALL_TYPE);
286
287 // If right is not ODDBALL, test left. Otherwise, set eq condition.
288 __ Ccmp(left_type, ODDBALL_TYPE, ZFlag, ne);
289
290 // If right or left is not ODDBALL, test left >= FIRST_JS_RECEIVER_TYPE.
291 // Otherwise, right or left is ODDBALL, so set a ge condition.
292 __ Ccmp(left_type, FIRST_JS_RECEIVER_TYPE, NVFlag, ne);
293
294 __ B(ge, &return_not_equal);
295
296 // Internalized strings are unique, so they can only be equal if they are the
297 // same object. We have already tested that case, so if left and right are
298 // both internalized strings, they cannot be equal.
299 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
300 __ Orr(scratch, left_type, right_type);
301 __ TestAndBranchIfAllClear(
302 scratch, kIsNotStringMask | kIsNotInternalizedMask, &return_not_equal);
303 }
304
305
306 // See call site for description.
307 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
308 Register left,
309 Register right,
310 FPRegister left_d,
311 FPRegister right_d,
312 Label* slow,
313 bool strict) {
314 DCHECK(!AreAliased(left_d, right_d));
315 DCHECK((left.is(x0) && right.is(x1)) ||
316 (right.is(x0) && left.is(x1)));
317 Register result = x0;
318
319 Label right_is_smi, done;
320 __ JumpIfSmi(right, &right_is_smi);
321
322 // Left is the smi. Check whether right is a heap number.
323 if (strict) {
324 // If right is not a number and left is a smi, then strict equality cannot
325 // succeed. Return non-equal.
326 Label is_heap_number;
327 __ JumpIfHeapNumber(right, &is_heap_number);
328 // Register right is a non-zero pointer, which is a valid NOT_EQUAL result.
329 if (!right.is(result)) {
330 __ Mov(result, NOT_EQUAL);
331 }
332 __ Ret();
333 __ Bind(&is_heap_number);
334 } else {
335 // Smi compared non-strictly with a non-smi, non-heap-number. Call the
336 // runtime.
337 __ JumpIfNotHeapNumber(right, slow);
338 }
339
340 // Left is the smi. Right is a heap number. Load right value into right_d, and
341 // convert left smi into double in left_d.
342 __ Ldr(right_d, FieldMemOperand(right, HeapNumber::kValueOffset));
343 __ SmiUntagToDouble(left_d, left);
344 __ B(&done);
345
346 __ Bind(&right_is_smi);
347 // Right is a smi. Check whether the non-smi left is a heap number.
348 if (strict) {
349 // If left is not a number and right is a smi then strict equality cannot
350 // succeed. Return non-equal.
351 Label is_heap_number;
352 __ JumpIfHeapNumber(left, &is_heap_number);
353 // Register left is a non-zero pointer, which is a valid NOT_EQUAL result.
354 if (!left.is(result)) {
355 __ Mov(result, NOT_EQUAL);
356 }
357 __ Ret();
358 __ Bind(&is_heap_number);
359 } else {
360 // Smi compared non-strictly with a non-smi, non-heap-number. Call the
361 // runtime.
362 __ JumpIfNotHeapNumber(left, slow);
363 }
364
365 // Right is the smi. Left is a heap number. Load left value into left_d, and
366 // convert right smi into double in right_d.
367 __ Ldr(left_d, FieldMemOperand(left, HeapNumber::kValueOffset));
368 __ SmiUntagToDouble(right_d, right);
369
370 // Fall through to both_loaded_as_doubles.
371 __ Bind(&done);
372 }
373
374
375 // Fast negative check for internalized-to-internalized equality or receiver
376 // equality. Also handles the undetectable receiver to null/undefined
377 // comparison.
378 // See call site for description.
379 static void EmitCheckForInternalizedStringsOrObjects(
380 MacroAssembler* masm, Register left, Register right, Register left_map,
381 Register right_map, Register left_type, Register right_type,
382 Label* possible_strings, Label* runtime_call) {
383 DCHECK(!AreAliased(left, right, left_map, right_map, left_type, right_type));
384 Register result = x0;
385 DCHECK(left.is(x0) || right.is(x0));
386
387 Label object_test, return_equal, return_unequal, undetectable;
388 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
389 // TODO(all): reexamine this branch sequence for optimisation wrt branch
390 // prediction.
391 __ Tbnz(right_type, MaskToBit(kIsNotStringMask), &object_test);
392 __ Tbnz(right_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
393 __ Tbnz(left_type, MaskToBit(kIsNotStringMask), runtime_call);
394 __ Tbnz(left_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
395
396 // Both are internalized. We already checked they weren't the same pointer so
397 // they are not equal. Return non-equal by returning the non-zero object
398 // pointer in x0.
399 __ Ret();
400
401 __ Bind(&object_test);
402
403 Register left_bitfield = left_type;
404 Register right_bitfield = right_type;
405 __ Ldrb(right_bitfield, FieldMemOperand(right_map, Map::kBitFieldOffset));
406 __ Ldrb(left_bitfield, FieldMemOperand(left_map, Map::kBitFieldOffset));
407 __ Tbnz(right_bitfield, MaskToBit(1 << Map::kIsUndetectable), &undetectable);
408 __ Tbnz(left_bitfield, MaskToBit(1 << Map::kIsUndetectable), &return_unequal);
409
410 __ CompareInstanceType(right_map, right_type, FIRST_JS_RECEIVER_TYPE);
411 __ B(lt, runtime_call);
412 __ CompareInstanceType(left_map, left_type, FIRST_JS_RECEIVER_TYPE);
413 __ B(lt, runtime_call);
414
415 __ Bind(&return_unequal);
416 // Return non-equal by returning the non-zero object pointer in x0.
417 __ Ret();
418
419 __ Bind(&undetectable);
420 __ Tbz(left_bitfield, MaskToBit(1 << Map::kIsUndetectable), &return_unequal);
421
422 // If both sides are JSReceivers, then the result is false according to
423 // the HTML specification, which says that only comparisons with null or
424 // undefined are affected by special casing for document.all.
425 __ CompareInstanceType(right_map, right_type, ODDBALL_TYPE);
426 __ B(eq, &return_equal);
427 __ CompareInstanceType(left_map, left_type, ODDBALL_TYPE);
428 __ B(ne, &return_unequal);
429
430 __ Bind(&return_equal);
431 __ Mov(result, EQUAL);
432 __ Ret();
433 }
434
435
436 static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
437 CompareICState::State expected,
438 Label* fail) {
439 Label ok;
440 if (expected == CompareICState::SMI) {
441 __ JumpIfNotSmi(input, fail);
442 } else if (expected == CompareICState::NUMBER) {
443 __ JumpIfSmi(input, &ok);
444 __ JumpIfNotHeapNumber(input, fail);
445 }
446 // We could be strict about internalized/non-internalized here, but as long as
447 // hydrogen doesn't care, the stub doesn't have to care either.
448 __ Bind(&ok);
449 }
450
451
452 void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
453 Register lhs = x1;
454 Register rhs = x0;
455 Register result = x0;
456 Condition cond = GetCondition();
457
458 Label miss;
459 CompareICStub_CheckInputType(masm, lhs, left(), &miss);
460 CompareICStub_CheckInputType(masm, rhs, right(), &miss);
461
462 Label slow; // Call builtin.
463 Label not_smis, both_loaded_as_doubles;
464 Label not_two_smis, smi_done;
465 __ JumpIfEitherNotSmi(lhs, rhs, &not_two_smis);
466 __ SmiUntag(lhs);
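// The sign of the untagged difference encodes the comparison result: negative
// for less, zero for equal, positive for greater. Subtracting two 32-bit smi
// values in a 64-bit register cannot overflow.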
467 __ Sub(result, lhs, Operand::UntagSmi(rhs));
468 __ Ret();
469
470 __ Bind(&not_two_smis);
471
472 // NOTICE! This code is only reached after a smi-fast-case check, so it is
473 // certain that at least one operand isn't a smi.
474
475 // Handle the case where the objects are identical. Either returns the answer
476 // or goes to slow. Only falls through if the objects were not identical.
477 EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond);
478
479 // If either is a smi (we know that at least one is not a smi), then they can
480 // only be strictly equal if the other is a HeapNumber.
481 __ JumpIfBothNotSmi(lhs, rhs, &not_smis);
482
483 // Exactly one operand is a smi. EmitSmiNonsmiComparison generates code that
484 // can:
485 // 1) Return the answer.
486 // 2) Branch to the slow case.
487 // 3) Fall through to both_loaded_as_doubles.
488 // In case 3, we have found out that we were dealing with a number-number
489 // comparison. The double values of the numbers have been loaded, right into
490 // rhs_d, left into lhs_d.
491 FPRegister rhs_d = d0;
492 FPRegister lhs_d = d1;
493 EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, &slow, strict());
494
495 __ Bind(&both_loaded_as_doubles);
496 // The arguments have been converted to doubles and stored in rhs_d and
497 // lhs_d.
498 Label nan;
499 __ Fcmp(lhs_d, rhs_d);
500 __ B(vs, &nan); // Overflow flag set if either is NaN.
501 STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
502 __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
503 __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0.
504 __ Ret();
505
506 __ Bind(&nan);
507 // Left and/or right is a NaN. Load the result register with whatever makes
508 // the comparison fail, since comparisons with NaN always fail (except ne,
509 // which is filtered out at a higher level.)
510 DCHECK(cond != ne);
511 if ((cond == lt) || (cond == le)) {
512 __ Mov(result, GREATER);
513 } else {
514 __ Mov(result, LESS);
515 }
516 __ Ret();
517
518 __ Bind(&not_smis);
519 // At this point we know we are dealing with two different objects, and
520 // neither of them is a smi. The objects are in rhs_ and lhs_.
521
522 // Load the maps and types of the objects.
523 Register rhs_map = x10;
524 Register rhs_type = x11;
525 Register lhs_map = x12;
526 Register lhs_type = x13;
527 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
528 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
529 __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
530 __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
531
532 if (strict()) {
533 // This emits a non-equal return sequence for some object types, or falls
534 // through if it was not lucky.
535 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs, lhs_type, rhs_type, x14);
536 }
537
538 Label check_for_internalized_strings;
539 Label flat_string_check;
540 // Check for heap number comparison. Branch to earlier double comparison code
541 // if they are heap numbers, otherwise, branch to internalized string check.
542 __ Cmp(rhs_type, HEAP_NUMBER_TYPE);
543 __ B(ne, &check_for_internalized_strings);
544 __ Cmp(lhs_map, rhs_map);
545
546 // If maps aren't equal, lhs_ and rhs_ are not heap numbers. Branch to flat
547 // string check.
548 __ B(ne, &flat_string_check);
549
550 // Both lhs_ and rhs_ are heap numbers. Load them and branch to the double
551 // comparison code.
552 __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
553 __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
554 __ B(&both_loaded_as_doubles);
555
556 __ Bind(&check_for_internalized_strings);
557 // In the strict case, the EmitStrictTwoHeapObjectCompare already took care
558 // of internalized strings.
559 if ((cond == eq) && !strict()) {
560 // Returns an answer for two internalized strings or two detectable objects.
561 // Otherwise branches to the string case or not both strings case.
562 EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, lhs_map, rhs_map,
563 lhs_type, rhs_type,
564 &flat_string_check, &slow);
565 }
566
567 // Check for both being sequential one-byte strings,
568 // and inline if that is the case.
569 __ Bind(&flat_string_check);
570 __ JumpIfBothInstanceTypesAreNotSequentialOneByte(lhs_type, rhs_type, x14,
571 x15, &slow);
572
573 __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x10,
574 x11);
575 if (cond == eq) {
576 StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, x10, x11,
577 x12);
578 } else {
579 StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, x10, x11,
580 x12, x13);
581 }
582
583 // Never fall through to here.
584 if (FLAG_debug_code) {
585 __ Unreachable();
586 }
587
588 __ Bind(&slow);
589
590 if (cond == eq) {
591 {
592 FrameScope scope(masm, StackFrame::INTERNAL);
593 __ Push(lhs, rhs);
594 __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
595 }
596 // Turn true into 0 and false into some non-zero value.
597 STATIC_ASSERT(EQUAL == 0);
598 __ LoadRoot(x1, Heap::kTrueValueRootIndex);
599 __ Sub(x0, x0, x1);
600 __ Ret();
601 } else {
602 __ Push(lhs, rhs);
603 int ncr; // NaN compare result
604 if ((cond == lt) || (cond == le)) {
605 ncr = GREATER;
606 } else {
607 DCHECK((cond == gt) || (cond == ge)); // remaining cases
608 ncr = LESS;
609 }
610 __ Mov(x10, Smi::FromInt(ncr));
611 __ Push(x10);
612
613 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
614 // tagged as a small integer.
615 __ TailCallRuntime(Runtime::kCompare);
616 }
617
618 __ Bind(&miss);
619 GenerateMiss(masm);
620 }
621
622
623 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
624 CPURegList saved_regs = kCallerSaved;
625 CPURegList saved_fp_regs = kCallerSavedFP;
626
627 // We don't allow a GC during a store buffer overflow so there is no need to
628 // store the registers in any particular way, but we do have to store and
629 // restore them.
630
631 // We don't care if MacroAssembler scratch registers are corrupted.
632 saved_regs.Remove(*(masm->TmpList()));
633 saved_fp_regs.Remove(*(masm->FPTmpList()));
634
635 __ PushCPURegList(saved_regs);
636 if (save_doubles()) {
637 __ PushCPURegList(saved_fp_regs);
638 }
639
640 AllowExternalCallThatCantCauseGC scope(masm);
641 __ Mov(x0, ExternalReference::isolate_address(isolate()));
642 __ CallCFunction(
643 ExternalReference::store_buffer_overflow_function(isolate()), 1, 0);
644
645 if (save_doubles()) {
646 __ PopCPURegList(saved_fp_regs);
647 }
648 __ PopCPURegList(saved_regs);
649 __ Ret();
650 }
651
652
653 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
654 Isolate* isolate) {
655 StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
656 stub1.GetCode();
657 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
658 stub2.GetCode();
659 }
660
661
662 void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
663 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
664 UseScratchRegisterScope temps(masm);
665 Register saved_lr = temps.UnsafeAcquire(to_be_pushed_lr());
666 Register return_address = temps.AcquireX();
667 __ Mov(return_address, lr);
668 // Restore lr with the value it had before the call to this stub (the value
669 // which must be pushed).
670 __ Mov(lr, saved_lr);
671 __ PushSafepointRegisters();
672 __ Ret(return_address);
673 }
674
675
676 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
677 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
678 UseScratchRegisterScope temps(masm);
679 Register return_address = temps.AcquireX();
680 // Preserve the return address (lr will be clobbered by the pop).
681 __ Mov(return_address, lr);
682 __ PopSafepointRegisters();
683 __ Ret(return_address);
684 }
685
686 void MathPowStub::Generate(MacroAssembler* masm) {
687 // Stack on entry:
688 // jssp[0]: Exponent (as a tagged value).
689 // jssp[1]: Base (as a tagged value).
690 //
691 // The (tagged) result will be returned in x0, as a heap number.
692
693 Register exponent_tagged = MathPowTaggedDescriptor::exponent();
694 DCHECK(exponent_tagged.is(x11));
695 Register exponent_integer = MathPowIntegerDescriptor::exponent();
696 DCHECK(exponent_integer.is(x12));
697 Register saved_lr = x19;
698 FPRegister result_double = d0;
699 FPRegister base_double = d0;
700 FPRegister exponent_double = d1;
701 FPRegister base_double_copy = d2;
702 FPRegister scratch1_double = d6;
703 FPRegister scratch0_double = d7;
704
705 // A fast-path for integer exponents.
706 Label exponent_is_smi, exponent_is_integer;
707 // Allocate a heap number for the result, and return it.
708 Label done;
709
710 // Unpack the inputs.
711 if (exponent_type() == TAGGED) {
712 __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
713 __ Ldr(exponent_double,
714 FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
715 }
716
717 // Handle double (heap number) exponents.
718 if (exponent_type() != INTEGER) {
719 // Detect integer exponents stored as doubles and handle those in the
720 // integer fast-path.
721 __ TryRepresentDoubleAsInt64(exponent_integer, exponent_double,
722 scratch0_double, &exponent_is_integer);
723
724 {
725 AllowExternalCallThatCantCauseGC scope(masm);
726 __ Mov(saved_lr, lr);
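// Under AAPCS64 the two double arguments (base and exponent) are passed in
// d0 and d1 and the result is returned in d0, matching base_double,
// exponent_double and result_double above.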
727 __ CallCFunction(
728 ExternalReference::power_double_double_function(isolate()), 0, 2);
729 __ Mov(lr, saved_lr);
730 __ B(&done);
731 }
732
733 // Handle SMI exponents.
734 __ Bind(&exponent_is_smi);
735 // x10 base_tagged The tagged base (input).
736 // x11 exponent_tagged The tagged exponent (input).
737 // d1 base_double The base as a double.
738 __ SmiUntag(exponent_integer, exponent_tagged);
739 }
740
741 __ Bind(&exponent_is_integer);
742 // x10 base_tagged The tagged base (input).
743 // x11 exponent_tagged The tagged exponent (input).
744 // x12 exponent_integer The exponent as an integer.
745 // d1 base_double The base as a double.
746
747 // Find abs(exponent). For negative exponents, we can find the inverse later.
748 Register exponent_abs = x13;
749 __ Cmp(exponent_integer, 0);
750 __ Cneg(exponent_abs, exponent_integer, mi);
751 // x13 exponent_abs The value of abs(exponent_integer).
752
753 // Repeatedly multiply to calculate the power.
754 // result = 1.0;
755 // For each bit n (exponent_integer{n}) {
756 // if (exponent_integer{n}) {
757 // result *= base;
758 // }
759 // base *= base;
760 // if (remaining bits in exponent_integer are all zero) {
761 // break;
762 // }
763 // }
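// For example, with exponent_abs == 5 (binary 101), the loop multiplies the
// result by base^1 and base^4, giving base^5.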
764 Label power_loop, power_loop_entry, power_loop_exit;
765 __ Fmov(scratch1_double, base_double);
766 __ Fmov(base_double_copy, base_double);
767 __ Fmov(result_double, 1.0);
768 __ B(&power_loop_entry);
769
770 __ Bind(&power_loop);
771 __ Fmul(scratch1_double, scratch1_double, scratch1_double);
772 __ Lsr(exponent_abs, exponent_abs, 1);
773 __ Cbz(exponent_abs, &power_loop_exit);
774
775 __ Bind(&power_loop_entry);
776 __ Tbz(exponent_abs, 0, &power_loop);
777 __ Fmul(result_double, result_double, scratch1_double);
778 __ B(&power_loop);
779
780 __ Bind(&power_loop_exit);
781
782 // If the exponent was positive, result_double holds the result.
783 __ Tbz(exponent_integer, kXSignBit, &done);
784
785 // The exponent was negative, so find the inverse.
786 __ Fmov(scratch0_double, 1.0);
787 __ Fdiv(result_double, scratch0_double, result_double);
788 // ECMA-262 only requires Math.pow to return an 'implementation-dependent
789 // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow
790 // to calculate the subnormal value 2^-1074. This method of calculating
791 // negative powers doesn't work because 2^1074 overflows to infinity. To
792 // catch this corner-case, we bail out if the result was 0. (This can only
793 // occur if the divisor is infinity or the base is zero.)
794 __ Fcmp(result_double, 0.0);
795 __ B(&done, ne);
796
797 AllowExternalCallThatCantCauseGC scope(masm);
798 __ Mov(saved_lr, lr);
799 __ Fmov(base_double, base_double_copy);
800 __ Scvtf(exponent_double, exponent_integer);
801 __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
802 0, 2);
803 __ Mov(lr, saved_lr);
804 __ Bind(&done);
805 __ Ret();
806 }
807
808 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
809 // It is important that the following stubs are generated in this order
810 // because pregenerated stubs can only call other pregenerated stubs.
811 // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses
812 // CEntryStub.
813 CEntryStub::GenerateAheadOfTime(isolate);
814 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
815 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
816 CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
817 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
818 CreateWeakCellStub::GenerateAheadOfTime(isolate);
819 BinaryOpICStub::GenerateAheadOfTime(isolate);
820 StoreRegistersStateStub::GenerateAheadOfTime(isolate);
821 RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
822 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
823 StoreFastElementStub::GenerateAheadOfTime(isolate);
824 }
825
826
827 void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
828 StoreRegistersStateStub stub(isolate);
829 stub.GetCode();
830 }
831
832
833 void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
834 RestoreRegistersStateStub stub(isolate);
835 stub.GetCode();
836 }
837
838
839 void CodeStub::GenerateFPStubs(Isolate* isolate) {
840 // Floating-point code doesn't get special handling in ARM64, so there's
841 // nothing to do here.
842 USE(isolate);
843 }
844
845
846 bool CEntryStub::NeedsImmovableCode() {
847 // CEntryStub stores the return address on the stack before calling into
848 // C++ code. In some cases, the VM accesses this address, but it is not used
849 // when the C++ code returns to the stub because LR holds the return address
850 // in AAPCS64. If the stub is moved (perhaps during a GC), we could end up
851 // returning to dead code.
852 // TODO(jbramley): Whilst this is the only analysis that makes sense, I can't
853 // find any comment to confirm this, and I don't hit any crashes whatever
854 // this function returns. The analysis should be properly confirmed.
855 return true;
856 }
857
858
859 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
860 CEntryStub stub(isolate, 1, kDontSaveFPRegs);
861 stub.GetCode();
862 CEntryStub stub_fp(isolate, 1, kSaveFPRegs);
863 stub_fp.GetCode();
864 }
865
866
867 void CEntryStub::Generate(MacroAssembler* masm) {
868 // The Abort mechanism relies on CallRuntime, which in turn relies on
869 // CEntryStub, so until this stub has been generated, we have to use a
870 // fall-back Abort mechanism.
871 //
872 // Note that this stub must be generated before any use of Abort.
873 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
874
875 ASM_LOCATION("CEntryStub::Generate entry");
876 ProfileEntryHookStub::MaybeCallEntryHook(masm);
877
878 // Register parameters:
879 // x0: argc (including receiver, untagged)
880 // x1: target
881 // If argv_in_register():
882 // x11: argv (pointer to first argument)
883 //
884 // The stack on entry holds the arguments and the receiver, with the receiver
885 // at the highest address:
886 //
887 // jssp[argc-1]: receiver
888 // jssp[argc-2]: arg[argc-2]
889 // ... ...
890 // jssp[1]: arg[1]
891 // jssp[0]: arg[0]
892 //
893 // The arguments are in reverse order, so that arg[argc-2] is actually the
894 // first argument to the target function and arg[0] is the last.
895 DCHECK(jssp.Is(__ StackPointer()));
896 const Register& argc_input = x0;
897 const Register& target_input = x1;
898
899 // Calculate argv, argc and the target address, and store them in
900 // callee-saved registers so we can retry the call without having to reload
901 // these arguments.
902 // TODO(jbramley): If the first call attempt succeeds in the common case (as
903 // it should), then we might be better off putting these parameters directly
904 // into their argument registers, rather than using callee-saved registers and
905 // preserving them on the stack.
906 const Register& argv = x21;
907 const Register& argc = x22;
908 const Register& target = x23;
909
910 // Derive argv from the stack pointer so that it points to the first argument
911 // (arg[argc-2]), or just below the receiver in case there are no arguments.
912 // - Adjust for the arg[] array.
913 Register temp_argv = x11;
914 if (!argv_in_register()) {
915 __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
916 // - Adjust for the receiver.
917 __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
918 }
919
920 // Reserve three slots to preserve x21-x23 callee-saved registers. If the
921 // result size is too large to be returned in registers then also reserve
922 // space for the return value.
923 int extra_stack_space = 3 + (result_size() <= 2 ? 0 : result_size());
924 // Enter the exit frame.
925 FrameScope scope(masm, StackFrame::MANUAL);
926 __ EnterExitFrame(
927 save_doubles(), x10, extra_stack_space,
928 is_builtin_exit() ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
929 DCHECK(csp.Is(__ StackPointer()));
930
931 // Poke callee-saved registers into reserved space.
932 __ Poke(argv, 1 * kPointerSize);
933 __ Poke(argc, 2 * kPointerSize);
934 __ Poke(target, 3 * kPointerSize);
935
936 if (result_size() > 2) {
937 // Save the location of the return value into x8 for call.
938 __ Add(x8, __ StackPointer(), Operand(4 * kPointerSize));
939 }
940
941 // We normally only keep tagged values in callee-saved registers, as they
942 // could be pushed onto the stack by called stubs and functions, and on the
943 // stack they can confuse the GC. However, we're only calling C functions
944 // which can push arbitrary data onto the stack anyway, and so the GC won't
945 // examine that part of the stack.
946 __ Mov(argc, argc_input);
947 __ Mov(target, target_input);
948 __ Mov(argv, temp_argv);
949
950 // x21 : argv
951 // x22 : argc
952 // x23 : call target
953 //
954 // The stack (on entry) holds the arguments and the receiver, with the
955 // receiver at the highest address:
956 //
957 // argv[8]: receiver
958 // argv -> argv[0]: arg[argc-2]
959 // ... ...
960 // argv[...]: arg[1]
961 // argv[...]: arg[0]
962 //
963 // Immediately below (after) this is the exit frame, as constructed by
964 // EnterExitFrame:
965 // fp[8]: CallerPC (lr)
966 // fp -> fp[0]: CallerFP (old fp)
967 // fp[-8]: Space reserved for SPOffset.
968 // fp[-16]: CodeObject()
969 // csp[...]: Saved doubles, if saved_doubles is true.
970 // csp[32]: Alignment padding, if necessary.
971 // csp[24]: Preserved x23 (used for target).
972 // csp[16]: Preserved x22 (used for argc).
973 // csp[8]: Preserved x21 (used for argv).
974 // csp -> csp[0]: Space reserved for the return address.
975 //
976 // After a successful call, the exit frame, preserved registers (x21-x23) and
977 // the arguments (including the receiver) are dropped or popped as
978 // appropriate. The stub then returns.
979 //
980 // After an unsuccessful call, the exit frame and suchlike are left
981 // untouched, and the stub throws an exception by jumping to the
982 // exception_returned label.
983
984 DCHECK(csp.Is(__ StackPointer()));
985
986 // Prepare AAPCS64 arguments to pass to the builtin.
987 __ Mov(x0, argc);
988 __ Mov(x1, argv);
989 __ Mov(x2, ExternalReference::isolate_address(isolate()));
990
991 Label return_location;
992 __ Adr(x12, &return_location);
993 __ Poke(x12, 0);
994
995 if (__ emit_debug_code()) {
996 // Verify that the slot below fp[kSPOffset]-8 points to the return location
997 // (currently in x12).
998 UseScratchRegisterScope temps(masm);
999 Register temp = temps.AcquireX();
1000 __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
1001 __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSize)));
1002 __ Cmp(temp, x12);
1003 __ Check(eq, kReturnAddressNotFoundInFrame);
1004 }
1005
1006 // Call the builtin.
1007 __ Blr(target);
1008 __ Bind(&return_location);
1009
1010 if (result_size() > 2) {
1011 DCHECK_EQ(3, result_size());
1012 // Read result values stored on stack.
1013 __ Ldr(x0, MemOperand(__ StackPointer(), 4 * kPointerSize));
1014 __ Ldr(x1, MemOperand(__ StackPointer(), 5 * kPointerSize));
1015 __ Ldr(x2, MemOperand(__ StackPointer(), 6 * kPointerSize));
1016 }
1017 // Result returned in x0, x1:x0 or x2:x1:x0 - do not destroy these registers!
1018
1019 // x0 result0 The return code from the call.
1020 // x1 result1 For calls which return ObjectPair or ObjectTriple.
1021 // x2 result2 For calls which return ObjectTriple.
1022 // x21 argv
1023 // x22 argc
1024 // x23 target
1025 const Register& result = x0;
1026
1027 // Check result for exception sentinel.
1028 Label exception_returned;
1029 __ CompareRoot(result, Heap::kExceptionRootIndex);
1030 __ B(eq, &exception_returned);
1031
1032 // The call succeeded, so unwind the stack and return.
1033
1034 // Restore callee-saved registers x21-x23.
1035 __ Mov(x11, argc);
1036
1037 __ Peek(argv, 1 * kPointerSize);
1038 __ Peek(argc, 2 * kPointerSize);
1039 __ Peek(target, 3 * kPointerSize);
1040
1041 __ LeaveExitFrame(save_doubles(), x10, true);
1042 DCHECK(jssp.Is(__ StackPointer()));
1043 if (!argv_in_register()) {
1044 // Drop the remaining stack slots and return from the stub.
1045 __ Drop(x11);
1046 }
1047 __ AssertFPCRState();
1048 __ Ret();
1049
1050 // The stack pointer is still csp if we aren't returning, and the frame
1051 // hasn't changed (except for the return address).
1052 __ SetStackPointer(csp);
1053
1054 // Handling of exception.
1055 __ Bind(&exception_returned);
1056
1057 ExternalReference pending_handler_context_address(
1058 Isolate::kPendingHandlerContextAddress, isolate());
1059 ExternalReference pending_handler_code_address(
1060 Isolate::kPendingHandlerCodeAddress, isolate());
1061 ExternalReference pending_handler_offset_address(
1062 Isolate::kPendingHandlerOffsetAddress, isolate());
1063 ExternalReference pending_handler_fp_address(
1064 Isolate::kPendingHandlerFPAddress, isolate());
1065 ExternalReference pending_handler_sp_address(
1066 Isolate::kPendingHandlerSPAddress, isolate());
1067
1068 // Ask the runtime for help to determine the handler. This will set x0 to
1069 // contain the current pending exception, don't clobber it.
1070 ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
1071 isolate());
1072 DCHECK(csp.Is(masm->StackPointer()));
1073 {
1074 FrameScope scope(masm, StackFrame::MANUAL);
1075 __ Mov(x0, 0); // argc.
1076 __ Mov(x1, 0); // argv.
1077 __ Mov(x2, ExternalReference::isolate_address(isolate()));
1078 __ CallCFunction(find_handler, 3);
1079 }
1080
1081 // We didn't execute a return case, so the stack frame hasn't been updated
1082 // (except for the return address slot). However, we don't need to initialize
1083 // jssp because the throw method will immediately overwrite it when it
1084 // unwinds the stack.
1085 __ SetStackPointer(jssp);
1086
1087 // Retrieve the handler context, SP and FP.
1088 __ Mov(cp, Operand(pending_handler_context_address));
1089 __ Ldr(cp, MemOperand(cp));
1090 __ Mov(jssp, Operand(pending_handler_sp_address));
1091 __ Ldr(jssp, MemOperand(jssp));
1092 __ Mov(csp, jssp);
1093 __ Mov(fp, Operand(pending_handler_fp_address));
1094 __ Ldr(fp, MemOperand(fp));
1095
1096 // If the handler is a JS frame, restore the context to the frame. Note that
1097 // the context will be set to (cp == 0) for non-JS frames.
1098 Label not_js_frame;
1099 __ Cbz(cp, &not_js_frame);
1100 __ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
1101 __ Bind(&not_js_frame);
1102
1103 // Compute the handler entry address and jump to it.
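// The entry address is the code object's first instruction (kHeaderSize -
// kHeapObjectTag past the tagged pointer) plus the pending handler offset.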
1104 __ Mov(x10, Operand(pending_handler_code_address));
1105 __ Ldr(x10, MemOperand(x10));
1106 __ Mov(x11, Operand(pending_handler_offset_address));
1107 __ Ldr(x11, MemOperand(x11));
1108 __ Add(x10, x10, Code::kHeaderSize - kHeapObjectTag);
1109 __ Add(x10, x10, x11);
1110 __ Br(x10);
1111 }
1112
1113
1114 // This is the entry point from C++. 5 arguments are provided in x0-x4.
1115 // See use of the CALL_GENERATED_CODE macro for example in src/execution.cc.
1116 // Input:
1117 // x0: code entry.
1118 // x1: function.
1119 // x2: receiver.
1120 // x3: argc.
1121 // x4: argv.
1122 // Output:
1123 // x0: result.
1124 void JSEntryStub::Generate(MacroAssembler* masm) {
1125 DCHECK(jssp.Is(__ StackPointer()));
1126 Register code_entry = x0;
1127
1128 // Enable instruction instrumentation. This only works on the simulator, and
1129 // will have no effect on the model or real hardware.
1130 __ EnableInstrumentation();
1131
1132 Label invoke, handler_entry, exit;
1133
1134 // Push callee-saved registers and synchronize the system stack pointer (csp)
1135 // and the JavaScript stack pointer (jssp).
1136 //
1137 // We must not write to jssp until after the PushCalleeSavedRegisters()
1138 // call, since jssp is itself a callee-saved register.
1139 __ SetStackPointer(csp);
1140 __ PushCalleeSavedRegisters();
1141 __ Mov(jssp, csp);
1142 __ SetStackPointer(jssp);
1143
1144 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1145
1146 // Set up the reserved register for 0.0.
1147 __ Fmov(fp_zero, 0.0);
1148
1149 // Build an entry frame (see layout below).
1150 int marker = type();
1151 int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used.
1152 __ Mov(x13, bad_frame_pointer);
1153 __ Mov(x12, Smi::FromInt(marker));
1154 __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
1155 __ Ldr(x10, MemOperand(x11));
1156
1157 __ Push(x13, x12, xzr, x10);
1158 // Set up fp.
1159 __ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
1160
1161 // Push the JS entry frame marker. Also set js_entry_sp if this is the
1162 // outermost JS call.
1163 Label non_outermost_js, done;
1164 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
1165 __ Mov(x10, ExternalReference(js_entry_sp));
1166 __ Ldr(x11, MemOperand(x10));
1167 __ Cbnz(x11, &non_outermost_js);
1168 __ Str(fp, MemOperand(x10));
1169 __ Mov(x12, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
1170 __ Push(x12);
1171 __ B(&done);
1172 __ Bind(&non_outermost_js);
1173 // We spare one instruction by pushing xzr since the marker is 0.
1174 DCHECK(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
1175 __ Push(xzr);
1176 __ Bind(&done);
1177
1178 // The frame set up looks like this:
1179 // jssp[0] : JS entry frame marker.
1180 // jssp[1] : C entry FP.
1181 // jssp[2] : stack frame marker.
1182 // jssp[3] : stack frame marker.
1183 // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
1184
1185
1186 // Jump to a faked try block that does the invoke, with a faked catch
1187 // block that sets the pending exception.
1188 __ B(&invoke);
1189
1190 // Prevent the constant pool from being emitted between the record of the
1191 // handler_entry position and the first instruction of the sequence here.
1192 // There is no risk because Assembler::Emit() emits the instruction before
1193 // checking for constant pool emission, but we do not want to depend on
1194 // that.
1195 {
1196 Assembler::BlockPoolsScope block_pools(masm);
1197 __ bind(&handler_entry);
1198 handler_offset_ = handler_entry.pos();
1199 // Caught exception: Store result (exception) in the pending exception
1200 // field in the JSEnv and return a failure sentinel. Coming in here the
1201 // fp will be invalid because the PushTryHandler below sets it to 0 to
1202 // signal the existence of the JSEntry frame.
1203 __ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1204 isolate())));
1205 }
1206 __ Str(code_entry, MemOperand(x10));
1207 __ LoadRoot(x0, Heap::kExceptionRootIndex);
1208 __ B(&exit);
1209
1210 // Invoke: Link this frame into the handler chain.
1211 __ Bind(&invoke);
1212 __ PushStackHandler();
1213 // If an exception not caught by another handler occurs, this handler
1214 // returns control to the code after the B(&invoke) above, which
1215 // restores all callee-saved registers (including cp and fp) to their
1216 // saved values before returning a failure to C.
1217
1218 // Invoke the function by calling through the JS entry trampoline builtin.
1219 // Notice that we cannot store a reference to the trampoline code directly in
1220 // this stub, because runtime stubs are not traversed when doing GC.
1221
1222 // Expected registers by Builtins::JSEntryTrampoline
1223 // x0: code entry.
1224 // x1: function.
1225 // x2: receiver.
1226 // x3: argc.
1227 // x4: argv.
1228 ExternalReference entry(type() == StackFrame::ENTRY_CONSTRUCT
1229 ? Builtins::kJSConstructEntryTrampoline
1230 : Builtins::kJSEntryTrampoline,
1231 isolate());
1232 __ Mov(x10, entry);
1233
1234 // Call the JSEntryTrampoline.
1235 __ Ldr(x11, MemOperand(x10)); // Dereference the address.
1236 __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag);
1237 __ Blr(x12);
1238
1239 // Unlink this frame from the handler chain.
1240 __ PopStackHandler();
1241
1242
1243 __ Bind(&exit);
1244 // x0 holds the result.
1245 // The stack pointer points to the top of the entry frame pushed on entry from
1246 // C++ (at the beginning of this stub):
1247 // jssp[0] : JS entry frame marker.
1248 // jssp[1] : C entry FP.
1249 // jssp[2] : stack frame marker.
1250 // jssp[3] : stack frame marker.
1251 // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
1252
1253 // Check if the current stack frame is marked as the outermost JS frame.
1254 Label non_outermost_js_2;
1255 __ Pop(x10);
1256 __ Cmp(x10, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
1257 __ B(ne, &non_outermost_js_2);
1258 __ Mov(x11, ExternalReference(js_entry_sp));
1259 __ Str(xzr, MemOperand(x11));
1260 __ Bind(&non_outermost_js_2);
1261
1262 // Restore the top frame descriptors from the stack.
1263 __ Pop(x10);
1264 __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
1265 __ Str(x10, MemOperand(x11));
1266
1267 // Reset the stack to the callee saved registers.
1268 __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes);
1269 // Restore the callee-saved registers and return.
1270 DCHECK(jssp.Is(__ StackPointer()));
1271 __ Mov(csp, jssp);
1272 __ SetStackPointer(csp);
1273 __ PopCalleeSavedRegisters();
1274 // After this point, we must not modify jssp because it is a callee-saved
1275 // register which we have just restored.
1276 __ Ret();
1277 }
1278
1279
1280 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
1281 Label miss;
1282 Register receiver = LoadDescriptor::ReceiverRegister();
1283 // Ensure that the vector and slot registers won't be clobbered before
1284 // calling the miss handler.
1285 DCHECK(!AreAliased(x10, x11, LoadWithVectorDescriptor::VectorRegister(),
1286 LoadWithVectorDescriptor::SlotRegister()));
1287
1288 NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10,
1289 x11, &miss);
1290
1291 __ Bind(&miss);
1292 PropertyAccessCompiler::TailCallBuiltin(
1293 masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
1294 }
1295
1296
1297 void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
1298 // Return address is in lr.
1299 Label miss;
1300
1301 Register receiver = LoadDescriptor::ReceiverRegister();
1302 Register index = LoadDescriptor::NameRegister();
1303 Register result = x0;
1304 Register scratch = x10;
1305 DCHECK(!scratch.is(receiver) && !scratch.is(index));
1306 DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
1307 result.is(LoadWithVectorDescriptor::SlotRegister()));
1308
1309 // StringCharAtGenerator doesn't use the result register until it's passed
1310 // the different miss possibilities. If it did, we would have a conflict
1311 // when FLAG_vector_ics is true.
1312 StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
1313 &miss, // When not a string.
1314 &miss, // When not a number.
1315 &miss, // When index out of range.
1316 RECEIVER_IS_STRING);
1317 char_at_generator.GenerateFast(masm);
1318 __ Ret();
1319
1320 StubRuntimeCallHelper call_helper;
1321 char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
1322
1323 __ Bind(&miss);
1324 PropertyAccessCompiler::TailCallBuiltin(
1325 masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1326 }
1327
1328
1329 void RegExpExecStub::Generate(MacroAssembler* masm) {
1330 #ifdef V8_INTERPRETED_REGEXP
1331 __ TailCallRuntime(Runtime::kRegExpExec);
1332 #else // V8_INTERPRETED_REGEXP
1333
1334 // Stack frame on entry.
1335 // jssp[0]: last_match_info (expected JSArray)
1336 // jssp[8]: previous index
1337 // jssp[16]: subject string
1338 // jssp[24]: JSRegExp object
1339 Label runtime;
1340
1341 // Use of registers for this function.
1342
1343 // Variable registers:
1344 // x10-x13 used as scratch registers
1345 // w0 string_type type of subject string
1346 // x2 jsstring_length subject string length
1347 // x3 jsregexp_object JSRegExp object
1348 // w4 string_encoding Latin1 or UC16
1349 // w5 sliced_string_offset if the string is a SlicedString
1350 // offset to the underlying string
1351 // w6 string_representation groups attributes of the string:
1352 // - is a string
1353 // - type of the string
1354 // - is a short external string
1355 Register string_type = w0;
1356 Register jsstring_length = x2;
1357 Register jsregexp_object = x3;
1358 Register string_encoding = w4;
1359 Register sliced_string_offset = w5;
1360 Register string_representation = w6;
1361
1362 // These are in callee save registers and will be preserved by the call
1363 // to the native RegExp code, as this code is called using the normal
1364 // C calling convention. When calling directly from generated code the
1365 // native RegExp code will not do a GC and therefore the content of
1366 // these registers are safe to use after the call.
1367
1368 // x19 subject subject string
1369 // x20 regexp_data RegExp data (FixedArray)
1370 // x21 last_match_info_elements info relative to the last match
1371 // (FixedArray)
1372 // x22 code_object generated regexp code
1373 Register subject = x19;
1374 Register regexp_data = x20;
1375 Register last_match_info_elements = x21;
1376 Register code_object = x22;
1377
1378 // Stack frame.
1379 // jssp[00]: last_match_info (JSArray)
1380 // jssp[08]: previous index
1381 // jssp[16]: subject string
1382 // jssp[24]: JSRegExp object
1383
1384 const int kLastMatchInfoOffset = 0 * kPointerSize;
1385 const int kPreviousIndexOffset = 1 * kPointerSize;
1386 const int kSubjectOffset = 2 * kPointerSize;
1387 const int kJSRegExpOffset = 3 * kPointerSize;
1388
1389 // Ensure that a RegExp stack is allocated.
1390 ExternalReference address_of_regexp_stack_memory_address =
1391 ExternalReference::address_of_regexp_stack_memory_address(isolate());
1392 ExternalReference address_of_regexp_stack_memory_size =
1393 ExternalReference::address_of_regexp_stack_memory_size(isolate());
1394 __ Mov(x10, address_of_regexp_stack_memory_size);
1395 __ Ldr(x10, MemOperand(x10));
1396 __ Cbz(x10, &runtime);
1397
1398 // Check that the first argument is a JSRegExp object.
1399 DCHECK(jssp.Is(__ StackPointer()));
1400 __ Peek(jsregexp_object, kJSRegExpOffset);
1401 __ JumpIfSmi(jsregexp_object, &runtime);
1402 __ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime);
1403
1404 // Check that the RegExp has been compiled (data contains a fixed array).
1405 __ Ldr(regexp_data, FieldMemOperand(jsregexp_object, JSRegExp::kDataOffset));
1406 if (FLAG_debug_code) {
1407 STATIC_ASSERT(kSmiTag == 0);
1408 __ Tst(regexp_data, kSmiTagMask);
1409 __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
1410 __ CompareObjectType(regexp_data, x10, x10, FIXED_ARRAY_TYPE);
1411 __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
1412 }
1413
1414 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
1415 __ Ldr(x10, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
1416 __ Cmp(x10, Smi::FromInt(JSRegExp::IRREGEXP));
1417 __ B(ne, &runtime);
1418
1419 // Check that the number of captures fit in the static offsets vector buffer.
1420 // We always have at least one capture for the whole match, plus additional
1421 // ones due to capturing parentheses. A capture takes 2 registers.
1422 // The number of capture registers then is (number_of_captures + 1) * 2.
1423 __ Ldrsw(x10,
1424 UntagSmiFieldMemOperand(regexp_data,
1425 JSRegExp::kIrregexpCaptureCountOffset));
1426 // Check (number_of_captures + 1) * 2 <= offsets vector size
1427 // number_of_captures * 2 <= offsets vector size - 2
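// For example, a regexp with 3 capturing groups needs (3 + 1) * 2 = 8 entries
// in the static offsets vector.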
1428 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
1429 __ Add(x10, x10, x10);
1430 __ Cmp(x10, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
1431 __ B(hi, &runtime);
1432
1433 // Initialize offset for possibly sliced string.
1434 __ Mov(sliced_string_offset, 0);
1435
1436 DCHECK(jssp.Is(__ StackPointer()));
1437 __ Peek(subject, kSubjectOffset);
1438 __ JumpIfSmi(subject, &runtime);
1439
1440 __ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset));
1441
1442 // Handle subject string according to its encoding and representation:
1443 // (1) Sequential string? If yes, go to (4).
1444 // (2) Sequential or cons? If not, go to (5).
1445 // (3) Cons string. If the string is flat, replace subject with first string
1446 // and go to (1). Otherwise bail out to runtime.
1447 // (4) Sequential string. Load regexp code according to encoding.
1448 // (E) Carry on.
1449 /// [...]
1450
1451 // Deferred code at the end of the stub:
1452 // (5) Long external string? If not, go to (7).
1453 // (6) External string. Make it, offset-wise, look like a sequential string.
1454 // Go to (4).
1455 // (7) Short external string or not a string? If yes, bail out to runtime.
1456 // (8) Sliced string. Replace subject with parent. Go to (1).
1457
1458 Label check_underlying; // (1)
1459 Label seq_string; // (4)
1460 Label not_seq_nor_cons; // (5)
1461 Label external_string; // (6)
1462 Label not_long_external; // (7)
1463
1464 __ Bind(&check_underlying);
1465 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
1466 __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
1467
1468 // (1) Sequential string? If yes, go to (4).
1469 __ And(string_representation,
1470 string_type,
1471 kIsNotStringMask |
1472 kStringRepresentationMask |
1473 kShortExternalStringMask);
1474 // We depend on the fact that Strings of type
1475 // SeqString and not ShortExternalString are defined
1476 // by the following pattern:
1477 // string_type: 0XX0 XX00
1478 // ^ ^ ^^
1479 // | | ||
1480 // | | is a SeqString
1481 // | is not a short external String
1482 // is a String
1483 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
1484 STATIC_ASSERT(kShortExternalStringTag != 0);
1485 __ Cbz(string_representation, &seq_string); // Go to (4).
1486
1487 // (2) Sequential or cons? If not, go to (5).
1488 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
1489 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
1490 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
1491 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
1492 __ Cmp(string_representation, kExternalStringTag);
1493 __ B(ge, &not_seq_nor_cons); // Go to (5).
1494
1495 // (3) Cons string. Check that it's flat.
1496 __ Ldr(x10, FieldMemOperand(subject, ConsString::kSecondOffset));
1497 __ JumpIfNotRoot(x10, Heap::kempty_stringRootIndex, &runtime);
1498 // Replace subject with first string.
1499 __ Ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
1500 __ B(&check_underlying);
1501
1502 // (4) Sequential string. Load regexp code according to encoding.
1503 __ Bind(&seq_string);
1504
1505 // Check that the third argument is a positive smi less than the subject
1506 // string length. A negative value will be greater (unsigned comparison).
1507 DCHECK(jssp.Is(__ StackPointer()));
1508 __ Peek(x10, kPreviousIndexOffset);
1509 __ JumpIfNotSmi(x10, &runtime);
1510 __ Cmp(jsstring_length, x10);
1511 __ B(ls, &runtime);
1512
1513 // Argument 2 (x1): We need to load argument 2 (the previous index) into x1
1514 // before entering the exit frame.
1515 __ SmiUntag(x1, x10);
1516
1517 // The third bit determines the string encoding in string_type.
1518 STATIC_ASSERT(kOneByteStringTag == 0x04);
1519 STATIC_ASSERT(kTwoByteStringTag == 0x00);
1520 STATIC_ASSERT(kStringEncodingMask == 0x04);
1521
1522 // Find the code object based on the assumptions above.
1523 // kDataOneByteCodeOffset and kDataUC16CodeOffset are adjacent; add an offset
1524 // of kPointerSize to reach the latter.
1525 STATIC_ASSERT(JSRegExp::kDataOneByteCodeOffset + kPointerSize ==
1526 JSRegExp::kDataUC16CodeOffset);
1527 __ Mov(x10, kPointerSize);
1528 // We will need the encoding later: Latin1 = 0x04
1529 // UC16 = 0x00
1530 __ Ands(string_encoding, string_type, kStringEncodingMask);
1531 __ CzeroX(x10, ne);
1532 __ Add(x10, regexp_data, x10);
1533 __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataOneByteCodeOffset));
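// At this point x10 was 0 for Latin1 and kPointerSize for UC16, so the load
// above picked up either the one-byte or the UC16 code object.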
1534
1535 // (E) Carry on. String handling is done.
1536
1537 // Check that the irregexp code has been generated for the actual string
1538 // encoding. If it has, the field contains a code object; otherwise it contains
1539 // a smi (code flushing support).
1540 __ JumpIfSmi(code_object, &runtime);
1541
1542 // All checks done. Now push arguments for native regexp code.
1543 __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1,
1544 x10,
1545 x11);
1546
1547 // Isolates: note we add an additional parameter here (isolate pointer).
1548 __ EnterExitFrame(false, x10, 1);
1549 DCHECK(csp.Is(__ StackPointer()));
1550
1551 // We have 9 arguments to pass to the regexp code, therefore we have to pass
1552 // one on the stack and the rest as registers.
1553
1554 // Note that the placement of the argument on the stack isn't standard
1555 // AAPCS64:
1556 // csp[0]: Space for the return address placed by DirectCEntryStub.
1557 // csp[8]: Argument 9, the current isolate address.
1558
1559 __ Mov(x10, ExternalReference::isolate_address(isolate()));
1560 __ Poke(x10, kPointerSize);
1561
1562 Register length = w11;
1563 Register previous_index_in_bytes = w12;
1564 Register start = x13;
1565
1566 // Load start of the subject string.
1567 __ Add(start, subject, SeqString::kHeaderSize - kHeapObjectTag);
1568 // Load the length from the original subject string from the previous stack
1569 // frame. Therefore we have to use fp, which points exactly to two pointer
1570 // sizes below the previous sp. (Because creating a new stack frame pushes
1571 // the previous fp onto the stack and decrements sp by 2 * kPointerSize.)
1572 __ Ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
1573 __ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
1574
1575 // Handle UC16 encoding, two bytes make one character.
1576 // string_encoding: if Latin1: 0x04
1577 // if UC16: 0x00
1578 STATIC_ASSERT(kStringEncodingMask == 0x04);
1579 __ Ubfx(string_encoding, string_encoding, 2, 1);
1580 __ Eor(string_encoding, string_encoding, 1);
1581 // string_encoding: if Latin1: 0
1582 // if UC16: 1
1583
1584 // Convert string positions from characters to bytes.
1585 // Previous index is in x1.
1586 __ Lsl(previous_index_in_bytes, w1, string_encoding);
1587 __ Lsl(length, length, string_encoding);
1588 __ Lsl(sliced_string_offset, sliced_string_offset, string_encoding);
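// Example (illustrative): with a UC16 subject string_encoding is 1, so a
// previous index of 3 characters becomes 6 bytes; for Latin1 the shift amount
// is 0 and character positions are already byte offsets.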
1589
1590 // Argument 1 (x0): Subject string.
1591 __ Mov(x0, subject);
1592
1593 // Argument 2 (x1): Previous index, already there.
1594
1595 // Argument 3 (x2): Get the start of input.
1596 // Start of input = start of string + previous index + substring offset
1597 // (0 if the string
1598 // is not sliced).
1599 __ Add(w10, previous_index_in_bytes, sliced_string_offset);
1600 __ Add(x2, start, Operand(w10, UXTW));
1601
1602 // Argument 4 (x3):
1603 // End of input = start of input + (length of input - previous index)
1604 __ Sub(w10, length, previous_index_in_bytes);
1605 __ Add(x3, x2, Operand(w10, UXTW));
1606
1607 // Argument 5 (x4): static offsets vector buffer.
1608 __ Mov(x4, ExternalReference::address_of_static_offsets_vector(isolate()));
1609
1610 // Argument 6 (x5): Set the number of capture registers to zero to force
1611 // global regexps to behave as non-global. This stub is not used for global
1612 // regexps.
1613 __ Mov(x5, 0);
1614
1615 // Argument 7 (x6): Start (high end) of backtracking stack memory area.
1616 __ Mov(x10, address_of_regexp_stack_memory_address);
1617 __ Ldr(x10, MemOperand(x10));
1618 __ Mov(x11, address_of_regexp_stack_memory_size);
1619 __ Ldr(x11, MemOperand(x11));
1620 __ Add(x6, x10, x11);
1621
1622 // Argument 8 (x7): Indicate that this is a direct call from JavaScript.
1623 __ Mov(x7, 1);
1624
1625 // Locate the code entry and call it.
1626 __ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag);
1627 DirectCEntryStub stub(isolate());
1628 stub.GenerateCall(masm, code_object);
1629
1630 __ LeaveExitFrame(false, x10, true);
1631
1632 // The generated regexp code returns an int32 in w0.
1633 Label failure, exception;
1634 __ CompareAndBranch(w0, NativeRegExpMacroAssembler::FAILURE, eq, &failure);
1635 __ CompareAndBranch(w0,
1636 NativeRegExpMacroAssembler::EXCEPTION,
1637 eq,
1638 &exception);
1639 __ CompareAndBranch(w0, NativeRegExpMacroAssembler::RETRY, eq, &runtime);
1640
1641 // Success: process the result from the native regexp code.
1642 Register number_of_capture_registers = x12;
1643
1644 // Calculate number of capture registers (number_of_captures + 1) * 2
1645 // and store it in the last match info.
1646 __ Ldrsw(x10,
1647 UntagSmiFieldMemOperand(regexp_data,
1648 JSRegExp::kIrregexpCaptureCountOffset));
1649 __ Add(x10, x10, x10);
1650 __ Add(number_of_capture_registers, x10, 2);
1651
1652 // Check that the last match info is a FixedArray.
1653 DCHECK(jssp.Is(__ StackPointer()));
1654 __ Peek(last_match_info_elements, kLastMatchInfoOffset);
1655 __ JumpIfSmi(last_match_info_elements, &runtime);
1656
1657 // Check that the object has fast elements.
1658 __ Ldr(x10,
1659 FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
1660 __ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime);
1661
1662 // Check that the last match info has space for the capture registers and the
1663 // additional information (overhead).
1664 // (number_of_captures + 1) * 2 + overhead <= last match info size
1665 // (number_of_captures * 2) + 2 + overhead <= last match info size
1666 // number_of_capture_registers + overhead <= last match info size
1667 __ Ldrsw(x10,
1668 UntagSmiFieldMemOperand(last_match_info_elements,
1669 FixedArray::kLengthOffset));
1670 __ Add(x11, number_of_capture_registers, RegExpMatchInfo::kLastMatchOverhead);
1671 __ Cmp(x11, x10);
1672 __ B(gt, &runtime);
1673
1674 // Store the capture count.
1675 __ SmiTag(x10, number_of_capture_registers);
1676 __ Str(x10, FieldMemOperand(last_match_info_elements,
1677 RegExpMatchInfo::kNumberOfCapturesOffset));
1678 // Store last subject and last input.
1679 __ Str(subject, FieldMemOperand(last_match_info_elements,
1680 RegExpMatchInfo::kLastSubjectOffset));
1681 // Use x10 as the subject string in order to only need
1682 // one RecordWriteStub.
1683 __ Mov(x10, subject);
1684 __ RecordWriteField(last_match_info_elements,
1685 RegExpMatchInfo::kLastSubjectOffset, x10, x11,
1686 kLRHasNotBeenSaved, kDontSaveFPRegs);
1687 __ Str(subject, FieldMemOperand(last_match_info_elements,
1688 RegExpMatchInfo::kLastInputOffset));
1689 __ Mov(x10, subject);
1690 __ RecordWriteField(last_match_info_elements,
1691 RegExpMatchInfo::kLastInputOffset, x10, x11,
1692 kLRHasNotBeenSaved, kDontSaveFPRegs);
1693
1694 Register last_match_offsets = x13;
1695 Register offsets_vector_index = x14;
1696 Register current_offset = x15;
1697
1698 // Get the static offsets vector filled by the native regexp code
1699 // and fill the last match info.
1700 ExternalReference address_of_static_offsets_vector =
1701 ExternalReference::address_of_static_offsets_vector(isolate());
1702 __ Mov(offsets_vector_index, address_of_static_offsets_vector);
1703
1704 Label next_capture, done;
1705 // Capture register counter starts from number of capture registers and
1706 // iterates down to zero (inclusive).
1707 __ Add(last_match_offsets, last_match_info_elements,
1708 RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag);
1709 __ Bind(&next_capture);
1710 __ Subs(number_of_capture_registers, number_of_capture_registers, 2);
1711 __ B(mi, &done);
1712 // Read two 32-bit values from the static offsets vector buffer into
1713 // an X register.
1714 __ Ldr(current_offset,
1715 MemOperand(offsets_vector_index, kWRegSize * 2, PostIndex));
1716 // Store the smi values in the last match info.
1717 __ SmiTag(x10, current_offset);
1718 // Clearing the 32 bottom bits gives us a Smi.
1719 STATIC_ASSERT(kSmiTag == 0);
1720 __ Bic(x11, current_offset, kSmiShiftMask);
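// x10 now holds the low 32-bit offset as a smi (its payload shifted into the
// upper word), and x11 holds the high 32-bit offset as a smi (its low word
// cleared), so the Stp below writes both capture offsets as smis.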
1721 __ Stp(x10,
1722 x11,
1723 MemOperand(last_match_offsets, kXRegSize * 2, PostIndex));
1724 __ B(&next_capture);
1725 __ Bind(&done);
1726
1727 // Return last match info.
1728 __ Mov(x0, last_match_info_elements);
1729 // Drop the 4 arguments of the stub from the stack.
1730 __ Drop(4);
1731 __ Ret();
1732
1733 __ Bind(&exception);
1734 Register exception_value = x0;
1735 // A stack overflow (on the backtrack stack) may have occurred
1736 // in the RegExp code but no exception has been created yet.
1737 // If there is no pending exception, handle that in the runtime system.
1738 __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
1739 __ Mov(x11,
1740 Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1741 isolate())));
1742 __ Ldr(exception_value, MemOperand(x11));
1743 __ Cmp(x10, exception_value);
1744 __ B(eq, &runtime);
1745
1746 // For exception, throw the exception again.
1747 __ TailCallRuntime(Runtime::kRegExpExecReThrow);
1748
1749 __ Bind(&failure);
1750 __ Mov(x0, Operand(isolate()->factory()->null_value()));
1751 // Drop the 4 arguments of the stub from the stack.
1752 __ Drop(4);
1753 __ Ret();
1754
1755 __ Bind(&runtime);
1756 __ TailCallRuntime(Runtime::kRegExpExec);
1757
1758 // Deferred code for string handling.
1759 // (5) Long external string? If not, go to (7).
1760 __ Bind(&not_seq_nor_cons);
1761 // Compare flags are still set.
1762 __ B(ne, &not_long_external); // Go to (7).
1763
1764 // (6) External string. Make it, offset-wise, look like a sequential string.
1765 __ Bind(&external_string);
1766 if (masm->emit_debug_code()) {
1767 // Assert that we do not have a cons or slice (indirect strings) here.
1768 // Sequential strings have already been ruled out.
1769 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
1770 __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
1771 __ Tst(x10, kIsIndirectStringMask);
1772 __ Check(eq, kExternalStringExpectedButNotFound);
1773 __ And(x10, x10, kStringRepresentationMask);
1774 __ Cmp(x10, 0);
1775 __ Check(ne, kExternalStringExpectedButNotFound);
1776 }
1777 __ Ldr(subject,
1778 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
1779 // Move the pointer so that offset-wise, it looks like a sequential string.
1780 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
1781 __ Sub(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
1782 __ B(&seq_string); // Go to (4).
1783
1784 // (7) If this is a short external string or not a string, bail out to
1785 // runtime.
1786 __ Bind(&not_long_external);
1787 STATIC_ASSERT(kShortExternalStringTag != 0);
1788 __ TestAndBranchIfAnySet(string_representation,
1789 kShortExternalStringMask | kIsNotStringMask,
1790 &runtime);
1791
1792 // (8) Sliced string. Replace subject with parent.
1793 __ Ldr(sliced_string_offset,
1794 UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset));
1795 __ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
1796 __ B(&check_underlying); // Go to (1).
1797 #endif
1798 }
1799
1800
1801 static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
1802 Register argc, Register function,
1803 Register feedback_vector, Register index,
1804 Register new_target) {
1805 FrameScope scope(masm, StackFrame::INTERNAL);
1806
1807 // Number-of-arguments register must be smi-tagged to call out.
1808 __ SmiTag(argc);
1809 __ Push(argc, function, feedback_vector, index);
1810 __ Push(cp);
1811
1812 DCHECK(feedback_vector.Is(x2) && index.Is(x3));
1813 __ CallStub(stub);
1814
1815 __ Pop(cp);
1816 __ Pop(index, feedback_vector, function, argc);
1817 __ SmiUntag(argc);
1818 }
1819
1820
1821 static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
1822 Register function,
1823 Register feedback_vector, Register index,
1824 Register new_target, Register scratch1,
1825 Register scratch2, Register scratch3) {
1826 ASM_LOCATION("GenerateRecordCallTarget");
1827 DCHECK(!AreAliased(scratch1, scratch2, scratch3, argc, function,
1828 feedback_vector, index, new_target));
1829 // Cache the called function in a feedback vector slot. Cache states are
1830 // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
1831 // argc : number of arguments to the construct function
1832 // function : the function to call
1833 // feedback_vector : the feedback vector
1834 // index : slot in feedback vector (smi)
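// Roughly: an uninitialized slot holds the uninitialized sentinel symbol, a
// monomorphic slot holds a WeakCell wrapping the function (or an
// AllocationSite when the target is the Array constructor), and a megamorphic
// slot holds the megamorphic sentinel symbol.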
1835 Label initialize, done, miss, megamorphic, not_array_function;
1836
1837 DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
1838 masm->isolate()->heap()->megamorphic_symbol());
1839 DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
1840 masm->isolate()->heap()->uninitialized_symbol());
1841
1842 // Load the cache state.
1843 Register feedback = scratch1;
1844 Register feedback_map = scratch2;
1845 Register feedback_value = scratch3;
1846 __ Add(feedback, feedback_vector,
1847 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
1848 __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
1849
1850 // A monomorphic cache hit or an already megamorphic state: invoke the
1851 // function without changing the state.
1852 // We don't know if feedback value is a WeakCell or a Symbol, but it's
1853 // harmless to read at this position in a symbol (see static asserts in
1854 // type-feedback-vector.h).
1855 Label check_allocation_site;
1856 __ Ldr(feedback_value, FieldMemOperand(feedback, WeakCell::kValueOffset));
1857 __ Cmp(function, feedback_value);
1858 __ B(eq, &done);
1859 __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
1860 __ B(eq, &done);
1861 __ Ldr(feedback_map, FieldMemOperand(feedback, HeapObject::kMapOffset));
1862 __ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
1863 __ B(ne, &check_allocation_site);
1864
1865 // If the weak cell is cleared, we have a new chance to become monomorphic.
1866 __ JumpIfSmi(feedback_value, &initialize);
1867 __ B(&megamorphic);
1868
1869 __ bind(&check_allocation_site);
1870 // If we came here, we need to see if we are the array function.
1871 // If we didn't have a matching function, and we didn't find the megamorphic
1872 // sentinel, then we have in the slot either some other function or an
1873 // AllocationSite.
1874 __ JumpIfNotRoot(feedback_map, Heap::kAllocationSiteMapRootIndex, &miss);
1875
1876 // Make sure the function is the Array() function
1877 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
1878 __ Cmp(function, scratch1);
1879 __ B(ne, &megamorphic);
1880 __ B(&done);
1881
1882 __ Bind(&miss);
1883
1884 // A monomorphic miss (i.e., here the cache is not uninitialized) goes
1885 // megamorphic.
1886 __ JumpIfRoot(scratch1, Heap::kuninitialized_symbolRootIndex, &initialize);
1887 // MegamorphicSentinel is an immortal immovable object (undefined) so no
1888 // write-barrier is needed.
1889 __ Bind(&megamorphic);
1890 __ Add(scratch1, feedback_vector,
1891 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
1892 __ LoadRoot(scratch2, Heap::kmegamorphic_symbolRootIndex);
1893 __ Str(scratch2, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
1894 __ B(&done);
1895
1896 // An uninitialized cache is patched with the function or sentinel to
1897 // indicate the ElementsKind if function is the Array constructor.
1898 __ Bind(&initialize);
1899
1900 // Make sure the function is the Array() function
1901 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
1902 __ Cmp(function, scratch1);
1903 __ B(ne, &not_array_function);
1904
1905 // The target function is the Array constructor.
1906 // Create an AllocationSite if we don't already have it and store it in the
1907 // slot.
1908 CreateAllocationSiteStub create_stub(masm->isolate());
1909 CallStubInRecordCallTarget(masm, &create_stub, argc, function,
1910 feedback_vector, index, new_target);
1911 __ B(&done);
1912
1913 __ Bind(&not_array_function);
1914 CreateWeakCellStub weak_cell_stub(masm->isolate());
1915 CallStubInRecordCallTarget(masm, &weak_cell_stub, argc, function,
1916 feedback_vector, index, new_target);
1917
1918 __ Bind(&done);
1919
1920 // Increment the call count for all function calls.
1921 __ Add(scratch1, feedback_vector,
1922 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
1923 __ Add(scratch1, scratch1, Operand(FixedArray::kHeaderSize + kPointerSize));
1924 __ Ldr(scratch2, FieldMemOperand(scratch1, 0));
1925 __ Add(scratch2, scratch2, Operand(Smi::FromInt(1)));
1926 __ Str(scratch2, FieldMemOperand(scratch1, 0));
1927 }
1928
1929
1930 void CallConstructStub::Generate(MacroAssembler* masm) {
1931 ASM_LOCATION("CallConstructStub::Generate");
1932 // x0 : number of arguments
1933 // x1 : the function to call
1934 // x2 : feedback vector
1935 // x3 : slot in feedback vector (Smi, for RecordCallTarget)
1936 Register function = x1;
1937
1938 Label non_function;
1939 // Check that the function is not a smi.
1940 __ JumpIfSmi(function, &non_function);
1941 // Check that the function is a JSFunction.
1942 Register object_type = x10;
1943 __ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE,
1944 &non_function);
1945
1946 GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5, x11, x12);
1947
1948 __ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
1949 Label feedback_register_initialized;
1950 // Put the AllocationSite from the feedback vector into x2, or undefined.
1951 __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize));
1952 __ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset));
1953 __ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex,
1954 &feedback_register_initialized);
1955 __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
1956 __ bind(&feedback_register_initialized);
1957
1958 __ AssertUndefinedOrAllocationSite(x2, x5);
1959
1960 __ Mov(x3, function);
1961
1962 // Tail call to the function-specific construct stub (still in the caller
1963 // context at this point).
1964 __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
1965 __ Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kConstructStubOffset));
1966 __ Add(x4, x4, Code::kHeaderSize - kHeapObjectTag);
1967 __ Br(x4);
1968
1969 __ Bind(&non_function);
1970 __ Mov(x3, function);
1971 __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
1972 }
1973
1974 // Note: feedback_vector and slot are clobbered after the call.
1975 static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
1976 Register slot) {
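// The call count is kept as a smi in the vector element immediately after the
// feedback slot itself, hence the extra kPointerSize below.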
1977 __ Add(feedback_vector, feedback_vector,
1978 Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
1979 __ Add(feedback_vector, feedback_vector,
1980 Operand(FixedArray::kHeaderSize + kPointerSize));
1981 __ Ldr(slot, FieldMemOperand(feedback_vector, 0));
1982 __ Add(slot, slot, Operand(Smi::FromInt(1)));
1983 __ Str(slot, FieldMemOperand(feedback_vector, 0));
1984 }
1985
1986 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
1987 // x0 - number of arguments
1988 // x1 - function
1989 // x3 - slot id
1990 // x2 - vector
1991 // x4 - allocation site (loaded from vector[slot])
1992 Register function = x1;
1993 Register feedback_vector = x2;
1994 Register index = x3;
1995 Register allocation_site = x4;
1996 Register scratch = x5;
1997
1998 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch);
1999 __ Cmp(function, scratch);
2000 __ B(ne, miss);
2001
2002 // Increment the call count for monomorphic function calls.
2003 IncrementCallCount(masm, feedback_vector, index);
2004
2005 // Set up arguments for the array constructor stub.
2006 Register allocation_site_arg = feedback_vector;
2007 Register new_target_arg = index;
2008 __ Mov(allocation_site_arg, allocation_site);
2009 __ Mov(new_target_arg, function);
2010 ArrayConstructorStub stub(masm->isolate());
2011 __ TailCallStub(&stub);
2012 }
2013
2014
2015 void CallICStub::Generate(MacroAssembler* masm) {
2016 ASM_LOCATION("CallICStub");
2017
2018 // x0 - number of arguments
2019 // x1 - function
2020 // x3 - slot id (Smi)
2021 // x2 - vector
2022 Label extra_checks_or_miss, call, call_function, call_count_incremented;
2023
2024 Register function = x1;
2025 Register feedback_vector = x2;
2026 Register index = x3;
2027
2028 // The checks. First, does x1 match the recorded monomorphic target?
2029 __ Add(x4, feedback_vector,
2030 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
2031 __ Ldr(x4, FieldMemOperand(x4, FixedArray::kHeaderSize));
2032
2033 // We don't know that we have a weak cell. We might have a private symbol
2034 // or an AllocationSite, but the memory is safe to examine.
2035 // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
2036 // FixedArray.
2037 // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
2038 // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
2039 // computed, meaning that it can't appear to be a pointer. If the low bit is
2040 // 0, then hash is computed, but the 0 bit prevents the field from appearing
2041 // to be a pointer.
2042 STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
2043 STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
2044 WeakCell::kValueOffset &&
2045 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
2046
2047 __ Ldr(x5, FieldMemOperand(x4, WeakCell::kValueOffset));
2048 __ Cmp(x5, function);
2049 __ B(ne, &extra_checks_or_miss);
2050
2051 // The compare above could have been a SMI/SMI comparison. Guard against this
2052 // convincing us that we have a monomorphic JSFunction.
2053 __ JumpIfSmi(function, &extra_checks_or_miss);
2054
2055 __ Bind(&call_function);
2056
2057 // Increment the call count for monomorphic function calls.
2058 IncrementCallCount(masm, feedback_vector, index);
2059
2060 __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
2061 tail_call_mode()),
2062 RelocInfo::CODE_TARGET);
2063
2064 __ bind(&extra_checks_or_miss);
2065 Label uninitialized, miss, not_allocation_site;
2066
2067 __ JumpIfRoot(x4, Heap::kmegamorphic_symbolRootIndex, &call);
2068
2069 __ Ldr(x5, FieldMemOperand(x4, HeapObject::kMapOffset));
2070 __ JumpIfNotRoot(x5, Heap::kAllocationSiteMapRootIndex, &not_allocation_site);
2071
2072 HandleArrayCase(masm, &miss);
2073
2074 __ bind(&not_allocation_site);
2075
2076 // The following cases attempt to handle MISS cases without going to the
2077 // runtime.
2078 if (FLAG_trace_ic) {
2079 __ jmp(&miss);
2080 }
2081
2082 // TODO(mvstanton): the code below is effectively disabled. Investigate.
2083 __ JumpIfRoot(x4, Heap::kuninitialized_symbolRootIndex, &miss);
2084
2085 // We are going megamorphic. If the feedback is a JSFunction, it is fine
2086 // to handle it here. More complex cases are dealt with in the runtime.
2087 __ AssertNotSmi(x4);
2088 __ JumpIfNotObjectType(x4, x5, x5, JS_FUNCTION_TYPE, &miss);
2089 __ Add(x4, feedback_vector,
2090 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
2091 __ LoadRoot(x5, Heap::kmegamorphic_symbolRootIndex);
2092 __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
2093
2094 __ Bind(&call);
2095
2096 // Increment the call count for megamorphic function calls.
2097 IncrementCallCount(masm, feedback_vector, index);
2098
2099 __ Bind(&call_count_incremented);
2100 __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
2101 RelocInfo::CODE_TARGET);
2102
2103 __ bind(&uninitialized);
2104
2105 // We are going monomorphic, provided we actually have a JSFunction.
2106 __ JumpIfSmi(function, &miss);
2107
2108 // Goto miss case if we do not have a function.
2109 __ JumpIfNotObjectType(function, x5, x5, JS_FUNCTION_TYPE, &miss);
2110
2111 // Make sure the function is not the Array() function, which requires special
2112 // behavior on MISS.
2113 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, x5);
2114 __ Cmp(function, x5);
2115 __ B(eq, &miss);
2116
2117 // Make sure the function belongs to the same native context.
2118 __ Ldr(x4, FieldMemOperand(function, JSFunction::kContextOffset));
2119 __ Ldr(x4, ContextMemOperand(x4, Context::NATIVE_CONTEXT_INDEX));
2120 __ Ldr(x5, NativeContextMemOperand());
2121 __ Cmp(x4, x5);
2122 __ B(ne, &miss);
2123
2124 // Store the function. Use a stub since we need a frame for allocation.
2125 // x2 - vector
2126 // x3 - slot
2127 // x1 - function
2128 // x0 - number of arguments
2129 {
2130 FrameScope scope(masm, StackFrame::INTERNAL);
2131 CreateWeakCellStub create_stub(masm->isolate());
2132 __ SmiTag(x0);
2133 __ Push(x0);
2134 __ Push(feedback_vector, index);
2135
2136 __ Push(cp, function);
2137 __ CallStub(&create_stub);
2138 __ Pop(cp, function);
2139
2140 __ Pop(feedback_vector, index);
2141 __ Pop(x0);
2142 __ SmiUntag(x0);
2143 }
2144
2145 __ B(&call_function);
2146
2147 // We are here because tracing is on or we encountered a MISS case we can't
2148 // handle here.
2149 __ bind(&miss);
2150 GenerateMiss(masm);
2151
2152 // The runtime increments the call count in the vector for us.
2153 __ B(&call_count_incremented);
2154 }
2155
2156
2157 void CallICStub::GenerateMiss(MacroAssembler* masm) {
2158 ASM_LOCATION("CallICStub[Miss]");
2159
2160 FrameScope scope(masm, StackFrame::INTERNAL);
2161
2162 // Preserve the number of arguments as Smi.
2163 __ SmiTag(x0);
2164
2165 // Push the receiver and the function and feedback info.
2166 __ Push(x0, x1, x2, x3);
2167
2168 // Call the entry.
2169 __ CallRuntime(Runtime::kCallIC_Miss);
2170
2171 // Move the result to x1 and exit the internal frame.
2172 __ Mov(x1, x0);
2173
2174 // Restore number of arguments.
2175 __ Pop(x0);
2176 __ SmiUntag(x0);
2177 }
2178
2179
2180 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
2181 // If the receiver is a smi trigger the non-string case.
2182 if (check_mode_ == RECEIVER_IS_UNKNOWN) {
2183 __ JumpIfSmi(object_, receiver_not_string_);
2184
2185 // Fetch the instance type of the receiver into result register.
2186 __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2187 __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2188
2189 // If the receiver is not a string trigger the non-string case.
2190 __ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_);
2191 }
2192
2193 // If the index is non-smi trigger the non-smi case.
2194 __ JumpIfNotSmi(index_, &index_not_smi_);
2195
2196 __ Bind(&got_smi_index_);
2197 // Check for index out of range.
2198 __ Ldrsw(result_, UntagSmiFieldMemOperand(object_, String::kLengthOffset));
2199 __ Cmp(result_, Operand::UntagSmi(index_));
2200 __ B(ls, index_out_of_range_);
2201
2202 __ SmiUntag(index_);
2203
2204 StringCharLoadGenerator::Generate(masm,
2205 object_,
2206 index_.W(),
2207 result_,
2208 &call_runtime_);
2209 __ SmiTag(result_);
2210 __ Bind(&exit_);
2211 }
2212
2213
2214 void StringCharCodeAtGenerator::GenerateSlow(
2215 MacroAssembler* masm, EmbedMode embed_mode,
2216 const RuntimeCallHelper& call_helper) {
2217 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
2218
2219 __ Bind(&index_not_smi_);
2220 // If index is a heap number, try converting it to an integer.
2221 __ JumpIfNotHeapNumber(index_, index_not_number_);
2222 call_helper.BeforeCall(masm);
2223 if (embed_mode == PART_OF_IC_HANDLER) {
2224 __ Push(LoadWithVectorDescriptor::VectorRegister(),
2225 LoadWithVectorDescriptor::SlotRegister(), object_, index_);
2226 } else {
2227 // Save object_ on the stack and pass index_ as argument for runtime call.
2228 __ Push(object_, index_);
2229 }
2230 __ CallRuntime(Runtime::kNumberToSmi);
2231 // Save the conversion result before the pop instructions below
2232 // have a chance to overwrite it.
2233 __ Mov(index_, x0);
2234 if (embed_mode == PART_OF_IC_HANDLER) {
2235 __ Pop(object_, LoadWithVectorDescriptor::SlotRegister(),
2236 LoadWithVectorDescriptor::VectorRegister());
2237 } else {
2238 __ Pop(object_);
2239 }
2240 // Reload the instance type.
2241 __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2242 __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2243 call_helper.AfterCall(masm);
2244
2245 // If index is still not a smi, it must be out of range.
2246 __ JumpIfNotSmi(index_, index_out_of_range_);
2247 // Otherwise, return to the fast path.
2248 __ B(&got_smi_index_);
2249
2250 // Call runtime. We get here when the receiver is a string and the
2251 // index is a number, but the code for getting the actual character
2252 // is too complex (e.g., when the string needs to be flattened).
2253 __ Bind(&call_runtime_);
2254 call_helper.BeforeCall(masm);
2255 __ SmiTag(index_);
2256 __ Push(object_, index_);
2257 __ CallRuntime(Runtime::kStringCharCodeAtRT);
2258 __ Mov(result_, x0);
2259 call_helper.AfterCall(masm);
2260 __ B(&exit_);
2261
2262 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
2263 }
2264
2265
2266 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
2267 __ JumpIfNotSmi(code_, &slow_case_);
2268 __ Cmp(code_, Smi::FromInt(String::kMaxOneByteCharCode));
2269 __ B(hi, &slow_case_);
2270
2271 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
2272 // At this point code register contains smi tagged one-byte char code.
2273 __ Add(result_, result_, Operand::UntagSmiAndScale(code_, kPointerSizeLog2));
2274 __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
2275 __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
2276 __ Bind(&exit_);
2277 }
2278
2279
2280 void StringCharFromCodeGenerator::GenerateSlow(
2281 MacroAssembler* masm,
2282 const RuntimeCallHelper& call_helper) {
2283 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
2284
2285 __ Bind(&slow_case_);
2286 call_helper.BeforeCall(masm);
2287 __ Push(code_);
2288 __ CallRuntime(Runtime::kStringCharFromCode);
2289 __ Mov(result_, x0);
2290 call_helper.AfterCall(masm);
2291 __ B(&exit_);
2292
2293 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
2294 }
2295
2296
2297 void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
2298 // Inputs are in x0 (lhs) and x1 (rhs).
2299 DCHECK_EQ(CompareICState::BOOLEAN, state());
2300 ASM_LOCATION("CompareICStub[Booleans]");
2301 Label miss;
2302
2303 __ CheckMap(x1, x2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
2304 __ CheckMap(x0, x3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
2305 if (!Token::IsEqualityOp(op())) {
2306 __ Ldr(x1, FieldMemOperand(x1, Oddball::kToNumberOffset));
2307 __ AssertSmi(x1);
2308 __ Ldr(x0, FieldMemOperand(x0, Oddball::kToNumberOffset));
2309 __ AssertSmi(x0);
2310 }
2311 __ Sub(x0, x1, x0);
2312 __ Ret();
2313
2314 __ Bind(&miss);
2315 GenerateMiss(masm);
2316 }
2317
2318
2319 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
2320 // Inputs are in x0 (lhs) and x1 (rhs).
2321 DCHECK(state() == CompareICState::SMI);
2322 ASM_LOCATION("CompareICStub[Smis]");
2323 Label miss;
2324 // Bail out (to 'miss') unless both x0 and x1 are smis.
2325 __ JumpIfEitherNotSmi(x0, x1, &miss);
2326
2327 if (GetCondition() == eq) {
2328 // For equality we do not care about the sign of the result.
2329 __ Sub(x0, x0, x1);
2330 } else {
2331 // Untag before subtracting to avoid handling overflow.
2332 __ SmiUntag(x1);
2333 __ Sub(x0, x1, Operand::UntagSmi(x0));
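// Both untagged payloads fit in 32 bits, so the 64-bit subtraction cannot
// overflow and its sign directly encodes the comparison result.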
2334 }
2335 __ Ret();
2336
2337 __ Bind(&miss);
2338 GenerateMiss(masm);
2339 }
2340
2341
2342 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
2343 DCHECK(state() == CompareICState::NUMBER);
2344 ASM_LOCATION("CompareICStub[HeapNumbers]");
2345
2346 Label unordered, maybe_undefined1, maybe_undefined2;
2347 Label miss, handle_lhs, values_in_d_regs;
2348 Label untag_rhs, untag_lhs;
2349
2350 Register result = x0;
2351 Register rhs = x0;
2352 Register lhs = x1;
2353 FPRegister rhs_d = d0;
2354 FPRegister lhs_d = d1;
2355
2356 if (left() == CompareICState::SMI) {
2357 __ JumpIfNotSmi(lhs, &miss);
2358 }
2359 if (right() == CompareICState::SMI) {
2360 __ JumpIfNotSmi(rhs, &miss);
2361 }
2362
2363 __ SmiUntagToDouble(rhs_d, rhs, kSpeculativeUntag);
2364 __ SmiUntagToDouble(lhs_d, lhs, kSpeculativeUntag);
2365
2366 // Load rhs if it's a heap number.
2367 __ JumpIfSmi(rhs, &handle_lhs);
2368 __ JumpIfNotHeapNumber(rhs, &maybe_undefined1);
2369 __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
2370
2371 // Load lhs if it's a heap number.
2372 __ Bind(&handle_lhs);
2373 __ JumpIfSmi(lhs, &values_in_d_regs);
2374 __ JumpIfNotHeapNumber(lhs, &maybe_undefined2);
2375 __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
2376
2377 __ Bind(&values_in_d_regs);
2378 __ Fcmp(lhs_d, rhs_d);
2379 __ B(vs, &unordered); // Overflow flag set if either is NaN.
2380 STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
2381 __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
2382 __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0.
2383 __ Ret();
2384
2385 __ Bind(&unordered);
2386 CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
2387 CompareICState::GENERIC, CompareICState::GENERIC);
2388 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2389
2390 __ Bind(&maybe_undefined1);
2391 if (Token::IsOrderedRelationalCompareOp(op())) {
2392 __ JumpIfNotRoot(rhs, Heap::kUndefinedValueRootIndex, &miss);
2393 __ JumpIfSmi(lhs, &unordered);
2394 __ JumpIfNotHeapNumber(lhs, &maybe_undefined2);
2395 __ B(&unordered);
2396 }
2397
2398 __ Bind(&maybe_undefined2);
2399 if (Token::IsOrderedRelationalCompareOp(op())) {
2400 __ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered);
2401 }
2402
2403 __ Bind(&miss);
2404 GenerateMiss(masm);
2405 }
2406
2407
2408 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
2409 DCHECK(state() == CompareICState::INTERNALIZED_STRING);
2410 ASM_LOCATION("CompareICStub[InternalizedStrings]");
2411 Label miss;
2412
2413 Register result = x0;
2414 Register rhs = x0;
2415 Register lhs = x1;
2416
2417 // Check that both operands are heap objects.
2418 __ JumpIfEitherSmi(lhs, rhs, &miss);
2419
2420 // Check that both operands are internalized strings.
2421 Register rhs_map = x10;
2422 Register lhs_map = x11;
2423 Register rhs_type = x10;
2424 Register lhs_type = x11;
2425 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
2426 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
2427 __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
2428 __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
2429
2430 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
2431 __ Orr(x12, lhs_type, rhs_type);
2432 __ TestAndBranchIfAnySet(
2433 x12, kIsNotStringMask | kIsNotInternalizedMask, &miss);
2434
2435 // Internalized strings are compared by identity.
2436 STATIC_ASSERT(EQUAL == 0);
2437 __ Cmp(lhs, rhs);
2438 __ Cset(result, ne);
2439 __ Ret();
2440
2441 __ Bind(&miss);
2442 GenerateMiss(masm);
2443 }
2444
2445
2446 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
2447 DCHECK(state() == CompareICState::UNIQUE_NAME);
2448 ASM_LOCATION("CompareICStub[UniqueNames]");
2449 DCHECK(GetCondition() == eq);
2450 Label miss;
2451
2452 Register result = x0;
2453 Register rhs = x0;
2454 Register lhs = x1;
2455
2456 Register lhs_instance_type = w2;
2457 Register rhs_instance_type = w3;
2458
2459 // Check that both operands are heap objects.
2460 __ JumpIfEitherSmi(lhs, rhs, &miss);
2461
2462 // Check that both operands are unique names. This leaves the instance
2463 // types loaded in lhs_instance_type and rhs_instance_type.
2464 __ Ldr(x10, FieldMemOperand(lhs, HeapObject::kMapOffset));
2465 __ Ldr(x11, FieldMemOperand(rhs, HeapObject::kMapOffset));
2466 __ Ldrb(lhs_instance_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
2467 __ Ldrb(rhs_instance_type, FieldMemOperand(x11, Map::kInstanceTypeOffset));
2468
2469 // To avoid a miss, each instance type should be either SYMBOL_TYPE or it
2470 // should have kInternalizedTag set.
2471 __ JumpIfNotUniqueNameInstanceType(lhs_instance_type, &miss);
2472 __ JumpIfNotUniqueNameInstanceType(rhs_instance_type, &miss);
2473
2474 // Unique names are compared by identity.
2475 STATIC_ASSERT(EQUAL == 0);
2476 __ Cmp(lhs, rhs);
2477 __ Cset(result, ne);
2478 __ Ret();
2479
2480 __ Bind(&miss);
2481 GenerateMiss(masm);
2482 }
2483
2484
2485 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
2486 DCHECK(state() == CompareICState::STRING);
2487 ASM_LOCATION("CompareICStub[Strings]");
2488
2489 Label miss;
2490
2491 bool equality = Token::IsEqualityOp(op());
2492
2493 Register result = x0;
2494 Register rhs = x0;
2495 Register lhs = x1;
2496
2497 // Check that both operands are heap objects.
2498 __ JumpIfEitherSmi(rhs, lhs, &miss);
2499
2500 // Check that both operands are strings.
2501 Register rhs_map = x10;
2502 Register lhs_map = x11;
2503 Register rhs_type = x10;
2504 Register lhs_type = x11;
2505 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
2506 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
2507 __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
2508 __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
2509 STATIC_ASSERT(kNotStringTag != 0);
2510 __ Orr(x12, lhs_type, rhs_type);
2511 __ Tbnz(x12, MaskToBit(kIsNotStringMask), &miss);
2512
2513 // Fast check for identical strings.
2514 Label not_equal;
2515 __ Cmp(lhs, rhs);
2516 __ B(ne, &not_equal);
2517 __ Mov(result, EQUAL);
2518 __ Ret();
2519
2520 __ Bind(&not_equal);
2521 // Handle the case where the strings are not identical.
2522
2523 // Check that both strings are internalized strings. If they are, we're done
2524 // because we already know they are not identical. We know they are both
2525 // strings.
2526 if (equality) {
2527 DCHECK(GetCondition() == eq);
2528 STATIC_ASSERT(kInternalizedTag == 0);
2529 Label not_internalized_strings;
2530 __ Orr(x12, lhs_type, rhs_type);
2531 __ TestAndBranchIfAnySet(
2532 x12, kIsNotInternalizedMask, &not_internalized_strings);
2533 // Result is in rhs (x0), and not EQUAL, as rhs is not a smi.
2534 __ Ret();
2535 __ Bind(&not_internalized_strings);
2536 }
2537
2538 // Check that both strings are sequential one-byte.
2539 Label runtime;
2540 __ JumpIfBothInstanceTypesAreNotSequentialOneByte(lhs_type, rhs_type, x12,
2541 x13, &runtime);
2542
2543 // Compare flat one-byte strings. Returns when done.
2544 if (equality) {
2545 StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, x10, x11,
2546 x12);
2547 } else {
2548 StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, x10, x11,
2549 x12, x13);
2550 }
2551
2552 // Handle more complex cases in runtime.
2553 __ Bind(&runtime);
2554 if (equality) {
2555 {
2556 FrameScope scope(masm, StackFrame::INTERNAL);
2557 __ Push(lhs, rhs);
2558 __ CallRuntime(Runtime::kStringEqual);
2559 }
2560 __ LoadRoot(x1, Heap::kTrueValueRootIndex);
2561 __ Sub(x0, x0, x1);
2562 __ Ret();
2563 } else {
2564 __ Push(lhs, rhs);
2565 __ TailCallRuntime(Runtime::kStringCompare);
2566 }
2567
2568 __ Bind(&miss);
2569 GenerateMiss(masm);
2570 }
2571
2572
2573 void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
2574 DCHECK_EQ(CompareICState::RECEIVER, state());
2575 ASM_LOCATION("CompareICStub[Receivers]");
2576
2577 Label miss;
2578
2579 Register result = x0;
2580 Register rhs = x0;
2581 Register lhs = x1;
2582
2583 __ JumpIfEitherSmi(rhs, lhs, &miss);
2584
2585 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2586 __ JumpIfObjectType(rhs, x10, x10, FIRST_JS_RECEIVER_TYPE, &miss, lt);
2587 __ JumpIfObjectType(lhs, x10, x10, FIRST_JS_RECEIVER_TYPE, &miss, lt);
2588
2589 DCHECK_EQ(eq, GetCondition());
2590 __ Sub(result, rhs, lhs);
2591 __ Ret();
2592
2593 __ Bind(&miss);
2594 GenerateMiss(masm);
2595 }
2596
2597
2598 void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
2599 ASM_LOCATION("CompareICStub[KnownReceivers]");
2600
2601 Label miss;
2602 Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
2603
2604 Register result = x0;
2605 Register rhs = x0;
2606 Register lhs = x1;
2607
2608 __ JumpIfEitherSmi(rhs, lhs, &miss);
2609
2610 Register rhs_map = x10;
2611 Register lhs_map = x11;
2612 Register map = x12;
2613 __ GetWeakValue(map, cell);
2614 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
2615 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
2616 __ Cmp(rhs_map, map);
2617 __ B(ne, &miss);
2618 __ Cmp(lhs_map, map);
2619 __ B(ne, &miss);
2620
2621 if (Token::IsEqualityOp(op())) {
2622 __ Sub(result, rhs, lhs);
2623 __ Ret();
2624 } else {
2625 Register ncr = x2;
2626 if (op() == Token::LT || op() == Token::LTE) {
2627 __ Mov(ncr, Smi::FromInt(GREATER));
2628 } else {
2629 __ Mov(ncr, Smi::FromInt(LESS));
2630 }
2631 __ Push(lhs, rhs, ncr);
2632 __ TailCallRuntime(Runtime::kCompare);
2633 }
2634
2635 __ Bind(&miss);
2636 GenerateMiss(masm);
2637 }
2638
2639
2640 // This method handles the case where a compare stub had the wrong
2641 // implementation. It calls a miss handler, which re-writes the stub. All other
2642 // CompareICStub::Generate* methods should fall back into this one if their
2643 // operands were not the expected types.
2644 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
2645 ASM_LOCATION("CompareICStub[Miss]");
2646
2647 Register stub_entry = x11;
2648 {
2649 FrameScope scope(masm, StackFrame::INTERNAL);
2650 Register op = x10;
2651 Register left = x1;
2652 Register right = x0;
2653 // Preserve some caller-saved registers.
2654 __ Push(x1, x0, lr);
2655 // Push the arguments.
2656 __ Mov(op, Smi::FromInt(this->op()));
2657 __ Push(left, right, op);
2658
2659 // Call the miss handler. This also pops the arguments.
2660 __ CallRuntime(Runtime::kCompareIC_Miss);
2661
2662 // Compute the entry point of the rewritten stub.
2663 __ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag);
2664 // Restore caller-saved registers.
2665 __ Pop(lr, x0, x1);
2666 }
2667
2668 // Tail-call to the new stub.
2669 __ Jump(stub_entry);
2670 }
2671
2672
2673 void StringHelper::GenerateFlatOneByteStringEquals(
2674 MacroAssembler* masm, Register left, Register right, Register scratch1,
2675 Register scratch2, Register scratch3) {
2676 DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3));
2677 Register result = x0;
2678 Register left_length = scratch1;
2679 Register right_length = scratch2;
2680
2681 // Compare lengths. If lengths differ, strings can't be equal. Lengths are
2682 // smis, and don't need to be untagged.
2683 Label strings_not_equal, check_zero_length;
2684 __ Ldr(left_length, FieldMemOperand(left, String::kLengthOffset));
2685 __ Ldr(right_length, FieldMemOperand(right, String::kLengthOffset));
2686 __ Cmp(left_length, right_length);
2687 __ B(eq, &check_zero_length);
2688
2689 __ Bind(&strings_not_equal);
2690 __ Mov(result, Smi::FromInt(NOT_EQUAL));
2691 __ Ret();
2692
2693 // Check if the length is zero. If so, the strings must be equal (and empty.)
2694 Label compare_chars;
2695 __ Bind(&check_zero_length);
2696 STATIC_ASSERT(kSmiTag == 0);
2697 __ Cbnz(left_length, &compare_chars);
2698 __ Mov(result, Smi::FromInt(EQUAL));
2699 __ Ret();
2700
2701 // Compare characters. Falls through if all characters are equal.
2702 __ Bind(&compare_chars);
2703 GenerateOneByteCharsCompareLoop(masm, left, right, left_length, scratch2,
2704 scratch3, &strings_not_equal);
2705
2706 // Characters in strings are equal.
2707 __ Mov(result, Smi::FromInt(EQUAL));
2708 __ Ret();
2709 }
2710
2711
2712 void StringHelper::GenerateCompareFlatOneByteStrings(
2713 MacroAssembler* masm, Register left, Register right, Register scratch1,
2714 Register scratch2, Register scratch3, Register scratch4) {
2715 DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
2716 Label result_not_equal, compare_lengths;
2717
2718 // Find minimum length and length difference.
2719 Register length_delta = scratch3;
2720 __ Ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
2721 __ Ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
2722 __ Subs(length_delta, scratch1, scratch2);
2723
2724 Register min_length = scratch1;
2725 __ Csel(min_length, scratch2, scratch1, gt);
2726 __ Cbz(min_length, &compare_lengths);
2727
2728 // Compare loop.
2729 GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
2730 scratch4, &result_not_equal);
2731
2732 // Compare lengths - strings up to min-length are equal.
2733 __ Bind(&compare_lengths);
2734
2735 DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
2736
2737 // Use length_delta as result if it's zero.
2738 Register result = x0;
2739 __ Subs(result, length_delta, 0);
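// If a character differed, &result_not_equal was reached with the flags from
// that character compare; otherwise the flags come from the Subs above, so
// the conditional moves below yield GREATER, LESS, or leave EQUAL (0) as is.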
2740
2741 __ Bind(&result_not_equal);
2742 Register greater = x10;
2743 Register less = x11;
2744 __ Mov(greater, Smi::FromInt(GREATER));
2745 __ Mov(less, Smi::FromInt(LESS));
2746 __ CmovX(result, greater, gt);
2747 __ CmovX(result, less, lt);
2748 __ Ret();
2749 }
2750
2751
2752 void StringHelper::GenerateOneByteCharsCompareLoop(
2753 MacroAssembler* masm, Register left, Register right, Register length,
2754 Register scratch1, Register scratch2, Label* chars_not_equal) {
2755 DCHECK(!AreAliased(left, right, length, scratch1, scratch2));
2756
2757 // Change index to run from -length to -1 by adding length to string
2758 // start. This means that loop ends when index reaches zero, which
2759 // doesn't need an additional compare.
2760 __ SmiUntag(length);
2761 __ Add(scratch1, length, SeqOneByteString::kHeaderSize - kHeapObjectTag);
2762 __ Add(left, left, scratch1);
2763 __ Add(right, right, scratch1);
2764
2765 Register index = length;
2766 __ Neg(index, length); // index = -length;
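// Example (illustrative): for a 3-character string the index runs -3, -2, -1;
// after the last pair is compared the increment below reaches zero and the
// Cbnz falls through.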
2767
2768 // Compare loop
2769 Label loop;
2770 __ Bind(&loop);
2771 __ Ldrb(scratch1, MemOperand(left, index));
2772 __ Ldrb(scratch2, MemOperand(right, index));
2773 __ Cmp(scratch1, scratch2);
2774 __ B(ne, chars_not_equal);
2775 __ Add(index, index, 1);
2776 __ Cbnz(index, &loop);
2777 }
2778
2779
2780 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
2781 // ----------- S t a t e -------------
2782 // -- x1 : left
2783 // -- x0 : right
2784 // -- lr : return address
2785 // -----------------------------------
2786
2787 // Load x2 with the allocation site. We stick an undefined dummy value here
2788 // and replace it with the real allocation site later when we instantiate this
2789 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
2790 __ LoadObject(x2, handle(isolate()->heap()->undefined_value()));
2791
2792 // Make sure that we actually patched the allocation site.
2793 if (FLAG_debug_code) {
2794 __ AssertNotSmi(x2, kExpectedAllocationSite);
2795 __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
2796 __ AssertRegisterIsRoot(x10, Heap::kAllocationSiteMapRootIndex,
2797 kExpectedAllocationSite);
2798 }
2799
2800 // Tail call into the stub that handles binary operations with allocation
2801 // sites.
2802 BinaryOpWithAllocationSiteStub stub(isolate(), state());
2803 __ TailCallStub(&stub);
2804 }
2805
2806
2807 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
2808 // We need some extra registers for this stub, they have been allocated
2809 // but we need to save them before using them.
2810 regs_.Save(masm);
2811
2812 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
2813 Label dont_need_remembered_set;
2814
2815 Register val = regs_.scratch0();
2816 __ Ldr(val, MemOperand(regs_.address()));
2817 __ JumpIfNotInNewSpace(val, &dont_need_remembered_set);
2818
2819 __ JumpIfInNewSpace(regs_.object(), &dont_need_remembered_set);
2820
2821 // First notify the incremental marker if necessary, then update the
2822 // remembered set.
2823 CheckNeedsToInformIncrementalMarker(
2824 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
2825 InformIncrementalMarker(masm);
2826 regs_.Restore(masm); // Restore the extra scratch registers we used.
2827
2828 __ RememberedSetHelper(object(), address(),
2829 value(), // scratch1
2830 save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
2831
2832 __ Bind(&dont_need_remembered_set);
2833 }
2834
2835 CheckNeedsToInformIncrementalMarker(
2836 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
2837 InformIncrementalMarker(masm);
2838 regs_.Restore(masm); // Restore the extra scratch registers we used.
2839 __ Ret();
2840 }
2841
2842
2843 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
2844 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
2845 Register address =
2846 x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
2847 DCHECK(!address.Is(regs_.object()));
2848 DCHECK(!address.Is(x0));
2849 __ Mov(address, regs_.address());
2850 __ Mov(x0, regs_.object());
2851 __ Mov(x1, address);
2852 __ Mov(x2, ExternalReference::isolate_address(isolate()));
2853
2854 AllowExternalCallThatCantCauseGC scope(masm);
2855 ExternalReference function =
2856 ExternalReference::incremental_marking_record_write_function(
2857 isolate());
2858 __ CallCFunction(function, 3, 0);
2859
2860 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
2861 }
2862
2863
2864 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
2865 MacroAssembler* masm,
2866 OnNoNeedToInformIncrementalMarker on_no_need,
2867 Mode mode) {
2868 Label on_black;
2869 Label need_incremental;
2870 Label need_incremental_pop_scratch;
2871
2872 // If the object is not black we don't have to inform the incremental marker.
2873 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
2874
2875 regs_.Restore(masm); // Restore the extra scratch registers we used.
2876 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
2877 __ RememberedSetHelper(object(), address(),
2878 value(), // scratch1
2879 save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
2880 } else {
2881 __ Ret();
2882 }
2883
2884 __ Bind(&on_black);
2885 // Get the value from the slot.
2886 Register val = regs_.scratch0();
2887 __ Ldr(val, MemOperand(regs_.address()));
2888
2889 if (mode == INCREMENTAL_COMPACTION) {
2890 Label ensure_not_white;
2891
2892 __ CheckPageFlagClear(val, regs_.scratch1(),
2893 MemoryChunk::kEvacuationCandidateMask,
2894 &ensure_not_white);
2895
2896 __ CheckPageFlagClear(regs_.object(),
2897 regs_.scratch1(),
2898 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
2899 &need_incremental);
2900
2901 __ Bind(&ensure_not_white);
2902 }
2903
2904 // We need extra registers for this, so we push the object and the address
2905 // register temporarily.
2906 __ Push(regs_.address(), regs_.object());
2907 __ JumpIfWhite(val,
2908 regs_.scratch1(), // Scratch.
2909 regs_.object(), // Scratch.
2910 regs_.address(), // Scratch.
2911 regs_.scratch2(), // Scratch.
2912 &need_incremental_pop_scratch);
2913 __ Pop(regs_.object(), regs_.address());
2914
2915 regs_.Restore(masm); // Restore the extra scratch registers we used.
2916 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
2917 __ RememberedSetHelper(object(), address(),
2918 value(), // scratch1
2919 save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
2920 } else {
2921 __ Ret();
2922 }
2923
2924 __ Bind(&need_incremental_pop_scratch);
2925 __ Pop(regs_.object(), regs_.address());
2926
2927 __ Bind(&need_incremental);
2928 // Fall through when we need to inform the incremental marker.
2929 }
2930
2931
2932 void RecordWriteStub::Generate(MacroAssembler* masm) {
2933 Label skip_to_incremental_noncompacting;
2934 Label skip_to_incremental_compacting;
2935
2936 // We patch these two first instructions back and forth between a nop and
2937 // real branch when we start and stop incremental heap marking.
2938 // Initially the stub is expected to be in STORE_BUFFER_ONLY mode, so 2 nops
2939 // are generated.
2940 // See RecordWriteStub::Patch for details.
2941 {
2942 InstructionAccurateScope scope(masm, 2);
2943 __ adr(xzr, &skip_to_incremental_noncompacting);
2944 __ adr(xzr, &skip_to_incremental_compacting);
2945 }
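// Writing the adr results to xzr discards them, so until
// RecordWriteStub::Patch turns these two instructions into real branches they
// behave as nops and execution falls through to the store-buffer-only path
// below.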
2946
2947 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
2948 __ RememberedSetHelper(object(), address(),
2949 value(), // scratch1
2950 save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
2951 }
2952 __ Ret();
2953
2954 __ Bind(&skip_to_incremental_noncompacting);
2955 GenerateIncremental(masm, INCREMENTAL);
2956
2957 __ Bind(&skip_to_incremental_compacting);
2958 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
2959 }
2960
2961
2962 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
2963 CEntryStub ces(isolate(), 1, kSaveFPRegs);
2964 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
2965 int parameter_count_offset =
2966 StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
2967 __ Ldr(x1, MemOperand(fp, parameter_count_offset));
2968 if (function_mode() == JS_FUNCTION_STUB_MODE) {
2969 __ Add(x1, x1, 1);
2970 }
2971 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
2972 __ Drop(x1);
2973 // Return to IC Miss stub, continuation still on stack.
2974 __ Ret();
2975 }
2976
2977 void CallICTrampolineStub::Generate(MacroAssembler* masm) {
2978 __ EmitLoadTypeFeedbackVector(x2);
2979 CallICStub stub(isolate(), state());
2980 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2981 }
2982
2983
2984 static void HandleArrayCases(MacroAssembler* masm, Register feedback,
2985 Register receiver_map, Register scratch1,
2986 Register scratch2, bool is_polymorphic,
2987 Label* miss) {
2988 // feedback initially contains the feedback array
2989 Label next_loop, prepare_next;
2990 Label load_smi_map, compare_map;
2991 Label start_polymorphic;
2992
2993 Register cached_map = scratch1;
2994
2995 __ Ldr(cached_map,
2996 FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
2997 __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
2998 __ Cmp(receiver_map, cached_map);
2999 __ B(ne, &start_polymorphic);
3000 // found, now call handler.
3001 Register handler = feedback;
3002 __ Ldr(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
3003 __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
3004 __ Jump(feedback);
3005
3006 Register length = scratch2;
3007 __ Bind(&start_polymorphic);
3008 __ Ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
3009 if (!is_polymorphic) {
3010 __ Cmp(length, Operand(Smi::FromInt(2)));
3011 __ B(eq, miss);
3012 }
3013
3014 Register too_far = length;
3015 Register pointer_reg = feedback;
3016
3017 // +-----+------+------+-----+-----+ ... ----+
3018 // | map | len | wm0 | h0 | wm1 | hN |
3019 // +-----+------+------+-----+-----+ ... ----+
3020 // 0 1 2 len-1
3021 // ^ ^
3022 // | |
3023 // pointer_reg too_far
3024 // aka feedback scratch2
3025 // also need receiver_map
3026 // use cached_map (scratch1) to look in the weak map values.
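  // A C-like sketch of the scan below (illustrative only; the helper names are
  // descriptive, not actual V8 APIs):
  //   for (int i = 2; i < length; i += 2) {
  //     if (weak_cell_value(feedback->get(i)) == receiver_map)
  //       TailCall(feedback->get(i + 1));  // the handler code object
  //   }
  //   goto miss;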
3027 __ Add(too_far, feedback,
3028 Operand::UntagSmiAndScale(length, kPointerSizeLog2));
3029 __ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
3030 __ Add(pointer_reg, feedback,
3031 FixedArray::OffsetOfElementAt(2) - kHeapObjectTag);
3032
3033 __ Bind(&next_loop);
3034 __ Ldr(cached_map, MemOperand(pointer_reg));
3035 __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
3036 __ Cmp(receiver_map, cached_map);
3037 __ B(ne, &prepare_next);
3038 __ Ldr(handler, MemOperand(pointer_reg, kPointerSize));
3039 __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
3040 __ Jump(handler);
3041
3042 __ Bind(&prepare_next);
3043 __ Add(pointer_reg, pointer_reg, kPointerSize * 2);
3044 __ Cmp(pointer_reg, too_far);
3045 __ B(lt, &next_loop);
3046
3047 // We exhausted our array of map handler pairs.
3048 __ jmp(miss);
3049 }
3050
3051
3052 static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
3053 Register receiver_map, Register feedback,
3054 Register vector, Register slot,
3055 Register scratch, Label* compare_map,
3056 Label* load_smi_map, Label* try_array) {
3057 __ JumpIfSmi(receiver, load_smi_map);
3058 __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
3059 __ bind(compare_map);
3060 Register cached_map = scratch;
3061 // Move the weak map into the weak_cell register.
3062 __ Ldr(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
3063 __ Cmp(cached_map, receiver_map);
3064 __ B(ne, try_array);
3065
3066 Register handler = feedback;
3067 __ Add(handler, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
3068 __ Ldr(handler,
3069 FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
3070 __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
3071 __ Jump(handler);
3072 }
3073
3074 void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
3075 __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
3076 KeyedStoreICStub stub(isolate(), state());
3077 stub.GenerateForTrampoline(masm);
3078 }
3079
3080 void KeyedStoreICStub::Generate(MacroAssembler* masm) {
3081 GenerateImpl(masm, false);
3082 }
3083
3084 void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
3085 GenerateImpl(masm, true);
3086 }
3087
3088
3089 static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
3090 Register receiver_map, Register scratch1,
3091 Register scratch2, Label* miss) {
3092 // feedback initially contains the feedback array
3093 Label next_loop, prepare_next;
3094 Label start_polymorphic;
3095 Label transition_call;
3096
3097 Register cached_map = scratch1;
3098 Register too_far = scratch2;
3099 Register pointer_reg = feedback;
3100
3101 __ Ldr(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
3102
3103 // +-----+------+------+-----+-----+-----+ ... ----+
3104 // | map | len | wm0 | wt0 | h0 | wm1 | hN |
3105 // +-----+------+------+-----+-----+ ----+ ... ----+
3106 // 0 1 2 len-1
3107 // ^ ^
3108 // | |
3109 // pointer_reg too_far
3110 // aka feedback scratch2
3111 // also need receiver_map
3112 // use cached_map (scratch1) to look in the weak map values.
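  // A C-like sketch of the scan below (illustrative only; helper names are
  // descriptive, not actual V8 APIs):
  //   for (int i = 0; i < length; i += 3) {
  //     if (weak_cell_value(feedback->get(i)) != receiver_map) continue;
  //     transition = feedback->get(i + 1);
  //     if (transition == undefined)
  //       TailCall(feedback->get(i + 2));                               // plain store
  //     else
  //       TailCall(feedback->get(i + 2), weak_cell_value(transition));  // map transition
  //   }
  //   goto miss;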
3113 __ Add(too_far, feedback,
3114 Operand::UntagSmiAndScale(too_far, kPointerSizeLog2));
3115 __ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
3116 __ Add(pointer_reg, feedback,
3117 FixedArray::OffsetOfElementAt(0) - kHeapObjectTag);
3118
3119 __ Bind(&next_loop);
3120 __ Ldr(cached_map, MemOperand(pointer_reg));
3121 __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
3122 __ Cmp(receiver_map, cached_map);
3123 __ B(ne, &prepare_next);
3124 // Is it a transitioning store?
3125 __ Ldr(too_far, MemOperand(pointer_reg, kPointerSize));
3126 __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
3127 __ B(ne, &transition_call);
3128
3129 __ Ldr(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
3130 __ Add(pointer_reg, pointer_reg, Code::kHeaderSize - kHeapObjectTag);
3131 __ Jump(pointer_reg);
3132
3133 __ Bind(&transition_call);
3134 __ Ldr(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
3135 __ JumpIfSmi(too_far, miss);
3136
3137 __ Ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
3138 // Load the map into the correct register.
3139 DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
3140 __ mov(feedback, too_far);
3141 __ Add(receiver_map, receiver_map, Code::kHeaderSize - kHeapObjectTag);
3142 __ Jump(receiver_map);
3143
3144 __ Bind(&prepare_next);
3145 __ Add(pointer_reg, pointer_reg, kPointerSize * 3);
3146 __ Cmp(pointer_reg, too_far);
3147 __ B(lt, &next_loop);
3148
3149 // We exhausted our array of map handler pairs.
3150 __ jmp(miss);
3151 }
3152
3153 void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
3154 Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // x1
3155 Register key = StoreWithVectorDescriptor::NameRegister(); // x2
3156 Register vector = StoreWithVectorDescriptor::VectorRegister(); // x3
3157 Register slot = StoreWithVectorDescriptor::SlotRegister(); // x4
3158 DCHECK(StoreWithVectorDescriptor::ValueRegister().is(x0)); // x0
3159 Register feedback = x5;
3160 Register receiver_map = x6;
3161 Register scratch1 = x7;
3162
3163 __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
3164 __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
3165
3166 // Try to quickly handle the monomorphic case without knowing for sure
3167 // if we have a weak cell in feedback. We do know it's safe to look
3168 // at WeakCell::kValueOffset.
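  // A summary of the dispatch below (a sketch, not an exhaustive specification):
  //   - WeakCell holding a map      -> monomorphic store, handled inline here,
  //   - FixedArray of (map, transition, handler) triples -> polymorphic store,
  //   - megamorphic sentinel symbol -> generic store,
  //   - a Name equal to the key     -> named keyed store; the (map, handler)
  //                                    array lives in the following slot.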
3169 Label try_array, load_smi_map, compare_map;
3170 Label not_array, miss;
3171 HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
3172 scratch1, &compare_map, &load_smi_map, &try_array);
3173
3174 __ Bind(&try_array);
3175 // Is it a fixed array?
3176 __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
3177 __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, ¬_array);
3178
3179 // We have a polymorphic element handler.
3180 Label try_poly_name;
3181 HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, x8, &miss);
3182
3183 __ Bind(¬_array);
3184 // Is it generic?
3185 __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex,
3186 &try_poly_name);
3187 Handle<Code> megamorphic_stub =
3188 KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
3189 __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
3190
3191 __ Bind(&try_poly_name);
3192 // We might have a name in feedback, and a fixed array in the next slot.
3193 __ Cmp(key, feedback);
3194 __ B(ne, &miss);
3195 // If the name comparison succeeded, we know we have a fixed array with
3196 // at least one map/handler pair.
3197 __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
3198 __ Ldr(feedback,
3199 FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
3200 HandleArrayCases(masm, feedback, receiver_map, scratch1, x8, false, &miss);
3201
3202 __ Bind(&miss);
3203 KeyedStoreIC::GenerateMiss(masm);
3204
3205 __ Bind(&load_smi_map);
3206 __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
3207 __ jmp(&compare_map);
3208 }
3209
3210
3211 // The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
3212 // a "Push lr" instruction, followed by a call.
3213 static const unsigned int kProfileEntryHookCallSize =
3214 Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
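// Rough accounting (a sketch, not normative): the call site emitted by
// MaybeCallEntryHook below is two fixed-size instructions (the stack bump and
// the store of lr) plus a relocated call, which is what the constant above
// adds up to; Generate() subtracts it from lr to recover the address of the
// instrumented function's entry sequence.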
3215
3216
3217 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
3218 if (masm->isolate()->function_entry_hook() != NULL) {
3219 ProfileEntryHookStub stub(masm->isolate());
3220 Assembler::BlockConstPoolScope no_const_pools(masm);
3221 DontEmitDebugCodeScope no_debug_code(masm);
3222 Label entry_hook_call_start;
3223 __ Bind(&entry_hook_call_start);
3224 __ Push(lr);
3225 __ CallStub(&stub);
3226 DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
3227 kProfileEntryHookCallSize);
3228
3229 __ Pop(lr);
3230 }
3231 }
3232
3233
3234 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
3235 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
3236
3237 // Save all kCallerSaved registers (including lr), since this can be called
3238 // from anywhere.
3239 // TODO(jbramley): What about FP registers?
3240 __ PushCPURegList(kCallerSaved);
3241 DCHECK(kCallerSaved.IncludesAliasOf(lr));
3242 const int kNumSavedRegs = kCallerSaved.Count();
3243
3244 // Compute the function's address as the first argument.
3245 __ Sub(x0, lr, kProfileEntryHookCallSize);
3246
3247 #if V8_HOST_ARCH_ARM64
3248 uintptr_t entry_hook =
3249 reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
3250 __ Mov(x10, entry_hook);
3251 #else
3252 // Under the simulator we need to indirect the entry hook through a trampoline
3253 // function at a known address.
3254 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
3255 __ Mov(x10, Operand(ExternalReference(&dispatcher,
3256 ExternalReference::BUILTIN_CALL,
3257 isolate())));
3258   // It additionally takes an isolate as a third parameter.
3259 __ Mov(x2, ExternalReference::isolate_address(isolate()));
3260 #endif
3261
3262 // The caller's return address is above the saved temporaries.
3263 // Grab its location for the second argument to the hook.
3264 __ Add(x1, __ StackPointer(), kNumSavedRegs * kPointerSize);
3265
3266 {
3267 // Create a dummy frame, as CallCFunction requires this.
3268 FrameScope frame(masm, StackFrame::MANUAL);
3269 __ CallCFunction(x10, 2, 0);
3270 }
3271
3272 __ PopCPURegList(kCallerSaved);
3273 __ Ret();
3274 }
3275
3276
3277 void DirectCEntryStub::Generate(MacroAssembler* masm) {
3278 // When calling into C++ code the stack pointer must be csp.
3279 // Therefore this code must use csp for peek/poke operations when the
3280 // stub is generated. When the stub is called
3281 // (via DirectCEntryStub::GenerateCall), the caller must setup an ExitFrame
3282 // and configure the stack pointer *before* doing the call.
3283 const Register old_stack_pointer = __ StackPointer();
3284 __ SetStackPointer(csp);
3285
3286 // Put return address on the stack (accessible to GC through exit frame pc).
3287 __ Poke(lr, 0);
3288 // Call the C++ function.
3289 __ Blr(x10);
3290 // Return to calling code.
3291 __ Peek(lr, 0);
3292 __ AssertFPCRState();
3293 __ Ret();
3294
3295 __ SetStackPointer(old_stack_pointer);
3296 }
3297
3298 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
3299 Register target) {
3300 // Make sure the caller configured the stack pointer (see comment in
3301 // DirectCEntryStub::Generate).
3302 DCHECK(csp.Is(__ StackPointer()));
3303
3304 intptr_t code =
3305 reinterpret_cast<intptr_t>(GetCode().location());
3306 __ Mov(lr, Operand(code, RelocInfo::CODE_TARGET));
3307 __ Mov(x10, target);
3308 // Branch to the stub.
3309 __ Blr(lr);
3310 }
3311
3312
3313 // Probe the name dictionary in the 'elements' register.
3314 // Jump to the 'done' label if a property with the given name is found.
3315 // Jump to the 'miss' label otherwise.
3316 //
3317 // If lookup was successful 'scratch2' will be equal to elements + 4 * index.
3318 // 'elements' and 'name' registers are preserved on miss.
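// A C-like sketch of the inlined probing performed below (illustrative only;
// index/offset names are descriptive, not actual V8 constants):
//   mask = capacity - 1;                              // capacity is 2^n
//   for (int i = 0; i < kInlinedProbes; i++) {
//     index = (hash + i + i * i) & mask;              // quadratic probing
//     if (elements[elements_start + index * kEntrySize] == name) goto done;
//   }
//   // fall back to the out-of-line NameDictionaryLookupStub (POSITIVE_LOOKUP).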
3319 void NameDictionaryLookupStub::GeneratePositiveLookup(
3320 MacroAssembler* masm,
3321 Label* miss,
3322 Label* done,
3323 Register elements,
3324 Register name,
3325 Register scratch1,
3326 Register scratch2) {
3327 DCHECK(!AreAliased(elements, name, scratch1, scratch2));
3328
3329 // Assert that name contains a string.
3330 __ AssertName(name);
3331
3332 // Compute the capacity mask.
3333 __ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset));
3334 __ Sub(scratch1, scratch1, 1);
3335
3336 // Generate an unrolled loop that performs a few probes before giving up.
3337 for (int i = 0; i < kInlinedProbes; i++) {
3338 // Compute the masked index: (hash + i + i * i) & mask.
3339 __ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
3340 if (i > 0) {
3341       // Add the probe offset (i + i * i) left shifted to avoid right shifting
3342       // the hash in a separate instruction. The value hash + i + i * i is
3343       // right shifted by the following And instruction.
3344 DCHECK(NameDictionary::GetProbeOffset(i) <
3345 1 << (32 - Name::kHashFieldOffset));
3346 __ Add(scratch2, scratch2, Operand(
3347 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
3348 }
3349 __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
3350
3351 // Scale the index by multiplying by the element size.
3352 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
3353 __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
3354
3355 // Check if the key is identical to the name.
3356 UseScratchRegisterScope temps(masm);
3357 Register scratch3 = temps.AcquireX();
3358 __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
3359 __ Ldr(scratch3, FieldMemOperand(scratch2, kElementsStartOffset));
3360 __ Cmp(name, scratch3);
3361 __ B(eq, done);
3362 }
3363
3364 // The inlined probes didn't find the entry.
3365 // Call the complete stub to scan the whole dictionary.
3366
3367 CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
3368 spill_list.Combine(lr);
3369 spill_list.Remove(scratch1);
3370 spill_list.Remove(scratch2);
3371
3372 __ PushCPURegList(spill_list);
3373
3374 if (name.is(x0)) {
3375 DCHECK(!elements.is(x1));
3376 __ Mov(x1, name);
3377 __ Mov(x0, elements);
3378 } else {
3379 __ Mov(x0, elements);
3380 __ Mov(x1, name);
3381 }
3382
3383 Label not_found;
3384 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
3385 __ CallStub(&stub);
3386 __ Cbz(x0, ¬_found);
3387 __ Mov(scratch2, x2); // Move entry index into scratch2.
3388 __ PopCPURegList(spill_list);
3389 __ B(done);
3390
3391 __ Bind(¬_found);
3392 __ PopCPURegList(spill_list);
3393 __ B(miss);
3394 }
3395
3396
3397 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
3398 Label* miss,
3399 Label* done,
3400 Register receiver,
3401 Register properties,
3402 Handle<Name> name,
3403 Register scratch0) {
3404 DCHECK(!AreAliased(receiver, properties, scratch0));
3405 DCHECK(name->IsUniqueName());
3406   // If the names in the slots probed for this hash value (probes 1 to
3407   // kProbes - 1) are all different from the name, and the kProbes-th slot is
3408   // unused (its name is the undefined value), then the hash table is
3409   // guaranteed not to contain the property. This holds even if some slots
3410   // hold deleted properties (their names are the hole value).
3411 for (int i = 0; i < kInlinedProbes; i++) {
3412 // scratch0 points to properties hash.
3413 // Compute the masked index: (hash + i + i * i) & mask.
3414 Register index = scratch0;
3415 // Capacity is smi 2^n.
3416 __ Ldrsw(index, UntagSmiFieldMemOperand(properties, kCapacityOffset));
3417 __ Sub(index, index, 1);
3418 __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
3419
3420 // Scale the index by multiplying by the entry size.
3421 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
3422 __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
3423
3424 Register entity_name = scratch0;
3425 // Having undefined at this place means the name is not contained.
3426 Register tmp = index;
3427 __ Add(tmp, properties, Operand(index, LSL, kPointerSizeLog2));
3428 __ Ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
3429
3430 __ JumpIfRoot(entity_name, Heap::kUndefinedValueRootIndex, done);
3431
3432 // Stop if found the property.
3433 __ Cmp(entity_name, Operand(name));
3434 __ B(eq, miss);
3435
3436 Label good;
3437 __ JumpIfRoot(entity_name, Heap::kTheHoleValueRootIndex, &good);
3438
3439 // Check if the entry name is not a unique name.
3440 __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
3441 __ Ldrb(entity_name,
3442 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
3443 __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
3444 __ Bind(&good);
3445 }
3446
3447 CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
3448 spill_list.Combine(lr);
3449 spill_list.Remove(scratch0); // Scratch registers don't need to be preserved.
3450
3451 __ PushCPURegList(spill_list);
3452
3453 __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
3454 __ Mov(x1, Operand(name));
3455 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
3456 __ CallStub(&stub);
3457 // Move stub return value to scratch0. Note that scratch0 is not included in
3458 // spill_list and won't be clobbered by PopCPURegList.
3459 __ Mov(scratch0, x0);
3460 __ PopCPURegList(spill_list);
3461
3462 __ Cbz(scratch0, done);
3463 __ B(miss);
3464 }
3465
3466
3467 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
3468 // This stub overrides SometimesSetsUpAFrame() to return false. That means
3469 // we cannot call anything that could cause a GC from this stub.
3470 //
3471 // Arguments are in x0 and x1:
3472 // x0: property dictionary.
3473 // x1: the name of the property we are looking for.
3474 //
3475   // The return value is in x0: zero if the lookup failed, non-zero otherwise.
3476   // If the lookup is successful, x2 will contain the index of the entry.
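  // A C-like sketch of the unrolled probing below (illustrative only; helper
  // names are descriptive, not actual V8 APIs):
  //   for (int i = kInlinedProbes; i < kTotalProbes; i++) {
  //     index = (hash + probe_offset(i)) & mask;
  //     entry = dictionary[elements_start + index * kEntrySize];
  //     if (entry == undefined) return 0;             // definitely not present
  //     if (entry == key) return 1;                   // found
  //     if (mode() == NEGATIVE_LOOKUP && !IsUniqueName(entry)) break;
  //   }
  //   // Probing failed: negative lookups conservatively report 1, positive 0.
  //   return (mode() == NEGATIVE_LOOKUP) ? 1 : 0;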
3477
3478 Register result = x0;
3479 Register dictionary = x0;
3480 Register key = x1;
3481 Register index = x2;
3482 Register mask = x3;
3483 Register hash = x4;
3484 Register undefined = x5;
3485 Register entry_key = x6;
3486
3487 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
3488
3489 __ Ldrsw(mask, UntagSmiFieldMemOperand(dictionary, kCapacityOffset));
3490 __ Sub(mask, mask, 1);
3491
3492 __ Ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
3493 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
3494
3495 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
3496 // Compute the masked index: (hash + i + i * i) & mask.
3497 // Capacity is smi 2^n.
3498 if (i > 0) {
3499       // Add the probe offset (i + i * i) left shifted to avoid right shifting
3500       // the hash in a separate instruction. The value hash + i + i * i is
3501       // right shifted by the following And instruction.
3502 DCHECK(NameDictionary::GetProbeOffset(i) <
3503 1 << (32 - Name::kHashFieldOffset));
3504 __ Add(index, hash,
3505 NameDictionary::GetProbeOffset(i) << Name::kHashShift);
3506 } else {
3507 __ Mov(index, hash);
3508 }
3509 __ And(index, mask, Operand(index, LSR, Name::kHashShift));
3510
3511 // Scale the index by multiplying by the entry size.
3512 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
3513 __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
3514
3515 __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
3516 __ Ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
3517
3518 // Having undefined at this place means the name is not contained.
3519 __ Cmp(entry_key, undefined);
3520 __ B(eq, ¬_in_dictionary);
3521
3522 // Stop if found the property.
3523 __ Cmp(entry_key, key);
3524 __ B(eq, &in_dictionary);
3525
3526 if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
3527 // Check if the entry name is not a unique name.
3528 __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
3529 __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
3530 __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
3531 }
3532 }
3533
3534 __ Bind(&maybe_in_dictionary);
3535 // If we are doing negative lookup then probing failure should be
3536 // treated as a lookup success. For positive lookup, probing failure
3537 // should be treated as lookup failure.
3538 if (mode() == POSITIVE_LOOKUP) {
3539 __ Mov(result, 0);
3540 __ Ret();
3541 }
3542
3543 __ Bind(&in_dictionary);
3544 __ Mov(result, 1);
3545 __ Ret();
3546
3547 __ Bind(¬_in_dictionary);
3548 __ Mov(result, 0);
3549 __ Ret();
3550 }
3551
3552
3553 template<class T>
3554 static void CreateArrayDispatch(MacroAssembler* masm,
3555 AllocationSiteOverrideMode mode) {
3556 ASM_LOCATION("CreateArrayDispatch");
3557 if (mode == DISABLE_ALLOCATION_SITES) {
3558 T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
3559 __ TailCallStub(&stub);
3560
3561 } else if (mode == DONT_OVERRIDE) {
3562 Register kind = x3;
3563 int last_index =
3564 GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
3565 for (int i = 0; i <= last_index; ++i) {
3566 Label next;
3567 ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
3568 // TODO(jbramley): Is this the best way to handle this? Can we make the
3569 // tail calls conditional, rather than hopping over each one?
3570 __ CompareAndBranch(kind, candidate_kind, ne, &next);
3571 T stub(masm->isolate(), candidate_kind);
3572 __ TailCallStub(&stub);
3573 __ Bind(&next);
3574 }
3575
3576 // If we reached this point there is a problem.
3577 __ Abort(kUnexpectedElementsKindInArrayConstructor);
3578
3579 } else {
3580 UNREACHABLE();
3581 }
3582 }
3583
3584
3585 // TODO(jbramley): If this needs to be a special case, make it a proper template
3586 // specialization, and not a separate function.
3587 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
3588 AllocationSiteOverrideMode mode) {
3589 ASM_LOCATION("CreateArrayDispatchOneArgument");
3590 // x0 - argc
3591 // x1 - constructor?
3592 // x2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
3593 // x3 - kind (if mode != DISABLE_ALLOCATION_SITES)
3594 // sp[0] - last argument
3595
3596 Register allocation_site = x2;
3597 Register kind = x3;
3598
3599 Label normal_sequence;
3600 if (mode == DONT_OVERRIDE) {
3601 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3602 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3603 STATIC_ASSERT(FAST_ELEMENTS == 2);
3604 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3605 STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
3606 STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
3607
3608 // Is the low bit set? If so, the array is holey.
3609 __ Tbnz(kind, 0, &normal_sequence);
3610 }
3611
3612 // Look at the last argument.
3613 // TODO(jbramley): What does a 0 argument represent?
3614 __ Peek(x10, 0);
3615 __ Cbz(x10, &normal_sequence);
3616
3617 if (mode == DISABLE_ALLOCATION_SITES) {
3618 ElementsKind initial = GetInitialFastElementsKind();
3619 ElementsKind holey_initial = GetHoleyElementsKind(initial);
3620
3621 ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
3622 holey_initial,
3623 DISABLE_ALLOCATION_SITES);
3624 __ TailCallStub(&stub_holey);
3625
3626 __ Bind(&normal_sequence);
3627 ArraySingleArgumentConstructorStub stub(masm->isolate(),
3628 initial,
3629 DISABLE_ALLOCATION_SITES);
3630 __ TailCallStub(&stub);
3631 } else if (mode == DONT_OVERRIDE) {
3632 // We are going to create a holey array, but our kind is non-holey.
3633 // Fix kind and retry (only if we have an allocation site in the slot).
3634 __ Orr(kind, kind, 1);
3635
3636 if (FLAG_debug_code) {
3637 __ Ldr(x10, FieldMemOperand(allocation_site, 0));
3638 __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex,
3639 &normal_sequence);
3640 __ Assert(eq, kExpectedAllocationSite);
3641 }
3642
3643 // Save the resulting elements kind in type info. We can't just store 'kind'
3644 // in the AllocationSite::transition_info field because elements kind is
3645 // restricted to a portion of the field; upper bits need to be left alone.
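    // Illustrative example (a sketch): ElementsKindBits occupy the low bits, so
    // adding Smi::FromInt(kFastElementsKindPackedToHoley) turns e.g.
    // FAST_SMI_ELEMENTS (0) into FAST_HOLEY_SMI_ELEMENTS (1) while leaving the
    // upper bit fields of transition_info untouched.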
3646 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
3647 __ Ldr(x11, FieldMemOperand(allocation_site,
3648 AllocationSite::kTransitionInfoOffset));
3649 __ Add(x11, x11, Smi::FromInt(kFastElementsKindPackedToHoley));
3650 __ Str(x11, FieldMemOperand(allocation_site,
3651 AllocationSite::kTransitionInfoOffset));
3652
3653 __ Bind(&normal_sequence);
3654 int last_index =
3655 GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
3656 for (int i = 0; i <= last_index; ++i) {
3657 Label next;
3658 ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
3659 __ CompareAndBranch(kind, candidate_kind, ne, &next);
3660 ArraySingleArgumentConstructorStub stub(masm->isolate(), candidate_kind);
3661 __ TailCallStub(&stub);
3662 __ Bind(&next);
3663 }
3664
3665 // If we reached this point there is a problem.
3666 __ Abort(kUnexpectedElementsKindInArrayConstructor);
3667 } else {
3668 UNREACHABLE();
3669 }
3670 }
3671
3672
3673 template<class T>
3674 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
3675 int to_index = GetSequenceIndexFromFastElementsKind(
3676 TERMINAL_FAST_ELEMENTS_KIND);
3677 for (int i = 0; i <= to_index; ++i) {
3678 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
3679 T stub(isolate, kind);
3680 stub.GetCode();
3681 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
3682 T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
3683 stub1.GetCode();
3684 }
3685 }
3686 }
3687
3688 void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
3689 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
3690 isolate);
3691 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
3692 isolate);
3693 ArrayNArgumentsConstructorStub stub(isolate);
3694 stub.GetCode();
3695 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
3696 for (int i = 0; i < 2; i++) {
3697 // For internal arrays we only need a few things
3698 InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
3699 stubh1.GetCode();
3700 InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
3701 stubh2.GetCode();
3702 }
3703 }
3704
3705
3706 void ArrayConstructorStub::GenerateDispatchToArrayStub(
3707 MacroAssembler* masm,
3708 AllocationSiteOverrideMode mode) {
3709 Register argc = x0;
3710 Label zero_case, n_case;
3711 __ Cbz(argc, &zero_case);
3712 __ Cmp(argc, 1);
3713 __ B(ne, &n_case);
3714
3715 // One argument.
3716 CreateArrayDispatchOneArgument(masm, mode);
3717
3718 __ Bind(&zero_case);
3719 // No arguments.
3720 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
3721
3722 __ Bind(&n_case);
3723 // N arguments.
3724 ArrayNArgumentsConstructorStub stub(masm->isolate());
3725 __ TailCallStub(&stub);
3726 }
3727
3728
3729 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
3730 ASM_LOCATION("ArrayConstructorStub::Generate");
3731 // ----------- S t a t e -------------
3732 // -- x0 : argc (only if argument_count() is ANY or MORE_THAN_ONE)
3733 // -- x1 : constructor
3734 // -- x2 : AllocationSite or undefined
3735 // -- x3 : new target
3736 // -- sp[0] : last argument
3737 // -----------------------------------
3738 Register constructor = x1;
3739 Register allocation_site = x2;
3740 Register new_target = x3;
3741
3742 if (FLAG_debug_code) {
3743 // The array construct code is only set for the global and natives
3744 // builtin Array functions which always have maps.
3745
3746 Label unexpected_map, map_ok;
3747 // Initial map for the builtin Array function should be a map.
3748 __ Ldr(x10, FieldMemOperand(constructor,
3749 JSFunction::kPrototypeOrInitialMapOffset));
3750     // A Smi test catches both a NULL value and a Smi here.
3751 __ JumpIfSmi(x10, &unexpected_map);
3752 __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
3753 __ Bind(&unexpected_map);
3754 __ Abort(kUnexpectedInitialMapForArrayFunction);
3755 __ Bind(&map_ok);
3756
3757 // We should either have undefined in the allocation_site register or a
3758 // valid AllocationSite.
3759 __ AssertUndefinedOrAllocationSite(allocation_site, x10);
3760 }
3761
3762 // Enter the context of the Array function.
3763 __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
3764
3765 Label subclassing;
3766 __ Cmp(new_target, constructor);
3767 __ B(ne, &subclassing);
3768
3769 Register kind = x3;
3770 Label no_info;
3771 // Get the elements kind and case on that.
3772 __ JumpIfRoot(allocation_site, Heap::kUndefinedValueRootIndex, &no_info);
3773
3774 __ Ldrsw(kind,
3775 UntagSmiFieldMemOperand(allocation_site,
3776 AllocationSite::kTransitionInfoOffset));
3777 __ And(kind, kind, AllocationSite::ElementsKindBits::kMask);
3778 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
3779
3780 __ Bind(&no_info);
3781 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
3782
3783 // Subclassing support.
3784 __ Bind(&subclassing);
3785 __ Poke(constructor, Operand(x0, LSL, kPointerSizeLog2));
3786 __ Add(x0, x0, Operand(3));
3787 __ Push(new_target, allocation_site);
3788 __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
3789 }
3790
3791
3792 void InternalArrayConstructorStub::GenerateCase(
3793 MacroAssembler* masm, ElementsKind kind) {
3794 Label zero_case, n_case;
3795 Register argc = x0;
3796
3797 __ Cbz(argc, &zero_case);
3798 __ CompareAndBranch(argc, 1, ne, &n_case);
3799
3800 // One argument.
3801 if (IsFastPackedElementsKind(kind)) {
3802 Label packed_case;
3803
3804 // We might need to create a holey array; look at the first argument.
3805 __ Peek(x10, 0);
3806 __ Cbz(x10, &packed_case);
3807
3808 InternalArraySingleArgumentConstructorStub
3809 stub1_holey(isolate(), GetHoleyElementsKind(kind));
3810 __ TailCallStub(&stub1_holey);
3811
3812 __ Bind(&packed_case);
3813 }
3814 InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
3815 __ TailCallStub(&stub1);
3816
3817 __ Bind(&zero_case);
3818 // No arguments.
3819 InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
3820 __ TailCallStub(&stub0);
3821
3822 __ Bind(&n_case);
3823 // N arguments.
3824 ArrayNArgumentsConstructorStub stubN(isolate());
3825 __ TailCallStub(&stubN);
3826 }
3827
3828
3829 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
3830 // ----------- S t a t e -------------
3831 // -- x0 : argc
3832 // -- x1 : constructor
3833 // -- sp[0] : return address
3834 // -- sp[4] : last argument
3835 // -----------------------------------
3836
3837 Register constructor = x1;
3838
3839 if (FLAG_debug_code) {
3840 // The array construct code is only set for the global and natives
3841 // builtin Array functions which always have maps.
3842
3843 Label unexpected_map, map_ok;
3844 // Initial map for the builtin Array function should be a map.
3845 __ Ldr(x10, FieldMemOperand(constructor,
3846 JSFunction::kPrototypeOrInitialMapOffset));
3847     // A Smi test catches both a NULL value and a Smi here.
3848 __ JumpIfSmi(x10, &unexpected_map);
3849 __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
3850 __ Bind(&unexpected_map);
3851 __ Abort(kUnexpectedInitialMapForArrayFunction);
3852 __ Bind(&map_ok);
3853 }
3854
3855 Register kind = w3;
3856 // Figure out the right elements kind
3857 __ Ldr(x10, FieldMemOperand(constructor,
3858 JSFunction::kPrototypeOrInitialMapOffset));
3859
3860 // Retrieve elements_kind from map.
3861 __ LoadElementsKindFromMap(kind, x10);
3862
3863 if (FLAG_debug_code) {
3864 Label done;
3865 __ Cmp(x3, FAST_ELEMENTS);
3866 __ Ccmp(x3, FAST_HOLEY_ELEMENTS, ZFlag, ne);
3867 __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
3868 }
3869
3870 Label fast_elements_case;
3871 __ CompareAndBranch(kind, FAST_ELEMENTS, eq, &fast_elements_case);
3872 GenerateCase(masm, FAST_HOLEY_ELEMENTS);
3873
3874 __ Bind(&fast_elements_case);
3875 GenerateCase(masm, FAST_ELEMENTS);
3876 }
3877
3878
3879 void FastNewObjectStub::Generate(MacroAssembler* masm) {
3880 // ----------- S t a t e -------------
3881 // -- x1 : target
3882 // -- x3 : new target
3883 // -- cp : context
3884 // -- lr : return address
3885 // -----------------------------------
3886 __ AssertFunction(x1);
3887 __ AssertReceiver(x3);
3888
3889 // Verify that the new target is a JSFunction.
3890 Label new_object;
3891 __ JumpIfNotObjectType(x3, x2, x2, JS_FUNCTION_TYPE, &new_object);
3892
3893 // Load the initial map and verify that it's in fact a map.
3894 __ Ldr(x2, FieldMemOperand(x3, JSFunction::kPrototypeOrInitialMapOffset));
3895 __ JumpIfSmi(x2, &new_object);
3896 __ JumpIfNotObjectType(x2, x0, x0, MAP_TYPE, &new_object);
3897
3898 // Fall back to runtime if the target differs from the new target's
3899 // initial map constructor.
3900 __ Ldr(x0, FieldMemOperand(x2, Map::kConstructorOrBackPointerOffset));
3901 __ CompareAndBranch(x0, x1, ne, &new_object);
3902
3903 // Allocate the JSObject on the heap.
3904 Label allocate, done_allocate;
3905 __ Ldrb(x4, FieldMemOperand(x2, Map::kInstanceSizeOffset));
3906 __ Allocate(x4, x0, x5, x6, &allocate, SIZE_IN_WORDS);
3907 __ Bind(&done_allocate);
3908
3909 // Initialize the JSObject fields.
3910 STATIC_ASSERT(JSObject::kMapOffset == 0 * kPointerSize);
3911 __ Str(x2, FieldMemOperand(x0, JSObject::kMapOffset));
3912 __ LoadRoot(x3, Heap::kEmptyFixedArrayRootIndex);
3913 STATIC_ASSERT(JSObject::kPropertiesOffset == 1 * kPointerSize);
3914 STATIC_ASSERT(JSObject::kElementsOffset == 2 * kPointerSize);
3915 __ Str(x3, FieldMemOperand(x0, JSObject::kPropertiesOffset));
3916 __ Str(x3, FieldMemOperand(x0, JSObject::kElementsOffset));
3917 STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
3918 __ Add(x1, x0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
3919
3920 // ----------- S t a t e -------------
3921 // -- x0 : result (tagged)
3922 // -- x1 : result fields (untagged)
3923 // -- x5 : result end (untagged)
3924 // -- x2 : initial map
3925 // -- cp : context
3926 // -- lr : return address
3927 // -----------------------------------
3928
3929 // Perform in-object slack tracking if requested.
3930 Label slack_tracking;
3931 STATIC_ASSERT(Map::kNoSlackTracking == 0);
3932 __ LoadRoot(x6, Heap::kUndefinedValueRootIndex);
3933 __ Ldr(w3, FieldMemOperand(x2, Map::kBitField3Offset));
3934 __ TestAndBranchIfAnySet(w3, Map::ConstructionCounter::kMask,
3935 &slack_tracking);
3936 {
3937 // Initialize all in-object fields with undefined.
3938 __ InitializeFieldsWithFiller(x1, x5, x6);
3939 __ Ret();
3940 }
3941 __ Bind(&slack_tracking);
3942 {
3943 // Decrease generous allocation count.
3944 STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
3945 __ Sub(w3, w3, 1 << Map::ConstructionCounter::kShift);
3946 __ Str(w3, FieldMemOperand(x2, Map::kBitField3Offset));
3947
3948 // Initialize the in-object fields with undefined.
3949 __ Ldrb(x4, FieldMemOperand(x2, Map::kUnusedPropertyFieldsOffset));
3950 __ Sub(x4, x5, Operand(x4, LSL, kPointerSizeLog2));
3951 __ InitializeFieldsWithFiller(x1, x4, x6);
3952
3953 // Initialize the remaining (reserved) fields with one pointer filler map.
3954 __ LoadRoot(x6, Heap::kOnePointerFillerMapRootIndex);
3955 __ InitializeFieldsWithFiller(x1, x5, x6);
3956
3957 // Check if we can finalize the instance size.
3958 Label finalize;
3959 STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
3960 __ TestAndBranchIfAllClear(w3, Map::ConstructionCounter::kMask, &finalize);
3961 __ Ret();
3962
3963 // Finalize the instance size.
3964 __ Bind(&finalize);
3965 {
3966 FrameScope scope(masm, StackFrame::INTERNAL);
3967 __ Push(x0, x2);
3968 __ CallRuntime(Runtime::kFinalizeInstanceSize);
3969 __ Pop(x0);
3970 }
3971 __ Ret();
3972 }
3973
3974 // Fall back to %AllocateInNewSpace.
3975 __ Bind(&allocate);
3976 {
3977 FrameScope scope(masm, StackFrame::INTERNAL);
3978 STATIC_ASSERT(kSmiTag == 0);
3979 STATIC_ASSERT(kSmiTagSize == 1);
3980 __ Mov(x4,
3981 Operand(x4, LSL, kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize));
3982 __ Push(x2, x4);
3983 __ CallRuntime(Runtime::kAllocateInNewSpace);
3984 __ Pop(x2);
3985 }
3986 __ Ldrb(x5, FieldMemOperand(x2, Map::kInstanceSizeOffset));
3987 __ Add(x5, x0, Operand(x5, LSL, kPointerSizeLog2));
3988 STATIC_ASSERT(kHeapObjectTag == 1);
3989 __ Sub(x5, x5, kHeapObjectTag); // Subtract the tag from end.
3990 __ B(&done_allocate);
3991
3992 // Fall back to %NewObject.
3993 __ Bind(&new_object);
3994 __ Push(x1, x3);
3995 __ TailCallRuntime(Runtime::kNewObject);
3996 }
3997
3998
3999 void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
4000 // ----------- S t a t e -------------
4001 // -- x1 : function
4002 // -- cp : context
4003 // -- fp : frame pointer
4004 // -- lr : return address
4005 // -----------------------------------
4006 __ AssertFunction(x1);
4007
4008 // Make x2 point to the JavaScript frame.
4009 __ Mov(x2, fp);
4010 if (skip_stub_frame()) {
4011 // For Ignition we need to skip the handler/stub frame to reach the
4012 // JavaScript frame for the function.
4013 __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
4014 }
4015 if (FLAG_debug_code) {
4016 Label ok;
4017 __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kFunctionOffset));
4018 __ Cmp(x3, x1);
4019 __ B(eq, &ok);
4020 __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
4021 __ Bind(&ok);
4022 }
4023
4024 // Check if we have rest parameters (only possible if we have an
4025 // arguments adaptor frame below the function frame).
4026 Label no_rest_parameters;
4027 __ Ldr(x2, MemOperand(x2, CommonFrameConstants::kCallerFPOffset));
4028 __ Ldr(x3, MemOperand(x2, CommonFrameConstants::kContextOrFrameTypeOffset));
4029 __ Cmp(x3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
4030 __ B(ne, &no_rest_parameters);
4031
4032 // Check if the arguments adaptor frame contains more arguments than
4033 // specified by the function's internal formal parameter count.
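  // For example (illustrative): with a formal parameter count of 2 and an
  // adaptor frame carrying 5 actual arguments, x0 ends up as 5 - 2 = 3 rest
  // parameters; a non-positive difference takes the empty-array path below.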
4034 Label rest_parameters;
4035 __ Ldrsw(x0, UntagSmiMemOperand(
4036 x2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4037 __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
4038 __ Ldrsw(
4039 x3, FieldMemOperand(x3, SharedFunctionInfo::kFormalParameterCountOffset));
4040 __ Subs(x0, x0, x3);
4041 __ B(gt, &rest_parameters);
4042
4043 // Return an empty rest parameter array.
4044 __ Bind(&no_rest_parameters);
4045 {
4046 // ----------- S t a t e -------------
4047 // -- cp : context
4048 // -- lr : return address
4049 // -----------------------------------
4050
4051 // Allocate an empty rest parameter array.
4052 Label allocate, done_allocate;
4053 __ Allocate(JSArray::kSize, x0, x1, x2, &allocate, NO_ALLOCATION_FLAGS);
4054 __ Bind(&done_allocate);
4055
4056 // Setup the rest parameter array in x0.
4057 __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, x1);
4058 __ Str(x1, FieldMemOperand(x0, JSArray::kMapOffset));
4059 __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
4060 __ Str(x1, FieldMemOperand(x0, JSArray::kPropertiesOffset));
4061 __ Str(x1, FieldMemOperand(x0, JSArray::kElementsOffset));
4062 __ Mov(x1, Smi::kZero);
4063 __ Str(x1, FieldMemOperand(x0, JSArray::kLengthOffset));
4064 STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
4065 __ Ret();
4066
4067 // Fall back to %AllocateInNewSpace.
4068 __ Bind(&allocate);
4069 {
4070 FrameScope scope(masm, StackFrame::INTERNAL);
4071 __ Push(Smi::FromInt(JSArray::kSize));
4072 __ CallRuntime(Runtime::kAllocateInNewSpace);
4073 }
4074 __ B(&done_allocate);
4075 }
4076
4077 __ Bind(&rest_parameters);
4078 {
4079     // Compute the pointer to the first rest parameter (skipping the receiver).
4080 __ Add(x2, x2, Operand(x0, LSL, kPointerSizeLog2));
4081 __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
4082
4083 // ----------- S t a t e -------------
4084 // -- cp : context
4085 // -- x0 : number of rest parameters
4086 // -- x1 : function
4087 // -- x2 : pointer to first rest parameters
4088 // -- lr : return address
4089 // -----------------------------------
4090
4091 // Allocate space for the rest parameter array plus the backing store.
4092 Label allocate, done_allocate;
4093 __ Mov(x6, JSArray::kSize + FixedArray::kHeaderSize);
4094 __ Add(x6, x6, Operand(x0, LSL, kPointerSizeLog2));
4095 __ Allocate(x6, x3, x4, x5, &allocate, NO_ALLOCATION_FLAGS);
4096 __ Bind(&done_allocate);
4097
4098 // Compute arguments.length in x6.
4099 __ SmiTag(x6, x0);
4100
4101 // Setup the elements array in x3.
4102 __ LoadRoot(x1, Heap::kFixedArrayMapRootIndex);
4103 __ Str(x1, FieldMemOperand(x3, FixedArray::kMapOffset));
4104 __ Str(x6, FieldMemOperand(x3, FixedArray::kLengthOffset));
4105 __ Add(x4, x3, FixedArray::kHeaderSize);
4106 {
4107 Label loop, done_loop;
4108 __ Add(x0, x4, Operand(x0, LSL, kPointerSizeLog2));
4109 __ Bind(&loop);
4110 __ Cmp(x4, x0);
4111 __ B(eq, &done_loop);
4112 __ Ldr(x5, MemOperand(x2, 0 * kPointerSize));
4113 __ Str(x5, FieldMemOperand(x4, 0 * kPointerSize));
4114 __ Sub(x2, x2, Operand(1 * kPointerSize));
4115 __ Add(x4, x4, Operand(1 * kPointerSize));
4116 __ B(&loop);
4117 __ Bind(&done_loop);
4118 }
4119
4120 // Setup the rest parameter array in x0.
4121 __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, x1);
4122 __ Str(x1, FieldMemOperand(x0, JSArray::kMapOffset));
4123 __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
4124 __ Str(x1, FieldMemOperand(x0, JSArray::kPropertiesOffset));
4125 __ Str(x3, FieldMemOperand(x0, JSArray::kElementsOffset));
4126 __ Str(x6, FieldMemOperand(x0, JSArray::kLengthOffset));
4127 STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
4128 __ Ret();
4129
4130 // Fall back to %AllocateInNewSpace (if not too big).
4131 Label too_big_for_new_space;
4132 __ Bind(&allocate);
4133 __ Cmp(x6, Operand(kMaxRegularHeapObjectSize));
4134 __ B(gt, &too_big_for_new_space);
4135 {
4136 FrameScope scope(masm, StackFrame::INTERNAL);
4137 __ SmiTag(x0);
4138 __ SmiTag(x6);
4139 __ Push(x0, x2, x6);
4140 __ CallRuntime(Runtime::kAllocateInNewSpace);
4141 __ Mov(x3, x0);
4142 __ Pop(x2, x0);
4143 __ SmiUntag(x0);
4144 }
4145 __ B(&done_allocate);
4146
4147 // Fall back to %NewRestParameter.
4148 __ Bind(&too_big_for_new_space);
4149 __ Push(x1);
4150 __ TailCallRuntime(Runtime::kNewRestParameter);
4151 }
4152 }
4153
4154
4155 void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
4156 // ----------- S t a t e -------------
4157 // -- x1 : function
4158 // -- cp : context
4159 // -- fp : frame pointer
4160 // -- lr : return address
4161 // -----------------------------------
4162 __ AssertFunction(x1);
4163
4164 // Make x6 point to the JavaScript frame.
4165 __ Mov(x6, fp);
4166 if (skip_stub_frame()) {
4167 // For Ignition we need to skip the handler/stub frame to reach the
4168 // JavaScript frame for the function.
4169 __ Ldr(x6, MemOperand(x6, StandardFrameConstants::kCallerFPOffset));
4170 }
4171 if (FLAG_debug_code) {
4172 Label ok;
4173 __ Ldr(x3, MemOperand(x6, StandardFrameConstants::kFunctionOffset));
4174 __ Cmp(x3, x1);
4175 __ B(eq, &ok);
4176 __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
4177 __ Bind(&ok);
4178 }
4179
4180 // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
4181 __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
4182 __ Ldrsw(
4183 x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
4184 __ Add(x3, x6, Operand(x2, LSL, kPointerSizeLog2));
4185 __ Add(x3, x3, Operand(StandardFrameConstants::kCallerSPOffset));
4186 __ SmiTag(x2);
4187
4188 // x1 : function
4189 // x2 : number of parameters (tagged)
4190 // x3 : parameters pointer
4191 // x6 : JavaScript frame pointer
4192 //
4193 // Returns pointer to result object in x0.
4194
4195 // Make an untagged copy of the parameter count.
4196 // Note: arg_count_smi is an alias of param_count_smi.
4197 Register function = x1;
4198 Register arg_count_smi = x2;
4199 Register param_count_smi = x2;
4200 Register recv_arg = x3;
4201 Register param_count = x7;
4202 __ SmiUntag(param_count, param_count_smi);
4203
4204 // Check if the calling frame is an arguments adaptor frame.
4205 Register caller_fp = x11;
4206 Register caller_ctx = x12;
4207 Label runtime;
4208 Label adaptor_frame, try_allocate;
4209 __ Ldr(caller_fp, MemOperand(x6, StandardFrameConstants::kCallerFPOffset));
4210 __ Ldr(
4211 caller_ctx,
4212 MemOperand(caller_fp, CommonFrameConstants::kContextOrFrameTypeOffset));
4213 __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
4214 __ B(eq, &adaptor_frame);
4215
4216 // No adaptor, parameter count = argument count.
4217
4218 // x1 function function pointer
4219 // x2 arg_count_smi number of function arguments (smi)
4220 // x3 recv_arg pointer to receiver arguments
4221 // x4 mapped_params number of mapped params, min(params, args) (uninit)
4222 // x7 param_count number of function parameters
4223 // x11 caller_fp caller's frame pointer
4224 // x14 arg_count number of function arguments (uninit)
4225
4226 Register arg_count = x14;
4227 Register mapped_params = x4;
4228 __ Mov(arg_count, param_count);
4229 __ Mov(mapped_params, param_count);
4230 __ B(&try_allocate);
4231
4232 // We have an adaptor frame. Patch the parameters pointer.
4233 __ Bind(&adaptor_frame);
4234 __ Ldr(arg_count_smi,
4235 MemOperand(caller_fp,
4236 ArgumentsAdaptorFrameConstants::kLengthOffset));
4237 __ SmiUntag(arg_count, arg_count_smi);
4238 __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2));
4239 __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset);
4240
4241 // Compute the mapped parameter count = min(param_count, arg_count)
4242 __ Cmp(param_count, arg_count);
4243 __ Csel(mapped_params, param_count, arg_count, lt);
4244
4245 __ Bind(&try_allocate);
4246
4247 // x0 alloc_obj pointer to allocated objects: param map, backing
4248 // store, arguments (uninit)
4249 // x1 function function pointer
4250 // x2 arg_count_smi number of function arguments (smi)
4251 // x3 recv_arg pointer to receiver arguments
4252 // x4 mapped_params number of mapped parameters, min(params, args)
4253 // x7 param_count number of function parameters
4254 // x10 size size of objects to allocate (uninit)
4255 // x14 arg_count number of function arguments
4256
4257 // Compute the size of backing store, parameter map, and arguments object.
4258 // 1. Parameter map, has two extra words containing context and backing
4259 // store.
4260 const int kParameterMapHeaderSize =
4261 FixedArray::kHeaderSize + 2 * kPointerSize;
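  // Illustrative layout of the single allocation made below (a sketch; the
  // offsets follow the constants used in this function):
  //   [ JSSloppyArgumentsObject                                          ]
  //   [ parameter map: map | length | context | backing store | slots... ]
  //   [ backing store: FixedArray header | arg_count elements            ]
  // The parameter map is omitted entirely when mapped_params == 0.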
4262
4263 // Calculate the parameter map size, assuming it exists.
4264 Register size = x10;
4265 __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2));
4266 __ Add(size, size, kParameterMapHeaderSize);
4267
4268 // If there are no mapped parameters, set the running size total to zero.
4269 // Otherwise, use the parameter map size calculated earlier.
4270 __ Cmp(mapped_params, 0);
4271 __ CzeroX(size, eq);
4272
4273 // 2. Add the size of the backing store and arguments object.
4274 __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2));
4275 __ Add(size, size, FixedArray::kHeaderSize + JSSloppyArgumentsObject::kSize);
4276
4277 // Do the allocation of all three objects in one go. Assign this to x0, as it
4278 // will be returned to the caller.
4279 Register alloc_obj = x0;
4280 __ Allocate(size, alloc_obj, x11, x12, &runtime, NO_ALLOCATION_FLAGS);
4281
4282 // Get the arguments boilerplate from the current (global) context.
4283
4284 // x0 alloc_obj pointer to allocated objects (param map, backing
4285 // store, arguments)
4286 // x1 function function pointer
4287 // x2 arg_count_smi number of function arguments (smi)
4288 // x3 recv_arg pointer to receiver arguments
4289 // x4 mapped_params number of mapped parameters, min(params, args)
4290 // x7 param_count number of function parameters
4291 // x11 sloppy_args_map offset to args (or aliased args) map (uninit)
4292 // x14 arg_count number of function arguments
4293
4294 Register global_ctx = x10;
4295 Register sloppy_args_map = x11;
4296 Register aliased_args_map = x10;
4297 __ Ldr(global_ctx, NativeContextMemOperand());
4298
4299 __ Ldr(sloppy_args_map,
4300 ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
4301 __ Ldr(
4302 aliased_args_map,
4303 ContextMemOperand(global_ctx, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX));
4304 __ Cmp(mapped_params, 0);
4305 __ CmovX(sloppy_args_map, aliased_args_map, ne);
4306
4307 // Copy the JS object part.
4308 __ Str(sloppy_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
4309 __ LoadRoot(x10, Heap::kEmptyFixedArrayRootIndex);
4310 __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset));
4311 __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
4312
4313 // Set up the callee in-object property.
4314 __ AssertNotSmi(function);
4315 __ Str(function,
4316 FieldMemOperand(alloc_obj, JSSloppyArgumentsObject::kCalleeOffset));
4317
4318 // Use the length and set that as an in-object property.
4319 __ Str(arg_count_smi,
4320 FieldMemOperand(alloc_obj, JSSloppyArgumentsObject::kLengthOffset));
4321
4322 // Set up the elements pointer in the allocated arguments object.
4323 // If we allocated a parameter map, "elements" will point there, otherwise
4324 // it will point to the backing store.
4325
4326 // x0 alloc_obj pointer to allocated objects (param map, backing
4327 // store, arguments)
4328 // x1 function function pointer
4329 // x2 arg_count_smi number of function arguments (smi)
4330 // x3 recv_arg pointer to receiver arguments
4331 // x4 mapped_params number of mapped parameters, min(params, args)
4332 // x5 elements pointer to parameter map or backing store (uninit)
4333 // x6 backing_store pointer to backing store (uninit)
4334 // x7 param_count number of function parameters
4335 // x14 arg_count number of function arguments
4336
4337 Register elements = x5;
4338 __ Add(elements, alloc_obj, JSSloppyArgumentsObject::kSize);
4339 __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
4340
4341 // Initialize parameter map. If there are no mapped arguments, we're done.
4342 Label skip_parameter_map;
4343 __ Cmp(mapped_params, 0);
4344 // Set up backing store address, because it is needed later for filling in
4345 // the unmapped arguments.
4346 Register backing_store = x6;
4347 __ CmovX(backing_store, elements, eq);
4348 __ B(eq, &skip_parameter_map);
4349
4350 __ LoadRoot(x10, Heap::kSloppyArgumentsElementsMapRootIndex);
4351 __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
4352 __ Add(x10, mapped_params, 2);
4353 __ SmiTag(x10);
4354 __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset));
4355 __ Str(cp, FieldMemOperand(elements,
4356 FixedArray::kHeaderSize + 0 * kPointerSize));
4357 __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2));
4358 __ Add(x10, x10, kParameterMapHeaderSize);
4359 __ Str(x10, FieldMemOperand(elements,
4360 FixedArray::kHeaderSize + 1 * kPointerSize));
4361
4362 // Copy the parameter slots and the holes in the arguments.
4363 // We need to fill in mapped_parameter_count slots. Then index the context,
4364 // where parameters are stored in reverse order, at:
4365 //
4366 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1
4367 //
4368 // The mapped parameter thus needs to get indices:
4369 //
4370 // MIN_CONTEXT_SLOTS + parameter_count - 1 ..
4371 // MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count
4372 //
4373 // We loop from right to left.
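  // A C-like sketch of the loop below (illustrative only):
  //   index = MIN_CONTEXT_SLOTS + param_count - mapped_params;
  //   for (int i = mapped_params - 1; i >= 0; i--) {
  //     parameter_map[2 + i] = Smi::FromInt(index);  // context slot for param i
  //     backing_store[i] = the_hole;                 // marks the slot as mapped
  //     index++;
  //   }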
4374
4375 // x0 alloc_obj pointer to allocated objects (param map, backing
4376 // store, arguments)
4377 // x1 function function pointer
4378 // x2 arg_count_smi number of function arguments (smi)
4379 // x3 recv_arg pointer to receiver arguments
4380 // x4 mapped_params number of mapped parameters, min(params, args)
4381 // x5 elements pointer to parameter map or backing store (uninit)
4382 // x6 backing_store pointer to backing store (uninit)
4383 // x7 param_count number of function parameters
4384 // x11 loop_count parameter loop counter (uninit)
4385 // x12 index parameter index (smi, uninit)
4386 // x13 the_hole hole value (uninit)
4387 // x14 arg_count number of function arguments
4388
4389 Register loop_count = x11;
4390 Register index = x12;
4391 Register the_hole = x13;
4392 Label parameters_loop, parameters_test;
4393 __ Mov(loop_count, mapped_params);
4394 __ Add(index, param_count, static_cast<int>(Context::MIN_CONTEXT_SLOTS));
4395 __ Sub(index, index, mapped_params);
4396 __ SmiTag(index);
4397 __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
4398 __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2));
4399 __ Add(backing_store, backing_store, kParameterMapHeaderSize);
4400
4401 __ B(¶meters_test);
4402
4403 __ Bind(¶meters_loop);
4404 __ Sub(loop_count, loop_count, 1);
4405 __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2));
4406 __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag);
4407 __ Str(index, MemOperand(elements, x10));
4408 __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize);
4409 __ Str(the_hole, MemOperand(backing_store, x10));
4410 __ Add(index, index, Smi::FromInt(1));
4411 __ Bind(¶meters_test);
4412 __ Cbnz(loop_count, ¶meters_loop);
4413
4414 __ Bind(&skip_parameter_map);
4415   // Copy the arguments header and the remaining slots (if there are any).
4416 __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
4417 __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset));
4418 __ Str(arg_count_smi, FieldMemOperand(backing_store,
4419 FixedArray::kLengthOffset));
4420
4421 // x0 alloc_obj pointer to allocated objects (param map, backing
4422 // store, arguments)
4423 // x1 function function pointer
4424 // x2 arg_count_smi number of function arguments (smi)
4425 // x3 recv_arg pointer to receiver arguments
4426 // x4 mapped_params number of mapped parameters, min(params, args)
4427 // x6 backing_store pointer to backing store (uninit)
4428 // x14 arg_count number of function arguments
4429
4430 Label arguments_loop, arguments_test;
4431 __ Mov(x10, mapped_params);
4432 __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2));
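// recv_arg has been moved past the mapped arguments; x10 holds the index of
// the first unmapped argument to copy into the backing store.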
4433 __ B(&arguments_test);
4434
4435 __ Bind(&arguments_loop);
4436 __ Sub(recv_arg, recv_arg, kPointerSize);
4437 __ Ldr(x11, MemOperand(recv_arg));
4438 __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2));
4439 __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
4440 __ Add(x10, x10, 1);
4441
4442 __ Bind(&arguments_test);
4443 __ Cmp(x10, arg_count);
4444 __ B(lt, &arguments_loop);
4445
4446 __ Ret();
4447
4448 // Do the runtime call to allocate the arguments object.
4449 __ Bind(&runtime);
4450 __ Push(function, recv_arg, arg_count_smi);
4451 __ TailCallRuntime(Runtime::kNewSloppyArguments);
4452 }
4453
4454
4455 void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
4456 // ----------- S t a t e -------------
4457 // -- x1 : function
4458 // -- cp : context
4459 // -- fp : frame pointer
4460 // -- lr : return address
4461 // -----------------------------------
4462 __ AssertFunction(x1);
4463
4464 // Make x2 point to the JavaScript frame.
4465 __ Mov(x2, fp);
4466 if (skip_stub_frame()) {
4467 // For Ignition we need to skip the handler/stub frame to reach the
4468 // JavaScript frame for the function.
4469 __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
4470 }
4471 if (FLAG_debug_code) {
4472 Label ok;
4473 __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kFunctionOffset));
4474 __ Cmp(x3, x1);
4475 __ B(eq, &ok);
4476 __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
4477 __ Bind(&ok);
4478 }
4479
4480 // Check if we have an arguments adaptor frame below the function frame.
4481 Label arguments_adaptor, arguments_done;
4482 __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
4483 __ Ldr(x4, MemOperand(x3, CommonFrameConstants::kContextOrFrameTypeOffset));
4484 __ Cmp(x4, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
4485 __ B(eq, &arguments_adaptor);
4486 {
4487 __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
4488 __ Ldrsw(x0, FieldMemOperand(
4489 x4, SharedFunctionInfo::kFormalParameterCountOffset));
4490 __ Add(x2, x2, Operand(x0, LSL, kPointerSizeLog2));
4491 __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
4492 }
4493 __ B(&arguments_done);
4494 __ Bind(&arguments_adaptor);
4495 {
4496 __ Ldrsw(x0, UntagSmiMemOperand(
4497 x3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4498 __ Add(x2, x3, Operand(x0, LSL, kPointerSizeLog2));
4499 __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
4500 }
4501 __ Bind(&arguments_done);
4502
4503 // ----------- S t a t e -------------
4504 // -- cp : context
4505 // -- x0 : number of rest parameters
4506 // -- x1 : function
4507 // -- x2 : pointer to first rest parameters
4508 // -- lr : return address
4509 // -----------------------------------
4510
4511 // Allocate space for the strict arguments object plus the backing store.
4512 Label allocate, done_allocate;
4513 __ Mov(x6, JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize);
4514 __ Add(x6, x6, Operand(x0, LSL, kPointerSizeLog2));
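// x6 now holds the total allocation size in bytes: the fixed-size
// JSStrictArgumentsObject, a FixedArray header, and one pointer per rest
// parameter (x0).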
4515 __ Allocate(x6, x3, x4, x5, &allocate, NO_ALLOCATION_FLAGS);
4516 __ Bind(&done_allocate);
4517
4518 // Compute arguments.length in x6.
4519 __ SmiTag(x6, x0);
4520
4521 // Setup the elements array in x3.
4522 __ LoadRoot(x1, Heap::kFixedArrayMapRootIndex);
4523 __ Str(x1, FieldMemOperand(x3, FixedArray::kMapOffset));
4524 __ Str(x6, FieldMemOperand(x3, FixedArray::kLengthOffset));
4525 __ Add(x4, x3, FixedArray::kHeaderSize);
4526 {
4527 Label loop, done_loop;
4528 __ Add(x0, x4, Operand(x0, LSL, kPointerSizeLog2));
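// x0 now marks the end of the elements area. The loop walks x2 down the
// stack and x4 up through the FixedArray, copying one argument per
// iteration until x4 reaches x0.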
4529 __ Bind(&loop);
4530 __ Cmp(x4, x0);
4531 __ B(eq, &done_loop);
4532 __ Ldr(x5, MemOperand(x2, 0 * kPointerSize));
4533 __ Str(x5, FieldMemOperand(x4, 0 * kPointerSize));
4534 __ Sub(x2, x2, Operand(1 * kPointerSize));
4535 __ Add(x4, x4, Operand(1 * kPointerSize));
4536 __ B(&loop);
4537 __ Bind(&done_loop);
4538 }
4539
4540 // Setup the strict arguments object in x0.
4541 __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, x1);
4542 __ Str(x1, FieldMemOperand(x0, JSStrictArgumentsObject::kMapOffset));
4543 __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
4544 __ Str(x1, FieldMemOperand(x0, JSStrictArgumentsObject::kPropertiesOffset));
4545 __ Str(x3, FieldMemOperand(x0, JSStrictArgumentsObject::kElementsOffset));
4546 __ Str(x6, FieldMemOperand(x0, JSStrictArgumentsObject::kLengthOffset));
4547 STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
4548 __ Ret();
4549
4550 // Fall back to %AllocateInNewSpace (if not too big).
4551 Label too_big_for_new_space;
4552 __ Bind(&allocate);
4553 __ Cmp(x6, Operand(kMaxRegularHeapObjectSize));
4554 __ B(gt, &too_big_for_new_space);
4555 {
4556 FrameScope scope(masm, StackFrame::INTERNAL);
4557 __ SmiTag(x0);
4558 __ SmiTag(x6);
4559 __ Push(x0, x2, x6);
4560 __ CallRuntime(Runtime::kAllocateInNewSpace);
4561 __ Mov(x3, x0);
4562 __ Pop(x2, x0);
4563 __ SmiUntag(x0);
4564 }
4565 __ B(&done_allocate);
4566
4567 // Fall back to %NewStrictArguments.
4568 __ Bind(&too_big_for_new_space);
4569 __ Push(x1);
4570 __ TailCallRuntime(Runtime::kNewStrictArguments);
4571 }
4572
4573
4574 // The number of registers that CallApiFunctionAndReturn will need to save on
4575 // the stack. The space for these registers needs to be allocated in the
4576 // ExitFrame before calling CallApiFunctionAndReturn.
4577 static const int kCallApiFunctionSpillSpace = 4;
4578
4579
4580 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
4581 return static_cast<int>(ref0.address() - ref1.address());
4582 }
4583
4584
4585 // Calls an API function. Allocates HandleScope, extracts returned value
4586 // from handle and propagates exceptions.
4587 // 'stack_space' is the space to be unwound on exit (includes the call JS
4588 // arguments space and the additional space allocated for the fast call).
4589 // 'spill_offset' is the offset from the stack pointer where
4590 // CallApiFunctionAndReturn can spill registers.
4591 static void CallApiFunctionAndReturn(
4592 MacroAssembler* masm, Register function_address,
4593 ExternalReference thunk_ref, int stack_space,
4594 MemOperand* stack_space_operand, int spill_offset,
4595 MemOperand return_value_operand, MemOperand* context_restore_operand) {
4596 ASM_LOCATION("CallApiFunctionAndReturn");
4597 Isolate* isolate = masm->isolate();
4598 ExternalReference next_address =
4599 ExternalReference::handle_scope_next_address(isolate);
4600 const int kNextOffset = 0;
4601 const int kLimitOffset = AddressOffset(
4602 ExternalReference::handle_scope_limit_address(isolate), next_address);
4603 const int kLevelOffset = AddressOffset(
4604 ExternalReference::handle_scope_level_address(isolate), next_address);
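// kLimitOffset and kLevelOffset are byte offsets of the HandleScope limit
// and level fields relative to the next field, so all three can be addressed
// from the single base register loaded below.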
4605
4606 DCHECK(function_address.is(x1) || function_address.is(x2));
4607
4608 Label profiler_disabled;
4609 Label end_profiler_check;
4610 __ Mov(x10, ExternalReference::is_profiling_address(isolate));
4611 __ Ldrb(w10, MemOperand(x10));
4612 __ Cbz(w10, &profiler_disabled);
4613 __ Mov(x3, thunk_ref);
4614 __ B(&end_profiler_check);
4615
4616 __ Bind(&profiler_disabled);
4617 __ Mov(x3, function_address);
4618 __ Bind(&end_profiler_check);
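// x3 now holds the target to call: the profiling thunk when the profiler is
// enabled, otherwise the API function address itself.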
4619
4620 // Save the callee-save registers we are going to use.
4621 // TODO(all): Is this necessary? ARM doesn't do it.
4622 STATIC_ASSERT(kCallApiFunctionSpillSpace == 4);
4623 __ Poke(x19, (spill_offset + 0) * kXRegSize);
4624 __ Poke(x20, (spill_offset + 1) * kXRegSize);
4625 __ Poke(x21, (spill_offset + 2) * kXRegSize);
4626 __ Poke(x22, (spill_offset + 3) * kXRegSize);
4627
4628 // Allocate HandleScope in callee-save registers.
4629 // We will need to restore the HandleScope after the call to the API function;
4630 // allocating it in callee-save registers means it will be preserved by C code.
4631 Register handle_scope_base = x22;
4632 Register next_address_reg = x19;
4633 Register limit_reg = x20;
4634 Register level_reg = w21;
4635
4636 __ Mov(handle_scope_base, next_address);
4637 __ Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
4638 __ Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
4639 __ Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset));
4640 __ Add(level_reg, level_reg, 1);
4641 __ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
4642
4643 if (FLAG_log_timer_events) {
4644 FrameScope frame(masm, StackFrame::MANUAL);
4645 __ PushSafepointRegisters();
4646 __ Mov(x0, ExternalReference::isolate_address(isolate));
4647 __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
4648 1);
4649 __ PopSafepointRegisters();
4650 }
4651
4652 // Native call returns to the DirectCEntry stub which redirects to the
4653 // return address pushed on stack (could have moved after GC).
4654 // DirectCEntry stub itself is generated early and never moves.
4655 DirectCEntryStub stub(isolate);
4656 stub.GenerateCall(masm, x3);
4657
4658 if (FLAG_log_timer_events) {
4659 FrameScope frame(masm, StackFrame::MANUAL);
4660 __ PushSafepointRegisters();
4661 __ Mov(x0, ExternalReference::isolate_address(isolate));
4662 __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
4663 1);
4664 __ PopSafepointRegisters();
4665 }
4666
4667 Label promote_scheduled_exception;
4668 Label delete_allocated_handles;
4669 Label leave_exit_frame;
4670 Label return_value_loaded;
4671
4672 // Load value from ReturnValue.
4673 __ Ldr(x0, return_value_operand);
4674 __ Bind(&return_value_loaded);
4675 // No more valid handles (the result handle was the last one). Restore
4676 // previous handle scope.
4677 __ Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
4678 if (__ emit_debug_code()) {
4679 __ Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
4680 __ Cmp(w1, level_reg);
4681 __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
4682 }
4683 __ Sub(level_reg, level_reg, 1);
4684 __ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
4685 __ Ldr(x1, MemOperand(handle_scope_base, kLimitOffset));
4686 __ Cmp(limit_reg, x1);
4687 __ B(ne, &delete_allocated_handles);
4688
4689 // Leave the API exit frame.
4690 __ Bind(&leave_exit_frame);
4691 // Restore callee-saved registers.
4692 __ Peek(x19, (spill_offset + 0) * kXRegSize);
4693 __ Peek(x20, (spill_offset + 1) * kXRegSize);
4694 __ Peek(x21, (spill_offset + 2) * kXRegSize);
4695 __ Peek(x22, (spill_offset + 3) * kXRegSize);
4696
4697 bool restore_context = context_restore_operand != NULL;
4698 if (restore_context) {
4699 __ Ldr(cp, *context_restore_operand);
4700 }
4701
4702 if (stack_space_operand != NULL) {
4703 __ Ldr(w2, *stack_space_operand);
4704 }
4705
4706 __ LeaveExitFrame(false, x1, !restore_context);
4707
4708 // Check if the function scheduled an exception.
4709 __ Mov(x5, ExternalReference::scheduled_exception_address(isolate));
4710 __ Ldr(x5, MemOperand(x5));
4711 __ JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex,
4712 &promote_scheduled_exception);
4713
4714 if (stack_space_operand != NULL) {
4715 __ Drop(x2, 1);
4716 } else {
4717 __ Drop(stack_space);
4718 }
4719 __ Ret();
4720
4721 // Re-throw by promoting a scheduled exception.
4722 __ Bind(&promote_scheduled_exception);
4723 __ TailCallRuntime(Runtime::kPromoteScheduledException);
4724
4725 // HandleScope limit has changed. Delete allocated extensions.
4726 __ Bind(&delete_allocated_handles);
4727 __ Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
4728 // Save the return value in a callee-save register.
4729 Register saved_result = x19;
4730 __ Mov(saved_result, x0);
4731 __ Mov(x0, ExternalReference::isolate_address(isolate));
4732 __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
4733 1);
4734 __ Mov(x0, saved_result);
4735 __ B(&leave_exit_frame);
4736 }
4737
4738 void CallApiCallbackStub::Generate(MacroAssembler* masm) {
4739 // ----------- S t a t e -------------
4740 // -- x0 : callee
4741 // -- x4 : call_data
4742 // -- x2 : holder
4743 // -- x1 : api_function_address
4744 // -- cp : context
4745 // --
4746 // -- sp[0] : last argument
4747 // -- ...
4748 // -- sp[(argc - 1) * 8] : first argument
4749 // -- sp[argc * 8] : receiver
4750 // -----------------------------------
4751
4752 Register callee = x0;
4753 Register call_data = x4;
4754 Register holder = x2;
4755 Register api_function_address = x1;
4756 Register context = cp;
4757
4758 typedef FunctionCallbackArguments FCA;
4759
4760 STATIC_ASSERT(FCA::kContextSaveIndex == 6);
4761 STATIC_ASSERT(FCA::kCalleeIndex == 5);
4762 STATIC_ASSERT(FCA::kDataIndex == 4);
4763 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
4764 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
4765 STATIC_ASSERT(FCA::kIsolateIndex == 1);
4766 STATIC_ASSERT(FCA::kHolderIndex == 0);
4767 STATIC_ASSERT(FCA::kNewTargetIndex == 7);
4768 STATIC_ASSERT(FCA::kArgsLength == 8);
4769
4770 // FunctionCallbackArguments
4771
4772 // new target
4773 __ PushRoot(Heap::kUndefinedValueRootIndex);
4774
4775 // context, callee and call data.
4776 __ Push(context, callee, call_data);
4777
4778 if (!is_lazy()) {
4779 // Load context from callee
4780 __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
4781 }
4782
4783 if (!call_data_undefined()) {
4784 __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
4785 }
4786 Register isolate_reg = x5;
4787 __ Mov(isolate_reg, ExternalReference::isolate_address(masm->isolate()));
4788
4789 // FunctionCallbackArguments:
4790 // return value, return value default, isolate, holder.
4791 __ Push(call_data, call_data, isolate_reg, holder);
4792
4793 // Prepare arguments.
4794 Register args = x6;
4795 __ Mov(args, masm->StackPointer());
4796
4797 // Allocate the v8::Arguments structure in the arguments' space, since it's
4798 // not controlled by GC.
4799 const int kApiStackSpace = 3;
4800
4801 // Allocate space so that CallApiFunctionAndReturn can store some scratch
4802 // registers on the stack.
4803 const int kCallApiFunctionSpillSpace = 4;
4804
4805 FrameScope frame_scope(masm, StackFrame::MANUAL);
4806 __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
4807
4808 DCHECK(!AreAliased(x0, api_function_address));
4809 // x0 = FunctionCallbackInfo&
4810 // The arguments structure is located after the return address.
4811 __ Add(x0, masm->StackPointer(), 1 * kPointerSize);
4812 // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
4813 __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
4814 __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
4815 // FunctionCallbackInfo::length_ = argc
4816 __ Mov(x10, argc());
4817 __ Str(x10, MemOperand(x0, 2 * kPointerSize));
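// Layout written above (pointer-sized slots): [x0 + 0] = implicit_args_,
// [x0 + kPointerSize] = values_ (address of the first argument),
// [x0 + 2 * kPointerSize] = length_ (argc).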
4818
4819 ExternalReference thunk_ref =
4820 ExternalReference::invoke_function_callback(masm->isolate());
4821
4822 AllowExternalCallThatCantCauseGC scope(masm);
4823 MemOperand context_restore_operand(
4824 fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
4825 // Stores return the first JS argument.
4826 int return_value_offset = 0;
4827 if (is_store()) {
4828 return_value_offset = 2 + FCA::kArgsLength;
4829 } else {
4830 return_value_offset = 2 + FCA::kReturnValueOffset;
4831 }
4832 MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
4833 int stack_space = 0;
4834 MemOperand length_operand =
4835 MemOperand(masm->StackPointer(), 3 * kPointerSize);
4836 MemOperand* stack_space_operand = &length_operand;
4837 stack_space = argc() + FCA::kArgsLength + 1;
4838 stack_space_operand = NULL;
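// argc() is known when this stub is generated, so a constant stack_space is
// passed and the dynamic length slot is not used (stack_space_operand is
// reset to NULL).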
4839
4840 const int spill_offset = 1 + kApiStackSpace;
4841 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
4842 stack_space_operand, spill_offset,
4843 return_value_operand, &context_restore_operand);
4844 }
4845
4846
4847 void CallApiGetterStub::Generate(MacroAssembler* masm) {
4848 // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
4849 // name below the exit frame to make GC aware of them.
4850 STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
4851 STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
4852 STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
4853 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
4854 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
4855 STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
4856 STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
4857 STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
4858
4859 Register receiver = ApiGetterDescriptor::ReceiverRegister();
4860 Register holder = ApiGetterDescriptor::HolderRegister();
4861 Register callback = ApiGetterDescriptor::CallbackRegister();
4862 Register scratch = x4;
4863 Register scratch2 = x5;
4864 Register scratch3 = x6;
4865 DCHECK(!AreAliased(receiver, holder, callback, scratch));
4866
4867 __ Push(receiver);
4868
4869 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4870 __ Mov(scratch2, Operand(ExternalReference::isolate_address(isolate())));
4871 __ Ldr(scratch3, FieldMemOperand(callback, AccessorInfo::kDataOffset));
4872 __ Push(scratch3, scratch, scratch, scratch2, holder);
4873 __ Push(Smi::kZero); // should_throw_on_error -> false
4874 __ Ldr(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
4875 __ Push(scratch);
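// The pushes above lay out PropertyCallbackArguments from kThisIndex (the
// receiver, at the highest address) down to kShouldThrowOnErrorIndex, with
// the property name handle pushed last, just below the args_ array.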
4876
4877 // v8::PropertyCallbackInfo::args_ array and name handle.
4878 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
4879
4880 // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
4881 __ Mov(x0, masm->StackPointer()); // x0 = Handle<Name>
4882 __ Add(x1, x0, 1 * kPointerSize); // x1 = v8::PCI::args_
4883
4884 const int kApiStackSpace = 1;
4885
4886 // Allocate space so that CallApiFunctionAndReturn can store some scratch
4887 // registers on the stack.
4888 const int kCallApiFunctionSpillSpace = 4;
4889
4890 FrameScope frame_scope(masm, StackFrame::MANUAL);
4891 __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
4892
4893 // Create a v8::PropertyCallbackInfo object on the stack and initialize
4894 // its args_ field.
4895 __ Poke(x1, 1 * kPointerSize);
4896 __ Add(x1, masm->StackPointer(), 1 * kPointerSize);
4897 // x1 = v8::PropertyCallbackInfo&
4898
4899 ExternalReference thunk_ref =
4900 ExternalReference::invoke_accessor_getter_callback(isolate());
4901
4902 Register api_function_address = x2;
4903 __ Ldr(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
4904 __ Ldr(api_function_address,
4905 FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
4906
4907 const int spill_offset = 1 + kApiStackSpace;
4908 // +3 is to skip prolog, return address and name handle.
4909 MemOperand return_value_operand(
4910 fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
4911 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
4912 kStackUnwindSpace, NULL, spill_offset,
4913 return_value_operand, NULL);
4914 }
4915
4916 #undef __
4917
4918 } // namespace internal
4919 } // namespace v8
4920
4921 #endif // V8_TARGET_ARCH_ARM64
4922