// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_PPC

#include "src/code-stubs.h"
#include "src/api-arguments.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/ppc/code-stubs-ppc.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
  __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
  __ StorePX(r4, MemOperand(sp, r0));
  __ push(r4);
  __ push(r5);
  __ addi(r3, r3, Operand(3));
  __ TailCallRuntime(Runtime::kNewArray);
}

void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
  descriptor->Initialize(r3, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}

void FastFunctionBindStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
  descriptor->Initialize(r3, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}

static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
                                    Register rhs, Label* lhs_not_nan,
                                    Label* slow, bool strict);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
                                           Register rhs);


void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                               ExternalReference miss) {
  // Update the static counter each time a new code stub is generated.
  isolate()->counters()->code_stubs()->Increment();

  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
  int param_count = descriptor.GetRegisterParameterCount();
  {
    // Call the runtime system in a fresh internal frame.
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    DCHECK(param_count == 0 ||
           r3.is(descriptor.GetRegisterParameter(param_count - 1)));
    // Push arguments
    for (int i = 0; i < param_count; ++i) {
      __ push(descriptor.GetRegisterParameter(i));
    }
    __ CallExternalReference(miss, param_count);
  }

  __ Ret();
}


void DoubleToIStub::Generate(MacroAssembler* masm) {
  Label out_of_range, only_low, negate, done, fastpath_done;
  Register input_reg = source();
  Register result_reg = destination();
  DCHECK(is_truncating());

  int double_offset = offset();

  // Immediate values for this stub fit in instructions, so it's safe to use ip.
  Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
  Register scratch_low =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
  Register scratch_high =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
  DoubleRegister double_scratch = kScratchDoubleReg;

  __ push(scratch);
  // Account for saved regs if input is sp.
  if (input_reg.is(sp)) double_offset += kPointerSize;
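  // Illustrative (not executed): if the caller addressed the double as, say,
  // MemOperand(sp, 8), the push above moved sp down by kPointerSize, so the
  // same stack slot is now at offset 8 + kPointerSize.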

  if (!skip_fastpath()) {
    // Load double input.
    __ lfd(double_scratch, MemOperand(input_reg, double_offset));

    // Do fast-path convert from double to int.
    __ ConvertDoubleToInt64(double_scratch,
#if !V8_TARGET_ARCH_PPC64
                            scratch,
#endif
                            result_reg, d0);

    // Test for overflow
#if V8_TARGET_ARCH_PPC64
    __ TestIfInt32(result_reg, r0);
#else
    __ TestIfInt32(scratch, result_reg, r0);
#endif
    __ beq(&fastpath_done);
  }

  __ Push(scratch_high, scratch_low);
  // Account for saved regs if input is sp.
  if (input_reg.is(sp)) double_offset += 2 * kPointerSize;

  __ lwz(scratch_high,
         MemOperand(input_reg, double_offset + Register::kExponentOffset));
  __ lwz(scratch_low,
         MemOperand(input_reg, double_offset + Register::kMantissaOffset));

  __ ExtractBitMask(scratch, scratch_high, HeapNumber::kExponentMask);
  // Load scratch with exponent - 1. This is faster than loading
  // with exponent because Bias + 1 = 1024 which is a *PPC* immediate value.
  STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
  __ subi(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
  // If exponent is greater than or equal to 84, the 32 less significant
  // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
  // the result is 0.
  // Compare exponent with 84 (compare exponent - 1 with 83).
  __ cmpi(scratch, Operand(83));
  __ bge(&out_of_range);
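  // Worked example (illustrative): a double encodes
  //   value = mantissa(53 bits) * 2^(exponent - 52),
  // so for exponent >= 84 the value is a multiple of 2^(84 - 52) = 2^32;
  // its low 32 bits are all zero and the truncated 32-bit result is 0.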

  // If we reach this code, 31 <= exponent <= 83.
  // So, we don't have to handle cases where 0 <= exponent <= 20 for
  // which we would need to shift right the high part of the mantissa.
  // Scratch contains exponent - 1.
  // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
  __ subfic(scratch, scratch, Operand(51));
  __ cmpi(scratch, Operand::Zero());
  __ ble(&only_low);
  // 21 <= exponent <= 51, shift scratch_low and scratch_high
  // to generate the result.
  __ srw(scratch_low, scratch_low, scratch);
  // Scratch contains: 52 - exponent.
  // We need: exponent - 20.
  // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
  __ subfic(scratch, scratch, Operand(32));
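  // Quick check of the arithmetic (illustrative): for exponent = 40, scratch
  // held 52 - 40 = 12 above, and 32 - 12 = 20 = 40 - 20, the left-shift
  // amount applied to the high mantissa bits below.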
  __ ExtractBitMask(result_reg, scratch_high, HeapNumber::kMantissaMask);
  // Set the implicit 1 before the mantissa part in scratch_high.
  STATIC_ASSERT(HeapNumber::kMantissaBitsInTopWord >= 16);
  __ oris(result_reg, result_reg,
          Operand(1 << ((HeapNumber::kMantissaBitsInTopWord) - 16)));
  __ slw(r0, result_reg, scratch);
  __ orx(result_reg, scratch_low, r0);
  __ b(&negate);

  __ bind(&out_of_range);
  __ mov(result_reg, Operand::Zero());
  __ b(&done);

  __ bind(&only_low);
  // 52 <= exponent <= 83, shift only scratch_low.
  // On entry, scratch contains: 52 - exponent.
  __ neg(scratch, scratch);
  __ slw(result_reg, scratch_low, scratch);

  __ bind(&negate);
  // If input was positive, scratch_high ASR 31 equals 0 and
  // scratch_high LSR 31 equals 0.
  // New result = (result eor 0) + 0 = result.
  // If the input was negative, we have to negate the result.
  // scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
  // New result = (result eor 0xffffffff) + 1 = 0 - result.
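  // Worked example (illustrative): for a negative input the sign word is
  // 0xffffffff, and (result ^ 0xffffffff) + 1 == ~result + 1 == -result,
  // which is exactly two's-complement negation; for a positive input both
  // terms are zero and result is unchanged.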
  __ srawi(r0, scratch_high, 31);
#if V8_TARGET_ARCH_PPC64
  __ srdi(r0, r0, Operand(32));
#endif
  __ xor_(result_reg, result_reg, r0);
  __ srwi(r0, scratch_high, Operand(31));
  __ add(result_reg, result_reg, r0);

  __ bind(&done);
  __ Pop(scratch_high, scratch_low);

  __ bind(&fastpath_done);
  __ pop(scratch);

  __ Ret();
}


// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
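// For instance, in JS `NaN === NaN` is false, so two pointers to the very
// same HeapNumber holding NaN must still compare as not-equal below.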
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cond) {
  Label not_identical;
  Label heap_number, return_equal;
  __ cmp(r3, r4);
  __ bne(&not_identical);

  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
  // so we do the second best thing - test it ourselves.
  // The operands are identical, and since they are not both Smis, neither of
  // them is a Smi. If it's not a heap number, then return equal.
  if (cond == lt || cond == gt) {
    // Call runtime on identical JSObjects.
    __ CompareObjectType(r3, r7, r7, FIRST_JS_RECEIVER_TYPE);
    __ bge(slow);
    // Call runtime on identical symbols since we need to throw a TypeError.
    __ cmpi(r7, Operand(SYMBOL_TYPE));
    __ beq(slow);
    // Call runtime on identical SIMD values since we must throw a TypeError.
    __ cmpi(r7, Operand(SIMD128_VALUE_TYPE));
    __ beq(slow);
  } else {
    __ CompareObjectType(r3, r7, r7, HEAP_NUMBER_TYPE);
    __ beq(&heap_number);
    // Comparing JS objects with <=, >= is complicated.
    if (cond != eq) {
      __ cmpi(r7, Operand(FIRST_JS_RECEIVER_TYPE));
      __ bge(slow);
      // Call runtime on identical symbols since we need to throw a TypeError.
      __ cmpi(r7, Operand(SYMBOL_TYPE));
      __ beq(slow);
      // Call runtime on identical SIMD values since we must throw a TypeError.
      __ cmpi(r7, Operand(SIMD128_VALUE_TYPE));
      __ beq(slow);
      // Normally here we fall through to return_equal, but undefined is
      // special: (undefined == undefined) == true, but
      // (undefined <= undefined) == false! See ECMAScript 11.8.5.
      if (cond == le || cond == ge) {
        __ cmpi(r7, Operand(ODDBALL_TYPE));
        __ bne(&return_equal);
        __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
        __ cmp(r3, r5);
        __ bne(&return_equal);
        if (cond == le) {
          // undefined <= undefined should fail.
          __ li(r3, Operand(GREATER));
        } else {
          // undefined >= undefined should fail.
          __ li(r3, Operand(LESS));
        }
        __ Ret();
      }
    }
  }

  __ bind(&return_equal);
  if (cond == lt) {
    __ li(r3, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cond == gt) {
    __ li(r3, Operand(LESS));  // Things aren't greater than themselves.
  } else {
    __ li(r3, Operand(EQUAL));  // Things are <=, >=, ==, === themselves.
  }
  __ Ret();

  // For less and greater we don't have to check for NaN since the result of
  // x < x is false regardless. For the others here is some code to check
  // for NaN.
  if (cond != lt && cond != gt) {
    __ bind(&heap_number);
    // It is a heap number, so return non-equal if it's NaN and equal if it's
    // not NaN.

    // The representation of NaN values has all exponent bits (52..62) set,
    // and not all mantissa bits (0..51) clear.
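    // Illustrative bit patterns: +Infinity is 0x7FF0000000000000 (exponent
    // all ones, mantissa zero), while e.g. 0x7FF8000000000000 is a quiet NaN
    // (exponent all ones, mantissa non-zero).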
    // Read top bits of double representation (second word of value).
    __ lwz(r5, FieldMemOperand(r3, HeapNumber::kExponentOffset));
    // Test that exponent bits are all set.
    STATIC_ASSERT(HeapNumber::kExponentMask == 0x7ff00000u);
    __ ExtractBitMask(r6, r5, HeapNumber::kExponentMask);
    __ cmpli(r6, Operand(0x7ff));
    __ bne(&return_equal);

    // Shift out flag and all exponent bits, retaining only mantissa.
    __ slwi(r5, r5, Operand(HeapNumber::kNonMantissaBitsInTopWord));
    // Or with all low-bits of mantissa.
    __ lwz(r6, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
    __ orx(r3, r6, r5);
    __ cmpi(r3, Operand::Zero());
    // For equal we already have the right value in r3: Return zero (equal)
    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
    // not (it's a NaN). For <= and >= we need to load r3 with the failing
    // value if it's a NaN.
    if (cond != eq) {
      if (CpuFeatures::IsSupported(ISELECT)) {
        __ li(r4, Operand((cond == le) ? GREATER : LESS));
        __ isel(eq, r3, r3, r4);
      } else {
        // All-zero means Infinity means equal.
        __ Ret(eq);
        if (cond == le) {
          __ li(r3, Operand(GREATER));  // NaN <= NaN should fail.
        } else {
          __ li(r3, Operand(LESS));  // NaN >= NaN should fail.
        }
      }
    }
    __ Ret();
  }
  // No fall through here.

  __ bind(&not_identical);
}


// See comment at call site.
static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
                                    Register rhs, Label* lhs_not_nan,
                                    Label* slow, bool strict) {
  DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));

  Label rhs_is_smi;
  __ JumpIfSmi(rhs, &rhs_is_smi);

  // Lhs is a Smi. Check whether the rhs is a heap number.
  __ CompareObjectType(rhs, r6, r7, HEAP_NUMBER_TYPE);
  if (strict) {
    // If rhs is not a number and lhs is a Smi then strict equality cannot
    // succeed. Return non-equal.
    // If rhs is r3 then there is already a non-zero value in it.
    if (!rhs.is(r3)) {
      Label skip;
      __ beq(&skip);
      __ mov(r3, Operand(NOT_EQUAL));
      __ Ret();
      __ bind(&skip);
    } else {
      __ Ret(ne);
    }
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ bne(slow);
  }

  // Lhs is a smi, rhs is a number.
  // Convert lhs to a double in d7.
  __ SmiToDouble(d7, lhs);
  // Load the double from rhs, tagged HeapNumber r3, to d6.
  __ lfd(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));

  // We now have both loaded as doubles but we can skip the lhs nan check
  // since it's a smi.
  __ b(lhs_not_nan);

  __ bind(&rhs_is_smi);
  // Rhs is a smi. Check whether the non-smi lhs is a heap number.
  __ CompareObjectType(lhs, r7, r7, HEAP_NUMBER_TYPE);
  if (strict) {
    // If lhs is not a number and rhs is a smi then strict equality cannot
    // succeed. Return non-equal.
    // If lhs is r3 then there is already a non-zero value in it.
    if (!lhs.is(r3)) {
      Label skip;
      __ beq(&skip);
      __ mov(r3, Operand(NOT_EQUAL));
      __ Ret();
      __ bind(&skip);
    } else {
      __ Ret(ne);
    }
  } else {
    // Smi compared non-strictly with a non-smi non-heap-number. Call
    // the runtime.
    __ bne(slow);
  }

  // Rhs is a smi, lhs is a heap number.
  // Load the double from lhs, tagged HeapNumber r4, to d7.
  __ lfd(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  // Convert rhs to a double in d6.
  __ SmiToDouble(d6, rhs);
  // Fall through to both_loaded_as_doubles.
}


// See comment at call site.
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
                                           Register rhs) {
  DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));

  // If either operand is a JS object or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
  Label first_non_object;
  // Get the type of the first operand into r5 and compare it with
  // FIRST_JS_RECEIVER_TYPE.
  __ CompareObjectType(rhs, r5, r5, FIRST_JS_RECEIVER_TYPE);
  __ blt(&first_non_object);

  // Return non-zero (r3 is not zero)
  Label return_not_equal;
  __ bind(&return_not_equal);
  __ Ret();

  __ bind(&first_non_object);
  // Check for oddballs: true, false, null, undefined.
  __ cmpi(r5, Operand(ODDBALL_TYPE));
  __ beq(&return_not_equal);

  __ CompareObjectType(lhs, r6, r6, FIRST_JS_RECEIVER_TYPE);
  __ bge(&return_not_equal);

  // Check for oddballs: true, false, null, undefined.
  __ cmpi(r6, Operand(ODDBALL_TYPE));
  __ beq(&return_not_equal);

  // Now that we have the types we might as well check for
  // internalized-internalized.
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ orx(r5, r5, r6);
  __ andi(r0, r5, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  __ beq(&return_not_equal, cr0);
}


// See comment at call site.
static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers, Label* slow) {
  DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));

  __ CompareObjectType(rhs, r6, r5, HEAP_NUMBER_TYPE);
  __ bne(not_heap_numbers);
  __ LoadP(r5, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ cmp(r5, r6);
  __ bne(slow);  // First was a heap number, second wasn't. Go slow case.

  // Both are heap numbers. Load them up then jump to the code we have
  // for that.
  __ lfd(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  __ lfd(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));

  __ b(both_loaded_as_doubles);
}

// Fast negative check for internalized-to-internalized equality or receiver
// equality. Also handles the undetectable receiver to null/undefined
// comparison.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                     Register lhs, Register rhs,
                                                     Label* possible_strings,
                                                     Label* runtime_call) {
  DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));

  // r5 is object type of rhs.
  Label object_test, return_equal, return_unequal, undetectable;
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ andi(r0, r5, Operand(kIsNotStringMask));
  __ bne(&object_test, cr0);
  __ andi(r0, r5, Operand(kIsNotInternalizedMask));
  __ bne(possible_strings, cr0);
  __ CompareObjectType(lhs, r6, r6, FIRST_NONSTRING_TYPE);
  __ bge(runtime_call);
  __ andi(r0, r6, Operand(kIsNotInternalizedMask));
  __ bne(possible_strings, cr0);

  // Both are internalized. We already checked they weren't the same pointer so
  // they are not equal. Return non-equal by returning the non-zero object
  // pointer in r3.
  __ Ret();

  __ bind(&object_test);
  __ LoadP(r5, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ LoadP(r6, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ lbz(r7, FieldMemOperand(r5, Map::kBitFieldOffset));
  __ lbz(r8, FieldMemOperand(r6, Map::kBitFieldOffset));
  __ andi(r0, r7, Operand(1 << Map::kIsUndetectable));
  __ bne(&undetectable, cr0);
  __ andi(r0, r8, Operand(1 << Map::kIsUndetectable));
  __ bne(&return_unequal, cr0);

  __ CompareInstanceType(r5, r5, FIRST_JS_RECEIVER_TYPE);
  __ blt(runtime_call);
  __ CompareInstanceType(r6, r6, FIRST_JS_RECEIVER_TYPE);
  __ blt(runtime_call);

  __ bind(&return_unequal);
  // Return non-equal by returning the non-zero object pointer in r3.
  __ Ret();

  __ bind(&undetectable);
  __ andi(r0, r8, Operand(1 << Map::kIsUndetectable));
  __ beq(&return_unequal, cr0);

  // If both sides are JSReceivers, then the result is false according to
  // the HTML specification, which says that only comparisons with null or
  // undefined are affected by special casing for document.all.
  __ CompareInstanceType(r5, r5, ODDBALL_TYPE);
  __ beq(&return_equal);
  __ CompareInstanceType(r6, r6, ODDBALL_TYPE);
  __ bne(&return_unequal);

  __ bind(&return_equal);
  __ li(r3, Operand(EQUAL));
  __ Ret();
}


static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
                                         Register scratch,
                                         CompareICState::State expected,
                                         Label* fail) {
  Label ok;
  if (expected == CompareICState::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareICState::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
                DONT_DO_SMI_CHECK);
  }
  // We could be strict about internalized/non-internalized here, but as long as
  // hydrogen doesn't care, the stub doesn't have to care either.
  __ bind(&ok);
}


// On entry r4 and r5 are the values to be compared.
// On exit r3 is 0, positive or negative to indicate the result of
// the comparison.
void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
  Register lhs = r4;
  Register rhs = r3;
  Condition cc = GetCondition();

  Label miss;
  CompareICStub_CheckInputType(masm, lhs, r5, left(), &miss);
  CompareICStub_CheckInputType(masm, rhs, r6, right(), &miss);

  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles, lhs_not_nan;

  Label not_two_smis, smi_done;
  __ orx(r5, r4, r3);
  __ JumpIfNotSmi(r5, &not_two_smis);
  __ SmiUntag(r4);
  __ SmiUntag(r3);
  __ sub(r3, r4, r3);
  __ Ret();
  __ bind(&not_two_smis);

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical. Either returns the answer
  // or goes to slow. Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, &slow, cc);

  // If either is a Smi (we know that not both are), then they can only
  // be strictly equal if the other is a HeapNumber.
  STATIC_ASSERT(kSmiTag == 0);
  DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
  __ and_(r5, lhs, rhs);
  __ JumpIfNotSmi(r5, &not_smis);
  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
  // 2) Go to slow.
  // 3) Fall through to both_loaded_as_doubles.
  // 4) Jump to lhs_not_nan.
  // In cases 3 and 4 we have found out we were dealing with a number-number
  // comparison. The double values of the numbers have been loaded
  // into d7 and d6.
  EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());

  __ bind(&both_loaded_as_doubles);
  // The arguments have been converted to doubles and stored in d6 and d7.
  __ bind(&lhs_not_nan);
  Label no_nan;
  __ fcmpu(d7, d6);

  Label nan, equal, less_than;
  __ bunordered(&nan);
  if (CpuFeatures::IsSupported(ISELECT)) {
    DCHECK(EQUAL == 0);
    __ li(r4, Operand(GREATER));
    __ li(r5, Operand(LESS));
    __ isel(eq, r3, r0, r4);
    __ isel(lt, r3, r5, r3);
    __ Ret();
  } else {
    __ beq(&equal);
    __ blt(&less_than);
    __ li(r3, Operand(GREATER));
    __ Ret();
    __ bind(&equal);
    __ li(r3, Operand(EQUAL));
    __ Ret();
    __ bind(&less_than);
    __ li(r3, Operand(LESS));
    __ Ret();
  }

  __ bind(&nan);
  // If one of the sides was a NaN then the comparison is unordered. Load r3
  // with whatever it takes to make the comparison fail, since comparisons
  // with NaN always fail.
  if (cc == lt || cc == le) {
    __ li(r3, Operand(GREATER));
  } else {
    __ li(r3, Operand(LESS));
  }
  __ Ret();

  __ bind(&not_smis);
  // At this point we know we are dealing with two different objects,
  // and neither of them is a Smi. The objects are in rhs_ and lhs_.
  if (strict()) {
    // This returns non-equal for some object types, or falls through if it
    // was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
  }

  Label check_for_internalized_strings;
  Label flat_string_check;
  // Check for heap-number-heap-number comparison. Can jump to slow case,
  // or load both doubles into r3, r4, r5, r6 and jump to the code that handles
  // that case. If the inputs are not doubles then jumps to
  // check_for_internalized_strings.
  // In this case r5 will contain the type of rhs_. Never falls through.
  EmitCheckForTwoHeapNumbers(masm, lhs, rhs, &both_loaded_as_doubles,
                             &check_for_internalized_strings,
                             &flat_string_check);

  __ bind(&check_for_internalized_strings);
  // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
  // internalized strings.
  if (cc == eq && !strict()) {
    // Returns an answer for two internalized strings or two detectable objects.
    // Otherwise jumps to string case or not both strings case.
    // Assumes that r5 is the type of rhs_ on entry.
    EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, &flat_string_check,
                                             &slow);
  }

  // Check for both being sequential one-byte strings,
  // and inline if that is the case.
  __ bind(&flat_string_check);

  __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, r5, r6, &slow);

  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r5,
                      r6);
  if (cc == eq) {
    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, r5, r6);
  } else {
    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, r5, r6, r7);
  }
  // Never falls through to here.

  __ bind(&slow);

  if (cc == eq) {
    {
      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
      __ Push(lhs, rhs);
      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
    }
    // Turn true into 0 and false into some non-zero value.
    STATIC_ASSERT(EQUAL == 0);
    __ LoadRoot(r4, Heap::kTrueValueRootIndex);
    __ sub(r3, r3, r4);
    __ Ret();
  } else {
    __ Push(lhs, rhs);
    int ncr;  // NaN compare result
    if (cc == lt || cc == le) {
      ncr = GREATER;
    } else {
      DCHECK(cc == gt || cc == ge);  // remaining cases
      ncr = LESS;
    }
    __ LoadSmiLiteral(r3, Smi::FromInt(ncr));
    __ push(r3);

    // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
    // tagged as a small integer.
    __ TailCallRuntime(Runtime::kCompare);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  __ mflr(r0);
  __ MultiPush(kJSCallerSaved | r0.bit());
  if (save_doubles()) {
    __ MultiPushDoubles(kCallerSavedDoubles);
  }
  const int argument_count = 1;
  const int fp_argument_count = 0;
  const Register scratch = r4;

  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
  __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
  __ CallCFunction(ExternalReference::store_buffer_overflow_function(isolate()),
                   argument_count);
  if (save_doubles()) {
    __ MultiPopDoubles(kCallerSavedDoubles);
  }
  __ MultiPop(kJSCallerSaved | r0.bit());
  __ mtlr(r0);
  __ Ret();
}


void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ PushSafepointRegisters();
  __ blr();
}


void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ PopSafepointRegisters();
  __ blr();
}


void MathPowStub::Generate(MacroAssembler* masm) {
  const Register exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent.is(r5));
  const DoubleRegister double_base = d1;
  const DoubleRegister double_exponent = d2;
  const DoubleRegister double_result = d3;
  const DoubleRegister double_scratch = d0;
  const Register scratch = r11;
  const Register scratch2 = r10;

  Label call_runtime, done, int_exponent;
  if (exponent_type() == TAGGED) {
    // Base is already in double_base.
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ lfd(double_exponent,
           FieldMemOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type() != INTEGER) {
    // Detect integer exponents stored as double.
    __ TryDoubleToInt32Exact(scratch, double_exponent, scratch2,
                             double_scratch);
    __ beq(&int_exponent);

    __ mflr(r0);
    __ push(r0);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch);
      __ MovToFloatParameters(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()), 0, 2);
    }
    __ pop(r0);
    __ mtlr(r0);
    __ MovFromFloatResult(double_result);
    __ b(&done);
  }

  // Calculate power with integer exponent.
  __ bind(&int_exponent);

  // Get two copies of exponent in the registers scratch and exponent.
  if (exponent_type() == INTEGER) {
    __ mr(scratch, exponent);
  } else {
    // Exponent has previously been stored into scratch as untagged integer.
    __ mr(exponent, scratch);
  }
  __ fmr(double_scratch, double_base);  // Back up base.
  __ li(scratch2, Operand(1));
  __ ConvertIntToDouble(scratch2, double_result);

  // Get absolute value of exponent.
  __ cmpi(scratch, Operand::Zero());
  if (CpuFeatures::IsSupported(ISELECT)) {
    __ neg(scratch2, scratch);
    __ isel(lt, scratch, scratch2, scratch);
  } else {
    Label positive_exponent;
    __ bge(&positive_exponent);
    __ neg(scratch, scratch);
    __ bind(&positive_exponent);
  }
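  // The loop below is binary exponentiation (square-and-multiply), shown here
  // for illustration with |exponent| = 13 = 0b1101:
  //   result = base^1 * base^4 * base^8 = base^13,
  // multiplying result in whenever the current low bit of scratch is set and
  // squaring double_scratch once per iteration.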

  Label while_true, no_carry, loop_end;
  __ bind(&while_true);
  __ andi(scratch2, scratch, Operand(1));
  __ beq(&no_carry, cr0);
  __ fmul(double_result, double_result, double_scratch);
  __ bind(&no_carry);
  __ ShiftRightImm(scratch, scratch, Operand(1), SetRC);
  __ beq(&loop_end, cr0);
  __ fmul(double_scratch, double_scratch, double_scratch);
  __ b(&while_true);
  __ bind(&loop_end);

  __ cmpi(exponent, Operand::Zero());
  __ bge(&done);

  __ li(scratch2, Operand(1));
  __ ConvertIntToDouble(scratch2, double_scratch);
  __ fdiv(double_result, double_scratch, double_result);
  // Test whether result is zero. Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ fcmpu(double_result, kDoubleRegZero);
  __ bne(&done);
  // double_exponent may not contain the exponent value if the input was a
  // smi. We set it to the exponent value before bailing out.
  __ ConvertIntToDouble(exponent, double_exponent);

  // Returning or bailing out.
  __ mflr(r0);
  __ push(r0);
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ PrepareCallCFunction(0, 2, scratch);
    __ MovToFloatParameters(double_base, double_exponent);
    __ CallCFunction(
        ExternalReference::power_double_double_function(isolate()), 0, 2);
  }
  __ pop(r0);
  __ mtlr(r0);
  __ MovFromFloatResult(double_result);

  __ bind(&done);
  __ Ret();
}


bool CEntryStub::NeedsImmovableCode() { return true; }


void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  CEntryStub::GenerateAheadOfTime(isolate);
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
  CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
  CreateWeakCellStub::GenerateAheadOfTime(isolate);
  BinaryOpICStub::GenerateAheadOfTime(isolate);
  StoreRegistersStateStub::GenerateAheadOfTime(isolate);
  RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
  StoreFastElementStub::GenerateAheadOfTime(isolate);
}


void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  StoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  RestoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void CodeStub::GenerateFPStubs(Isolate* isolate) {
  // Generate if not already in cache.
  SaveFPRegsMode mode = kSaveFPRegs;
  CEntryStub(isolate, 1, mode).GetCode();
  StoreBufferOverflowStub(isolate, mode).GetCode();
  isolate->set_fp_stubs_generated(true);
}


void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
  stub.GetCode();
}


void CEntryStub::Generate(MacroAssembler* masm) {
  // Called from JavaScript; parameters are on stack as if calling JS function.
  // r3: number of arguments including receiver
  // r4: pointer to builtin function
  // fp: frame pointer (restored after C call)
  // sp: stack pointer (restored as callee's sp after C call)
  // cp: current context (C callee-saved)
  //
  // If argv_in_register():
  // r5: pointer to the first argument
  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  __ mr(r15, r4);

  if (argv_in_register()) {
    // Move argv into the correct register.
    __ mr(r4, r5);
  } else {
    // Compute the argv pointer.
    __ ShiftLeftImm(r4, r3, Operand(kPointerSizeLog2));
    __ add(r4, r4, sp);
    __ subi(r4, r4, Operand(kPointerSize));
  }

  // Enter the exit frame that transitions from JavaScript to C++.
  FrameScope scope(masm, StackFrame::MANUAL);

  // Need at least one extra slot for return address location.
  int arg_stack_space = 1;

  // Pass buffer for return value on stack if necessary.
  bool needs_return_buffer =
      result_size() > 2 ||
      (result_size() == 2 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS);
  if (needs_return_buffer) {
    arg_stack_space += result_size();
  }
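  // Sketch of the convention applied below (the usual indirect-return rule
  // for aggregates): when the result does not fit in registers, the caller
  // reserves stack space and passes its address as a hidden first argument,
  // shifting the real arguments over by one register.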

  __ EnterExitFrame(save_doubles(), arg_stack_space,
                    is_builtin_exit() ? StackFrame::BUILTIN_EXIT
                                      : StackFrame::EXIT);

  // Store a copy of argc in callee-saved registers for later.
  __ mr(r14, r3);

  // r3, r14: number of arguments including receiver (C callee-saved)
  // r4: pointer to the first argument
  // r15: pointer to builtin function (C callee-saved)

  // Result returned in registers or stack, depending on result size and ABI.

  Register isolate_reg = r5;
  if (needs_return_buffer) {
    // The return value is a non-scalar value.
    // Use frame storage reserved by calling function to pass return
    // buffer as implicit first argument.
    __ mr(r5, r4);
    __ mr(r4, r3);
    __ addi(r3, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
    isolate_reg = r6;
  }

  // Call C built-in.
  __ mov(isolate_reg, Operand(ExternalReference::isolate_address(isolate())));

  Register target = r15;
  if (ABI_USES_FUNCTION_DESCRIPTORS) {
    // AIX/PPC64BE Linux use a function descriptor.
    __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(r15, kPointerSize));
    __ LoadP(ip, MemOperand(r15, 0));  // Instruction address
    target = ip;
  } else if (ABI_CALL_VIA_IP) {
    __ Move(ip, r15);
    target = ip;
  }

  // To let the GC traverse the return address of the exit frames, we need to
  // know where the return address is. The CEntryStub is unmovable, so
  // we can store the address on the stack to be able to find it again and
  // we never have to restore it, because it will not change.
  Label after_call;
  __ mov_label_addr(r0, &after_call);
  __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
  __ Call(target);
  __ bind(&after_call);

  // If return value is on the stack, pop it to registers.
  if (needs_return_buffer) {
    if (result_size() > 2) __ LoadP(r5, MemOperand(r3, 2 * kPointerSize));
    __ LoadP(r4, MemOperand(r3, kPointerSize));
    __ LoadP(r3, MemOperand(r3));
  }

  // Check result for exception sentinel.
  Label exception_returned;
  __ CompareRoot(r3, Heap::kExceptionRootIndex);
  __ beq(&exception_returned);

  // Check that there is no pending exception, otherwise we
  // should have returned the exception sentinel.
  if (FLAG_debug_code) {
    Label okay;
    ExternalReference pending_exception_address(
        Isolate::kPendingExceptionAddress, isolate());

    __ mov(r6, Operand(pending_exception_address));
    __ LoadP(r6, MemOperand(r6));
    __ CompareRoot(r6, Heap::kTheHoleValueRootIndex);
    // Cannot use check here as it attempts to generate call into runtime.
    __ beq(&okay);
    __ stop("Unexpected pending exception");
    __ bind(&okay);
  }

  // Exit C frame and return.
  // r3:r4: result
  // sp: stack pointer
  // fp: frame pointer
  Register argc;
  if (argv_in_register()) {
    // We don't want to pop arguments so set argc to no_reg.
    argc = no_reg;
  } else {
    // r14: still holds argc (callee-saved).
    argc = r14;
  }
  __ LeaveExitFrame(save_doubles(), argc, true);
  __ blr();

  // Handling of exception.
  __ bind(&exception_returned);

  ExternalReference pending_handler_context_address(
      Isolate::kPendingHandlerContextAddress, isolate());
  ExternalReference pending_handler_code_address(
      Isolate::kPendingHandlerCodeAddress, isolate());
  ExternalReference pending_handler_offset_address(
      Isolate::kPendingHandlerOffsetAddress, isolate());
  ExternalReference pending_handler_fp_address(
      Isolate::kPendingHandlerFPAddress, isolate());
  ExternalReference pending_handler_sp_address(
      Isolate::kPendingHandlerSPAddress, isolate());

  // Ask the runtime for help to determine the handler. This will set r3 to
  // contain the current pending exception, don't clobber it.
  ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
                                 isolate());
  {
    FrameScope scope(masm, StackFrame::MANUAL);
    __ PrepareCallCFunction(3, 0, r3);
    __ li(r3, Operand::Zero());
    __ li(r4, Operand::Zero());
    __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
    __ CallCFunction(find_handler, 3);
  }

  // Retrieve the handler context, SP and FP.
  __ mov(cp, Operand(pending_handler_context_address));
  __ LoadP(cp, MemOperand(cp));
  __ mov(sp, Operand(pending_handler_sp_address));
  __ LoadP(sp, MemOperand(sp));
  __ mov(fp, Operand(pending_handler_fp_address));
  __ LoadP(fp, MemOperand(fp));

  // If the handler is a JS frame, restore the context to the frame. Note that
  // the context will be set to (cp == 0) for non-JS frames.
  Label skip;
  __ cmpi(cp, Operand::Zero());
  __ beq(&skip);
  __ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ bind(&skip);

  // Compute the handler entry address and jump to it.
  ConstantPoolUnavailableScope constant_pool_unavailable(masm);
  __ mov(r4, Operand(pending_handler_code_address));
  __ LoadP(r4, MemOperand(r4));
  __ mov(r5, Operand(pending_handler_offset_address));
  __ LoadP(r5, MemOperand(r5));
  __ addi(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start
  if (FLAG_enable_embedded_constant_pool) {
    __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r4);
  }
  __ add(ip, r4, r5);
  __ Jump(ip);
}


void JSEntryStub::Generate(MacroAssembler* masm) {
  // r3: code entry
  // r4: function
  // r5: receiver
  // r6: argc
  // [sp+0]: argv

  Label invoke, handler_entry, exit;

  // Called from C
  __ function_descriptor();

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  // PPC LINUX ABI:
  // preserve LR in pre-reserved slot in caller's frame
  __ mflr(r0);
  __ StoreP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));

  // Save callee saved registers on the stack.
  __ MultiPush(kCalleeSaved);

  // Save callee-saved double registers.
  __ MultiPushDoubles(kCalleeSavedDoubles);
  // Set up the reserved register for 0.0.
  __ LoadDoubleLiteral(kDoubleRegZero, 0.0, r0);

  // Push a frame with special values setup to mark it as an entry frame.
  // r3: code entry
  // r4: function
  // r5: receiver
  // r6: argc
  // r7: argv
  __ li(r0, Operand(-1));  // Push a bad frame pointer to fail if it is used.
  __ push(r0);
  if (FLAG_enable_embedded_constant_pool) {
    __ li(kConstantPoolRegister, Operand::Zero());
    __ push(kConstantPoolRegister);
  }
  int marker = type();
  __ LoadSmiLiteral(r0, Smi::FromInt(marker));
  __ push(r0);
  __ push(r0);
  // Save copies of the top frame descriptor on the stack.
  __ mov(r8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  __ LoadP(r0, MemOperand(r8));
  __ push(r0);

  // Set up frame pointer for the frame to be pushed.
  __ addi(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));

  // If this is the outermost JS call, set js_entry_sp value.
  Label non_outermost_js;
  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
  __ mov(r8, Operand(ExternalReference(js_entry_sp)));
  __ LoadP(r9, MemOperand(r8));
  __ cmpi(r9, Operand::Zero());
  __ bne(&non_outermost_js);
  __ StoreP(fp, MemOperand(r8));
  __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
  Label cont;
  __ b(&cont);
  __ bind(&non_outermost_js);
  __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
  __ bind(&cont);
  __ push(ip);  // frame-type

  // Jump to a faked try block that does the invoke, with a faked catch
  // block that sets the pending exception.
  __ b(&invoke);

  __ bind(&handler_entry);
  handler_offset_ = handler_entry.pos();
  // Caught exception: Store result (exception) in the pending exception
  // field in the JSEnv and return a failure sentinel. Coming in here the
  // fp will be invalid because the PushStackHandler below sets it to 0 to
  // signal the existence of the JSEntry frame.
  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                       isolate())));

  __ StoreP(r3, MemOperand(ip));
  __ LoadRoot(r3, Heap::kExceptionRootIndex);
  __ b(&exit);

  // Invoke: Link this frame into the handler chain.
  __ bind(&invoke);
  // Must preserve r3-r7.
  __ PushStackHandler();
  // If an exception not caught by another handler occurs, this handler
  // returns control to the code after the b(&invoke) above, which
  // restores all kCalleeSaved registers (including cp and fp) to their
  // saved values before returning a failure to C.

  // Invoke the function by calling through JS entry trampoline builtin.
  // Notice that we cannot store a reference to the trampoline code directly in
  // this stub, because runtime stubs are not traversed when doing GC.

  // Expected registers by Builtins::JSEntryTrampoline
  // r3: code entry
  // r4: function
  // r5: receiver
  // r6: argc
  // r7: argv
  if (type() == StackFrame::ENTRY_CONSTRUCT) {
    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
                                      isolate());
    __ mov(ip, Operand(construct_entry));
  } else {
    ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
    __ mov(ip, Operand(entry));
  }
  __ LoadP(ip, MemOperand(ip));  // deref address

  // Branch and link to JSEntryTrampoline.
  // the address points to the start of the code object, skip the header
  __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ mtctr(ip);
  __ bctrl();  // make the call

  // Unlink this frame from the handler chain.
  __ PopStackHandler();

  __ bind(&exit);  // r3 holds result
  // Check if the current stack frame is marked as the outermost JS frame.
  Label non_outermost_js_2;
  __ pop(r8);
  __ CmpSmiLiteral(r8, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME), r0);
  __ bne(&non_outermost_js_2);
  __ mov(r9, Operand::Zero());
  __ mov(r8, Operand(ExternalReference(js_entry_sp)));
  __ StoreP(r9, MemOperand(r8));
  __ bind(&non_outermost_js_2);

  // Restore the top frame descriptors from the stack.
  __ pop(r6);
  __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  __ StoreP(r6, MemOperand(ip));

  // Reset the stack to the callee saved registers.
  __ addi(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));

  // Restore callee-saved double registers.
  __ MultiPopDoubles(kCalleeSavedDoubles);

  // Restore callee-saved registers.
  __ MultiPop(kCalleeSaved);

  // Return
  __ LoadP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));
  __ mtlr(r0);
  __ blr();
}


void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
  Label miss;
  Register receiver = LoadDescriptor::ReceiverRegister();
  // Ensure that the vector and slot registers won't be clobbered before
  // calling the miss handler.
  DCHECK(!AreAliased(r7, r8, LoadWithVectorDescriptor::VectorRegister(),
                     LoadWithVectorDescriptor::SlotRegister()));

  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r7,
                                                          r8, &miss);
  __ bind(&miss);
  PropertyAccessCompiler::TailCallBuiltin(
      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
}


void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
  // Return address is in lr.
  Label miss;

  Register receiver = LoadDescriptor::ReceiverRegister();
  Register index = LoadDescriptor::NameRegister();
  Register scratch = r8;
  Register result = r3;
  DCHECK(!scratch.is(receiver) && !scratch.is(index));
  DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
         result.is(LoadWithVectorDescriptor::SlotRegister()));

  // StringCharAtGenerator doesn't use the result register until it's passed
  // the different miss possibilities. If it did, we would have a conflict
  // when FLAG_vector_ics is true.
  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          RECEIVER_IS_STRING);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);

  __ bind(&miss);
  PropertyAccessCompiler::TailCallBuiltin(
      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
}


void RegExpExecStub::Generate(MacroAssembler* masm) {
  // Jump straight to the runtime if native RegExp is not selected at compile
  // time, or if the regexp entry in generated code is turned off by a runtime
  // switch or at compilation.
#ifdef V8_INTERPRETED_REGEXP
  __ TailCallRuntime(Runtime::kRegExpExec);
#else  // V8_INTERPRETED_REGEXP

  // Stack frame on entry.
  // sp[0]: last_match_info (expected JSArray)
  // sp[4]: previous index
  // sp[8]: subject string
  // sp[12]: JSRegExp object

  const int kLastMatchInfoOffset = 0 * kPointerSize;
  const int kPreviousIndexOffset = 1 * kPointerSize;
  const int kSubjectOffset = 2 * kPointerSize;
  const int kJSRegExpOffset = 3 * kPointerSize;

  Label runtime, br_over, encoding_type_UC16;

  // Allocation of registers for this function. These are in callee save
  // registers and will be preserved by the call to the native RegExp code, as
  // this code is called using the normal C calling convention. When calling
  // directly from generated code the native RegExp code will not do a GC and
  // therefore the content of these registers are safe to use after the call.
  Register subject = r14;
  Register regexp_data = r15;
  Register last_match_info_elements = r16;
  Register code = r17;

  // Ensure register assignments are consistent with callee save masks.
  DCHECK(subject.bit() & kCalleeSaved);
  DCHECK(regexp_data.bit() & kCalleeSaved);
  DCHECK(last_match_info_elements.bit() & kCalleeSaved);
  DCHECK(code.bit() & kCalleeSaved);

  // Ensure that a RegExp stack is allocated.
  ExternalReference address_of_regexp_stack_memory_address =
      ExternalReference::address_of_regexp_stack_memory_address(isolate());
  ExternalReference address_of_regexp_stack_memory_size =
      ExternalReference::address_of_regexp_stack_memory_size(isolate());
  __ mov(r3, Operand(address_of_regexp_stack_memory_size));
  __ LoadP(r3, MemOperand(r3, 0));
  __ cmpi(r3, Operand::Zero());
  __ beq(&runtime);

  // Check that the first argument is a JSRegExp object.
  __ LoadP(r3, MemOperand(sp, kJSRegExpOffset));
  __ JumpIfSmi(r3, &runtime);
  __ CompareObjectType(r3, r4, r4, JS_REGEXP_TYPE);
  __ bne(&runtime);

  // Check that the RegExp has been compiled (data contains a fixed array).
  __ LoadP(regexp_data, FieldMemOperand(r3, JSRegExp::kDataOffset));
  if (FLAG_debug_code) {
    __ TestIfSmi(regexp_data, r0);
    __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected, cr0);
    __ CompareObjectType(regexp_data, r3, r3, FIXED_ARRAY_TYPE);
    __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
  }

  // regexp_data: RegExp data (FixedArray)
  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
  __ LoadP(r3, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
  // DCHECK(Smi::FromInt(JSRegExp::IRREGEXP) < (char *)0xffffu);
  __ CmpSmiLiteral(r3, Smi::FromInt(JSRegExp::IRREGEXP), r0);
  __ bne(&runtime);

  // regexp_data: RegExp data (FixedArray)
  // Check that the number of captures fit in the static offsets vector buffer.
  __ LoadP(r5,
           FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
  // Check (number_of_captures + 1) * 2 <= offsets vector size
  // Or number_of_captures * 2 <= offsets vector size - 2
  // SmiToShortArrayOffset accomplishes the multiplication by 2 and
  // SmiUntag (which is a nop for 32-bit).
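  // Worked example (illustrative): with number_of_captures = 2 the offset
  // computed below is 2 * 2 = 4, and the check
  //   4 <= kJSRegexpStaticOffsetsVectorSize - 2
  // is equivalent to (2 + 1) * 2 <= kJSRegexpStaticOffsetsVectorSize.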
  __ SmiToShortArrayOffset(r5, r5);
  STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
  __ cmpli(r5, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
  __ bgt(&runtime);

  // Reset offset for possibly sliced string.
  __ li(r11, Operand::Zero());
  __ LoadP(subject, MemOperand(sp, kSubjectOffset));
  __ JumpIfSmi(subject, &runtime);
  __ mr(r6, subject);  // Make a copy of the original subject string.
  // subject: subject string
  // r6: subject string
  // regexp_data: RegExp data (FixedArray)
  // Handle subject string according to its encoding and representation:
  // (1) Sequential string? If yes, go to (4).
  // (2) Sequential or cons? If not, go to (5).
  // (3) Cons string. If the string is flat, replace subject with first string
  //     and go to (1). Otherwise bail out to runtime.
  // (4) Sequential string. Load regexp code according to encoding.
  // (E) Carry on.
  /// [...]

  // Deferred code at the end of the stub:
  // (5) Long external string? If not, go to (7).
  // (6) External string. Make it, offset-wise, look like a sequential string.
  //     Go to (4).
  // (7) Short external string or not a string? If yes, bail out to runtime.
  // (8) Sliced string. Replace subject with parent. Go to (1).

  Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
      not_seq_nor_cons /* 5 */, not_long_external /* 7 */;

  __ bind(&check_underlying);
  __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
  __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));

  // (1) Sequential string? If yes, go to (4).

  STATIC_ASSERT((kIsNotStringMask | kStringRepresentationMask |
                 kShortExternalStringMask) == 0x93);
  __ andi(r4, r3, Operand(kIsNotStringMask | kStringRepresentationMask |
                          kShortExternalStringMask));
  STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
  __ beq(&seq_string, cr0);  // Go to (4).

  // (2) Sequential or cons? If not, go to (5).
  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
  STATIC_ASSERT(kExternalStringTag < 0xffffu);
  __ cmpi(r4, Operand(kExternalStringTag));
  __ bge(&not_seq_nor_cons);  // Go to (5).

  // (3) Cons string. Check that it's flat.
  // Replace subject with first string and reload instance type.
  __ LoadP(r3, FieldMemOperand(subject, ConsString::kSecondOffset));
  __ CompareRoot(r3, Heap::kempty_stringRootIndex);
  __ bne(&runtime);
  __ LoadP(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
  __ b(&check_underlying);

  // (4) Sequential string. Load regexp code according to encoding.
  __ bind(&seq_string);
  // subject: sequential subject string (or look-alike, external string)
  // r6: original subject string
  // Load previous index and check range before r6 is overwritten. We have to
  // use r6 instead of subject here because subject might have been only made
  // to look like a sequential string when it actually is an external string.
  __ LoadP(r4, MemOperand(sp, kPreviousIndexOffset));
  __ JumpIfNotSmi(r4, &runtime);
  __ LoadP(r6, FieldMemOperand(r6, String::kLengthOffset));
  __ cmpl(r6, r4);
  __ ble(&runtime);
  __ SmiUntag(r4);

  STATIC_ASSERT(4 == kOneByteStringTag);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  STATIC_ASSERT(kStringEncodingMask == 4);
  __ ExtractBitMask(r6, r3, kStringEncodingMask, SetRC);
  __ beq(&encoding_type_UC16, cr0);
  __ LoadP(code,
           FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
  __ b(&br_over);
  __ bind(&encoding_type_UC16);
  __ LoadP(code, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
  __ bind(&br_over);

  // (E) Carry on. String handling is done.
  // code: irregexp code
  // Check that the irregexp code has been generated for the actual string
  // encoding. If it has, the field contains a code object; otherwise it
  // contains a smi (code flushing support).
  __ JumpIfSmi(code, &runtime);

  // r4: previous index
  // r6: encoding of subject string (1 if one_byte, 0 if two_byte);
  // code: Address of generated regexp code
  // subject: Subject string
  // regexp_data: RegExp data (FixedArray)
  // All checks done. Now push arguments for native regexp code.
  __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r3, r5);

  // Isolates: note we add an additional parameter here (isolate pointer).
  const int kRegExpExecuteArguments = 10;
  const int kParameterRegisters = 8;
  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
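  // With 8 parameter registers (r3..r10) available, arguments 1-8 travel in
  // registers and the remaining 10 - 8 = 2 arguments (9 and 10) go in the
  // stack parameter area reserved by EnterExitFrame above.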

  // Stack pointer now points to cell where return address is to be written.
  // Arguments are before that on the stack or in registers.

  // Argument 10 (in stack parameter area): Pass current isolate address.
  __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
  __ StoreP(r3, MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));

  // Argument 9 is a dummy that reserves the space used for
  // the return address added by the ExitFrame in native calls.

  // Argument 8 (r10): Indicate that this is a direct call from JavaScript.
  __ li(r10, Operand(1));

  // Argument 7 (r9): Start (high end) of backtracking stack memory area.
  __ mov(r3, Operand(address_of_regexp_stack_memory_address));
  __ LoadP(r3, MemOperand(r3, 0));
  __ mov(r5, Operand(address_of_regexp_stack_memory_size));
  __ LoadP(r5, MemOperand(r5, 0));
  __ add(r9, r3, r5);

  // Argument 6 (r8): Set the number of capture registers to zero to force
  // global regexps to behave as non-global. This does not affect non-global
  // regexps.
  __ li(r8, Operand::Zero());

  // Argument 5 (r7): static offsets vector buffer.
  __ mov(
      r7,
      Operand(ExternalReference::address_of_static_offsets_vector(isolate())));

  // For arguments 4 (r6) and 3 (r5) get string length, calculate start of data
  // and calculate the shift of the index (0 for one-byte and 1 for two-byte).
  __ addi(r18, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
  __ xori(r6, r6, Operand(1));
1500 // Load the length from the original subject string from the previous stack
1501 // frame. Therefore we have to use fp, which points exactly to two pointer
1502 // sizes below the previous sp. (Because creating a new stack frame pushes
1503 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
1504 __ LoadP(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
1505 // If slice offset is not 0, load the length from the original sliced string.
1506 // Argument 4, r6: End of string data
1507 // Argument 3, r5: Start of string data
1508 // Prepare start and end index of the input.
1509 __ ShiftLeft_(r11, r11, r6);
1510 __ add(r11, r18, r11);
1511 __ ShiftLeft_(r5, r4, r6);
1512 __ add(r5, r11, r5);
1513
1514 __ LoadP(r18, FieldMemOperand(subject, String::kLengthOffset));
1515 __ SmiUntag(r18);
1516 __ ShiftLeft_(r6, r18, r6);
1517 __ add(r6, r11, r6);
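// The shifts and adds above compute, in C-like pseudocode (a sketch; the
// variable names are illustrative, not from this file):
//   data  = subject_data_start;                // r18
//   base  = data + (slice_offset << shift);    // r11; shift: 0 = one-byte, 1 = two-byte
//   start = base + (previous_index << shift);  // r5, argument 3
//   end   = base + (subject_length << shift);  // r6, argument 4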
1518
1519 // Argument 2 (r4): Previous index.
1520 // Already there
1521
1522 // Argument 1 (r3): Subject string.
1523 __ mr(r3, subject);
1524
1525 // Locate the code entry and call it.
1526 __ addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
1527
1528 DirectCEntryStub stub(isolate());
1529 stub.GenerateCall(masm, code);
1530
1531 __ LeaveExitFrame(false, no_reg, true);
1532
1533 // r3: result (int32)
1534 // subject: subject string (callee saved)
1535 // regexp_data: RegExp data (callee saved)
1536 // last_match_info_elements: Last match info elements (callee saved)
1537 // Check the result.
1538 Label success;
1539 __ cmpwi(r3, Operand(1));
1540 // We expect exactly one result since we force the called regexp to behave
1541 // as non-global.
1542 __ beq(&success);
1543 Label failure;
1544 __ cmpwi(r3, Operand(NativeRegExpMacroAssembler::FAILURE));
1545 __ beq(&failure);
1546 __ cmpwi(r3, Operand(NativeRegExpMacroAssembler::EXCEPTION));
1547 // If not an exception, it can only be a retry. Handle that in the runtime system.
1548 __ bne(&runtime);
1549 // Result must now be an exception. If there is no pending exception already,
1550 // a stack overflow (on the backtrack stack) was detected in RegExp code but
1551 // the exception has not been created yet. Handle that in the runtime system.
1552 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
1553 __ mov(r4, Operand(isolate()->factory()->the_hole_value()));
1554 __ mov(r5, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1555 isolate())));
1556 __ LoadP(r3, MemOperand(r5, 0));
1557 __ cmp(r3, r4);
1558 __ beq(&runtime);
1559
1560 // For exception, throw the exception again.
1561 __ TailCallRuntime(Runtime::kRegExpExecReThrow);
1562
1563 __ bind(&failure);
1564 // For failure, return null (the exception case re-throws above).
1565 __ mov(r3, Operand(isolate()->factory()->null_value()));
1566 __ addi(sp, sp, Operand(4 * kPointerSize));
1567 __ Ret();
1568
1569 // Process the result from the native regexp code.
1570 __ bind(&success);
1571 __ LoadP(r4,
1572 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
1573 // Calculate number of capture registers (number_of_captures + 1) * 2.
1574 // SmiToShortArrayOffset accomplishes the multiplication by 2 and
1575 // SmiUntag (which is a nop for 32-bit).
1576 __ SmiToShortArrayOffset(r4, r4);
1577 __ addi(r4, r4, Operand(2));
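// Worked example: a regexp with two capture groups has a capture count of 2,
// so r4 = 2 * 2 + 2 = 6 registers -- one (start, end) offset pair for the
// whole match plus one pair per capture group.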
1578
1579 // Check that the last match info is a FixedArray.
1580 __ LoadP(last_match_info_elements, MemOperand(sp, kLastMatchInfoOffset));
1581 __ JumpIfSmi(last_match_info_elements, &runtime);
1582 // Check that the object has fast elements.
1583 __ LoadP(r3,
1584 FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
1585 __ CompareRoot(r3, Heap::kFixedArrayMapRootIndex);
1586 __ bne(&runtime);
1587 // Check that the last match info has space for the capture registers and the
1588 // additional information.
1589 __ LoadP(
1590 r3, FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
1591 __ addi(r5, r4, Operand(RegExpMatchInfo::kLastMatchOverhead));
1592 __ SmiUntag(r0, r3);
1593 __ cmp(r5, r0);
1594 __ bgt(&runtime);
1595
1596 // r4: number of capture registers
1597 // subject: subject string
1598 // Store the capture count.
1599 __ SmiTag(r5, r4);
1600 __ StoreP(r5, FieldMemOperand(last_match_info_elements,
1601 RegExpMatchInfo::kNumberOfCapturesOffset),
1602 r0);
1603 // Store last subject and last input.
1604 __ StoreP(subject, FieldMemOperand(last_match_info_elements,
1605 RegExpMatchInfo::kLastSubjectOffset),
1606 r0);
1607 __ mr(r5, subject);
1608 __ RecordWriteField(last_match_info_elements,
1609 RegExpMatchInfo::kLastSubjectOffset, subject, r10,
1610 kLRHasNotBeenSaved, kDontSaveFPRegs);
1611 __ mr(subject, r5);
1612 __ StoreP(subject, FieldMemOperand(last_match_info_elements,
1613 RegExpMatchInfo::kLastInputOffset),
1614 r0);
1615 __ RecordWriteField(last_match_info_elements,
1616 RegExpMatchInfo::kLastInputOffset, subject, r10,
1617 kLRHasNotBeenSaved, kDontSaveFPRegs);
1618
1619 // Get the static offsets vector filled by the native regexp code.
1620 ExternalReference address_of_static_offsets_vector =
1621 ExternalReference::address_of_static_offsets_vector(isolate());
1622 __ mov(r5, Operand(address_of_static_offsets_vector));
1623
1624 // r4: number of capture registers
1625 // r5: offsets vector
1626 Label next_capture;
1627 // Capture register counter starts from number of capture registers and
1628 // counts down until wrapping after zero.
1629 __ addi(r3, last_match_info_elements,
1630 Operand(RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag -
1631 kPointerSize));
1632 __ addi(r5, r5, Operand(-kIntSize)); // bias down for lwzu
1633 __ mtctr(r4);
1634 __ bind(&next_capture);
1635 // Read the value from the static offsets vector buffer.
1636 __ lwzu(r6, MemOperand(r5, kIntSize));
1637 // Store the smi value in the last match info.
1638 __ SmiTag(r6);
1639 __ StorePU(r6, MemOperand(r3, kPointerSize));
1640 __ bdnz(&next_capture);
1641
1642 // Return last match info.
1643 __ mr(r3, last_match_info_elements);
1644 __ addi(sp, sp, Operand(4 * kPointerSize));
1645 __ Ret();
1646
1647 // Do the runtime call to execute the regexp.
1648 __ bind(&runtime);
1649 __ TailCallRuntime(Runtime::kRegExpExec);
1650
1651 // Deferred code for string handling.
1652 // (5) Long external string? If not, go to (7).
1653 __ bind(&not_seq_nor_cons);
1654 // Compare flags are still set.
1655 __ bgt(&not_long_external); // Go to (7).
1656
1657 // (6) External string. Make it, offset-wise, look like a sequential string.
1658 __ bind(&external_string);
1659 __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
1660 __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
1661 if (FLAG_debug_code) {
1662 // Assert that we do not have a cons or slice (indirect strings) here.
1663 // Sequential strings have already been ruled out.
1664 STATIC_ASSERT(kIsIndirectStringMask == 1);
1665 __ andi(r0, r3, Operand(kIsIndirectStringMask));
1666 __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
1667 }
1668 __ LoadP(subject,
1669 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
1670 // Move the pointer so that offset-wise, it looks like a sequential string.
1671 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
1672 __ subi(subject, subject,
1673 Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
1674 __ b(&seq_string); // Go to (4).
1675
1676 // (7) Short external string or not a string? If yes, bail out to runtime.
1677 __ bind(&not_long_external);
1678 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
1679 __ andi(r0, r4, Operand(kIsNotStringMask | kShortExternalStringMask));
1680 __ bne(&runtime, cr0);
1681
1682 // (8) Sliced string. Replace subject with parent. Go to (4).
1683 // Load offset into r11 and replace subject string with parent.
1684 __ LoadP(r11, FieldMemOperand(subject, SlicedString::kOffsetOffset));
1685 __ SmiUntag(r11);
1686 __ LoadP(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
1687 __ b(&check_underlying); // Go to (4).
1688 #endif // V8_INTERPRETED_REGEXP
1689 }
1690
1691
1692 static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
1693 // r3 : number of arguments to the construct function
1694 // r4 : the function to call
1695 // r5 : feedback vector
1696 // r6 : slot in feedback vector (Smi)
1697 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
1698
1699 // Number-of-arguments register must be smi-tagged to call out.
1700 __ SmiTag(r3);
1701 __ Push(r6, r5, r4, r3);
1702 __ Push(cp);
1703
1704 __ CallStub(stub);
1705
1706 __ Pop(cp);
1707 __ Pop(r6, r5, r4, r3);
1708 __ SmiUntag(r3);
1709 }
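// Note: r3 is smi-tagged above because every value pushed across the stub
// call must look like a tagged object to the GC when it walks the stack;
// untagging afterwards restores the raw argument count.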
1710
1711
1712 static void GenerateRecordCallTarget(MacroAssembler* masm) {
1713 // Cache the called function in a feedback vector slot. Cache states
1714 // are uninitialized, monomorphic (indicated by a JSFunction), and
1715 // megamorphic.
1716 // r3 : number of arguments to the construct function
1717 // r4 : the function to call
1718 // r5 : feedback vector
1719 // r6 : slot in feedback vector (Smi)
1720 Label initialize, done, miss, megamorphic, not_array_function;
1721
1722 DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
1723 masm->isolate()->heap()->megamorphic_symbol());
1724 DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
1725 masm->isolate()->heap()->uninitialized_symbol());
1726
1727 const int count_offset = FixedArray::kHeaderSize + kPointerSize;
1728
1729 // Load the cache state into r8.
1730 __ SmiToPtrArrayOffset(r8, r6);
1731 __ add(r8, r5, r8);
1732 __ LoadP(r8, FieldMemOperand(r8, FixedArray::kHeaderSize));
1733
1734 // A monomorphic cache hit or an already megamorphic state: invoke the
1735 // function without changing the state.
1736 // We don't know if r8 is a WeakCell or a Symbol, but it's harmless to read at
1737 // this position in a symbol (see static asserts in type-feedback-vector.h).
1738 Label check_allocation_site;
1739 Register feedback_map = r9;
1740 Register weak_value = r10;
1741 __ LoadP(weak_value, FieldMemOperand(r8, WeakCell::kValueOffset));
1742 __ cmp(r4, weak_value);
1743 __ beq(&done);
1744 __ CompareRoot(r8, Heap::kmegamorphic_symbolRootIndex);
1745 __ beq(&done);
1746 __ LoadP(feedback_map, FieldMemOperand(r8, HeapObject::kMapOffset));
1747 __ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
1748 __ bne(&check_allocation_site);
1749
1750 // If the weak cell is cleared, we have a new chance to become monomorphic.
1751 __ JumpIfSmi(weak_value, &initialize);
1752 __ b(&megamorphic);
1753
1754 __ bind(&check_allocation_site);
1755 // If we came here, we need to see if we are the array function.
1756 // If we didn't have a matching function, and we didn't find the megamorphic
1757 // sentinel, then we have in the slot either some other function or an
1758 // AllocationSite.
1759 __ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
1760 __ bne(&miss);
1761
1762 // Make sure the function is the Array() function
1763 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
1764 __ cmp(r4, r8);
1765 __ bne(&megamorphic);
1766 __ b(&done);
1767
1768 __ bind(&miss);
1769
1770 // A monomorphic miss (i.e., here the cache is not uninitialized) goes
1771 // megamorphic.
1772 __ CompareRoot(r8, Heap::kuninitialized_symbolRootIndex);
1773 __ beq(&initialize);
1774 // MegamorphicSentinel is an immortal immovable object (undefined) so no
1775 // write-barrier is needed.
1776 __ bind(&megamorphic);
1777 __ SmiToPtrArrayOffset(r8, r6);
1778 __ add(r8, r5, r8);
1779 __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
1780 __ StoreP(ip, FieldMemOperand(r8, FixedArray::kHeaderSize), r0);
1781 __ jmp(&done);
1782
1783 // An uninitialized cache is patched with the function.
1784 __ bind(&initialize);
1785
1786 // Make sure the function is the Array() function.
1787 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
1788 __ cmp(r4, r8);
1789 __ bne(&not_array_function);
1790
1791 // The target function is the Array constructor.
1792 // Create an AllocationSite if we don't already have one and store it in the
1793 // slot.
1794 CreateAllocationSiteStub create_stub(masm->isolate());
1795 CallStubInRecordCallTarget(masm, &create_stub);
1796 __ b(&done);
1797
1798 __ bind(&not_array_function);
1799
1800 CreateWeakCellStub weak_cell_stub(masm->isolate());
1801 CallStubInRecordCallTarget(masm, &weak_cell_stub);
1802
1803 __ bind(&done);
1804
1805 // Increment the call count for all function calls.
1806 __ SmiToPtrArrayOffset(r8, r6);
1807 __ add(r8, r5, r8);
1808
1809 __ LoadP(r7, FieldMemOperand(r8, count_offset));
1810 __ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
1811 __ StoreP(r7, FieldMemOperand(r8, count_offset), r0);
1812 }
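// Taken together, the paths above implement a small state machine over the
// feedback slot (a summary, not additional behavior):
//   uninitialized symbol -> WeakCell(function), or an AllocationSite when
//                           the callee is the Array function;
//   cleared WeakCell     -> may be re-initialized on a later call;
//   anything else        -> megamorphic symbol (no write barrier needed).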
1813
1814
1815 void CallConstructStub::Generate(MacroAssembler* masm) {
1816 // r3 : number of arguments
1817 // r4 : the function to call
1818 // r5 : feedback vector
1819 // r6 : slot in feedback vector (Smi, for RecordCallTarget)
1820
1821 Label non_function;
1822 // Check that the function is not a smi.
1823 __ JumpIfSmi(r4, &non_function);
1824 // Check that the function is a JSFunction.
1825 __ CompareObjectType(r4, r8, r8, JS_FUNCTION_TYPE);
1826 __ bne(&non_function);
1827
1828 GenerateRecordCallTarget(masm);
1829
1830 __ SmiToPtrArrayOffset(r8, r6);
1831 __ add(r8, r5, r8);
1832 // Put the AllocationSite from the feedback vector into r5, or undefined.
1833 __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize));
1834 __ LoadP(r8, FieldMemOperand(r5, AllocationSite::kMapOffset));
1835 __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
1836 if (CpuFeatures::IsSupported(ISELECT)) {
1837 __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
1838 __ isel(eq, r5, r5, r8);
1839 } else {
1840 Label feedback_register_initialized;
1841 __ beq(&feedback_register_initialized);
1842 __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
1843 __ bind(&feedback_register_initialized);
1844 }
1845
1846 __ AssertUndefinedOrAllocationSite(r5, r8);
1847
1848 // Pass function as new target.
1849 __ mr(r6, r4);
1850
1851 // Tail call to the function-specific construct stub (still in the caller
1852 // context at this point).
1853 __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
1854 __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kConstructStubOffset));
1855 __ addi(ip, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
1856 __ JumpToJSEntry(ip);
1857
1858 __ bind(&non_function);
1859 __ mr(r6, r4);
1860 __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
1861 }
1862
1863 // Note: feedback_vector and slot are clobbered after the call.
1864 static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
1865 Register slot, Register temp) {
1866 const int count_offset = FixedArray::kHeaderSize + kPointerSize;
1867 __ SmiToPtrArrayOffset(temp, slot);
1868 __ add(feedback_vector, feedback_vector, temp);
1869 __ LoadP(slot, FieldMemOperand(feedback_vector, count_offset));
1870 __ AddSmiLiteral(slot, slot, Smi::FromInt(1), temp);
1871 __ StoreP(slot, FieldMemOperand(feedback_vector, count_offset), temp);
1872 }
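// count_offset above addresses the feedback vector element one slot past the
// cache state, i.e. the call count stored as a Smi. Roughly (a sketch):
//   vector[slot + 1] = Smi(SmiValue(vector[slot + 1]) + 1);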
1873
1874 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
1875 // r3 - number of arguments
1876 // r4 - function
1877 // r6 - slot id
1878 // r5 - vector
1879 // r7 - allocation site (loaded from vector[slot])
1880 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
1881 __ cmp(r4, r8);
1882 __ bne(miss);
1883
1884 // Increment the call count for monomorphic function calls.
1885 IncrementCallCount(masm, r5, r6, r0);
1886
1887 __ mr(r5, r7);
1888 __ mr(r6, r4);
1889 ArrayConstructorStub stub(masm->isolate());
1890 __ TailCallStub(&stub);
1891 }
1892
1893
1894 void CallICStub::Generate(MacroAssembler* masm) {
1895 // r3 - number of arguments
1896 // r4 - function
1897 // r6 - slot id (Smi)
1898 // r5 - vector
1899 Label extra_checks_or_miss, call, call_function, call_count_incremented;
1900
1901 // The checks. First, does r4 match the recorded monomorphic target?
1902 __ SmiToPtrArrayOffset(r9, r6);
1903 __ add(r9, r5, r9);
1904 __ LoadP(r7, FieldMemOperand(r9, FixedArray::kHeaderSize));
1905
1906 // We don't know that we have a weak cell. We might have a private symbol
1907 // or an AllocationSite, but the memory is safe to examine.
1908 // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
1909 // FixedArray.
1910 // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
1911 // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
1912 // computed, meaning that it can't appear to be a pointer. If the low bit is
1913 // 0, then hash is computed, but the 0 bit prevents the field from appearing
1914 // to be a pointer.
1915 STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
1916 STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
1917 WeakCell::kValueOffset &&
1918 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
1919
1920 __ LoadP(r8, FieldMemOperand(r7, WeakCell::kValueOffset));
1921 __ cmp(r4, r8);
1922 __ bne(&extra_checks_or_miss);
1923
1924 // The compare above could have been a SMI/SMI comparison. Guard against this
1925 // convincing us that we have a monomorphic JSFunction.
1926 __ JumpIfSmi(r4, &extra_checks_or_miss);
1927
1928 __ bind(&call_function);
1929
1930 // Increment the call count for monomorphic function calls.
1931 IncrementCallCount(masm, r5, r6, r0);
1932
1933 __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
1934 tail_call_mode()),
1935 RelocInfo::CODE_TARGET);
1936
1937 __ bind(&extra_checks_or_miss);
1938 Label uninitialized, miss, not_allocation_site;
1939
1940 __ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex);
1941 __ beq(&call);
1942
1943 // Verify that r7 contains an AllocationSite
1944 __ LoadP(r8, FieldMemOperand(r7, HeapObject::kMapOffset));
1945 __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
1946 __ bne(&not_allocation_site);
1947
1948 // We have an allocation site.
1949 HandleArrayCase(masm, &miss);
1950
1951 __ bind(&not_allocation_site);
1952
1953 // The following cases attempt to handle MISS cases without going to the
1954 // runtime.
1955 if (FLAG_trace_ic) {
1956 __ b(&miss);
1957 }
1958
1959 __ CompareRoot(r7, Heap::kuninitialized_symbolRootIndex);
1960 __ beq(&uninitialized);
1961
1962 // We are going megamorphic. If the feedback is a JSFunction, it is fine
1963 // to handle it here. More complex cases are dealt with in the runtime.
1964 __ AssertNotSmi(r7);
1965 __ CompareObjectType(r7, r8, r8, JS_FUNCTION_TYPE);
1966 __ bne(&miss);
1967 __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
1968 __ StoreP(ip, FieldMemOperand(r9, FixedArray::kHeaderSize), r0);
1969
1970 __ bind(&call);
1971
1972 // Increment the call count for megamorphic function calls.
1973 IncrementCallCount(masm, r5, r6, r0);
1974
1975 __ bind(&call_count_incremented);
1976 __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
1977 RelocInfo::CODE_TARGET);
1978
1979 __ bind(&uninitialized);
1980
1981 // We are going monomorphic, provided we actually have a JSFunction.
1982 __ JumpIfSmi(r4, &miss);
1983
1984 // Goto miss case if we do not have a function.
1985 __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE);
1986 __ bne(&miss);
1987
1988 // Make sure the function is not the Array() function, which requires special
1989 // behavior on MISS.
1990 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
1991 __ cmp(r4, r7);
1992 __ beq(&miss);
1993
1994 // Make sure the function belongs to the same native context.
1995 __ LoadP(r7, FieldMemOperand(r4, JSFunction::kContextOffset));
1996 __ LoadP(r7, ContextMemOperand(r7, Context::NATIVE_CONTEXT_INDEX));
1997 __ LoadP(ip, NativeContextMemOperand());
1998 __ cmp(r7, ip);
1999 __ bne(&miss);
2000
2001 // Store the function. Use a stub since we need a frame for allocation.
2002 // r5 - vector
2003 // r6 - slot
2004 // r4 - function
2005 {
2006 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2007 CreateWeakCellStub create_stub(masm->isolate());
2008 __ SmiTag(r3);
2009 __ Push(r3, r5, r6, cp, r4);
2010 __ CallStub(&create_stub);
2011 __ Pop(r5, r6, cp, r4);
2012 __ Pop(r3);
2013 __ SmiUntag(r3);
2014 }
2015
2016 __ b(&call_function);
2017
2018 // We are here because tracing is on or we encountered a MISS case we can't
2019 // handle here.
2020 __ bind(&miss);
2021 GenerateMiss(masm);
2022
2023 __ b(&call_count_incremented);
2024 }
2025
2026
2027 void CallICStub::GenerateMiss(MacroAssembler* masm) {
2028 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2029
2030 // Preserve the number of arguments as Smi.
2031 __ SmiTag(r3);
2032
2033 // Push the receiver and the function and feedback info.
2034 __ Push(r3, r4, r5, r6);
2035
2036 // Call the entry.
2037 __ CallRuntime(Runtime::kCallIC_Miss);
2038
2039 // Move result to r4 and exit the internal frame.
2040 __ mr(r4, r3);
2041
2042 // Restore number of arguments.
2043 __ Pop(r3);
2044 __ SmiUntag(r3);
2045 }
2046
2047
2048 // StringCharCodeAtGenerator
2049 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
2050 // If the receiver is a smi trigger the non-string case.
2051 if (check_mode_ == RECEIVER_IS_UNKNOWN) {
2052 __ JumpIfSmi(object_, receiver_not_string_);
2053
2054 // Fetch the instance type of the receiver into result register.
2055 __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2056 __ lbz(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2057 // If the receiver is not a string trigger the non-string case.
2058 __ andi(r0, result_, Operand(kIsNotStringMask));
2059 __ bne(receiver_not_string_, cr0);
2060 }
2061
2062 // If the index is non-smi trigger the non-smi case.
2063 __ JumpIfNotSmi(index_, &index_not_smi_);
2064 __ bind(&got_smi_index_);
2065
2066 // Check for index out of range.
2067 __ LoadP(ip, FieldMemOperand(object_, String::kLengthOffset));
2068 __ cmpl(ip, index_);
2069 __ ble(index_out_of_range_);
2070
2071 __ SmiUntag(index_);
2072
2073 StringCharLoadGenerator::Generate(masm, object_, index_, result_,
2074 &call_runtime_);
2075
2076 __ SmiTag(result_);
2077 __ bind(&exit_);
2078 }
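// The fast path above corresponds to this JS-flavoured sketch (the labels
// map to the exit labels wired up by the caller of this generator):
//   if (!isString(receiver)) goto receiver_not_string;  // optional check
//   if (!isSmi(index))       goto index_not_smi;
//   if (index >= length)     goto index_out_of_range;
//   result = Smi(charCodeAt(receiver, index));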
2079
2080
2081 void StringCharCodeAtGenerator::GenerateSlow(
2082 MacroAssembler* masm, EmbedMode embed_mode,
2083 const RuntimeCallHelper& call_helper) {
2084 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
2085
2086 // Index is not a smi.
2087 __ bind(&index_not_smi_);
2088 // If index is a heap number, try converting it to an integer.
2089 __ CheckMap(index_, result_, Heap::kHeapNumberMapRootIndex, index_not_number_,
2090 DONT_DO_SMI_CHECK);
2091 call_helper.BeforeCall(masm);
2092 if (embed_mode == PART_OF_IC_HANDLER) {
2093 __ Push(LoadWithVectorDescriptor::VectorRegister(),
2094 LoadWithVectorDescriptor::SlotRegister(), object_, index_);
2095 } else {
2096 // index_ is consumed by runtime conversion function.
2097 __ Push(object_, index_);
2098 }
2099 __ CallRuntime(Runtime::kNumberToSmi);
2100 // Save the conversion result before the pop instructions below
2101 // have a chance to overwrite it.
2102 __ Move(index_, r3);
2103 if (embed_mode == PART_OF_IC_HANDLER) {
2104 __ Pop(LoadWithVectorDescriptor::VectorRegister(),
2105 LoadWithVectorDescriptor::SlotRegister(), object_);
2106 } else {
2107 __ pop(object_);
2108 }
2109 // Reload the instance type.
2110 __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2111 __ lbz(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2112 call_helper.AfterCall(masm);
2113 // If index is still not a smi, it must be out of range.
2114 __ JumpIfNotSmi(index_, index_out_of_range_);
2115 // Otherwise, return to the fast path.
2116 __ b(&got_smi_index_);
2117
2118 // Call runtime. We get here when the receiver is a string and the
2119 // index is a number, but the code of getting the actual character
2120 // is too complex (e.g., when the string needs to be flattened).
2121 __ bind(&call_runtime_);
2122 call_helper.BeforeCall(masm);
2123 __ SmiTag(index_);
2124 __ Push(object_, index_);
2125 __ CallRuntime(Runtime::kStringCharCodeAtRT);
2126 __ Move(result_, r3);
2127 call_helper.AfterCall(masm);
2128 __ b(&exit_);
2129
2130 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
2131 }
2132
2133
2134 // -------------------------------------------------------------------------
2135 // StringCharFromCodeGenerator
2136
2137 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
2138 // Fast case of Heap::LookupSingleCharacterStringFromCode.
2139 DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
2140 __ LoadSmiLiteral(r0, Smi::FromInt(~String::kMaxOneByteCharCodeU));
2141 __ ori(r0, r0, Operand(kSmiTagMask));
2142 __ and_(r0, code_, r0, SetRC);
2143 __ bne(&slow_case_, cr0);
2144
2145 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
2146 // At this point code register contains smi tagged one-byte char code.
2147 __ mr(r0, code_);
2148 __ SmiToPtrArrayOffset(code_, code_);
2149 __ add(result_, result_, code_);
2150 __ mr(code_, r0);
2151 __ LoadP(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
2152 __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
2153 __ beq(&slow_case_);
2154 __ bind(&exit_);
2155 }
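// In effect, the fast path above is (a sketch of the cache lookup):
//   if (!isSmi(code) || code > String::kMaxOneByteCharCode) goto slow_case;
//   result = single_character_string_cache[code];
//   if (result == undefined) goto slow_case;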
2156
2157
2158 void StringCharFromCodeGenerator::GenerateSlow(
2159 MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
2160 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
2161
2162 __ bind(&slow_case_);
2163 call_helper.BeforeCall(masm);
2164 __ push(code_);
2165 __ CallRuntime(Runtime::kStringCharFromCode);
2166 __ Move(result_, r3);
2167 call_helper.AfterCall(masm);
2168 __ b(&exit_);
2169
2170 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
2171 }
2172
2173
2174 enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
2175
2176
2177 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, Register dest,
2178 Register src, Register count,
2179 Register scratch,
2180 String::Encoding encoding) {
2181 if (FLAG_debug_code) {
2182 // Check that destination is word aligned.
2183 __ andi(r0, dest, Operand(kPointerAlignmentMask));
2184 __ Check(eq, kDestinationOfCopyNotAligned, cr0);
2185 }
2186
2187 // Nothing to do for zero characters.
2188 Label done;
2189 if (encoding == String::TWO_BYTE_ENCODING) {
2190 // double the length
2191 __ add(count, count, count, LeaveOE, SetRC);
2192 __ beq(&done, cr0);
2193 } else {
2194 __ cmpi(count, Operand::Zero());
2195 __ beq(&done);
2196 }
2197
2198 // Copy count bytes from src to dst.
2199 Label byte_loop;
2200 __ mtctr(count);
2201 __ bind(&byte_loop);
2202 __ lbz(scratch, MemOperand(src));
2203 __ addi(src, src, Operand(1));
2204 __ stb(scratch, MemOperand(dest));
2205 __ addi(dest, dest, Operand(1));
2206 __ bdnz(&byte_loop);
2207
2208 __ bind(&done);
2209 }
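// A minimal C sketch of the loop above (illustrative): count is a character
// count on entry and is turned into a byte count for two-byte strings before
// the copy:
//   size_t bytes = (encoding == TWO_BYTE) ? count * 2 : count;
//   for (size_t i = 0; i < bytes; i++) dest[i] = src[i];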
2210
2211
2212 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
2213 Register left,
2214 Register right,
2215 Register scratch1,
2216 Register scratch2) {
2217 Register length = scratch1;
2218
2219 // Compare lengths.
2220 Label strings_not_equal, check_zero_length;
2221 __ LoadP(length, FieldMemOperand(left, String::kLengthOffset));
2222 __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
2223 __ cmp(length, scratch2);
2224 __ beq(&check_zero_length);
2225 __ bind(&strings_not_equal);
2226 __ LoadSmiLiteral(r3, Smi::FromInt(NOT_EQUAL));
2227 __ Ret();
2228
2229 // Check if the length is zero.
2230 Label compare_chars;
2231 __ bind(&check_zero_length);
2232 STATIC_ASSERT(kSmiTag == 0);
2233 __ cmpi(length, Operand::Zero());
2234 __ bne(&compare_chars);
2235 __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
2236 __ Ret();
2237
2238 // Compare characters.
2239 __ bind(&compare_chars);
2240 GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
2241 &strings_not_equal);
2242
2243 // Characters are equal.
2244 __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
2245 __ Ret();
2246 }
2247
2248
2249 void StringHelper::GenerateCompareFlatOneByteStrings(
2250 MacroAssembler* masm, Register left, Register right, Register scratch1,
2251 Register scratch2, Register scratch3) {
2252 Label result_not_equal, compare_lengths;
2253 // Find minimum length and length difference.
2254 __ LoadP(scratch1, FieldMemOperand(left, String::kLengthOffset));
2255 __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
2256 __ sub(scratch3, scratch1, scratch2, LeaveOE, SetRC);
2257 Register length_delta = scratch3;
2258 if (CpuFeatures::IsSupported(ISELECT)) {
2259 __ isel(gt, scratch1, scratch2, scratch1, cr0);
2260 } else {
2261 Label skip;
2262 __ ble(&skip, cr0);
2263 __ mr(scratch1, scratch2);
2264 __ bind(&skip);
2265 }
2266 Register min_length = scratch1;
2267 STATIC_ASSERT(kSmiTag == 0);
2268 __ cmpi(min_length, Operand::Zero());
2269 __ beq(&compare_lengths);
2270
2271 // Compare loop.
2272 GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
2273 &result_not_equal);
2274
2275 // Compare lengths - strings up to min-length are equal.
2276 __ bind(&compare_lengths);
2277 DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
2278 // Use length_delta as result if it's zero.
2279 __ mr(r3, length_delta);
2280 __ cmpi(r3, Operand::Zero());
2281 __ bind(&result_not_equal);
2282 // Conditionally update the result based either on length_delta or
2283 // the last comparison performed in the loop above.
2284 if (CpuFeatures::IsSupported(ISELECT)) {
2285 __ LoadSmiLiteral(r4, Smi::FromInt(GREATER));
2286 __ LoadSmiLiteral(r5, Smi::FromInt(LESS));
2287 __ isel(eq, r3, r0, r4);
2288 __ isel(lt, r3, r5, r3);
2289 __ Ret();
2290 } else {
2291 Label less_equal, equal;
2292 __ ble(&less_equal);
2293 __ LoadSmiLiteral(r3, Smi::FromInt(GREATER));
2294 __ Ret();
2295 __ bind(&less_equal);
2296 __ beq(&equal);
2297 __ LoadSmiLiteral(r3, Smi::FromInt(LESS));
2298 __ bind(&equal);
2299 __ Ret();
2300 }
2301 }
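// Net result of the two code paths above (a summary): with min_length common
// characters compared,
//   result = first_difference != 0 ? sign(first_difference)  // LESS/GREATER
//                                  : sign(left.length - right.length);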
2302
2303
2304 void StringHelper::GenerateOneByteCharsCompareLoop(
2305 MacroAssembler* masm, Register left, Register right, Register length,
2306 Register scratch1, Label* chars_not_equal) {
2307 // Change index to run from -length to -1 by adding length to string
2308 // start. This means that loop ends when index reaches zero, which
2309 // doesn't need an additional compare.
2310 __ SmiUntag(length);
2311 __ addi(scratch1, length,
2312 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
2313 __ add(left, left, scratch1);
2314 __ add(right, right, scratch1);
2315 __ subfic(length, length, Operand::Zero());
2316 Register index = length; // index = -length;
2317
2318 // Compare loop.
2319 Label loop;
2320 __ bind(&loop);
2321 __ lbzx(scratch1, MemOperand(left, index));
2322 __ lbzx(r0, MemOperand(right, index));
2323 __ cmp(scratch1, r0);
2324 __ bne(chars_not_equal);
2325 __ addi(index, index, Operand(1));
2326 __ cmpi(index, Operand::Zero());
2327 __ bne(&loop);
2328 }
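// Equivalent C sketch (illustrative): biasing both data pointers by length
// lets the index run from -length to 0, so reaching zero ends the loop
// without a separate bounds compare:
//   left += length; right += length;
//   for (ptrdiff_t i = -length; i != 0; i++)
//     if (left[i] != right[i]) goto chars_not_equal;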
2329
2330
2331 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
2332 // ----------- S t a t e -------------
2333 // -- r4 : left
2334 // -- r3 : right
2335 // -- lr : return address
2336 // -----------------------------------
2337
2338 // Load r5 with the allocation site. We stick an undefined dummy value here
2339 // and replace it with the real allocation site later when we instantiate this
2340 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
2341 __ Move(r5, isolate()->factory()->undefined_value());
2342
2343 // Make sure that we actually patched the allocation site.
2344 if (FLAG_debug_code) {
2345 __ TestIfSmi(r5, r0);
2346 __ Assert(ne, kExpectedAllocationSite, cr0);
2347 __ push(r5);
2348 __ LoadP(r5, FieldMemOperand(r5, HeapObject::kMapOffset));
2349 __ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex);
2350 __ cmp(r5, ip);
2351 __ pop(r5);
2352 __ Assert(eq, kExpectedAllocationSite);
2353 }
2354
2355 // Tail call into the stub that handles binary operations with allocation
2356 // sites.
2357 BinaryOpWithAllocationSiteStub stub(isolate(), state());
2358 __ TailCallStub(&stub);
2359 }
2360
2361
2362 void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
2363 DCHECK_EQ(CompareICState::BOOLEAN, state());
2364 Label miss;
2365
2366 __ CheckMap(r4, r5, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
2367 __ CheckMap(r3, r6, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
2368 if (!Token::IsEqualityOp(op())) {
2369 __ LoadP(r4, FieldMemOperand(r4, Oddball::kToNumberOffset));
2370 __ AssertSmi(r4);
2371 __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset));
2372 __ AssertSmi(r3);
2373 }
2374 __ sub(r3, r4, r3);
2375 __ Ret();
2376
2377 __ bind(&miss);
2378 GenerateMiss(masm);
2379 }
2380
2381
2382 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
2383 DCHECK(state() == CompareICState::SMI);
2384 Label miss;
2385 __ orx(r5, r4, r3);
2386 __ JumpIfNotSmi(r5, &miss);
2387
2388 if (GetCondition() == eq) {
2389 // For equality we do not care about the sign of the result.
2390 // __ sub(r3, r3, r4, SetCC);
2391 __ sub(r3, r3, r4);
2392 } else {
2393 // Untag before subtracting to avoid handling overflow.
2394 __ SmiUntag(r4);
2395 __ SmiUntag(r3);
2396 __ sub(r3, r4, r3);
2397 }
2398 __ Ret();
2399
2400 __ bind(&miss);
2401 GenerateMiss(masm);
2402 }
2403
2404
2405 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
2406 DCHECK(state() == CompareICState::NUMBER);
2407
2408 Label generic_stub;
2409 Label unordered, maybe_undefined1, maybe_undefined2;
2410 Label miss;
2411 Label equal, less_than;
2412
2413 if (left() == CompareICState::SMI) {
2414 __ JumpIfNotSmi(r4, &miss);
2415 }
2416 if (right() == CompareICState::SMI) {
2417 __ JumpIfNotSmi(r3, &miss);
2418 }
2419
2420 // Inlining the double comparison and falling back to the general compare
2421 // stub if NaN is involved.
2422 // Load left and right operand.
2423 Label done, left, left_smi, right_smi;
2424 __ JumpIfSmi(r3, &right_smi);
2425 __ CheckMap(r3, r5, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
2426 DONT_DO_SMI_CHECK);
2427 __ lfd(d1, FieldMemOperand(r3, HeapNumber::kValueOffset));
2428 __ b(&left);
2429 __ bind(&right_smi);
2430 __ SmiToDouble(d1, r3);
2431
2432 __ bind(&left);
2433 __ JumpIfSmi(r4, &left_smi);
2434 __ CheckMap(r4, r5, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
2435 DONT_DO_SMI_CHECK);
2436 __ lfd(d0, FieldMemOperand(r4, HeapNumber::kValueOffset));
2437 __ b(&done);
2438 __ bind(&left_smi);
2439 __ SmiToDouble(d0, r4);
2440
2441 __ bind(&done);
2442
2443 // Compare operands
2444 __ fcmpu(d0, d1);
2445
2446 // Don't base result on status bits when a NaN is involved.
2447 __ bunordered(&unordered);
2448
2449 // Return a result of -1, 0, or 1, based on status bits.
2450 if (CpuFeatures::IsSupported(ISELECT)) {
2451 DCHECK(EQUAL == 0);
2452 __ li(r4, Operand(GREATER));
2453 __ li(r5, Operand(LESS));
2454 __ isel(eq, r3, r0, r4);
2455 __ isel(lt, r3, r5, r3);
2456 __ Ret();
2457 } else {
2458 __ beq(&equal);
2459 __ blt(&less_than);
2460 // assume greater than
2461 __ li(r3, Operand(GREATER));
2462 __ Ret();
2463 __ bind(&equal);
2464 __ li(r3, Operand(EQUAL));
2465 __ Ret();
2466 __ bind(&less_than);
2467 __ li(r3, Operand(LESS));
2468 __ Ret();
2469 }
2470
2471 __ bind(&unordered);
2472 __ bind(&generic_stub);
2473 CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
2474 CompareICState::GENERIC, CompareICState::GENERIC);
2475 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2476
2477 __ bind(&maybe_undefined1);
2478 if (Token::IsOrderedRelationalCompareOp(op())) {
2479 __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
2480 __ bne(&miss);
2481 __ JumpIfSmi(r4, &unordered);
2482 __ CompareObjectType(r4, r5, r5, HEAP_NUMBER_TYPE);
2483 __ bne(&maybe_undefined2);
2484 __ b(&unordered);
2485 }
2486
2487 __ bind(&maybe_undefined2);
2488 if (Token::IsOrderedRelationalCompareOp(op())) {
2489 __ CompareRoot(r4, Heap::kUndefinedValueRootIndex);
2490 __ beq(&unordered);
2491 }
2492
2493 __ bind(&miss);
2494 GenerateMiss(masm);
2495 }
2496
2497
2498 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
2499 DCHECK(state() == CompareICState::INTERNALIZED_STRING);
2500 Label miss, not_equal;
2501
2502 // Registers containing left and right operands respectively.
2503 Register left = r4;
2504 Register right = r3;
2505 Register tmp1 = r5;
2506 Register tmp2 = r6;
2507
2508 // Check that both operands are heap objects.
2509 __ JumpIfEitherSmi(left, right, &miss);
2510
2511 // Check that both operands are symbols.
2512 __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2513 __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2514 __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2515 __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2516 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2517 __ orx(tmp1, tmp1, tmp2);
2518 __ andi(r0, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
2519 __ bne(&miss, cr0);
2520
2521 // Internalized strings are compared by identity.
2522 __ cmp(left, right);
2523 __ bne(&not_equal);
2524 // Make sure r3 is non-zero. At this point input operands are
2525 // guaranteed to be non-zero.
2526 DCHECK(right.is(r3));
2527 STATIC_ASSERT(EQUAL == 0);
2528 STATIC_ASSERT(kSmiTag == 0);
2529 __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
2530 __ bind(&not_equal);
2531 __ Ret();
2532
2533 __ bind(&miss);
2534 GenerateMiss(masm);
2535 }
2536
2537
2538 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
2539 DCHECK(state() == CompareICState::UNIQUE_NAME);
2540 DCHECK(GetCondition() == eq);
2541 Label miss;
2542
2543 // Registers containing left and right operands respectively.
2544 Register left = r4;
2545 Register right = r3;
2546 Register tmp1 = r5;
2547 Register tmp2 = r6;
2548
2549 // Check that both operands are heap objects.
2550 __ JumpIfEitherSmi(left, right, &miss);
2551
2552 // Check that both operands are unique names. This leaves the instance
2553 // types loaded in tmp1 and tmp2.
2554 __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2555 __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2556 __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2557 __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2558
2559 __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
2560 __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
2561
2562 // Unique names are compared by identity.
2563 __ cmp(left, right);
2564 __ bne(&miss);
2565 // Make sure r3 is non-zero. At this point input operands are
2566 // guaranteed to be non-zero.
2567 DCHECK(right.is(r3));
2568 STATIC_ASSERT(EQUAL == 0);
2569 STATIC_ASSERT(kSmiTag == 0);
2570 __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
2571 __ Ret();
2572
2573 __ bind(&miss);
2574 GenerateMiss(masm);
2575 }
2576
2577
2578 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
2579 DCHECK(state() == CompareICState::STRING);
2580 Label miss, not_identical, is_symbol;
2581
2582 bool equality = Token::IsEqualityOp(op());
2583
2584 // Registers containing left and right operands respectively.
2585 Register left = r4;
2586 Register right = r3;
2587 Register tmp1 = r5;
2588 Register tmp2 = r6;
2589 Register tmp3 = r7;
2590 Register tmp4 = r8;
2591
2592 // Check that both operands are heap objects.
2593 __ JumpIfEitherSmi(left, right, &miss);
2594
2595 // Check that both operands are strings. This leaves the instance
2596 // types loaded in tmp1 and tmp2.
2597 __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2598 __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2599 __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2600 __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2601 STATIC_ASSERT(kNotStringTag != 0);
2602 __ orx(tmp3, tmp1, tmp2);
2603 __ andi(r0, tmp3, Operand(kIsNotStringMask));
2604 __ bne(&miss, cr0);
2605
2606 // Fast check for identical strings.
2607 __ cmp(left, right);
2608 STATIC_ASSERT(EQUAL == 0);
2609 STATIC_ASSERT(kSmiTag == 0);
2610 __ bne(&not_identical);
2611 __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
2612 __ Ret();
2613 __ bind(&not_identical);
2614
2615 // Handle not identical strings.
2616
2617 // Check that both strings are internalized strings. If they are, we're done
2618 // because we already know they are not identical. We know they are both
2619 // strings.
2620 if (equality) {
2621 DCHECK(GetCondition() == eq);
2622 STATIC_ASSERT(kInternalizedTag == 0);
2623 __ orx(tmp3, tmp1, tmp2);
2624 __ andi(r0, tmp3, Operand(kIsNotInternalizedMask));
2625 // Make sure r3 is non-zero. At this point input operands are
2626 // guaranteed to be non-zero.
2627 DCHECK(right.is(r3));
2628 __ Ret(eq, cr0);
2629 }
2630
2631 // Check that both strings are sequential one-byte.
2632 Label runtime;
2633 __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
2634 &runtime);
2635
2636 // Compare flat one-byte strings. Returns when done.
2637 if (equality) {
2638 StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1,
2639 tmp2);
2640 } else {
2641 StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
2642 tmp2, tmp3);
2643 }
2644
2645 // Handle more complex cases in runtime.
2646 __ bind(&runtime);
2647 if (equality) {
2648 {
2649 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2650 __ Push(left, right);
2651 __ CallRuntime(Runtime::kStringEqual);
2652 }
2653 __ LoadRoot(r4, Heap::kTrueValueRootIndex);
2654 __ sub(r3, r3, r4);
2655 __ Ret();
2656 } else {
2657 __ Push(left, right);
2658 __ TailCallRuntime(Runtime::kStringCompare);
2659 }
2660
2661 __ bind(&miss);
2662 GenerateMiss(masm);
2663 }
2664
2665
2666 void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
2667 DCHECK_EQ(CompareICState::RECEIVER, state());
2668 Label miss;
2669 __ and_(r5, r4, r3);
2670 __ JumpIfSmi(r5, &miss);
2671
2672 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2673 __ CompareObjectType(r3, r5, r5, FIRST_JS_RECEIVER_TYPE);
2674 __ blt(&miss);
2675 __ CompareObjectType(r4, r5, r5, FIRST_JS_RECEIVER_TYPE);
2676 __ blt(&miss);
2677
2678 DCHECK(GetCondition() == eq);
2679 __ sub(r3, r3, r4);
2680 __ Ret();
2681
2682 __ bind(&miss);
2683 GenerateMiss(masm);
2684 }
2685
2686
2687 void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
2688 Label miss;
2689 Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
2690 __ and_(r5, r4, r3);
2691 __ JumpIfSmi(r5, &miss);
2692 __ GetWeakValue(r7, cell);
2693 __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
2694 __ LoadP(r6, FieldMemOperand(r4, HeapObject::kMapOffset));
2695 __ cmp(r5, r7);
2696 __ bne(&miss);
2697 __ cmp(r6, r7);
2698 __ bne(&miss);
2699
2700 if (Token::IsEqualityOp(op())) {
2701 __ sub(r3, r3, r4);
2702 __ Ret();
2703 } else {
2704 if (op() == Token::LT || op() == Token::LTE) {
2705 __ LoadSmiLiteral(r5, Smi::FromInt(GREATER));
2706 } else {
2707 __ LoadSmiLiteral(r5, Smi::FromInt(LESS));
2708 }
2709 __ Push(r4, r3, r5);
2710 __ TailCallRuntime(Runtime::kCompare);
2711 }
2712
2713 __ bind(&miss);
2714 GenerateMiss(masm);
2715 }
2716
2717
2718 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
2719 {
2720 // Call the runtime system in a fresh internal frame.
2721 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2722 __ Push(r4, r3);
2723 __ Push(r4, r3);
2724 __ LoadSmiLiteral(r0, Smi::FromInt(op()));
2725 __ push(r0);
2726 __ CallRuntime(Runtime::kCompareIC_Miss);
2727 // Compute the entry point of the rewritten stub.
2728 __ addi(r5, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
2729 // Restore registers.
2730 __ Pop(r4, r3);
2731 }
2732
2733 __ JumpToJSEntry(r5);
2734 }
2735
2736
2737 // This stub is paired with DirectCEntryStub::GenerateCall
2738 void DirectCEntryStub::Generate(MacroAssembler* masm) {
2739 // Place the return address on the stack, making the call
2740 // GC safe. The RegExp backend also relies on this.
2741 __ mflr(r0);
2742 __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
2743 __ Call(ip); // Call the C++ function.
2744 __ LoadP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
2745 __ mtlr(r0);
2746 __ blr();
2747 }
2748
2749
2750 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
2751 if (ABI_USES_FUNCTION_DESCRIPTORS) {
2752 // AIX/PPC64BE Linux use a function descriptor.
2753 __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
2754 __ LoadP(ip, MemOperand(target, 0)); // Instruction address
2755 } else {
2756 // ip needs to be set for DirectCEntryStub::Generate, and also
2757 // for ABI_CALL_VIA_IP.
2758 __ Move(ip, target);
2759 }
2760
2761 intptr_t code = reinterpret_cast<intptr_t>(GetCode().location());
2762 __ mov(r0, Operand(code, RelocInfo::CODE_TARGET));
2763 __ Call(r0); // Call the stub.
2764 }
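// On ABIs with function descriptors (AIX, big-endian PPC64 Linux), target
// points at a descriptor rather than at code. The two loads above assume the
// layout those ABIs define, roughly:
//   struct FunctionDescriptor {
//     void* entry;  // instruction address, loaded into ip
//     void* toc;    // TOC pointer, loaded into the ABI TOC register
//     void* env;    // environment pointer, unused here
//   };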
2765
2766
2767 void NameDictionaryLookupStub::GenerateNegativeLookup(
2768 MacroAssembler* masm, Label* miss, Label* done, Register receiver,
2769 Register properties, Handle<Name> name, Register scratch0) {
2770 DCHECK(name->IsUniqueName());
2771 // If the names of the slots in the range from 1 to kProbes - 1 for the hash
2772 // value are not equal to the name, and the kProbes-th slot is not used (its
2773 // name is the undefined value), the hash table is guaranteed not to contain
2774 // the property. This holds even if some slots represent deleted properties
2775 // (their names are the hole value).
2776 for (int i = 0; i < kInlinedProbes; i++) {
2777 // scratch0 points to properties hash.
2778 // Compute the masked index: (hash + i + i * i) & mask.
2779 Register index = scratch0;
2780 // Capacity is smi 2^n.
2781 __ LoadP(index, FieldMemOperand(properties, kCapacityOffset));
2782 __ subi(index, index, Operand(1));
2783 __ LoadSmiLiteral(
2784 ip, Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)));
2785 __ and_(index, index, ip);
2786
2787 // Scale the index by multiplying by the entry size.
2788 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
2789 __ ShiftLeftImm(ip, index, Operand(1));
2790 __ add(index, index, ip); // index *= 3.
2791
2792 Register entity_name = scratch0;
2793 // Having undefined at this place means the name is not contained.
2794 Register tmp = properties;
2795 __ SmiToPtrArrayOffset(ip, index);
2796 __ add(tmp, properties, ip);
2797 __ LoadP(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
2798
2799 DCHECK(!tmp.is(entity_name));
2800 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
2801 __ cmp(entity_name, tmp);
2802 __ beq(done);
2803
2804 // Load the hole ready for use below:
2805 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
2806
2807 // Stop if found the property.
2808 __ Cmpi(entity_name, Operand(Handle<Name>(name)), r0);
2809 __ beq(miss);
2810
2811 Label good;
2812 __ cmp(entity_name, tmp);
2813 __ beq(&good);
2814
2815 // Check if the entry name is not a unique name.
2816 __ LoadP(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
2817 __ lbz(entity_name, FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
2818 __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
2819 __ bind(&good);
2820
2821 // Restore the properties.
2822 __ LoadP(properties,
2823 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
2824 }
2825
2826 const int spill_mask = (r0.bit() | r9.bit() | r8.bit() | r7.bit() | r6.bit() |
2827 r5.bit() | r4.bit() | r3.bit());
2828
2829 __ mflr(r0);
2830 __ MultiPush(spill_mask);
2831
2832 __ LoadP(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
2833 __ mov(r4, Operand(Handle<Name>(name)));
2834 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
2835 __ CallStub(&stub);
2836 __ cmpi(r3, Operand::Zero());
2837
2838 __ MultiPop(spill_mask); // MultiPop does not touch condition flags
2839 __ mtlr(r0);
2840
2841 __ beq(done);
2842 __ bne(miss);
2843 }
2844
2845
2846 // Probe the name dictionary in the |elements| register. Jump to the
2847 // |done| label if a property with the given name is found. Jump to
2848 // the |miss| label otherwise.
2849 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
2850 void NameDictionaryLookupStub::GeneratePositiveLookup(
2851 MacroAssembler* masm, Label* miss, Label* done, Register elements,
2852 Register name, Register scratch1, Register scratch2) {
2853 DCHECK(!elements.is(scratch1));
2854 DCHECK(!elements.is(scratch2));
2855 DCHECK(!name.is(scratch1));
2856 DCHECK(!name.is(scratch2));
2857
2858 __ AssertName(name);
2859
2860 // Compute the capacity mask.
2861 __ LoadP(scratch1, FieldMemOperand(elements, kCapacityOffset));
2862 __ SmiUntag(scratch1); // convert smi to int
2863 __ subi(scratch1, scratch1, Operand(1));
2864
2865 // Generate an unrolled loop that performs a few probes before
2866 // giving up. Measurements done on Gmail indicate that 2 probes
2867 // cover ~93% of loads from dictionaries.
2868 for (int i = 0; i < kInlinedProbes; i++) {
2869 // Compute the masked index: (hash + i + i * i) & mask.
2870 __ lwz(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
2871 if (i > 0) {
2872 // Add the probe offset (i + i * i) left shifted to avoid right shifting
2873 // the hash in a separate instruction. The value hash + i + i * i is right
2874 // shifted in the following srwi instruction.
2875 DCHECK(NameDictionary::GetProbeOffset(i) <
2876 1 << (32 - Name::kHashFieldOffset));
2877 __ addi(scratch2, scratch2,
2878 Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
2879 }
2880 __ srwi(scratch2, scratch2, Operand(Name::kHashShift));
2881 __ and_(scratch2, scratch1, scratch2);
2882
2883 // Scale the index by multiplying by the entry size.
2884 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
2885 // scratch2 = scratch2 * 3.
2886 __ ShiftLeftImm(ip, scratch2, Operand(1));
2887 __ add(scratch2, scratch2, ip);
2888
2889 // Check if the key is identical to the name.
2890 __ ShiftLeftImm(ip, scratch2, Operand(kPointerSizeLog2));
2891 __ add(scratch2, elements, ip);
2892 __ LoadP(ip, FieldMemOperand(scratch2, kElementsStartOffset));
2893 __ cmp(name, ip);
2894 __ beq(done);
2895 }
2896
2897 const int spill_mask = (r0.bit() | r9.bit() | r8.bit() | r7.bit() | r6.bit() |
2898 r5.bit() | r4.bit() | r3.bit()) &
2899 ~(scratch1.bit() | scratch2.bit());
2900
2901 __ mflr(r0);
2902 __ MultiPush(spill_mask);
2903 if (name.is(r3)) {
2904 DCHECK(!elements.is(r4));
2905 __ mr(r4, name);
2906 __ mr(r3, elements);
2907 } else {
2908 __ mr(r3, elements);
2909 __ mr(r4, name);
2910 }
2911 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
2912 __ CallStub(&stub);
2913 __ cmpi(r3, Operand::Zero());
2914 __ mr(scratch2, r5);
2915 __ MultiPop(spill_mask);
2916 __ mtlr(r0);
2917
2918 __ bne(done);
2919 __ beq(miss);
2920 }
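// Both lookup helpers above follow the same probe sequence, sketched here
// (illustrative; probe_offset(i) grows quadratically as described by the
// masked-index comments, with probe_offset(0) == 0):
//   for (int i = 0; ; i++) {
//     index = (hash + probe_offset(i)) & mask;  // mask = capacity - 1
//     entry = &elements[index * kEntrySize];    // kEntrySize == 3
//     if (entry->key == name)      -> found;
//     if (entry->key == undefined) -> absent;
//   }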
2921
2922
2923 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
2924 // This stub overrides SometimesSetsUpAFrame() to return false. That means
2925 // we cannot call anything that could cause a GC from this stub.
2926 // Registers:
2927 // result: NameDictionary to probe
2928 // r4: key
2929 // dictionary: NameDictionary to probe.
2930 // index: will hold the index of the entry if the lookup is successful;
2931 //        might alias with result_.
2932 // Returns:
2933 // result_ is zero if lookup failed, non-zero otherwise.
2934
2935 Register result = r3;
2936 Register dictionary = r3;
2937 Register key = r4;
2938 Register index = r5;
2939 Register mask = r6;
2940 Register hash = r7;
2941 Register undefined = r8;
2942 Register entry_key = r9;
2943 Register scratch = r9;
2944
2945 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
2946
2947 __ LoadP(mask, FieldMemOperand(dictionary, kCapacityOffset));
2948 __ SmiUntag(mask);
2949 __ subi(mask, mask, Operand(1));
2950
2951 __ lwz(hash, FieldMemOperand(key, Name::kHashFieldOffset));
2952
2953 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
2954
2955 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
2956 // Compute the masked index: (hash + i + i * i) & mask.
2957 // Capacity is smi 2^n.
2958 if (i > 0) {
2959 // Add the probe offset (i + i * i) left shifted to avoid right shifting
2960 // the hash in a separate instruction. The value hash + i + i * i is right
2961 // shifted in the following srwi instruction.
2962 DCHECK(NameDictionary::GetProbeOffset(i) <
2963 1 << (32 - Name::kHashFieldOffset));
2964 __ addi(index, hash,
2965 Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
2966 } else {
2967 __ mr(index, hash);
2968 }
2969 __ srwi(r0, index, Operand(Name::kHashShift));
2970 __ and_(index, mask, r0);
2971
2972 // Scale the index by multiplying by the entry size.
2973 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
2974 __ ShiftLeftImm(scratch, index, Operand(1));
2975 __ add(index, index, scratch); // index *= 3.
2976
2977 __ ShiftLeftImm(scratch, index, Operand(kPointerSizeLog2));
2978 __ add(index, dictionary, scratch);
2979 __ LoadP(entry_key, FieldMemOperand(index, kElementsStartOffset));
2980
2981 // Having undefined at this place means the name is not contained.
2982 __ cmp(entry_key, undefined);
2983 __ beq(&not_in_dictionary);
2984
2985 // Stop if found the property.
2986 __ cmp(entry_key, key);
2987 __ beq(&in_dictionary);
2988
2989 if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
2990 // Check if the entry name is not a unique name.
2991 __ LoadP(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
2992 __ lbz(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
2993 __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
2994 }
2995 }
2996
2997 __ bind(&maybe_in_dictionary);
2998 // If we are doing negative lookup then probing failure should be
2999 // treated as a lookup success. For positive lookup probing failure
3000 // should be treated as lookup failure.
3001 if (mode() == POSITIVE_LOOKUP) {
3002 __ li(result, Operand::Zero());
3003 __ Ret();
3004 }
3005
3006 __ bind(&in_dictionary);
3007 __ li(result, Operand(1));
3008 __ Ret();
3009
3010 __ bind(&not_in_dictionary);
3011 __ li(result, Operand::Zero());
3012 __ Ret();
3013 }
3014
3015
3016 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
3017 Isolate* isolate) {
3018 StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
3019 stub1.GetCode();
3020 // Hydrogen code stubs need stub2 at snapshot time.
3021 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
3022 stub2.GetCode();
3023 }
3024
3025
3026 // Takes the input in 3 registers: address_, value_, and object_. A pointer to
3027 // the value has just been written into the object, now this stub makes sure
3028 // we keep the GC informed. The word in the object where the value has been
3029 // written is in the address register.
3030 void RecordWriteStub::Generate(MacroAssembler* masm) {
3031 Label skip_to_incremental_noncompacting;
3032 Label skip_to_incremental_compacting;
3033
3034 // The first two branch instructions are generated with labels so as to
3035 // get the offset fixed up correctly by the bind(Label*) call. We patch
3036 // it back and forth between branch condition True and False
3037 // when we start and stop incremental heap marking.
3038 // See RecordWriteStub::Patch for details.
3039
3040 // Clear the bit, branch on True for NOP action initially
3041 __ crclr(Assembler::encode_crbit(cr2, CR_LT));
3042 __ blt(&skip_to_incremental_noncompacting, cr2);
3043 __ blt(&skip_to_incremental_compacting, cr2);
3044
3045 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
3046 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
3047 MacroAssembler::kReturnAtEnd);
3048 }
3049 __ Ret();
3050
3051 __ bind(&skip_to_incremental_noncompacting);
3052 GenerateIncremental(masm, INCREMENTAL);
3053
3054 __ bind(&skip_to_incremental_compacting);
3055 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
3056
3057 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
3058 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
3059 // patching not required on PPC as the initial path is effectively NOP
3060 }
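
// The stub above starts life in STORE_BUFFER_ONLY mode: the crclr makes both
// blt instructions fall through, so neither incremental path runs. When
// incremental marking starts, RecordWriteStub::Patch flips a branch condition
// so that execution reaches GenerateIncremental instead. Roughly (a sketch of
// the mode transitions, not literal code):
//
//   STORE_BUFFER_ONLY      -> neither branch taken (initial state)
//   INCREMENTAL            -> first branch taken
//   INCREMENTAL_COMPACTION -> second branch taken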


void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
  regs_.Save(masm);

  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
    Label dont_need_remembered_set;

    __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
                           regs_.scratch0(), &dont_need_remembered_set);

    __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
                        &dont_need_remembered_set);

    // First notify the incremental marker if necessary, then update the
    // remembered set.
    CheckNeedsToInformIncrementalMarker(
        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
    InformIncrementalMarker(masm);
    regs_.Restore(masm);
    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);

    __ bind(&dont_need_remembered_set);
  }

  CheckNeedsToInformIncrementalMarker(
      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
  InformIncrementalMarker(masm);
  regs_.Restore(masm);
  __ Ret();
}


void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
  int argument_count = 3;
  __ PrepareCallCFunction(argument_count, regs_.scratch0());
  Register address =
      r3.is(regs_.address()) ? regs_.scratch0() : regs_.address();
  DCHECK(!address.is(regs_.object()));
  DCHECK(!address.is(r3));
  __ mr(address, regs_.address());
  __ mr(r3, regs_.object());
  __ mr(r4, address);
  __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));

  AllowExternalCallThatCantCauseGC scope(masm);
  __ CallCFunction(
      ExternalReference::incremental_marking_record_write_function(isolate()),
      argument_count);
  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}
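
// The C call above passes (object, slot address, isolate) in r3..r5, i.e. it
// assumes a callee shaped roughly like the following (a sketch; the exact
// declaration lives with IncrementalMarking in the heap code, and the names
// here are illustrative):
//
//   void RecordWriteFromCode(HeapObject* object, Object** slot,
//                            Isolate* isolate);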


void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
    MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
    Mode mode) {
  Label on_black;
  Label need_incremental;
  Label need_incremental_pop_scratch;

  // Let's look at the color of the object: If it is not black we don't have
  // to inform the incremental marker.
  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&on_black);

  // Get the value from the slot.
  __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));

  if (mode == INCREMENTAL_COMPACTION) {
    Label ensure_not_white;

    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kEvacuationCandidateMask, eq,
                     &ensure_not_white);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kSkipEvacuationSlotsRecordingMask, eq,
                     &need_incremental);

    __ bind(&ensure_not_white);
  }

  // We need extra registers for this, so we push the object and the address
  // register temporarily.
  __ Push(regs_.object(), regs_.address());
  __ JumpIfWhite(regs_.scratch0(),  // The value.
                 regs_.scratch1(),  // Scratch.
                 regs_.object(),    // Scratch.
                 regs_.address(),   // Scratch.
                 &need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  __ bind(&need_incremental);

  // Fall through when we need to inform the incremental marker.
}


void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
  CEntryStub ces(isolate(), 1, kSaveFPRegs);
  __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
  int parameter_count_offset =
      StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
  __ LoadP(r4, MemOperand(fp, parameter_count_offset));
  if (function_mode() == JS_FUNCTION_STUB_MODE) {
    __ addi(r4, r4, Operand(1));
  }
  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
  __ slwi(r4, r4, Operand(kPointerSizeLog2));
  __ add(sp, sp, r4);
  __ Ret();
}

void CallICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(r5);
  CallICStub stub(isolate(), state());
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}


static void HandleArrayCases(MacroAssembler* masm, Register feedback,
                             Register receiver_map, Register scratch1,
                             Register scratch2, bool is_polymorphic,
                             Label* miss) {
  // feedback initially contains the feedback array
  Label next_loop, prepare_next;
  Label start_polymorphic;

  Register cached_map = scratch1;

  __ LoadP(cached_map,
           FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
  __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ cmp(receiver_map, cached_map);
  __ bne(&start_polymorphic);
  // found, now call handler.
  Register handler = feedback;
  __ LoadP(handler,
           FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
  __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(ip);


  Register length = scratch2;
  __ bind(&start_polymorphic);
  __ LoadP(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
  if (!is_polymorphic) {
    // If the IC could be monomorphic we have to make sure we don't go past
    // the end of the feedback array.
    __ CmpSmiLiteral(length, Smi::FromInt(2), r0);
    __ beq(miss);
  }

  Register too_far = length;
  Register pointer_reg = feedback;

  //  +-----+------+------+-----+-----+ ... ----+
  //  | map | len  | wm0  | h0  | wm1 |   hN    |
  //  +-----+------+------+-----+-----+ ... ----+
  //     0     1      2                  len-1
  //                  ^                    ^
  //                  |                    |
  //              pointer_reg          too_far
  //              aka feedback         scratch2
  // also need receiver_map
  // use cached_map (scratch1) to look in the weak map values.
  __ SmiToPtrArrayOffset(r0, length);
  __ add(too_far, feedback, r0);
  __ addi(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ addi(pointer_reg, feedback,
          Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));

  __ bind(&next_loop);
  __ LoadP(cached_map, MemOperand(pointer_reg));
  __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ cmp(receiver_map, cached_map);
  __ bne(&prepare_next);
  __ LoadP(handler, MemOperand(pointer_reg, kPointerSize));
  __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(ip);

  __ bind(&prepare_next);
  __ addi(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
  __ cmp(pointer_reg, too_far);
  __ blt(&next_loop);

  // We exhausted our array of map handler pairs.
  __ b(miss);
}
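
// In C++ terms the loop above walks (weak map, handler) pairs in the feedback
// FixedArray, i.e. a sketch of the same scan:
//
//   for (int i = 2; i < length; i += 2) {
//     Map* cached = WeakCell::cast(feedback->get(i))->value();
//     if (cached == receiver_map) return jump_to(feedback->get(i + 1));
//   }
//   miss();
//
// Indices 0 and 1 hold the first pair, checked before &start_polymorphic.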


static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
                                  Register receiver_map, Register feedback,
                                  Register vector, Register slot,
                                  Register scratch, Label* compare_map,
                                  Label* load_smi_map, Label* try_array) {
  __ JumpIfSmi(receiver, load_smi_map);
  __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ bind(compare_map);
  Register cached_map = scratch;
  // Move the weak map into the weak_cell register.
  __ LoadP(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
  __ cmp(cached_map, receiver_map);
  __ bne(try_array);
  Register handler = feedback;
  __ SmiToPtrArrayOffset(r0, slot);
  __ add(handler, vector, r0);
  __ LoadP(handler,
           FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
  __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(ip);
}
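
// The monomorphic fast path above, in a nutshell (sketch): the feedback slot
// holds a WeakCell whose value is the expected map; on a match the handler
// stored in the following vector slot is invoked:
//
//   if (WeakCell::cast(feedback)->value() == receiver->map())
//     jump_to(vector->get(slot_index + 1));  // handler code object
//   else
//     goto try_array;  // maybe polymorphic; handled by the caller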

void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
  KeyedStoreICStub stub(isolate(), state());
  stub.GenerateForTrampoline(masm);
}

void KeyedStoreICStub::Generate(MacroAssembler* masm) {
  GenerateImpl(masm, false);
}

void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}


static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
                                       Register receiver_map, Register scratch1,
                                       Register scratch2, Label* miss) {
  // feedback initially contains the feedback array
  Label next_loop, prepare_next;
  Label start_polymorphic;
  Label transition_call;

  Register cached_map = scratch1;
  Register too_far = scratch2;
  Register pointer_reg = feedback;
  __ LoadP(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));

  //  +-----+------+------+-----+-----+-----+ ... ----+
  //  | map | len  | wm0  | wt0 | h0  | wm1 |   hN    |
  //  +-----+------+------+-----+-----+-----+ ... ----+
  //     0     1      2                        len-1
  //                  ^                          ^
  //                  |                          |
  //              pointer_reg                too_far
  //              aka feedback               scratch2
  // also need receiver_map
  // use cached_map (scratch1) to look in the weak map values.
  __ SmiToPtrArrayOffset(r0, too_far);
  __ add(too_far, feedback, r0);
  __ addi(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ addi(pointer_reg, feedback,
          Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));

  __ bind(&next_loop);
  __ LoadP(cached_map, MemOperand(pointer_reg));
  __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ cmp(receiver_map, cached_map);
  __ bne(&prepare_next);
  // Is it a transitioning store?
  __ LoadP(too_far, MemOperand(pointer_reg, kPointerSize));
  __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
  __ bne(&transition_call);
  __ LoadP(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
  __ addi(ip, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(ip);

  __ bind(&transition_call);
  __ LoadP(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
  __ JumpIfSmi(too_far, miss);

  __ LoadP(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));

  // Load the map into the correct register.
  DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
  __ mr(feedback, too_far);

  __ addi(ip, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(ip);

  __ bind(&prepare_next);
  __ addi(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
  __ cmpl(pointer_reg, too_far);
  __ blt(&next_loop);

  // We exhausted our array of map handler pairs.
  __ b(miss);
}
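
// Unlike HandleArrayCases, store feedback is scanned here in triples
// (weak map, weak transition map or undefined, handler). A sketch:
//
//   for (each triple: map, transition, handler) {
//     if (map != receiver_map) continue;
//     if (transition == undefined) jump_to(handler);  // plain store
//     map_register = transition_map;                  // transitioning store
//     jump_to(handler);
//   }
//   miss();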

void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // r4
  Register key = StoreWithVectorDescriptor::NameRegister();           // r5
  Register vector = StoreWithVectorDescriptor::VectorRegister();      // r6
  Register slot = StoreWithVectorDescriptor::SlotRegister();          // r7
  DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r3));          // r3
  Register feedback = r8;
  Register receiver_map = r9;
  Register scratch1 = r10;

  __ SmiToPtrArrayOffset(r0, slot);
  __ add(feedback, vector, r0);
  __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));

  // Try to quickly handle the monomorphic case without knowing for sure
  // if we have a weak cell in feedback. We do know it's safe to look
  // at WeakCell::kValueOffset.
  Label try_array, load_smi_map, compare_map;
  Label not_array, miss;
  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
                        scratch1, &compare_map, &load_smi_map, &try_array);

  __ bind(&try_array);
  // Is it a fixed array?
  __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
  __ bne(&not_array);

  // We have a polymorphic element handler.
  Label polymorphic, try_poly_name;
  __ bind(&polymorphic);

  Register scratch2 = r11;

  HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
                             &miss);

  __ bind(&not_array);
  // Is it generic?
  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
  __ bne(&try_poly_name);
  Handle<Code> megamorphic_stub =
      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);

  __ bind(&try_poly_name);
  // We might have a name in feedback, and a fixed array in the next slot.
  __ cmp(key, feedback);
  __ bne(&miss);
  // If the name comparison succeeded, we know we have a fixed array with
  // at least one map/handler pair.
  __ SmiToPtrArrayOffset(r0, slot);
  __ add(feedback, vector, r0);
  __ LoadP(feedback,
           FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
                   &miss);

  __ bind(&miss);
  KeyedStoreIC::GenerateMiss(masm);

  __ bind(&load_smi_map);
  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
  __ b(&compare_map);
}


void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
  if (masm->isolate()->function_entry_hook() != NULL) {
    PredictableCodeSizeScope predictable(masm,
#if V8_TARGET_ARCH_PPC64
                                         14 * Assembler::kInstrSize);
#else
                                         11 * Assembler::kInstrSize);
#endif
    ProfileEntryHookStub stub(masm->isolate());
    __ mflr(r0);
    __ Push(r0, ip);
    __ CallStub(&stub);
    __ Pop(r0, ip);
    __ mtlr(r0);
  }
}


void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
  // The entry hook is a "push lr, ip" instruction, followed by a call.
  const int32_t kReturnAddressDistanceFromFunctionStart =
      Assembler::kCallTargetAddressOffset + 3 * Assembler::kInstrSize;

  // This should contain all kJSCallerSaved registers.
  const RegList kSavedRegs = kJSCallerSaved |  // Caller saved registers.
                             r15.bit();        // Saved stack pointer.

  // We also save lr, so the count here is one higher than the mask indicates.
  const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;

  // Save all caller-save registers as this may be called from anywhere.
  __ mflr(ip);
  __ MultiPush(kSavedRegs | ip.bit());

  // Compute the function's address for the first argument.
  __ subi(r3, ip, Operand(kReturnAddressDistanceFromFunctionStart));

  // The caller's return address is two slots above the saved temporaries.
  // Grab that for the second argument to the hook.
  __ addi(r4, sp, Operand((kNumSavedRegs + 1) * kPointerSize));

  // Align the stack if necessary.
  int frame_alignment = masm->ActivationFrameAlignment();
  if (frame_alignment > kPointerSize) {
    __ mr(r15, sp);
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    __ ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
  }

#if !defined(USE_SIMULATOR)
  uintptr_t entry_hook =
      reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
#else
  // Under the simulator we need to indirect the entry hook through a
  // trampoline function at a known address.
  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
  ExternalReference entry_hook = ExternalReference(
      &dispatcher, ExternalReference::BUILTIN_CALL, isolate());

  // It additionally takes an isolate as a third parameter.
  __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
#endif

  __ mov(ip, Operand(entry_hook));

  if (ABI_USES_FUNCTION_DESCRIPTORS) {
    __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize));
    __ LoadP(ip, MemOperand(ip, 0));
  }
  // ip set above, so nothing more to do for ABI_CALL_VIA_IP.

  // PPC LINUX ABI:
  __ li(r0, Operand::Zero());
  __ StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));

  __ Call(ip);

  __ addi(sp, sp, Operand(kNumRequiredStackFrameSlots * kPointerSize));

  // Restore the stack pointer if needed.
  if (frame_alignment > kPointerSize) {
    __ mr(sp, r15);
  }

  // Also pop lr to get Ret(0).
  __ MultiPop(kSavedRegs | ip.bit());
  __ mtlr(ip);
  __ Ret();
}
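
// The hook invoked above has the public FunctionEntryHook shape from
// include/v8.h, receiving the code entry point and the address of the return
// address slot:
//
//   typedef void (*FunctionEntryHook)(uintptr_t function,
//                                     uintptr_t return_addr_location);
//
// Under the simulator a third isolate argument is added for the trampoline.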


template <class T>
static void CreateArrayDispatch(MacroAssembler* masm,
                                AllocationSiteOverrideMode mode) {
  if (mode == DISABLE_ALLOCATION_SITES) {
    T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    int last_index =
        GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      __ Cmpi(r6, Operand(kind), r0);
      T stub(masm->isolate(), kind);
      __ TailCallStub(&stub, eq);
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}


static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
                                           AllocationSiteOverrideMode mode) {
  // r5 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
  // r6 - kind (if mode != DISABLE_ALLOCATION_SITES)
  // r3 - number of arguments
  // r4 - constructor?
  // sp[0] - last argument
  Label normal_sequence;
  if (mode == DONT_OVERRIDE) {
    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
    STATIC_ASSERT(FAST_ELEMENTS == 2);
    STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
    STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
    STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);

    // is the low bit set? If so, we are holey and that is good.
    __ andi(r0, r6, Operand(1));
    __ bne(&normal_sequence, cr0);
  }

  // look at the first argument
  __ LoadP(r8, MemOperand(sp, 0));
  __ cmpi(r8, Operand::Zero());
  __ beq(&normal_sequence);

  if (mode == DISABLE_ALLOCATION_SITES) {
    ElementsKind initial = GetInitialFastElementsKind();
    ElementsKind holey_initial = GetHoleyElementsKind(initial);

    ArraySingleArgumentConstructorStub stub_holey(
        masm->isolate(), holey_initial, DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub_holey);

    __ bind(&normal_sequence);
    ArraySingleArgumentConstructorStub stub(masm->isolate(), initial,
                                            DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    // We are going to create a holey array, but our kind is non-holey.
    // Fix kind and retry (only if we have an allocation site in the slot).
    __ addi(r6, r6, Operand(1));

    if (FLAG_debug_code) {
      __ LoadP(r8, FieldMemOperand(r5, 0));
      __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
      __ Assert(eq, kExpectedAllocationSite);
    }

    // Save the resulting elements kind in type info. We can't just store r6
    // in the AllocationSite::transition_info field because elements kind is
    // restricted to a portion of the field...upper bits need to be left alone.
    STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
    __ LoadP(r7, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
    __ AddSmiLiteral(r7, r7, Smi::FromInt(kFastElementsKindPackedToHoley), r0);
    __ StoreP(r7, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset),
              r0);

    __ bind(&normal_sequence);
    int last_index =
        GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      __ mov(r0, Operand(kind));
      __ cmp(r6, r0);
      ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
      __ TailCallStub(&stub, eq);
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}
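
// The kind fix-up above relies on the ElementsKind numbering asserted near
// the top of this function: each holey kind is its packed kind plus one.
// In effect (a sketch):
//
//   kind = kind + 1;  // e.g. FAST_SMI_ELEMENTS -> FAST_HOLEY_SMI_ELEMENTS
//   site->set_transition_info(site->transition_info() +
//                             Smi::FromInt(kFastElementsKindPackedToHoley));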


template <class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
  int to_index =
      GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
  for (int i = 0; i <= to_index; ++i) {
    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
    T stub(isolate, kind);
    stub.GetCode();
    if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
      T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
      stub1.GetCode();
    }
  }
}


void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
      isolate);
  ArrayNArgumentsConstructorStub stub(isolate);
  stub.GetCode();
  ElementsKind kinds[2] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS};
  for (int i = 0; i < 2; i++) {
    // For internal arrays we only need a few things
    InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
    stubh1.GetCode();
    InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
    stubh2.GetCode();
  }
}


void ArrayConstructorStub::GenerateDispatchToArrayStub(
    MacroAssembler* masm, AllocationSiteOverrideMode mode) {
  Label not_zero_case, not_one_case;
  __ cmpi(r3, Operand::Zero());
  __ bne(&not_zero_case);
  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);

  __ bind(&not_zero_case);
  __ cmpi(r3, Operand(1));
  __ bgt(&not_one_case);
  CreateArrayDispatchOneArgument(masm, mode);

  __ bind(&not_one_case);
  ArrayNArgumentsConstructorStub stub(masm->isolate());
  __ TailCallStub(&stub);
}
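
// The dispatch above selects a stub by argument count; in C++ terms (sketch):
//
//   if (argc == 0)      tail_call(ArrayNoArgumentConstructorStub);
//   else if (argc == 1) tail_call(CreateArrayDispatchOneArgument);
//   else                tail_call(ArrayNArgumentsConstructorStub);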


void ArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r3 : argc (only if argument_count() == ANY)
  //  -- r4 : constructor
  //  -- r5 : AllocationSite or undefined
  //  -- r6 : new target
  //  -- sp[0] : return address
  //  -- sp[4] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ LoadP(r7, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
    // Will both indicate a NULL and a Smi.
    __ TestIfSmi(r7, r0);
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
    __ CompareObjectType(r7, r7, r8, MAP_TYPE);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);

    // We should either have undefined in r5 or a valid AllocationSite.
    __ AssertUndefinedOrAllocationSite(r5, r7);
  }

  // Enter the context of the Array function.
  __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));

  Label subclassing;
  __ cmp(r6, r4);
  __ bne(&subclassing);

  Label no_info;
  // Get the elements kind and case on that.
  __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
  __ beq(&no_info);

  __ LoadP(r6, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
  __ SmiUntag(r6);
  STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
  __ And(r6, r6, Operand(AllocationSite::ElementsKindBits::kMask));
  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);

  __ bind(&no_info);
  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);

  __ bind(&subclassing);
  __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
  __ StorePX(r4, MemOperand(sp, r0));
  __ addi(r3, r3, Operand(3));
  __ Push(r6, r5);
  __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}


void InternalArrayConstructorStub::GenerateCase(MacroAssembler* masm,
                                                ElementsKind kind) {
  __ cmpli(r3, Operand(1));

  InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
  __ TailCallStub(&stub0, lt);

  ArrayNArgumentsConstructorStub stubN(isolate());
  __ TailCallStub(&stubN, gt);

  if (IsFastPackedElementsKind(kind)) {
    // We might need to create a holey array;
    // look at the first argument.
    __ LoadP(r6, MemOperand(sp, 0));
    __ cmpi(r6, Operand::Zero());

    InternalArraySingleArgumentConstructorStub stub1_holey(
        isolate(), GetHoleyElementsKind(kind));
    __ TailCallStub(&stub1_holey, ne);
  }

  InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
  __ TailCallStub(&stub1);
}


void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r3 : argc
  //  -- r4 : constructor
  //  -- sp[0] : return address
  //  -- sp[4] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
    // Will both indicate a NULL and a Smi.
    __ TestIfSmi(r6, r0);
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
    __ CompareObjectType(r6, r6, r7, MAP_TYPE);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
  }

  // Figure out the right elements kind.
  __ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
  // Load the map's "bit field 2" into |result|.
  __ lbz(r6, FieldMemOperand(r6, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ DecodeField<Map::ElementsKindBits>(r6);

  if (FLAG_debug_code) {
    Label done;
    __ cmpi(r6, Operand(FAST_ELEMENTS));
    __ beq(&done);
    __ cmpi(r6, Operand(FAST_HOLEY_ELEMENTS));
    __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
    __ bind(&done);
  }

  Label fast_elements_case;
  __ cmpi(r6, Operand(FAST_ELEMENTS));
  __ beq(&fast_elements_case);
  GenerateCase(masm, FAST_HOLEY_ELEMENTS);

  __ bind(&fast_elements_case);
  GenerateCase(masm, FAST_ELEMENTS);
}

void FastNewObjectStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r4 : target
  //  -- r6 : new target
  //  -- cp : context
  //  -- lr : return address
  // -----------------------------------
  __ AssertFunction(r4);
  __ AssertReceiver(r6);

  // Verify that the new target is a JSFunction.
  Label new_object;
  __ CompareObjectType(r6, r5, r5, JS_FUNCTION_TYPE);
  __ bne(&new_object);

  // Load the initial map and verify that it's in fact a map.
  __ LoadP(r5, FieldMemOperand(r6, JSFunction::kPrototypeOrInitialMapOffset));
  __ JumpIfSmi(r5, &new_object);
  __ CompareObjectType(r5, r3, r3, MAP_TYPE);
  __ bne(&new_object);

  // Fall back to runtime if the target differs from the new target's
  // initial map constructor.
  __ LoadP(r3, FieldMemOperand(r5, Map::kConstructorOrBackPointerOffset));
  __ cmp(r3, r4);
  __ bne(&new_object);

  // Allocate the JSObject on the heap.
  Label allocate, done_allocate;
  __ lbz(r7, FieldMemOperand(r5, Map::kInstanceSizeOffset));
  __ Allocate(r7, r3, r8, r9, &allocate, SIZE_IN_WORDS);
  __ bind(&done_allocate);

  // Initialize the JSObject fields.
  __ StoreP(r5, FieldMemOperand(r3, JSObject::kMapOffset), r0);
  __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
  __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
  __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
  __ addi(r4, r3, Operand(JSObject::kHeaderSize - kHeapObjectTag));

  // ----------- S t a t e -------------
  //  -- r3 : result (tagged)
  //  -- r4 : result fields (untagged)
  //  -- r8 : result end (untagged)
  //  -- r5 : initial map
  //  -- cp : context
  //  -- lr : return address
  // -----------------------------------

  // Perform in-object slack tracking if requested.
  Label slack_tracking;
  STATIC_ASSERT(Map::kNoSlackTracking == 0);
  __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
  __ lwz(r6, FieldMemOperand(r5, Map::kBitField3Offset));
  __ DecodeField<Map::ConstructionCounter>(r10, r6, SetRC);
  __ bne(&slack_tracking, cr0);
  {
    // Initialize all in-object fields with undefined.
    __ InitializeFieldsWithFiller(r4, r8, r9);
    __ Ret();
  }
  __ bind(&slack_tracking);
  {
    // Decrease generous allocation count.
    STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
    __ Add(r6, r6, -(1 << Map::ConstructionCounter::kShift), r0);
    __ stw(r6, FieldMemOperand(r5, Map::kBitField3Offset));

    // Initialize the in-object fields with undefined.
    __ lbz(r7, FieldMemOperand(r5, Map::kUnusedPropertyFieldsOffset));
    __ ShiftLeftImm(r7, r7, Operand(kPointerSizeLog2));
    __ sub(r7, r8, r7);
    __ InitializeFieldsWithFiller(r4, r7, r9);

    // Initialize the remaining (reserved) fields with one pointer filler map.
    __ LoadRoot(r9, Heap::kOnePointerFillerMapRootIndex);
    __ InitializeFieldsWithFiller(r4, r8, r9);

    // Check if we can finalize the instance size.
    __ cmpi(r10, Operand(Map::kSlackTrackingCounterEnd));
    __ Ret(ne);

    // Finalize the instance size.
    {
      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
      __ Push(r3, r5);
      __ CallRuntime(Runtime::kFinalizeInstanceSize);
      __ Pop(r3);
    }
    __ Ret();
  }

  // Fall back to %AllocateInNewSpace.
  __ bind(&allocate);
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    STATIC_ASSERT(kSmiTag == 0);
    __ ShiftLeftImm(r7, r7,
                    Operand(kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize));
    __ Push(r5, r7);
    __ CallRuntime(Runtime::kAllocateInNewSpace);
    __ Pop(r5);
  }
  __ lbz(r8, FieldMemOperand(r5, Map::kInstanceSizeOffset));
  __ ShiftLeftImm(r8, r8, Operand(kPointerSizeLog2));
  __ add(r8, r3, r8);
  __ subi(r8, r8, Operand(kHeapObjectTag));
  __ b(&done_allocate);

  // Fall back to %NewObject.
  __ bind(&new_object);
  __ Push(r4, r6);
  __ TailCallRuntime(Runtime::kNewObject);
}
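
// Slack tracking sketch: Map::kBitField3 carries a small construction
// counter. Each allocation decrements it and fills the currently unused
// tail of the object with the one-pointer filler map, roughly:
//
//   map->set_construction_counter(map->construction_counter() - 1);
//   // fill unused fields with the one-pointer filler map
//   if (counter_before_decrement == Map::kSlackTrackingCounterEnd)
//     Runtime::kFinalizeInstanceSize(map);  // shrink to the observed size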

void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r4 : function
  //  -- cp : context
  //  -- fp : frame pointer
  //  -- lr : return address
  // -----------------------------------
  __ AssertFunction(r4);

  // Make r5 point to the JavaScript frame.
  __ mr(r5, fp);
  if (skip_stub_frame()) {
    // For Ignition we need to skip the handler/stub frame to reach the
    // JavaScript frame for the function.
    __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
  }
  if (FLAG_debug_code) {
    Label ok;
    __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kFunctionOffset));
    __ cmp(ip, r4);
    __ beq(&ok);
    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
    __ bind(&ok);
  }

  // Check if we have rest parameters (only possible if we have an
  // arguments adaptor frame below the function frame).
  Label no_rest_parameters;
  __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
  __ LoadP(ip, MemOperand(r5, CommonFrameConstants::kContextOrFrameTypeOffset));
  __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
  __ bne(&no_rest_parameters);

  // Check if the arguments adaptor frame contains more arguments than
  // specified by the function's internal formal parameter count.
  Label rest_parameters;
  __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ LoadP(r6, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
  __ LoadWordArith(
      r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
#if V8_TARGET_ARCH_PPC64
  __ SmiTag(r6);
#endif
  __ sub(r3, r3, r6, LeaveOE, SetRC);
  __ bgt(&rest_parameters, cr0);

  // Return an empty rest parameter array.
  __ bind(&no_rest_parameters);
  {
    // ----------- S t a t e -------------
    //  -- cp : context
    //  -- lr : return address
    // -----------------------------------

    // Allocate an empty rest parameter array.
    Label allocate, done_allocate;
    __ Allocate(JSArray::kSize, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
    __ bind(&done_allocate);

    // Setup the rest parameter array in r3.
    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r4);
    __ StoreP(r4, FieldMemOperand(r3, JSArray::kMapOffset), r0);
    __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
    __ StoreP(r4, FieldMemOperand(r3, JSArray::kPropertiesOffset), r0);
    __ StoreP(r4, FieldMemOperand(r3, JSArray::kElementsOffset), r0);
    __ li(r4, Operand::Zero());
    __ StoreP(r4, FieldMemOperand(r3, JSArray::kLengthOffset), r0);
    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
    __ Ret();

    // Fall back to %AllocateInNewSpace.
    __ bind(&allocate);
    {
      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
      __ Push(Smi::FromInt(JSArray::kSize));
      __ CallRuntime(Runtime::kAllocateInNewSpace);
    }
    __ b(&done_allocate);
  }

  __ bind(&rest_parameters);
  {
    // Compute the pointer to the first rest parameter (skipping the
    // receiver).
    __ SmiToPtrArrayOffset(r9, r3);
    __ add(r5, r5, r9);
    __ addi(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));

    // ----------- S t a t e -------------
    //  -- cp : context
    //  -- r3 : number of rest parameters (tagged)
    //  -- r4 : function
    //  -- r5 : pointer just past first rest parameters
    //  -- r9 : size of rest parameters
    //  -- lr : return address
    // -----------------------------------

    // Allocate space for the rest parameter array plus the backing store.
    Label allocate, done_allocate;
    __ mov(r10, Operand(JSArray::kSize + FixedArray::kHeaderSize));
    __ add(r10, r10, r9);
    __ Allocate(r10, r6, r7, r8, &allocate, NO_ALLOCATION_FLAGS);
    __ bind(&done_allocate);

    // Setup the elements array in r6.
    __ LoadRoot(r4, Heap::kFixedArrayMapRootIndex);
    __ StoreP(r4, FieldMemOperand(r6, FixedArray::kMapOffset), r0);
    __ StoreP(r3, FieldMemOperand(r6, FixedArray::kLengthOffset), r0);
    __ addi(r7, r6,
            Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
    {
      Label loop;
      __ SmiUntag(r0, r3);
      __ mtctr(r0);
      __ bind(&loop);
      __ LoadPU(ip, MemOperand(r5, -kPointerSize));
      __ StorePU(ip, MemOperand(r7, kPointerSize));
      __ bdnz(&loop);
      __ addi(r7, r7, Operand(kPointerSize));
    }

    // Setup the rest parameter array in r7.
    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r4);
    __ StoreP(r4, MemOperand(r7, JSArray::kMapOffset));
    __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
    __ StoreP(r4, MemOperand(r7, JSArray::kPropertiesOffset));
    __ StoreP(r6, MemOperand(r7, JSArray::kElementsOffset));
    __ StoreP(r3, MemOperand(r7, JSArray::kLengthOffset));
    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
    __ addi(r3, r7, Operand(kHeapObjectTag));
    __ Ret();

    // Fall back to %AllocateInNewSpace (if not too big).
    Label too_big_for_new_space;
    __ bind(&allocate);
    __ Cmpi(r10, Operand(kMaxRegularHeapObjectSize), r0);
    __ bgt(&too_big_for_new_space);
    {
      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
      __ SmiTag(r10);
      __ Push(r3, r5, r10);
      __ CallRuntime(Runtime::kAllocateInNewSpace);
      __ mr(r6, r3);
      __ Pop(r3, r5);
    }
    __ b(&done_allocate);

    // Fall back to %NewRestParameter.
    __ bind(&too_big_for_new_space);
    __ push(r4);
    __ TailCallRuntime(Runtime::kNewRestParameter);
  }
}
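
// The rest parameter count, as computed above (sketch):
//
//   rest_count = actual_argc - formal_parameter_count;  // tagged smis
//   if (rest_count <= 0) return empty JSArray;          // no_rest_parameters
//   // otherwise materialize a JSArray over the trailing rest_count args
//
// The subtraction sets the condition register; only a positive result takes
// the &rest_parameters path.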

void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r4 : function
  //  -- cp : context
  //  -- fp : frame pointer
  //  -- lr : return address
  // -----------------------------------
  __ AssertFunction(r4);

  // Make r10 point to the JavaScript frame.
  __ mr(r10, fp);
  if (skip_stub_frame()) {
    // For Ignition we need to skip the handler/stub frame to reach the
    // JavaScript frame for the function.
    __ LoadP(r10, MemOperand(r10, StandardFrameConstants::kCallerFPOffset));
  }
  if (FLAG_debug_code) {
    Label ok;
    __ LoadP(ip, MemOperand(r10, StandardFrameConstants::kFunctionOffset));
    __ cmp(ip, r4);
    __ beq(&ok);
    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
    __ bind(&ok);
  }

  // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
  __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
  __ LoadWordArith(
      r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
#if V8_TARGET_ARCH_PPC64
  __ SmiTag(r5);
#endif
  __ SmiToPtrArrayOffset(r6, r5);
  __ add(r6, r10, r6);
  __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));

  // r4 : function
  // r5 : number of parameters (tagged)
  // r6 : parameters pointer
  // r10 : JavaScript frame pointer
  // Registers used over whole function:
  //   r8 : arguments count (tagged)
  //   r9 : mapped parameter count (tagged)

  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor_frame, try_allocate, runtime;
  __ LoadP(r7, MemOperand(r10, StandardFrameConstants::kCallerFPOffset));
  __ LoadP(r3, MemOperand(r7, CommonFrameConstants::kContextOrFrameTypeOffset));
  __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
  __ beq(&adaptor_frame);

  // No adaptor, parameter count = argument count.
  __ mr(r8, r5);
  __ mr(r9, r5);
  __ b(&try_allocate);

  // We have an adaptor frame. Patch the parameters pointer.
  __ bind(&adaptor_frame);
  __ LoadP(r8, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiToPtrArrayOffset(r6, r8);
  __ add(r6, r6, r7);
  __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));

  // r8 = argument count (tagged)
  // r9 = parameter count (tagged)
  // Compute the mapped parameter count = min(r5, r8) in r9.
  __ cmp(r5, r8);
  if (CpuFeatures::IsSupported(ISELECT)) {
    __ isel(lt, r9, r5, r8);
  } else {
    Label skip;
    __ mr(r9, r5);
    __ blt(&skip);
    __ mr(r9, r8);
    __ bind(&skip);
  }

  __ bind(&try_allocate);

  // Compute the sizes of backing store, parameter map, and arguments object.
  // 1. Parameter map, has 2 extra words containing context and backing store.
  const int kParameterMapHeaderSize =
      FixedArray::kHeaderSize + 2 * kPointerSize;
  // If there are no mapped parameters, we do not need the parameter_map.
  __ CmpSmiLiteral(r9, Smi::kZero, r0);
  if (CpuFeatures::IsSupported(ISELECT)) {
    __ SmiToPtrArrayOffset(r11, r9);
    __ addi(r11, r11, Operand(kParameterMapHeaderSize));
    __ isel(eq, r11, r0, r11);
  } else {
    Label skip2, skip3;
    __ bne(&skip2);
    __ li(r11, Operand::Zero());
    __ b(&skip3);
    __ bind(&skip2);
    __ SmiToPtrArrayOffset(r11, r9);
    __ addi(r11, r11, Operand(kParameterMapHeaderSize));
    __ bind(&skip3);
  }

  // 2. Backing store.
  __ SmiToPtrArrayOffset(r7, r8);
  __ add(r11, r11, r7);
  __ addi(r11, r11, Operand(FixedArray::kHeaderSize));

  // 3. Arguments object.
  __ addi(r11, r11, Operand(JSSloppyArgumentsObject::kSize));

  // Do the allocation of all three objects in one go.
  __ Allocate(r11, r3, r11, r7, &runtime, NO_ALLOCATION_FLAGS);

  // r3 = address of new object(s) (tagged)
  // r5 = argument count (smi-tagged)
  // Get the arguments boilerplate from the current native context into r4.
  const int kNormalOffset =
      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
  const int kAliasedOffset =
      Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);

  __ LoadP(r7, NativeContextMemOperand());
  __ cmpi(r9, Operand::Zero());
  if (CpuFeatures::IsSupported(ISELECT)) {
    __ LoadP(r11, MemOperand(r7, kNormalOffset));
    __ LoadP(r7, MemOperand(r7, kAliasedOffset));
    __ isel(eq, r7, r11, r7);
  } else {
    Label skip4, skip5;
    __ bne(&skip4);
    __ LoadP(r7, MemOperand(r7, kNormalOffset));
    __ b(&skip5);
    __ bind(&skip4);
    __ LoadP(r7, MemOperand(r7, kAliasedOffset));
    __ bind(&skip5);
  }

  // r3 = address of new object (tagged)
  // r5 = argument count (smi-tagged)
  // r7 = address of arguments map (tagged)
  // r9 = mapped parameter count (tagged)
  __ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
  __ LoadRoot(r11, Heap::kEmptyFixedArrayRootIndex);
  __ StoreP(r11, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
  __ StoreP(r11, FieldMemOperand(r3, JSObject::kElementsOffset), r0);

  // Set up the callee in-object property.
  __ AssertNotSmi(r4);
  __ StoreP(r4, FieldMemOperand(r3, JSSloppyArgumentsObject::kCalleeOffset),
            r0);

  // Use the length (smi tagged) and set that as an in-object property too.
  __ AssertSmi(r8);
  __ StoreP(r8, FieldMemOperand(r3, JSSloppyArgumentsObject::kLengthOffset),
            r0);

  // Set up the elements pointer in the allocated arguments object.
  // If we allocated a parameter map, r7 will point there, otherwise
  // it will point to the backing store.
  __ addi(r7, r3, Operand(JSSloppyArgumentsObject::kSize));
  __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);

  // r3 = address of new object (tagged)
  // r5 = argument count (tagged)
  // r7 = address of parameter map or backing store (tagged)
  // r9 = mapped parameter count (tagged)
  // Initialize parameter map. If there are no mapped arguments, we're done.
  Label skip_parameter_map;
  __ CmpSmiLiteral(r9, Smi::kZero, r0);
  if (CpuFeatures::IsSupported(ISELECT)) {
    __ isel(eq, r4, r7, r4);
    __ beq(&skip_parameter_map);
  } else {
    Label skip6;
    __ bne(&skip6);
    // Move backing store address to r4, because it is
    // expected there when filling in the unmapped arguments.
    __ mr(r4, r7);
    __ b(&skip_parameter_map);
    __ bind(&skip6);
  }

  __ LoadRoot(r8, Heap::kSloppyArgumentsElementsMapRootIndex);
  __ StoreP(r8, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
  __ AddSmiLiteral(r8, r9, Smi::FromInt(2), r0);
  __ StoreP(r8, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
  __ StoreP(cp, FieldMemOperand(r7, FixedArray::kHeaderSize + 0 * kPointerSize),
            r0);
  __ SmiToPtrArrayOffset(r8, r9);
  __ add(r8, r8, r7);
  __ addi(r8, r8, Operand(kParameterMapHeaderSize));
  __ StoreP(r8, FieldMemOperand(r7, FixedArray::kHeaderSize + 1 * kPointerSize),
            r0);

  // Copy the parameter slots and the holes in the arguments.
  // We need to fill in mapped_parameter_count slots. They index the context,
  // where parameters are stored in reverse order, at
  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
  // The mapped parameters thus need to get indices
  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
  // We loop from right to left.
  Label parameters_loop;
  __ mr(r8, r9);
  __ AddSmiLiteral(r11, r5, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0);
  __ sub(r11, r11, r9);
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ SmiToPtrArrayOffset(r4, r8);
  __ add(r4, r4, r7);
  __ addi(r4, r4, Operand(kParameterMapHeaderSize));

  // r4 = address of backing store (tagged)
  // r7 = address of parameter map (tagged)
  // r8 = temporary scratch (a.o., for address calculation)
  // r10 = temporary scratch (a.o., for address calculation)
  // ip = the hole value
  __ SmiUntag(r8);
  __ mtctr(r8);
  __ ShiftLeftImm(r8, r8, Operand(kPointerSizeLog2));
  __ add(r10, r4, r8);
  __ add(r8, r7, r8);
  __ addi(r10, r10, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ addi(r8, r8, Operand(kParameterMapHeaderSize - kHeapObjectTag));

  __ bind(&parameters_loop);
  __ StorePU(r11, MemOperand(r8, -kPointerSize));
  __ StorePU(ip, MemOperand(r10, -kPointerSize));
  __ AddSmiLiteral(r11, r11, Smi::FromInt(1), r0);
  __ bdnz(&parameters_loop);

  // Restore r8 = argument count (tagged).
  __ LoadP(r8, FieldMemOperand(r3, JSSloppyArgumentsObject::kLengthOffset));

  __ bind(&skip_parameter_map);
  // r3 = address of new object (tagged)
  // r4 = address of backing store (tagged)
  // r8 = argument count (tagged)
  // r9 = mapped parameter count (tagged)
  // r11 = scratch
  // Copy arguments header and remaining slots (if there are any).
  __ LoadRoot(r11, Heap::kFixedArrayMapRootIndex);
  __ StoreP(r11, FieldMemOperand(r4, FixedArray::kMapOffset), r0);
  __ StoreP(r8, FieldMemOperand(r4, FixedArray::kLengthOffset), r0);
  __ sub(r11, r8, r9, LeaveOE, SetRC);
  __ Ret(eq, cr0);

  Label arguments_loop;
  __ SmiUntag(r11);
  __ mtctr(r11);

  __ SmiToPtrArrayOffset(r0, r9);
  __ sub(r6, r6, r0);
  __ add(r11, r4, r0);
  __ addi(r11, r11,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));

  __ bind(&arguments_loop);
  __ LoadPU(r7, MemOperand(r6, -kPointerSize));
  __ StorePU(r7, MemOperand(r11, kPointerSize));
  __ bdnz(&arguments_loop);

  // Return.
  __ Ret();

  // Do the runtime call to allocate the arguments object.
  // r8 = argument count (tagged)
  __ bind(&runtime);
  __ Push(r4, r6, r8);
  __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
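
// Layout produced above for aliased (sloppy) arguments, as a sketch:
//
//   parameter map:  [ map | length | context | backing store |
//                     context slot index (smi), one per mapped parameter ]
//   backing store:  [ map | length | the-hole for each mapped parameter |
//                     actual argument values for the rest ]
//
// Mapped entries alias the context slots at
// MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1.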

void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r4 : function
  //  -- cp : context
  //  -- fp : frame pointer
  //  -- lr : return address
  // -----------------------------------
  __ AssertFunction(r4);

  // Make r5 point to the JavaScript frame.
  __ mr(r5, fp);
  if (skip_stub_frame()) {
    // For Ignition we need to skip the handler/stub frame to reach the
    // JavaScript frame for the function.
    __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
  }
  if (FLAG_debug_code) {
    Label ok;
    __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kFunctionOffset));
    __ cmp(ip, r4);
    __ beq(&ok);
    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
    __ bind(&ok);
  }

  // Check if we have an arguments adaptor frame below the function frame.
  Label arguments_adaptor, arguments_done;
  __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
  __ LoadP(ip, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
  __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
  __ beq(&arguments_adaptor);
  {
    __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
    __ LoadWordArith(
        r3,
        FieldMemOperand(r7, SharedFunctionInfo::kFormalParameterCountOffset));
#if V8_TARGET_ARCH_PPC64
    __ SmiTag(r3);
#endif
    __ SmiToPtrArrayOffset(r9, r3);
    __ add(r5, r5, r9);
  }
  __ b(&arguments_done);
  __ bind(&arguments_adaptor);
  {
    __ LoadP(r3, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
    __ SmiToPtrArrayOffset(r9, r3);
    __ add(r5, r6, r9);
  }
  __ bind(&arguments_done);
  __ addi(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));

  // ----------- S t a t e -------------
  //  -- cp : context
  //  -- r3 : number of rest parameters (tagged)
  //  -- r4 : function
  //  -- r5 : pointer just past first rest parameters
  //  -- r9 : size of rest parameters
  //  -- lr : return address
  // -----------------------------------

  // Allocate space for the strict arguments object plus the backing store.
  Label allocate, done_allocate;
  __ mov(r10,
         Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
  __ add(r10, r10, r9);
  __ Allocate(r10, r6, r7, r8, &allocate, NO_ALLOCATION_FLAGS);
  __ bind(&done_allocate);

  // Setup the elements array in r6.
  __ LoadRoot(r4, Heap::kFixedArrayMapRootIndex);
  __ StoreP(r4, FieldMemOperand(r6, FixedArray::kMapOffset), r0);
  __ StoreP(r3, FieldMemOperand(r6, FixedArray::kLengthOffset), r0);
  __ addi(r7, r6,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
  {
    Label loop, done_loop;
    __ SmiUntag(r0, r3, SetRC);
    __ beq(&done_loop, cr0);
    __ mtctr(r0);
    __ bind(&loop);
    __ LoadPU(ip, MemOperand(r5, -kPointerSize));
    __ StorePU(ip, MemOperand(r7, kPointerSize));
    __ bdnz(&loop);
    __ bind(&done_loop);
    __ addi(r7, r7, Operand(kPointerSize));
  }

  // Setup the strict arguments object in r7.
  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r4);
  __ StoreP(r4, MemOperand(r7, JSStrictArgumentsObject::kMapOffset));
  __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
  __ StoreP(r4, MemOperand(r7, JSStrictArgumentsObject::kPropertiesOffset));
  __ StoreP(r6, MemOperand(r7, JSStrictArgumentsObject::kElementsOffset));
  __ StoreP(r3, MemOperand(r7, JSStrictArgumentsObject::kLengthOffset));
  STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
  __ addi(r3, r7, Operand(kHeapObjectTag));
  __ Ret();

  // Fall back to %AllocateInNewSpace (if not too big).
  Label too_big_for_new_space;
  __ bind(&allocate);
  __ Cmpi(r10, Operand(kMaxRegularHeapObjectSize), r0);
  __ bgt(&too_big_for_new_space);
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    __ SmiTag(r10);
    __ Push(r3, r5, r10);
    __ CallRuntime(Runtime::kAllocateInNewSpace);
    __ mr(r6, r3);
    __ Pop(r3, r5);
  }
  __ b(&done_allocate);

  // Fall back to %NewStrictArguments.
  __ bind(&too_big_for_new_space);
  __ push(r4);
  __ TailCallRuntime(Runtime::kNewStrictArguments);
}
4473
AddressOffset(ExternalReference ref0,ExternalReference ref1)4474 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
4475 return ref0.address() - ref1.address();
4476 }
4477
4478
4479 // Calls an API function. Allocates HandleScope, extracts returned value
4480 // from handle and propagates exceptions. Restores context. stack_space
4481 // - space to be unwound on exit (includes the call JS arguments space and
4482 // the additional space allocated for the fast call).
CallApiFunctionAndReturn(MacroAssembler * masm,Register function_address,ExternalReference thunk_ref,int stack_space,MemOperand * stack_space_operand,MemOperand return_value_operand,MemOperand * context_restore_operand)4483 static void CallApiFunctionAndReturn(MacroAssembler* masm,
4484 Register function_address,
4485 ExternalReference thunk_ref,
4486 int stack_space,
4487 MemOperand* stack_space_operand,
4488 MemOperand return_value_operand,
4489 MemOperand* context_restore_operand) {
4490 Isolate* isolate = masm->isolate();
4491 ExternalReference next_address =
4492 ExternalReference::handle_scope_next_address(isolate);
4493 const int kNextOffset = 0;
4494 const int kLimitOffset = AddressOffset(
4495 ExternalReference::handle_scope_limit_address(isolate), next_address);
4496 const int kLevelOffset = AddressOffset(
4497 ExternalReference::handle_scope_level_address(isolate), next_address);
4498
4499 // Additional parameter is the address of the actual callback.
4500 DCHECK(function_address.is(r4) || function_address.is(r5));
4501 Register scratch = r6;
4502
4503 __ mov(scratch, Operand(ExternalReference::is_profiling_address(isolate)));
4504 __ lbz(scratch, MemOperand(scratch, 0));
4505 __ cmpi(scratch, Operand::Zero());
4506
  if (CpuFeatures::IsSupported(ISELECT)) {
    __ mov(scratch, Operand(thunk_ref));
    __ isel(eq, scratch, function_address, scratch);
  } else {
    Label profiler_disabled;
    Label end_profiler_check;
    __ beq(&profiler_disabled);
    __ mov(scratch, Operand(thunk_ref));
    __ b(&end_profiler_check);
    __ bind(&profiler_disabled);
    __ mr(scratch, function_address);
    __ bind(&end_profiler_check);
  }

  // Allocate HandleScope in callee-save registers.
  // r17 - next_address
  // r14 - next_address->kNextOffset
  // r15 - next_address->kLimitOffset
  // r16 - next_address->kLevelOffset
  __ mov(r17, Operand(next_address));
  __ LoadP(r14, MemOperand(r17, kNextOffset));
  __ LoadP(r15, MemOperand(r17, kLimitOffset));
  __ lwz(r16, MemOperand(r17, kLevelOffset));
  __ addi(r16, r16, Operand(1));
  __ stw(r16, MemOperand(r17, kLevelOffset));
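  // This is the hand-rolled equivalent of opening a C++ HandleScope
  // (field names illustrative):
  //
  //   saved_next  = isolate->handle_scope.next;    // kept in r14
  //   saved_limit = isolate->handle_scope.limit;   // kept in r15
  //   isolate->handle_scope.level++;
  //
  // The saved values live in callee-saved registers so they survive the
  // C call below.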

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, r3);
    __ mov(r3, Operand(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  // Native call returns to the DirectCEntry stub which redirects to the
  // return address pushed on stack (could have moved after GC).
  // DirectCEntry stub itself is generated early and never moves.
  DirectCEntryStub stub(isolate);
  stub.GenerateCall(masm, scratch);

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, r3);
    __ mov(r3, Operand(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;

  // Load value from ReturnValue.
  __ LoadP(r3, return_value_operand);
  __ bind(&return_value_loaded);
  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  __ StoreP(r14, MemOperand(r17, kNextOffset));
  if (__ emit_debug_code()) {
    __ lwz(r4, MemOperand(r17, kLevelOffset));
    __ cmp(r4, r16);
    __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
  }
  __ subi(r16, r16, Operand(1));
  __ stw(r16, MemOperand(r17, kLevelOffset));
  __ LoadP(r0, MemOperand(r17, kLimitOffset));
  __ cmp(r15, r0);
  __ bne(&delete_allocated_handles);
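  // Handle scope teardown, the inverse of the setup above (illustrative):
  //
  //   isolate->handle_scope.next = saved_next;          // from r14
  //   isolate->handle_scope.level--;                    // via r16
  //   if (isolate->handle_scope.limit != saved_limit)   // saved in r15
  //     goto delete_allocated_handles;  // callback allocated extensions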

  // Leave the API exit frame.
  __ bind(&leave_exit_frame);
  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    __ LoadP(cp, *context_restore_operand);
  }
  // LeaveExitFrame expects unwind space to be in a register.
  if (stack_space_operand != NULL) {
    __ lwz(r14, *stack_space_operand);
  } else {
    __ mov(r14, Operand(stack_space));
  }
  __ LeaveExitFrame(false, r14, !restore_context, stack_space_operand != NULL);

  // Check if the function scheduled an exception.
  __ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
  __ mov(r15, Operand(ExternalReference::scheduled_exception_address(isolate)));
  __ LoadP(r15, MemOperand(r15));
  __ cmp(r14, r15);
  __ bne(&promote_scheduled_exception);

  __ blr();

  // Re-throw by promoting a scheduled exception.
  __ bind(&promote_scheduled_exception);
  __ TailCallRuntime(Runtime::kPromoteScheduledException);

  // HandleScope limit has changed. Delete allocated extensions.
  __ bind(&delete_allocated_handles);
  __ StoreP(r15, MemOperand(r17, kLimitOffset));
  __ mr(r14, r3);
  __ PrepareCallCFunction(1, r15);
  __ mov(r3, Operand(ExternalReference::isolate_address(isolate)));
  __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
                   1);
  __ mr(r3, r14);
  __ b(&leave_exit_frame);
}

void CallApiCallbackStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r3                  : callee
  //  -- r7                  : call_data
  //  -- r5                  : holder
  //  -- r4                  : api_function_address
  //  -- cp                  : context
  //  --
  //  -- sp[0]               : last argument
  //  -- ...
  //  -- sp[(argc - 1) * 4]  : first argument
  //  -- sp[argc * 4]        : receiver
  // -----------------------------------

  Register callee = r3;
  Register call_data = r7;
  Register holder = r5;
  Register api_function_address = r4;
  Register context = cp;

  typedef FunctionCallbackArguments FCA;

  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
  STATIC_ASSERT(FCA::kCalleeIndex == 5);
  STATIC_ASSERT(FCA::kDataIndex == 4);
  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
  STATIC_ASSERT(FCA::kIsolateIndex == 1);
  STATIC_ASSERT(FCA::kHolderIndex == 0);
  STATIC_ASSERT(FCA::kNewTargetIndex == 7);
  STATIC_ASSERT(FCA::kArgsLength == 8);
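
  // The pushes below build FCA's implicit_args array top-down; because the
  // stack grows toward lower addresses, pushing new_target first and holder
  // last leaves holder at index 0 and new_target at index 7, matching the
  // asserts above:
  //
  //   sp[7] new target, sp[6] context, sp[5] callee, sp[4] call data,
  //   sp[3] return value, sp[2] return value default, sp[1] isolate,
  //   sp[0] holder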

  // new target
  __ PushRoot(Heap::kUndefinedValueRootIndex);

  // context save
  __ push(context);
  if (!is_lazy()) {
    // load context from callee
    __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset));
  }

  // callee
  __ push(callee);

  // call data
  __ push(call_data);

  Register scratch = call_data;
  if (!call_data_undefined()) {
    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  }
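  // At this point scratch (which aliases call_data) holds the undefined
  // value: either call_data was already undefined, as call_data_undefined()
  // promises, or it was just overwritten above. The same value serves both
  // the return value and return value default slots below.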
  // return value
  __ push(scratch);
  // return value default
  __ push(scratch);
  // isolate
  __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
  __ push(scratch);
  // holder
  __ push(holder);

  // Prepare arguments.
  __ mr(scratch, sp);

  // Allocate the v8::FunctionCallbackInfo structure in the arguments' space,
  // since it's not controlled by GC.
  // PPC LINUX ABI:
  //
  // Create 4 extra slots on stack:
  //    [0]   space for DirectCEntryStub's LR save
  //    [1-3] FunctionCallbackInfo
  const int kApiStackSpace = 4;
  const int kFunctionCallbackInfoOffset =
      (kStackFrameExtraParamSlot + 1) * kPointerSize;

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  DCHECK(!api_function_address.is(r3) && !scratch.is(r3));
  // r3 = FunctionCallbackInfo&
  // The structure lies just above the return address.
  __ addi(r3, sp, Operand(kFunctionCallbackInfoOffset));
  // FunctionCallbackInfo::implicit_args_
  __ StoreP(scratch, MemOperand(r3, 0 * kPointerSize));
  // FunctionCallbackInfo::values_
  __ addi(ip, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
  __ StoreP(ip, MemOperand(r3, 1 * kPointerSize));
  // FunctionCallbackInfo::length_ = argc
  __ li(ip, Operand(argc()));
  __ stw(ip, MemOperand(r3, 2 * kPointerSize));
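
  // The three stores above initialize the FunctionCallbackInfo fields, which
  // in the v8.h of this era are roughly:
  //
  //   info->implicit_args_ = scratch;   // base of the 8 slots pushed earlier
  //   info->values_ = scratch + (kArgsLength - 1 + argc) * kPointerSize;
  //   info->length_ = argc;
  //
  // values_ points at the first JS argument; FunctionCallbackInfo indexes
  // arguments downward from there (arg i lives at values_ - i).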

  ExternalReference thunk_ref =
      ExternalReference::invoke_function_callback(masm->isolate());

  AllowExternalCallThatCantCauseGC scope(masm);
  MemOperand context_restore_operand(
      fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
  // Stores return the first JS argument.
  int return_value_offset = 0;
  if (is_store()) {
    return_value_offset = 2 + FCA::kArgsLength;
  } else {
    return_value_offset = 2 + FCA::kReturnValueOffset;
  }
  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
  int stack_space = 0;
  MemOperand length_operand =
      MemOperand(sp, kFunctionCallbackInfoOffset + 2 * kPointerSize);
  MemOperand* stack_space_operand = &length_operand;
  stack_space = argc() + FCA::kArgsLength + 1;
  stack_space_operand = NULL;
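  // argc() is baked into this stub (it is part of the stub key), so the
  // constant unwind amount (argc JS args + the 8 implicit slots + the
  // receiver) can be used; passing NULL for stack_space_operand makes
  // CallApiFunctionAndReturn take the constant-space path, and the
  // length_operand computed above is deliberately left unused here.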
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
                           stack_space_operand, return_value_operand,
                           &context_restore_operand);
}


void CallApiGetterStub::Generate(MacroAssembler* masm) {
  int arg0Slot = 0;
  int accessorInfoSlot = 0;
  int apiStackSpace = 0;
  // Build v8::PropertyCallbackInfo::args_ array on the stack and push the
  // property name below the exit frame to make GC aware of them.
  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
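
  // Mirroring the asserts above, the pushes below fill args_ from kThisIndex
  // (the receiver, pushed first) down to kShouldThrowOnErrorIndex (pushed
  // last, so it ends up at args_[0]), and then push the property name one
  // slot below args_ so that both stay visible to the GC.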

  Register receiver = ApiGetterDescriptor::ReceiverRegister();
  Register holder = ApiGetterDescriptor::HolderRegister();
  Register callback = ApiGetterDescriptor::CallbackRegister();
  Register scratch = r7;
  DCHECK(!AreAliased(receiver, holder, callback, scratch));

  Register api_function_address = r5;

  __ push(receiver);
  // Push data from AccessorInfo.
  __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
  __ push(scratch);
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Push(scratch, scratch);
  __ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
  __ Push(scratch, holder);
  __ Push(Smi::kZero);  // should_throw_on_error -> false
  __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
  __ push(scratch);

  // v8::PropertyCallbackInfo::args_ array and name handle.
  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;

  // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
  __ mr(r3, sp);                               // r3 = Handle<Name>
  __ addi(r4, r3, Operand(1 * kPointerSize));  // r4 = v8::PCI::args_

  // If ABI passes Handles (pointer-sized struct) in a register:
  //
  // Create 2 extra slots on stack:
  //    [0] space for DirectCEntryStub's LR save
  //    [1] AccessorInfo&
  //
  // Otherwise:
  //
  // Create 3 extra slots on stack:
  //    [0] space for DirectCEntryStub's LR save
  //    [1] copy of Handle (first arg)
  //    [2] AccessorInfo&
  if (ABI_PASSES_HANDLES_IN_REGS) {
    accessorInfoSlot = kStackFrameExtraParamSlot + 1;
    apiStackSpace = 2;
  } else {
    arg0Slot = kStackFrameExtraParamSlot + 1;
    accessorInfoSlot = arg0Slot + 1;
    apiStackSpace = 3;
  }

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, apiStackSpace);

  if (!ABI_PASSES_HANDLES_IN_REGS) {
    // pass 1st arg by reference
    __ StoreP(r3, MemOperand(sp, arg0Slot * kPointerSize));
    __ addi(r3, sp, Operand(arg0Slot * kPointerSize));
  }

  // Create v8::PropertyCallbackInfo object on the stack and initialize
  // its args_ field.
  __ StoreP(r4, MemOperand(sp, accessorInfoSlot * kPointerSize));
  __ addi(r4, sp, Operand(accessorInfoSlot * kPointerSize));
  // r4 = v8::PropertyCallbackInfo&
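
  // Passing the address of this single slot as v8::PropertyCallbackInfo& is
  // valid because, in the v8.h of this era, the object's only data member is
  // (roughly) its args_ pointer:
  //
  //   struct PropertyCallbackInfo { internal::Object** args_; };
  //
  // so the slot initialized above is the whole object as far as the callee
  // is concerned.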

  ExternalReference thunk_ref =
      ExternalReference::invoke_accessor_getter_callback(isolate());

  __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
  __ LoadP(api_function_address,
           FieldMemOperand(scratch, Foreign::kForeignAddressOffset));

  // +3 is to skip prolog, return address and name handle.
  MemOperand return_value_operand(
      fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                           kStackUnwindSpace, NULL, return_value_operand, NULL);
}

#undef __
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_PPC