// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_X64

#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/isolate.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
#include "src/runtime.h"

namespace v8 {
namespace internal {

static void InitializeArrayConstructorDescriptor(
    Isolate* isolate, CodeStubDescriptor* descriptor,
    int constant_stack_parameter_count) {
  Address deopt_handler = Runtime::FunctionForId(
      Runtime::kArrayConstructor)->entry;

  if (constant_stack_parameter_count == 0) {
    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  } else {
    descriptor->Initialize(rax, deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
  }
}


static void InitializeInternalArrayConstructorDescriptor(
    Isolate* isolate, CodeStubDescriptor* descriptor,
    int constant_stack_parameter_count) {
  Address deopt_handler = Runtime::FunctionForId(
      Runtime::kInternalArrayConstructor)->entry;

  if (constant_stack_parameter_count == 0) {
    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  } else {
    descriptor->Initialize(rax, deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
  }
}

void ArrayNoArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
}


void ArraySingleArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
}


void ArrayNArgumentsConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
}


void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}


void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
}


void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
}


#define __ ACCESS_MASM(masm)

void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                               ExternalReference miss) {
  // Update the static counter each time a new code stub is generated.
  isolate()->counters()->code_stubs()->Increment();

  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
  int param_count = descriptor.GetEnvironmentParameterCount();
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    DCHECK(param_count == 0 ||
           rax.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
    // Push arguments.
    for (int i = 0; i < param_count; ++i) {
      __ Push(descriptor.GetEnvironmentParameterRegister(i));
    }
    __ CallExternalReference(miss, param_count);
  }

  __ Ret();
}

void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  __ PushCallerSaved(save_doubles() ? kSaveFPRegs : kDontSaveFPRegs);
  const int argument_count = 1;
  __ PrepareCallCFunction(argument_count);
  __ LoadAddress(arg_reg_1,
                 ExternalReference::isolate_address(isolate()));

  AllowExternalCallThatCantCauseGC scope(masm);
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(isolate()),
      argument_count);
  __ PopCallerSaved(save_doubles() ? kSaveFPRegs : kDontSaveFPRegs);
  __ ret(0);
}

class FloatingPointHelper : public AllStatic {
 public:
  enum ConvertUndefined {
    CONVERT_UNDEFINED_TO_ZERO,
    BAILOUT_ON_UNDEFINED
  };
  // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
  // If the operands are not both numbers, jump to not_numbers.
  // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
  // NumberOperands assumes both are smis or heap numbers.
  static void LoadSSE2UnknownOperands(MacroAssembler* masm,
                                      Label* not_numbers);
};


void DoubleToIStub::Generate(MacroAssembler* masm) {
  Register input_reg = this->source();
  Register final_result_reg = this->destination();
  DCHECK(is_truncating());

  Label check_negative, process_64_bits, done;

  int double_offset = offset();

  // Account for return address and saved regs if input is rsp.
  if (input_reg.is(rsp)) double_offset += 3 * kRegisterSize;
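  // (The three slots are the return address plus scratch1 and save_reg,
  // which are pushed below before the operands are read.)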

  MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
  MemOperand exponent_operand(MemOperand(input_reg,
                                         double_offset + kDoubleSize / 2));

  Register scratch1;
  Register scratch_candidates[3] = { rbx, rdx, rdi };
  for (int i = 0; i < 3; i++) {
    scratch1 = scratch_candidates[i];
    if (!final_result_reg.is(scratch1) && !input_reg.is(scratch1)) break;
  }
  // Since we must use rcx for shifts below, use some other register (rax)
  // to calculate the result if rcx is the requested return register.
  Register result_reg = final_result_reg.is(rcx) ? rax : final_result_reg;
  // Save rcx if it isn't the return register and therefore volatile, or if it
  // is the return register, then save the temp register we use in its stead
  // for the result.
  Register save_reg = final_result_reg.is(rcx) ? rax : rcx;
  __ pushq(scratch1);
  __ pushq(save_reg);

  bool stash_exponent_copy = !input_reg.is(rsp);
  __ movl(scratch1, mantissa_operand);
  __ movsd(xmm0, mantissa_operand);
  __ movl(rcx, exponent_operand);
  if (stash_exponent_copy) __ pushq(rcx);

  __ andl(rcx, Immediate(HeapNumber::kExponentMask));
  __ shrl(rcx, Immediate(HeapNumber::kExponentShift));
  __ leal(result_reg, MemOperand(rcx, -HeapNumber::kExponentBias));
  __ cmpl(result_reg, Immediate(HeapNumber::kMantissaBits));
  __ j(below, &process_64_bits);

  // Result is entirely in lower 32-bits of mantissa.
  int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize;
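  // For illustration: delta = 1023 + 52 = 1075, so the subtraction below
  // leaves rcx = biased_exponent - 1075, i.e. how far the 52-bit significand
  // must be shifted left for its low bits to line up with bit 0 of the
  // integer result. Shift counts outside [0, 31] (the unsigned compare
  // below) mean no significand bit lands in the low 32 bits, so the
  // truncated result is zero.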
  __ subl(rcx, Immediate(delta));
  __ xorl(result_reg, result_reg);
  __ cmpl(rcx, Immediate(31));
  __ j(above, &done);
  __ shll_cl(scratch1);
  __ jmp(&check_negative);

  __ bind(&process_64_bits);
  __ cvttsd2siq(result_reg, xmm0);
  __ jmp(&done, Label::kNear);

  // If the double was negative, negate the integer result.
  __ bind(&check_negative);
  __ movl(result_reg, scratch1);
  __ negl(result_reg);
  if (stash_exponent_copy) {
    __ cmpl(MemOperand(rsp, 0), Immediate(0));
  } else {
    __ cmpl(exponent_operand, Immediate(0));
  }
  __ cmovl(greater, result_reg, scratch1);

  // Restore registers.
  __ bind(&done);
  if (stash_exponent_copy) {
    __ addp(rsp, Immediate(kDoubleSize));
  }
  if (!final_result_reg.is(result_reg)) {
    DCHECK(final_result_reg.is(rcx));
    __ movl(final_result_reg, result_reg);
  }
  __ popq(save_reg);
  __ popq(scratch1);
  __ ret(0);
}

void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
                                                  Label* not_numbers) {
  Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
  // Load operand in rdx into xmm0, or branch to not_numbers.
  __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
  __ JumpIfSmi(rdx, &load_smi_rdx);
  __ cmpp(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
  __ j(not_equal, not_numbers);  // Argument in rdx is not a number.
  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
  // Load operand in rax into xmm1, or branch to not_numbers.
  __ JumpIfSmi(rax, &load_smi_rax);

  __ bind(&load_nonsmi_rax);
  __ cmpp(FieldOperand(rax, HeapObject::kMapOffset), rcx);
  __ j(not_equal, not_numbers);
  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
  __ jmp(&done);

  __ bind(&load_smi_rdx);
  __ SmiToInteger32(kScratchRegister, rdx);
  __ Cvtlsi2sd(xmm0, kScratchRegister);
  __ JumpIfNotSmi(rax, &load_nonsmi_rax);

  __ bind(&load_smi_rax);
  __ SmiToInteger32(kScratchRegister, rax);
  __ Cvtlsi2sd(xmm1, kScratchRegister);
  __ bind(&done);
}

void MathPowStub::Generate(MacroAssembler* masm) {
  const Register exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent.is(rdx));
  const Register base = rax;
  const Register scratch = rcx;
  const XMMRegister double_result = xmm3;
  const XMMRegister double_base = xmm2;
  const XMMRegister double_exponent = xmm1;
  const XMMRegister double_scratch = xmm4;

  Label call_runtime, done, exponent_not_smi, int_exponent;

  // Save 1 in double_result - we need this several times later on.
  __ movp(scratch, Immediate(1));
  __ Cvtlsi2sd(double_result, scratch);

  if (exponent_type() == ON_STACK) {
    Label base_is_smi, unpack_exponent;
    // The exponent and base are supplied as arguments on the stack.
    // This can only happen if the stub is called from non-optimized code.
    // Load input parameters from stack.
    StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
    __ movp(base, args.GetArgumentOperand(0));
    __ movp(exponent, args.GetArgumentOperand(1));
    __ JumpIfSmi(base, &base_is_smi, Label::kNear);
    __ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);
    __ j(not_equal, &call_runtime);

    __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
    __ jmp(&unpack_exponent, Label::kNear);

    __ bind(&base_is_smi);
    __ SmiToInteger32(base, base);
    __ Cvtlsi2sd(double_base, base);
    __ bind(&unpack_exponent);

    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
    __ SmiToInteger32(exponent, exponent);
    __ jmp(&int_exponent);

    __ bind(&exponent_not_smi);
    __ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);
    __ j(not_equal, &call_runtime);
    __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
  } else if (exponent_type() == TAGGED) {
    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
    __ SmiToInteger32(exponent, exponent);
    __ jmp(&int_exponent);

    __ bind(&exponent_not_smi);
    __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type() != INTEGER) {
    Label fast_power, try_arithmetic_simplification;
    // Detect integer exponents stored as double.
    __ DoubleToI(exponent, double_exponent, double_scratch,
                 TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification,
                 &try_arithmetic_simplification,
                 &try_arithmetic_simplification);
    __ jmp(&int_exponent);

    __ bind(&try_arithmetic_simplification);
    __ cvttsd2si(exponent, double_exponent);
    // Skip to runtime if possibly NaN (indicated by the indefinite integer).
    __ cmpl(exponent, Immediate(0x1));
    __ j(overflow, &call_runtime);
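    // (cvttsd2si writes the "indefinite integer" 0x80000000 when the input
    // is NaN or out of range, and 0x80000000 is the only value for which the
    // cmpl against 1 sets the overflow flag.)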

    if (exponent_type() == ON_STACK) {
      // Detect square root case. Crankshaft detects constant +/-0.5 at
      // compile time and uses DoMathPowHalf instead. We then skip this check
      // for non-constant cases of +/-0.5 as these hardly occur.
      Label continue_sqrt, continue_rsqrt, not_plus_half;
      // Test for 0.5.
      // Load double_scratch with 0.5.
      __ movq(scratch, V8_UINT64_C(0x3FE0000000000000));
      __ movq(double_scratch, scratch);
      // Already ruled out NaNs for exponent.
      __ ucomisd(double_scratch, double_exponent);
      __ j(not_equal, &not_plus_half, Label::kNear);

      // Calculates square root of base. Check for the special case of
      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
      // According to IEEE-754, double-precision -Infinity has the highest
      // 12 bits set and the lowest 52 bits cleared.
      __ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
      __ movq(double_scratch, scratch);
      __ ucomisd(double_scratch, double_base);
      // Comparing -Infinity with NaN results in "unordered", which sets the
      // zero flag as if both were equal. However, it also sets the carry flag.
      __ j(not_equal, &continue_sqrt, Label::kNear);
      __ j(carry, &continue_sqrt, Label::kNear);
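      // (ucomisd flag summary: unordered sets ZF=PF=CF=1, equal sets ZF=1
      // with CF=0, and less-than sets CF=1 with ZF=0. So not_equal catches
      // the ordered unequal cases, and the carry check then separates the
      // unordered NaN case from a genuine match with -Infinity.)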

      // Set result to Infinity in the special case.
      __ xorps(double_result, double_result);
      __ subsd(double_result, double_scratch);
      __ jmp(&done);

      __ bind(&continue_sqrt);
      // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
      __ xorps(double_scratch, double_scratch);
      __ addsd(double_scratch, double_base);  // Convert -0 to 0.
      __ sqrtsd(double_result, double_scratch);
      __ jmp(&done);

      // Test for -0.5.
      __ bind(&not_plus_half);
      // Load double_scratch with -0.5 by subtracting 1.
      __ subsd(double_scratch, double_result);
      // Already ruled out NaNs for exponent.
      __ ucomisd(double_scratch, double_exponent);
      __ j(not_equal, &fast_power, Label::kNear);

      // Calculates reciprocal of square root of base. Check for the special
      // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
      // According to IEEE-754, double-precision -Infinity has the highest
      // 12 bits set and the lowest 52 bits cleared.
      __ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
      __ movq(double_scratch, scratch);
      __ ucomisd(double_scratch, double_base);
      // Comparing -Infinity with NaN results in "unordered", which sets the
      // zero flag as if both were equal. However, it also sets the carry flag.
      __ j(not_equal, &continue_rsqrt, Label::kNear);
      __ j(carry, &continue_rsqrt, Label::kNear);

      // Set result to 0 in the special case.
      __ xorps(double_result, double_result);
      __ jmp(&done);

      __ bind(&continue_rsqrt);
      // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
      __ xorps(double_exponent, double_exponent);
      __ addsd(double_exponent, double_base);  // Convert -0 to +0.
      __ sqrtsd(double_exponent, double_exponent);
      __ divsd(double_result, double_exponent);
      __ jmp(&done);
    }

    // Using FPU instructions to calculate power.
    Label fast_power_failed;
    __ bind(&fast_power);
    __ fnclex();  // Clear flags to catch exceptions later.
    // Transfer (B)ase and (E)xponent onto the FPU register stack.
    __ subp(rsp, Immediate(kDoubleSize));
    __ movsd(Operand(rsp, 0), double_exponent);
    __ fld_d(Operand(rsp, 0));  // E
    __ movsd(Operand(rsp, 0), double_base);
    __ fld_d(Operand(rsp, 0));  // B, E

    // Exponent is in st(1) and base is in st(0).
    // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
    // FYL2X calculates st(1) * log2(st(0))
    __ fyl2x();    // X
    __ fld(0);     // X, X
    __ frndint();  // rnd(X), X
    __ fsub(1);    // rnd(X), X-rnd(X)
    __ fxch(1);    // X - rnd(X), rnd(X)
    // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
    __ f2xm1();    // 2^(X-rnd(X)) - 1, rnd(X)
    __ fld1();     // 1, 2^(X-rnd(X)) - 1, rnd(X)
    __ faddp(1);   // 2^(X-rnd(X)), rnd(X)
    // FSCALE calculates st(0) * 2^st(1)
    __ fscale();   // 2^X, rnd(X)
    __ fstp(1);
    // Bail out to runtime in case of exceptions in the status word.
    __ fnstsw_ax();
    __ testb(rax, Immediate(0x5F));  // Check for all but precision exception.
    __ j(not_zero, &fast_power_failed, Label::kNear);
    __ fstp_d(Operand(rsp, 0));
    __ movsd(double_result, Operand(rsp, 0));
    __ addp(rsp, Immediate(kDoubleSize));
    __ jmp(&done);

    __ bind(&fast_power_failed);
    __ fninit();
    __ addp(rsp, Immediate(kDoubleSize));
    __ jmp(&call_runtime);
  }

  // Calculate power with integer exponent.
  __ bind(&int_exponent);
  const XMMRegister double_scratch2 = double_exponent;
  // Back up exponent as we need to check if exponent is negative later.
  __ movp(scratch, exponent);  // Back up exponent.
  __ movsd(double_scratch, double_base);  // Back up base.
  __ movsd(double_scratch2, double_result);  // Load double_exponent with 1.

  // Get absolute value of exponent.
  Label no_neg, while_true, while_false;
  __ testl(scratch, scratch);
  __ j(positive, &no_neg, Label::kNear);
  __ negl(scratch);
  __ bind(&no_neg);

  __ j(zero, &while_false, Label::kNear);
  __ shrl(scratch, Immediate(1));
  // Above condition means CF==0 && ZF==0. This means that the
  // bit that has been shifted out is 0 and the result is not 0.
  __ j(above, &while_true, Label::kNear);
  __ movsd(double_result, double_scratch);
  __ j(zero, &while_false, Label::kNear);

  __ bind(&while_true);
  __ shrl(scratch, Immediate(1));
  __ mulsd(double_scratch, double_scratch);
  __ j(above, &while_true, Label::kNear);
  __ mulsd(double_result, double_scratch);
  __ j(not_zero, &while_true);
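  // (This is binary exponentiation: each round shifts the exponent right and
  // squares double_scratch; whenever the bit shifted out is 1, shrl leaves
  // CF=1, the above branch falls through, and double_scratch is multiplied
  // into double_result. E.g. for scratch == 5 (binary 101) the result
  // accumulates base^1 and base^4, i.e. base^5.)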

  __ bind(&while_false);
  // If the exponent is negative, return 1/result.
  __ testl(exponent, exponent);
  __ j(greater, &done);
  __ divsd(double_scratch2, double_result);
  __ movsd(double_result, double_scratch2);
  // Test whether result is zero. Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ xorps(double_scratch2, double_scratch2);
  __ ucomisd(double_scratch2, double_result);
  // double_exponent aliased as double_scratch2 has already been overwritten
  // and may not have contained the exponent value in the first place when the
  // input was a smi. We reset it with exponent value before bailing out.
  __ j(not_equal, &done);
  __ Cvtlsi2sd(double_exponent, exponent);

  // Returning or bailing out.
  Counters* counters = isolate()->counters();
  if (exponent_type() == ON_STACK) {
    // The arguments are still on the stack.
    __ bind(&call_runtime);
    __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);

    // The stub is called from non-optimized code, which expects the result
    // as heap number in rax.
    __ bind(&done);
    __ AllocateHeapNumber(rax, rcx, &call_runtime);
    __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
    __ IncrementCounter(counters->math_pow(), 1);
    __ ret(2 * kPointerSize);
  } else {
    __ bind(&call_runtime);
    // Move base to the correct argument register. Exponent is already in xmm1.
    __ movsd(xmm0, double_base);
    DCHECK(double_exponent.is(xmm1));
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(2);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()), 2);
    }
    // Return value is in xmm0.
    __ movsd(double_result, xmm0);

    __ bind(&done);
    __ IncrementCounter(counters->math_pow(), 1);
    __ ret(0);
  }
}

void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
  Label miss;
  Register receiver = LoadDescriptor::ReceiverRegister();

  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r8,
                                                          r9, &miss);
  __ bind(&miss);
  PropertyAccessCompiler::TailCallBuiltin(
      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
}

void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
  // The key is in rdx and the parameter count is in rax.
  DCHECK(rdx.is(ArgumentsAccessReadDescriptor::index()));
  DCHECK(rax.is(ArgumentsAccessReadDescriptor::parameter_count()));

  // Check that the key is a smi.
  Label slow;
  __ JumpIfNotSmi(rdx, &slow);

  // Check if the calling frame is an arguments adaptor frame. We look at the
  // context offset, and if the frame is not a regular one, then we find a
  // Smi instead of the context. We can't use SmiCompare here, because that
  // only works for comparing two smis.
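  // (A standard frame stores the tagged context at kContextOffset; an
  // arguments adaptor frame stores the Smi-encoded frame type there instead,
  // which is what the Cmp below matches.)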
  Label adaptor;
  __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(equal, &adaptor);

  // Check index against formal parameters count limit passed in
  // through register rax. Use unsigned comparison to get negative
  // check for free.
  __ cmpp(rdx, rax);
  __ j(above_equal, &slow);

  // Read the argument from the stack and return it.
  __ SmiSub(rax, rax, rdx);
  __ SmiToInteger32(rax, rax);
  StackArgumentsAccessor args(rbp, rax, ARGUMENTS_DONT_CONTAIN_RECEIVER);
  __ movp(rax, args.GetArgumentOperand(0));
  __ Ret();

  // Arguments adaptor case: Check index against actual arguments
  // limit found in the arguments adaptor frame. Use unsigned
  // comparison to get negative check for free.
  __ bind(&adaptor);
  __ movp(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ cmpp(rdx, rcx);
  __ j(above_equal, &slow);

  // Read the argument from the stack and return it.
  __ SmiSub(rcx, rcx, rdx);
  __ SmiToInteger32(rcx, rcx);
  StackArgumentsAccessor adaptor_args(rbx, rcx,
                                      ARGUMENTS_DONT_CONTAIN_RECEIVER);
  __ movp(rax, adaptor_args.GetArgumentOperand(0));
  __ Ret();

  // Slow-case: Handle non-smi or out-of-bounds access to arguments
  // by calling the runtime system.
  __ bind(&slow);
  __ PopReturnAddressTo(rbx);
  __ Push(rdx);
  __ PushReturnAddressFrom(rbx);
  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
}

void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
  // Stack layout:
  // rsp[0] : return address
  // rsp[8] : number of parameters (tagged)
  // rsp[16] : receiver displacement
  // rsp[24] : function
  // Registers used over the whole function:
  //  rbx: the mapped parameter count (untagged)
  //  rax: the allocated object (tagged).

  Factory* factory = isolate()->factory();

  StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
  __ SmiToInteger64(rbx, args.GetArgumentOperand(2));
  // rbx = parameter count (untagged)

  // Check if the calling frame is an arguments adaptor frame.
  Label runtime;
  Label adaptor_frame, try_allocate;
  __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
  __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(equal, &adaptor_frame);

  // No adaptor, parameter count = argument count.
  __ movp(rcx, rbx);
  __ jmp(&try_allocate, Label::kNear);

  // We have an adaptor frame. Patch the parameters pointer.
  __ bind(&adaptor_frame);
  __ SmiToInteger64(rcx,
                    Operand(rdx,
                            ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
                       StandardFrameConstants::kCallerSPOffset));
  __ movp(args.GetArgumentOperand(1), rdx);

  // rbx = parameter count (untagged)
  // rcx = argument count (untagged)
  // Compute the mapped parameter count = min(rbx, rcx) in rbx.
  __ cmpp(rbx, rcx);
  __ j(less_equal, &try_allocate, Label::kNear);
  __ movp(rbx, rcx);

  __ bind(&try_allocate);

  // Compute the sizes of backing store, parameter map, and arguments object.
  // 1. Parameter map, has 2 extra words containing context and backing store.
  const int kParameterMapHeaderSize =
      FixedArray::kHeaderSize + 2 * kPointerSize;
  Label no_parameter_map;
  __ xorp(r8, r8);
  __ testp(rbx, rbx);
  __ j(zero, &no_parameter_map, Label::kNear);
  __ leap(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
  __ bind(&no_parameter_map);

  // 2. Backing store.
  __ leap(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize));

  // 3. Arguments object.
  __ addp(r8, Immediate(Heap::kSloppyArgumentsObjectSize));
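  // As a concrete example (assuming 64-bit pointers): 2 mapped parameters
  // and 3 actual arguments give a parameter map of FixedArray::kHeaderSize +
  // (2 + 2) * 8 bytes, a backing store of FixedArray::kHeaderSize + 3 * 8
  // bytes, plus Heap::kSloppyArgumentsObjectSize for the object itself;
  // r8 has just accumulated that total.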

  // Do the allocation of all three objects in one go.
  __ Allocate(r8, rax, rdx, rdi, &runtime, TAG_OBJECT);

  // rax = address of new object(s) (tagged)
  // rcx = argument count (untagged)
  // Get the arguments map from the current native context into rdi.
  Label has_mapped_parameters, instantiate;
  __ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  __ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
  __ testp(rbx, rbx);
  __ j(not_zero, &has_mapped_parameters, Label::kNear);

  const int kIndex = Context::SLOPPY_ARGUMENTS_MAP_INDEX;
  __ movp(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
  __ jmp(&instantiate, Label::kNear);

  const int kAliasedIndex = Context::ALIASED_ARGUMENTS_MAP_INDEX;
  __ bind(&has_mapped_parameters);
  __ movp(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
  __ bind(&instantiate);

  // rax = address of new object (tagged)
  // rbx = mapped parameter count (untagged)
  // rcx = argument count (untagged)
  // rdi = address of arguments map (tagged)
  __ movp(FieldOperand(rax, JSObject::kMapOffset), rdi);
  __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
  __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
  __ movp(FieldOperand(rax, JSObject::kElementsOffset), kScratchRegister);

  // Set up the callee in-object property.
  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
  __ movp(rdx, args.GetArgumentOperand(0));
  __ AssertNotSmi(rdx);
  __ movp(FieldOperand(rax, JSObject::kHeaderSize +
                                Heap::kArgumentsCalleeIndex * kPointerSize),
          rdx);

  // Use the length (smi tagged) and set that as an in-object property too.
  // Note: rcx is tagged from here on.
  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
  __ Integer32ToSmi(rcx, rcx);
  __ movp(FieldOperand(rax, JSObject::kHeaderSize +
                                Heap::kArgumentsLengthIndex * kPointerSize),
          rcx);

  // Set up the elements pointer in the allocated arguments object.
  // If we allocated a parameter map, rdi will point there, otherwise to the
  // backing store.
  __ leap(rdi, Operand(rax, Heap::kSloppyArgumentsObjectSize));
  __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);

  // rax = address of new object (tagged)
  // rbx = mapped parameter count (untagged)
  // rcx = argument count (tagged)
  // rdi = address of parameter map or backing store (tagged)

  // Initialize parameter map. If there are no mapped arguments, we're done.
  Label skip_parameter_map;
  __ testp(rbx, rbx);
  __ j(zero, &skip_parameter_map);

  __ LoadRoot(kScratchRegister, Heap::kSloppyArgumentsElementsMapRootIndex);
  // rbx contains the untagged mapped parameter count. Add 2 and tag to write.
  __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
  __ Integer64PlusConstantToSmi(r9, rbx, 2);
  __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
  __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
  __ leap(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
  __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);

  // Copy the parameter slots and the holes in the arguments.
  // We need to fill in mapped_parameter_count slots. They index the context,
  // where parameters are stored in reverse order, at
  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
  // The mapped parameters thus need to get indices
  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
  // We loop from right to left.
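  // For illustration: with parameter_count == 3 and a mapped count of 2, the
  // two mapped slots get the context indices MIN_CONTEXT_SLOTS + 2 and
  // MIN_CONTEXT_SLOTS + 1, matching the reverse storage order described
  // above.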
  Label parameters_loop, parameters_test;

  // Load tagged parameter count into r9.
  __ Integer32ToSmi(r9, rbx);
  __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
  __ addp(r8, args.GetArgumentOperand(2));
  __ subp(r8, r9);
  __ Move(r11, factory->the_hole_value());
  __ movp(rdx, rdi);
  __ leap(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
  // r9 = loop variable (tagged)
  // r8 = mapping index (tagged)
  // r11 = the hole value
  // rdx = address of parameter map (tagged)
  // rdi = address of backing store (tagged)
  __ jmp(&parameters_test, Label::kNear);

  __ bind(&parameters_loop);
  __ SmiSubConstant(r9, r9, Smi::FromInt(1));
  __ SmiToInteger64(kScratchRegister, r9);
  __ movp(FieldOperand(rdx, kScratchRegister,
                       times_pointer_size,
                       kParameterMapHeaderSize),
          r8);
  __ movp(FieldOperand(rdi, kScratchRegister,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          r11);
  __ SmiAddConstant(r8, r8, Smi::FromInt(1));
  __ bind(&parameters_test);
  __ SmiTest(r9);
  __ j(not_zero, &parameters_loop, Label::kNear);

  __ bind(&skip_parameter_map);

  // rcx = argument count (tagged)
  // rdi = address of backing store (tagged)
  // Copy arguments header and remaining slots (if there are any).
  __ Move(FieldOperand(rdi, FixedArray::kMapOffset),
          factory->fixed_array_map());
  __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);

  Label arguments_loop, arguments_test;
  __ movp(r8, rbx);
  __ movp(rdx, args.GetArgumentOperand(1));
  // Untag rcx for the loop below.
  __ SmiToInteger64(rcx, rcx);
  __ leap(kScratchRegister, Operand(r8, times_pointer_size, 0));
  __ subp(rdx, kScratchRegister);
  __ jmp(&arguments_test, Label::kNear);

  __ bind(&arguments_loop);
  __ subp(rdx, Immediate(kPointerSize));
  __ movp(r9, Operand(rdx, 0));
  __ movp(FieldOperand(rdi, r8,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          r9);
  __ addp(r8, Immediate(1));

  __ bind(&arguments_test);
  __ cmpp(r8, rcx);
  __ j(less, &arguments_loop, Label::kNear);

  // Return and remove the on-stack parameters.
  __ ret(3 * kPointerSize);

  // Do the runtime call to allocate the arguments object.
  // rcx = argument count (untagged)
  __ bind(&runtime);
  __ Integer32ToSmi(rcx, rcx);
  __ movp(args.GetArgumentOperand(2), rcx);  // Patch argument count.
  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}

void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
  // rsp[0] : return address
  // rsp[8] : number of parameters
  // rsp[16] : receiver displacement
  // rsp[24] : function

  // Check if the calling frame is an arguments adaptor frame.
  Label runtime;
  __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
  __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(not_equal, &runtime);

  // Patch the arguments.length and the parameters pointer.
  StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
  __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ movp(args.GetArgumentOperand(2), rcx);
  __ SmiToInteger64(rcx, rcx);
  __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
                       StandardFrameConstants::kCallerSPOffset));
  __ movp(args.GetArgumentOperand(1), rdx);

  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}

void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
  // Return address is on the stack.
  Label slow;

  Register receiver = LoadDescriptor::ReceiverRegister();
  Register key = LoadDescriptor::NameRegister();
  Register scratch = rax;
  DCHECK(!scratch.is(receiver) && !scratch.is(key));

  // Check that the key is an array index, that is Uint32.
  STATIC_ASSERT(kSmiValueSize <= 32);
  __ JumpUnlessNonNegativeSmi(key, &slow);

  // Everything is fine, call runtime.
  __ PopReturnAddressTo(scratch);
  __ Push(receiver);  // receiver
  __ Push(key);       // key
  __ PushReturnAddressFrom(scratch);

  // Perform tail call to the entry.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
                        masm->isolate()),
      2, 1);

  __ bind(&slow);
  PropertyAccessCompiler::TailCallBuiltin(
      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
}

void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
  // rsp[0] : return address
  // rsp[8] : number of parameters
  // rsp[16] : receiver displacement
  // rsp[24] : function

  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor_frame, try_allocate, runtime;
  __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
  __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(equal, &adaptor_frame);

  // Get the length from the frame.
  StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
  __ movp(rcx, args.GetArgumentOperand(2));
  __ SmiToInteger64(rcx, rcx);
  __ jmp(&try_allocate);

  // Patch the arguments.length and the parameters pointer.
  __ bind(&adaptor_frame);
  __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ movp(args.GetArgumentOperand(2), rcx);
  __ SmiToInteger64(rcx, rcx);
  __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
                       StandardFrameConstants::kCallerSPOffset));
  __ movp(args.GetArgumentOperand(1), rdx);

  // Try the new space allocation. Start out with computing the size of
  // the arguments object and the elements array.
  Label add_arguments_object;
  __ bind(&try_allocate);
  __ testp(rcx, rcx);
  __ j(zero, &add_arguments_object, Label::kNear);
  __ leap(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
  __ bind(&add_arguments_object);
  __ addp(rcx, Immediate(Heap::kStrictArgumentsObjectSize));
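  // E.g. (assuming 64-bit pointers) three arguments need
  // FixedArray::kHeaderSize + 3 * 8 bytes for the elements array plus
  // Heap::kStrictArgumentsObjectSize for the object; zero arguments skip the
  // elements array entirely.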

  // Do the allocation of both objects in one go.
  __ Allocate(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);

  // Get the arguments map from the current native context.
  __ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  __ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
  const int offset = Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX);
  __ movp(rdi, Operand(rdi, offset));

  __ movp(FieldOperand(rax, JSObject::kMapOffset), rdi);
  __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
  __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
  __ movp(FieldOperand(rax, JSObject::kElementsOffset), kScratchRegister);

  // Get the length (smi tagged) and set that as an in-object property too.
  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
  __ movp(rcx, args.GetArgumentOperand(2));
  __ movp(FieldOperand(rax, JSObject::kHeaderSize +
                                Heap::kArgumentsLengthIndex * kPointerSize),
          rcx);

  // If there are no actual arguments, we're done.
  Label done;
  __ testp(rcx, rcx);
  __ j(zero, &done);

  // Get the parameters pointer from the stack.
  __ movp(rdx, args.GetArgumentOperand(1));

  // Set up the elements pointer in the allocated arguments object and
  // initialize the header in the elements fixed array.
  __ leap(rdi, Operand(rax, Heap::kStrictArgumentsObjectSize));
  __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
  __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
  __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);

  __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
  // Untag the length for the loop below.
  __ SmiToInteger64(rcx, rcx);

  // Copy the fixed array slots.
  Label loop;
  __ bind(&loop);
  __ movp(rbx, Operand(rdx, -1 * kPointerSize));  // Skip receiver.
  __ movp(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
  __ addp(rdi, Immediate(kPointerSize));
  __ subp(rdx, Immediate(kPointerSize));
  __ decp(rcx);
  __ j(not_zero, &loop);

  // Return and remove the on-stack parameters.
  __ bind(&done);
  __ ret(3 * kPointerSize);

  // Do the runtime call to allocate the arguments object.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
}

void RegExpExecStub::Generate(MacroAssembler* masm) {
  // Just jump directly to the runtime if native RegExp is not selected at
  // compile time, or if the regexp entry in generated code is turned off by
  // a runtime switch or at compilation.
#ifdef V8_INTERPRETED_REGEXP
  __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
#else  // V8_INTERPRETED_REGEXP

  // Stack frame on entry.
  //  rsp[0]  : return address
  //  rsp[8]  : last_match_info (expected JSArray)
  //  rsp[16] : previous index
  //  rsp[24] : subject string
  //  rsp[32] : JSRegExp object

  enum RegExpExecStubArgumentIndices {
    JS_REG_EXP_OBJECT_ARGUMENT_INDEX,
    SUBJECT_STRING_ARGUMENT_INDEX,
    PREVIOUS_INDEX_ARGUMENT_INDEX,
    LAST_MATCH_INFO_ARGUMENT_INDEX,
    REG_EXP_EXEC_ARGUMENT_COUNT
  };

  StackArgumentsAccessor args(rsp, REG_EXP_EXEC_ARGUMENT_COUNT,
                              ARGUMENTS_DONT_CONTAIN_RECEIVER);
  Label runtime;
  // Ensure that a RegExp stack is allocated.
  ExternalReference address_of_regexp_stack_memory_address =
      ExternalReference::address_of_regexp_stack_memory_address(isolate());
  ExternalReference address_of_regexp_stack_memory_size =
      ExternalReference::address_of_regexp_stack_memory_size(isolate());
  __ Load(kScratchRegister, address_of_regexp_stack_memory_size);
  __ testp(kScratchRegister, kScratchRegister);
  __ j(zero, &runtime);

  // Check that the first argument is a JSRegExp object.
  __ movp(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
  __ JumpIfSmi(rax, &runtime);
  __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
  __ j(not_equal, &runtime);

  // Check that the RegExp has been compiled (data contains a fixed array).
  __ movp(rax, FieldOperand(rax, JSRegExp::kDataOffset));
  if (FLAG_debug_code) {
    Condition is_smi = masm->CheckSmi(rax);
    __ Check(NegateCondition(is_smi),
             kUnexpectedTypeForRegExpDataFixedArrayExpected);
    __ CmpObjectType(rax, FIXED_ARRAY_TYPE, kScratchRegister);
    __ Check(equal, kUnexpectedTypeForRegExpDataFixedArrayExpected);
  }

  // rax: RegExp data (FixedArray)
  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
  __ SmiToInteger32(rbx, FieldOperand(rax, JSRegExp::kDataTagOffset));
  __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
  __ j(not_equal, &runtime);

  // rax: RegExp data (FixedArray)
  // Check that the number of captures fits in the static offsets vector
  // buffer.
  __ SmiToInteger32(rdx,
                    FieldOperand(rax, JSRegExp::kIrregexpCaptureCountOffset));
  // Check (number_of_captures + 1) * 2 <= offsets vector size,
  // or number_of_captures <= offsets vector size / 2 - 1.
  STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
  __ cmpl(rdx, Immediate(Isolate::kJSRegexpStaticOffsetsVectorSize / 2 - 1));
  __ j(above, &runtime);
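  // (For instance, if kJSRegexpStaticOffsetsVectorSize were 50, regexps with
  // more than 24 captures would take the runtime path here.)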

  // Reset offset for possibly sliced string.
  __ Set(r14, 0);
  __ movp(rdi, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
  __ JumpIfSmi(rdi, &runtime);
  __ movp(r15, rdi);  // Make a copy of the original subject string.
  __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
  // rax: RegExp data (FixedArray)
  // rdi: subject string
  // r15: subject string
  // Handle subject string according to its encoding and representation:
  // (1) Sequential two byte? If yes, go to (9).
  // (2) Sequential one byte? If yes, go to (6).
  // (3) Anything but sequential or cons? If yes, go to (7).
  // (4) Cons string. If the string is flat, replace subject with first string.
  //     Otherwise bailout.
  // (5a) Is subject sequential two byte? If yes, go to (9).
  // (5b) Is subject external? If yes, go to (8).
  // (6) One byte sequential. Load regexp code for one byte.
  // (E) Carry on.
  /// [...]

  // Deferred code at the end of the stub:
  // (7) Not a long external string? If yes, go to (10).
  // (8) External string. Make it, offset-wise, look like a sequential string.
  // (8a) Is the external string one byte? If yes, go to (6).
  // (9) Two byte sequential. Load regexp code for two byte. Go to (E).
  // (10) Short external string or not a string? If yes, bail out to runtime.
  // (11) Sliced string. Replace subject with parent. Go to (5a).

  Label seq_one_byte_string /* 6 */, seq_two_byte_string /* 9 */,
      external_string /* 8 */, check_underlying /* 5a */,
      not_seq_nor_cons /* 7 */, check_code /* E */,
      not_long_external /* 10 */;

  // (1) Sequential two byte? If yes, go to (9).
  __ andb(rbx, Immediate(kIsNotStringMask |
                         kStringRepresentationMask |
                         kStringEncodingMask |
                         kShortExternalStringMask));
  STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
  __ j(zero, &seq_two_byte_string);  // Go to (9).

  // (2) Sequential one byte? If yes, go to (6).
  // Any other sequential string must be one byte.
  __ andb(rbx, Immediate(kIsNotStringMask |
                         kStringRepresentationMask |
                         kShortExternalStringMask));
  __ j(zero, &seq_one_byte_string, Label::kNear);  // Go to (6).

  // (3) Anything but sequential or cons? If yes, go to (7).
  // We check whether the subject string is a cons, since sequential strings
  // have already been covered.
  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
  __ cmpp(rbx, Immediate(kExternalStringTag));
  __ j(greater_equal, &not_seq_nor_cons);  // Go to (7).

  // (4) Cons string. Check that it's flat.
  // Replace subject with first string and reload instance type.
  __ CompareRoot(FieldOperand(rdi, ConsString::kSecondOffset),
                 Heap::kempty_stringRootIndex);
  __ j(not_equal, &runtime);
  __ movp(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
  __ bind(&check_underlying);
  __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
  __ movp(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));

  // (5a) Is subject sequential two byte? If yes, go to (9).
  __ testb(rbx, Immediate(kStringRepresentationMask | kStringEncodingMask));
  STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
  __ j(zero, &seq_two_byte_string);  // Go to (9).
  // (5b) Is subject external? If yes, go to (8).
  __ testb(rbx, Immediate(kStringRepresentationMask));
  // The underlying external string is never a short external string.
  STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
  STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
  __ j(not_zero, &external_string);  // Go to (8).

  // (6) One byte sequential. Load regexp code for one byte.
  __ bind(&seq_one_byte_string);
  // rax: RegExp data (FixedArray)
  __ movp(r11, FieldOperand(rax, JSRegExp::kDataOneByteCodeOffset));
  __ Set(rcx, 1);  // Type is one byte.

  // (E) Carry on. String handling is done.
  __ bind(&check_code);
  // r11: irregexp code
  // Check that the irregexp code has been generated for the actual string
  // encoding. If it has, the field contains a code object; otherwise it
  // contains a smi (code flushing support).
  __ JumpIfSmi(r11, &runtime);

  // rdi: sequential subject string (or look-alike, external string)
  // r15: original subject string
  // rcx: encoding of subject string (1 if one_byte, 0 if two_byte);
  // r11: code
  // Load used arguments before starting to push arguments for call to native
  // RegExp code to avoid handling changing stack height.
  // We have to use r15 instead of rdi to load the length because rdi might
  // have been only made to look like a sequential string when it actually
  // is an external string.
  __ movp(rbx, args.GetArgumentOperand(PREVIOUS_INDEX_ARGUMENT_INDEX));
  __ JumpIfNotSmi(rbx, &runtime);
  __ SmiCompare(rbx, FieldOperand(r15, String::kLengthOffset));
  __ j(above_equal, &runtime);
  __ SmiToInteger64(rbx, rbx);

  // rdi: subject string
  // rbx: previous index
  // rcx: encoding of subject string (1 if one_byte, 0 if two_byte);
  // r11: code
  // All checks done. Now push arguments for native regexp code.
  Counters* counters = isolate()->counters();
  __ IncrementCounter(counters->regexp_entry_native(), 1);

  // Isolates: note we add an additional parameter here (isolate pointer).
  static const int kRegExpExecuteArguments = 9;
  int argument_slots_on_stack =
      masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
  __ EnterApiExitFrame(argument_slots_on_stack);
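  // The nine arguments set up below, in order: (1) the original subject
  // string, (2) the previous index, (3) the start and (4) the end of the
  // string data, (5) the static offsets vector, (6) the capture register
  // count (forced to zero here), (7) the high end of the backtrack stack,
  // (8) a direct-call flag, and (9) the isolate.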

  // Argument 9: Pass current isolate address.
  __ LoadAddress(kScratchRegister,
                 ExternalReference::isolate_address(isolate()));
  __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kRegisterSize),
          kScratchRegister);

  // Argument 8: Indicate that this is a direct call from JavaScript.
  __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kRegisterSize),
          Immediate(1));

  // Argument 7: Start (high end) of backtracking stack memory area.
  __ Move(kScratchRegister, address_of_regexp_stack_memory_address);
  __ movp(r9, Operand(kScratchRegister, 0));
  __ Move(kScratchRegister, address_of_regexp_stack_memory_size);
  __ addp(r9, Operand(kScratchRegister, 0));
  __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kRegisterSize), r9);

  // Argument 6: Set the number of capture registers to zero to force global
  // regexps to behave as non-global. This does not affect non-global regexps.
  // Argument 6 is passed in r9 on Linux and on the stack on Windows.
#ifdef _WIN64
  __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kRegisterSize),
          Immediate(0));
#else
  __ Set(r9, 0);
#endif

  // Argument 5: static offsets vector buffer.
  __ LoadAddress(
      r8, ExternalReference::address_of_static_offsets_vector(isolate()));
  // Argument 5 is passed in r8 on Linux and on the stack on Windows.
#ifdef _WIN64
  __ movq(Operand(rsp, (argument_slots_on_stack - 5) * kRegisterSize), r8);
#endif

  // rdi: subject string
  // rbx: previous index
  // rcx: encoding of subject string (1 if one_byte, 0 if two_byte);
  // r11: code
  // r14: slice offset
  // r15: original subject string

  // Argument 2: Previous index.
  __ movp(arg_reg_2, rbx);

  // Argument 4: End of string data
  // Argument 3: Start of string data
  Label setup_two_byte, setup_rest, got_length, length_not_from_slice;
  // Prepare start and end index of the input.
  // Load the length from the original sliced string if that is the case.
  __ addp(rbx, r14);
  __ SmiToInteger32(arg_reg_3, FieldOperand(r15, String::kLengthOffset));
  __ addp(r14, arg_reg_3);  // Using arg3 as scratch.

  // rbx: start index of the input
  // r14: end index of the input
  // r15: original subject string
  __ testb(rcx, rcx);  // Last use of rcx as encoding of subject string.
  __ j(zero, &setup_two_byte, Label::kNear);
  __ leap(arg_reg_4,
          FieldOperand(rdi, r14, times_1, SeqOneByteString::kHeaderSize));
  __ leap(arg_reg_3,
          FieldOperand(rdi, rbx, times_1, SeqOneByteString::kHeaderSize));
  __ jmp(&setup_rest, Label::kNear);
  __ bind(&setup_two_byte);
  __ leap(arg_reg_4,
          FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize));
  __ leap(arg_reg_3,
          FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
  __ bind(&setup_rest);

  // Argument 1: Original subject string.
  // The original subject is in the previous stack frame. Therefore we have to
  // use rbp, which points exactly to one pointer size below the previous rsp.
  // (Because creating a new stack frame pushes the previous rbp onto the stack
  // and thereby moves up rsp by one kPointerSize.)
  __ movp(arg_reg_1, r15);

  // Locate the code entry and call it.
  __ addp(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
  __ call(r11);

  __ LeaveApiExitFrame(true);

  // Check the result.
  Label success;
  Label exception;
  __ cmpl(rax, Immediate(1));
  // We expect exactly one result since we force the called regexp to behave
  // as non-global.
  __ j(equal, &success, Label::kNear);
  __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
  __ j(equal, &exception);
  __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
  // If none of the above, it can only be retry.
  // Handle that in the runtime system.
  __ j(not_equal, &runtime);

  // For failure return null.
  __ LoadRoot(rax, Heap::kNullValueRootIndex);
  __ ret(REG_EXP_EXEC_ARGUMENT_COUNT * kPointerSize);

  // Load RegExp data.
  __ bind(&success);
  __ movp(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
  __ movp(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
  __ SmiToInteger32(rax,
                    FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
  // Calculate number of capture registers (number_of_captures + 1) * 2.
  __ leal(rdx, Operand(rax, rax, times_1, 2));
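  // (The lea computes rax + rax + 2, i.e. 2 * (number_of_captures + 1), in a
  // single instruction.)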

  // rdx: Number of capture registers
  // Check that the fourth object is a JSArray object.
  __ movp(r15, args.GetArgumentOperand(LAST_MATCH_INFO_ARGUMENT_INDEX));
  __ JumpIfSmi(r15, &runtime);
  __ CmpObjectType(r15, JS_ARRAY_TYPE, kScratchRegister);
  __ j(not_equal, &runtime);
  // Check that the JSArray is in fast case.
  __ movp(rbx, FieldOperand(r15, JSArray::kElementsOffset));
  __ movp(rax, FieldOperand(rbx, HeapObject::kMapOffset));
  __ CompareRoot(rax, Heap::kFixedArrayMapRootIndex);
  __ j(not_equal, &runtime);
  // Check that the last match info has space for the capture registers and the
  // additional information. Ensure no overflow in add.
  STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
  __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
  __ subl(rax, Immediate(RegExpImpl::kLastMatchOverhead));
  __ cmpl(rdx, rax);
  __ j(greater, &runtime);

  // rbx: last_match_info backing store (FixedArray)
  // rdx: number of capture registers
  // Store the capture count.
  __ Integer32ToSmi(kScratchRegister, rdx);
  __ movp(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
          kScratchRegister);
  // Store last subject and last input.
  __ movp(rax, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
  __ movp(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
  __ movp(rcx, rax);
  __ RecordWriteField(rbx,
                      RegExpImpl::kLastSubjectOffset,
                      rax,
                      rdi,
                      kDontSaveFPRegs);
  __ movp(rax, rcx);
  __ movp(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
  __ RecordWriteField(rbx,
                      RegExpImpl::kLastInputOffset,
                      rax,
                      rdi,
                      kDontSaveFPRegs);

  // Get the static offsets vector filled by the native regexp code.
  __ LoadAddress(
      rcx, ExternalReference::address_of_static_offsets_vector(isolate()));

  // rbx: last_match_info backing store (FixedArray)
  // rcx: offsets vector
  // rdx: number of capture registers
  Label next_capture, done;
  // Capture register counter starts from number of capture registers and
  // counts down until wrapping after zero.
  __ bind(&next_capture);
  __ subp(rdx, Immediate(1));
  __ j(negative, &done, Label::kNear);
  // Read the value from the static offsets vector buffer and make it a smi.
  __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
  __ Integer32ToSmi(rdi, rdi);
  // Store the smi value in the last match info.
  __ movp(FieldOperand(rbx,
                       rdx,
                       times_pointer_size,
                       RegExpImpl::kFirstCaptureOffset),
          rdi);
  __ jmp(&next_capture);
  __ bind(&done);

  // Return last match info.
  __ movp(rax, r15);
  __ ret(REG_EXP_EXEC_ARGUMENT_COUNT * kPointerSize);

  __ bind(&exception);
  // Result must now be exception. If there is no pending exception already, a
  // stack overflow (on the backtrack stack) was detected in RegExp code, but
  // the exception has not been created yet. Handle that in the runtime system.
1346 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
1347 ExternalReference pending_exception_address(
1348 Isolate::kPendingExceptionAddress, isolate());
1349 Operand pending_exception_operand =
1350 masm->ExternalOperand(pending_exception_address, rbx);
1351 __ movp(rax, pending_exception_operand);
1352 __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
1353 __ cmpp(rax, rdx);
1354 __ j(equal, &runtime);
1355 __ movp(pending_exception_operand, rdx);
1356
1357 __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
1358 Label termination_exception;
1359 __ j(equal, &termination_exception, Label::kNear);
1360 __ Throw(rax);
1361
1362 __ bind(&termination_exception);
1363 __ ThrowUncatchable(rax);
1364
1365 // Do the runtime call to execute the regexp.
1366 __ bind(&runtime);
1367 __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
1368
1369 // Deferred code for string handling.
1370 // (7) Not a long external string? If yes, go to (10).
1371 __ bind(¬_seq_nor_cons);
1372 // Compare flags are still set from (3).
1373 __ j(greater, ¬_long_external, Label::kNear); // Go to (10).
1374
1375 // (8) External string. Short external strings have been ruled out.
1376 __ bind(&external_string);
1377 __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
1378 __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
1379 if (FLAG_debug_code) {
1380 // Assert that we do not have a cons or slice (indirect strings) here.
1381 // Sequential strings have already been ruled out.
1382 __ testb(rbx, Immediate(kIsIndirectStringMask));
1383 __ Assert(zero, kExternalStringExpectedButNotFound);
1384 }
1385 __ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
1386 // Move the pointer so that offset-wise, it looks like a sequential string.
1387 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
1388 __ subp(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
1389 STATIC_ASSERT(kTwoByteStringTag == 0);
1390 // (8a) Is the external string one byte? If yes, go to (6).
1391 __ testb(rbx, Immediate(kStringEncodingMask));
1392 __ j(not_zero, &seq_one_byte_string); // Goto (6).
1393
1394 // rdi: subject string (flat two-byte)
1395 // rax: RegExp data (FixedArray)
1396 // (9) Two byte sequential. Load regexp code for one byte. Go to (E).
1397 __ bind(&seq_two_byte_string);
1398 __ movp(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
1399 __ Set(rcx, 0); // Type is two byte.
1400 __ jmp(&check_code); // Go to (E).
1401
1402 // (10) Not a string or a short external string? If yes, bail out to runtime.
1403 __ bind(&not_long_external);
1404 // Catch non-string subject or short external string.
1405 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
1406 __ testb(rbx, Immediate(kIsNotStringMask | kShortExternalStringMask));
1407 __ j(not_zero, &runtime);
1408
1409 // (11) Sliced string. Replace subject with parent. Go to (5a).
1410 // Load offset into r14 and replace subject string with parent.
1411 __ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
1412 __ movp(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
1413 __ jmp(&check_underlying);
1414 #endif // V8_INTERPRETED_REGEXP
1415 }
1416
1417
1418 static int NegativeComparisonResult(Condition cc) {
1419 DCHECK(cc != equal);
1420 DCHECK((cc == less) || (cc == less_equal)
1421 || (cc == greater) || (cc == greater_equal));
1422 return (cc == greater || cc == greater_equal) ? LESS : GREATER;
1423 }
1424
1425
1426 static void CheckInputType(MacroAssembler* masm, Register input,
1427 CompareICState::State expected, Label* fail) {
1428 Label ok;
1429 if (expected == CompareICState::SMI) {
1430 __ JumpIfNotSmi(input, fail);
1431 } else if (expected == CompareICState::NUMBER) {
1432 __ JumpIfSmi(input, &ok);
1433 __ CompareMap(input, masm->isolate()->factory()->heap_number_map());
1434 __ j(not_equal, fail);
1435 }
1436 // We could be strict about internalized/non-internalized here, but as long as
1437 // hydrogen doesn't care, the stub doesn't have to care either.
1438 __ bind(&ok);
1439 }
1440
1441
1442 static void BranchIfNotInternalizedString(MacroAssembler* masm,
1443 Label* label,
1444 Register object,
1445 Register scratch) {
1446 __ JumpIfSmi(object, label);
1447 __ movp(scratch, FieldOperand(object, HeapObject::kMapOffset));
1448 __ movzxbp(scratch,
1449 FieldOperand(scratch, Map::kInstanceTypeOffset));
1450 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
1451 __ testb(scratch, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
1452 __ j(not_zero, label);
1453 }
1454
1455
1456 void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
1457 Label check_unequal_objects, done;
1458 Condition cc = GetCondition();
1459 Factory* factory = isolate()->factory();
1460
1461 Label miss;
1462 CheckInputType(masm, rdx, left(), &miss);
1463 CheckInputType(masm, rax, right(), &miss);
1464
1465 // Compare two smis.
1466 Label non_smi, smi_done;
1467 __ JumpIfNotBothSmi(rax, rdx, &non_smi);
1468 __ subp(rdx, rax);
1469 __ j(no_overflow, &smi_done);
1470 __ notp(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
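// Why notp is sufficient (editorial sketch): only the sign of the result
// matters here, the overflowed difference has the wrong sign, and
// ~rdx == -rdx - 1 flips the sign bit unconditionally. The result cannot be
// zero either: x64 smis keep their payload in the upper 32 bits, so the
// difference has all-zero low bits and ~rdx has them all set.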
1471 __ bind(&smi_done);
1472 __ movp(rax, rdx);
1473 __ ret(0);
1474 __ bind(&non_smi);
1475
1476 // The compare stub returns a positive, negative, or zero 64-bit integer
1477 // value in rax, corresponding to the result of comparing the two inputs.
1478 // NOTICE! This code is only reached after a smi-fast-case check, so
1479 // it is certain that at least one operand isn't a smi.
1480
1481 // Two identical objects are equal unless they are both NaN or undefined.
1482 {
1483 Label not_identical;
1484 __ cmpp(rax, rdx);
1485 __ j(not_equal, &not_identical, Label::kNear);
1486
1487 if (cc != equal) {
1488 // Check for undefined. undefined OP undefined is false even though
1489 // undefined == undefined.
1490 Label check_for_nan;
1491 __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
1492 __ j(not_equal, &check_for_nan, Label::kNear);
1493 __ Set(rax, NegativeComparisonResult(cc));
1494 __ ret(0);
1495 __ bind(&check_for_nan);
1496 }
1497
1498 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
1499 // so we do the second-best thing: test it ourselves.
1500 Label heap_number;
1501 // If it's not a heap number, then return equal for (in)equality operator.
1502 __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
1503 factory->heap_number_map());
1504 __ j(equal, &heap_number, Label::kNear);
1505 if (cc != equal) {
1506 // Call runtime on identical objects. Otherwise return equal.
1507 __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
1508 __ j(above_equal, &not_identical, Label::kNear);
1509 }
1510 __ Set(rax, EQUAL);
1511 __ ret(0);
1512
1513 __ bind(&heap_number);
1514 // It is a heap number, so return equal if it's not NaN.
1515 // For NaN, return 1 for every condition except greater and
1516 // greater-equal. Return -1 for them, so the comparison yields
1517 // false for all conditions except not-equal.
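// Worked example (JS semantics): when both inputs are the same NaN heap
// number, 'NaN < NaN', 'NaN <= NaN', 'NaN > NaN' and 'NaN >= NaN' must all
// evaluate to false. The setcc below leaves rax == 1 (GREATER) for NaN,
// which already falsifies less/less_equal; negating to -1 (LESS) for
// greater/greater_equal falsifies those as well.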
1518 __ Set(rax, EQUAL);
1519 __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
1520 __ ucomisd(xmm0, xmm0);
1521 __ setcc(parity_even, rax);
1522 // rax is 0 for equal non-NaN heap numbers, 1 for NaNs.
1523 if (cc == greater_equal || cc == greater) {
1524 __ negp(rax);
1525 }
1526 __ ret(0);
1527
1528 __ bind(&not_identical);
1529 }
1530
1531 if (cc == equal) { // Both strict and non-strict.
1532 Label slow; // Fallthrough label.
1533
1534 // If we're doing a strict equality comparison, we don't have to do
1535 // type conversion, so we generate code to do fast comparison for objects
1536 // and oddballs. Non-smi numbers and strings still go through the usual
1537 // slow-case code.
1538 if (strict()) {
1539 // If either is a Smi (we know that not both are), then they can only
1540 // be equal if the other is a HeapNumber. If so, use the slow case.
1541 {
1542 Label not_smis;
1543 __ SelectNonSmi(rbx, rax, rdx, &not_smis);
1544
1545 // Check if the non-smi operand is a heap number.
1546 __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
1547 factory->heap_number_map());
1548 // If heap number, handle it in the slow case.
1549 __ j(equal, &slow);
1550 // Return non-equal. ebx (the lower half of rbx) is not zero.
1551 __ movp(rax, rbx);
1552 __ ret(0);
1553
1554 __ bind(&not_smis);
1555 }
1556
1557 // If either operand is a JSObject or an oddball value, then they are not
1558 // equal since their pointers are different.
1559 // There is no test for undetectability in strict equality.
1560
1561 // If the first object is a JS object, we have done pointer comparison.
1562 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
1563 Label first_non_object;
1564 __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
1565 __ j(below, &first_non_object, Label::kNear);
1566 // Return non-zero (eax (not rax) is not zero).
1567 Label return_not_equal;
1568 STATIC_ASSERT(kHeapObjectTag != 0);
1569 __ bind(&return_not_equal);
1570 __ ret(0);
1571
1572 __ bind(&first_non_object);
1573 // Check for oddballs: true, false, null, undefined.
1574 __ CmpInstanceType(rcx, ODDBALL_TYPE);
1575 __ j(equal, &return_not_equal);
1576
1577 __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
1578 __ j(above_equal, &return_not_equal);
1579
1580 // Check for oddballs: true, false, null, undefined.
1581 __ CmpInstanceType(rcx, ODDBALL_TYPE);
1582 __ j(equal, &return_not_equal);
1583
1584 // Fall through to the general case.
1585 }
1586 __ bind(&slow);
1587 }
1588
1589 // Generate the number comparison code.
1590 Label non_number_comparison;
1591 Label unordered;
1592 FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
1593 __ xorl(rax, rax);
1594 __ xorl(rcx, rcx);
1595 __ ucomisd(xmm0, xmm1);
1596
1597 // Don't base result on EFLAGS when a NaN is involved.
1598 __ j(parity_even, &unordered, Label::kNear);
1599 // Return a result of -1, 0, or 1, based on EFLAGS.
1600 __ setcc(above, rax);
1601 __ setcc(below, rcx);
1602 __ subp(rax, rcx);
1603 __ ret(0);
1604
1605 // If one of the numbers was NaN, then the result is always false.
1606 // The cc is never not-equal.
1607 __ bind(&unordered);
1608 DCHECK(cc != not_equal);
1609 if (cc == less || cc == less_equal) {
1610 __ Set(rax, 1);
1611 } else {
1612 __ Set(rax, -1);
1613 }
1614 __ ret(0);
1615
1616 // The number comparison code did not provide a valid result.
1617 __ bind(&non_number_comparison);
1618
1619 // Fast negative check for internalized-to-internalized equality.
1620 Label check_for_strings;
1621 if (cc == equal) {
1622 BranchIfNotInternalizedString(
1623 masm, &check_for_strings, rax, kScratchRegister);
1624 BranchIfNotInternalizedString(
1625 masm, &check_for_strings, rdx, kScratchRegister);
1626
1627 // We've already checked for object identity, so if both operands are
1628 // internalized strings they aren't equal. Register eax (not rax) already
1629 // holds a non-zero value, which indicates not equal, so just return.
1630 __ ret(0);
1631 }
1632
1633 __ bind(&check_for_strings);
1634
1635 __ JumpIfNotBothSequentialOneByteStrings(rdx, rax, rcx, rbx,
1636 &check_unequal_objects);
1637
1638 // Inline comparison of one-byte strings.
1639 if (cc == equal) {
1640 StringHelper::GenerateFlatOneByteStringEquals(masm, rdx, rax, rcx, rbx);
1641 } else {
1642 StringHelper::GenerateCompareFlatOneByteStrings(masm, rdx, rax, rcx, rbx,
1643 rdi, r8);
1644 }
1645
1646 #ifdef DEBUG
1647 __ Abort(kUnexpectedFallThroughFromStringComparison);
1648 #endif
1649
1650 __ bind(&check_unequal_objects);
1651 if (cc == equal && !strict()) {
1652 // Not strict equality. Objects are unequal if
1653 // they are both JSObjects and not undetectable,
1654 // and their pointers are different.
1655 Label not_both_objects, return_unequal;
1656 // At most one is a smi, so we can test for smi by adding the two.
1657 // A smi plus a heap object has the low bit set, a heap object plus
1658 // a heap object has the low bit clear.
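// Arithmetic sketch (tag values per the STATIC_ASSERTs below): a smi has low
// bit 0 and a heap object pointer has low bit 1, so smi + heap object yields
// low bit 1, while heap object + heap object yields 1 + 1 = 2, i.e. low bit
// 0. The leap/testb pair below tests exactly that bit.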
1659 STATIC_ASSERT(kSmiTag == 0);
1660 STATIC_ASSERT(kSmiTagMask == 1);
1661 __ leap(rcx, Operand(rax, rdx, times_1, 0));
1662 __ testb(rcx, Immediate(kSmiTagMask));
1663 __ j(not_zero, &not_both_objects, Label::kNear);
1664 __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
1665 __ j(below, &not_both_objects, Label::kNear);
1666 __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
1667 __ j(below, &not_both_objects, Label::kNear);
1668 __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
1669 Immediate(1 << Map::kIsUndetectable));
1670 __ j(zero, &return_unequal, Label::kNear);
1671 __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
1672 Immediate(1 << Map::kIsUndetectable));
1673 __ j(zero, &return_unequal, Label::kNear);
1674 // The objects are both undetectable, so they both compare as the value
1675 // undefined, and are equal.
1676 __ Set(rax, EQUAL);
1677 __ bind(&return_unequal);
1678 // Return non-equal by returning the non-zero object pointer in rax,
1679 // or return equal if we fell through to here.
1680 __ ret(0);
1681 __ bind(&not_both_objects);
1682 }
1683
1684 // Push arguments below the return address to prepare jump to builtin.
1685 __ PopReturnAddressTo(rcx);
1686 __ Push(rdx);
1687 __ Push(rax);
1688
1689 // Figure out which native to call and setup the arguments.
1690 Builtins::JavaScript builtin;
1691 if (cc == equal) {
1692 builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1693 } else {
1694 builtin = Builtins::COMPARE;
1695 __ Push(Smi::FromInt(NegativeComparisonResult(cc)));
1696 }
1697
1698 __ PushReturnAddressFrom(rcx);
1699
1700 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1701 // tagged as a small integer.
1702 __ InvokeBuiltin(builtin, JUMP_FUNCTION);
1703
1704 __ bind(&miss);
1705 GenerateMiss(masm);
1706 }
1707
1708
1709 static void GenerateRecordCallTarget(MacroAssembler* masm) {
1710 // Cache the called function in a feedback vector slot. Cache states
1711 // are uninitialized, monomorphic (indicated by a JSFunction), and
1712 // megamorphic.
1713 // rax : number of arguments to the construct function
1714 // rbx : Feedback vector
1715 // rdx : slot in feedback vector (Smi)
1716 // rdi : the function to call
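// Cache state transitions implemented below (informal sketch):
//   uninitialized -> monomorphic (the JSFunction), or an AllocationSite when
//                    the callee is the Array function;
//   monomorphic   -> megamorphic on a miss (sentinel store, no write barrier);
//   megamorphic   -> stays megamorphic.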
1717 Isolate* isolate = masm->isolate();
1718 Label initialize, done, miss, megamorphic, not_array_function,
1719 done_no_smi_convert;
1720
1721 // Load the cache state into rcx.
1722 __ SmiToInteger32(rdx, rdx);
1723 __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
1724 FixedArray::kHeaderSize));
1725
1726 // A monomorphic cache hit or an already megamorphic state: invoke the
1727 // function without changing the state.
1728 __ cmpp(rcx, rdi);
1729 __ j(equal, &done);
1730 __ Cmp(rcx, TypeFeedbackVector::MegamorphicSentinel(isolate));
1731 __ j(equal, &done);
1732
1733 if (!FLAG_pretenuring_call_new) {
1734 // If we came here, we need to see if we are the array function.
1735 // If we didn't have a matching function, and we didn't find the megamorphic
1736 // sentinel, then the slot holds either some other function or an
1737 // AllocationSite. Do a map check on the object in rcx.
1738 Handle<Map> allocation_site_map =
1739 masm->isolate()->factory()->allocation_site_map();
1740 __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
1741 __ j(not_equal, &miss);
1742
1743 // Make sure the function is the Array() function
1744 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
1745 __ cmpp(rdi, rcx);
1746 __ j(not_equal, &megamorphic);
1747 __ jmp(&done);
1748 }
1749
1750 __ bind(&miss);
1751
1752 // A monomorphic miss (i.e., the cache is not uninitialized) goes
1753 // megamorphic.
1754 __ Cmp(rcx, TypeFeedbackVector::UninitializedSentinel(isolate));
1755 __ j(equal, &initialize);
1756 // MegamorphicSentinel is an immortal immovable object (undefined) so no
1757 // write-barrier is needed.
1758 __ bind(&megamorphic);
1759 __ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
1760 TypeFeedbackVector::MegamorphicSentinel(isolate));
1761 __ jmp(&done);
1762
1763 // An uninitialized cache is patched with the function or sentinel to
1764 // indicate the ElementsKind if function is the Array constructor.
1765 __ bind(&initialize);
1766
1767 if (!FLAG_pretenuring_call_new) {
1768 // Make sure the function is the Array() function
1769 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
1770 __ cmpp(rdi, rcx);
1771 __ j(not_equal, &not_array_function);
1772
1773 {
1774 FrameScope scope(masm, StackFrame::INTERNAL);
1775
1776 // Arguments register must be smi-tagged to call out.
1777 __ Integer32ToSmi(rax, rax);
1778 __ Push(rax);
1779 __ Push(rdi);
1780 __ Integer32ToSmi(rdx, rdx);
1781 __ Push(rdx);
1782 __ Push(rbx);
1783
1784 CreateAllocationSiteStub create_stub(isolate);
1785 __ CallStub(&create_stub);
1786
1787 __ Pop(rbx);
1788 __ Pop(rdx);
1789 __ Pop(rdi);
1790 __ Pop(rax);
1791 __ SmiToInteger32(rax, rax);
1792 }
1793 __ jmp(&done_no_smi_convert);
1794
1795 __ bind(&not_array_function);
1796 }
1797
1798 __ movp(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
1799 rdi);
1800
1801 // We won't need rdx or rbx anymore, just save rdi
1802 __ Push(rdi);
1803 __ Push(rbx);
1804 __ Push(rdx);
1805 __ RecordWriteArray(rbx, rdi, rdx, kDontSaveFPRegs,
1806 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
1807 __ Pop(rdx);
1808 __ Pop(rbx);
1809 __ Pop(rdi);
1810
1811 __ bind(&done);
1812 __ Integer32ToSmi(rdx, rdx);
1813
1814 __ bind(&done_no_smi_convert);
1815 }
1816
1817
1818 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
1819 // Do not transform the receiver for strict mode functions.
1820 __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
1821 __ testb(FieldOperand(rcx, SharedFunctionInfo::kStrictModeByteOffset),
1822 Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
1823 __ j(not_equal, cont);
1824
1825 // Do not transform the receiver for natives.
1826 // SharedFunctionInfo is already loaded into rcx.
1827 __ testb(FieldOperand(rcx, SharedFunctionInfo::kNativeByteOffset),
1828 Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
1829 __ j(not_equal, cont);
1830 }
1831
1832
1833 static void EmitSlowCase(Isolate* isolate,
1834 MacroAssembler* masm,
1835 StackArgumentsAccessor* args,
1836 int argc,
1837 Label* non_function) {
1838 // Check for function proxy.
1839 __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
1840 __ j(not_equal, non_function);
1841 __ PopReturnAddressTo(rcx);
1842 __ Push(rdi); // put proxy as additional argument under return address
1843 __ PushReturnAddressFrom(rcx);
1844 __ Set(rax, argc + 1);
1845 __ Set(rbx, 0);
1846 __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
1847 {
1848 Handle<Code> adaptor =
1849 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
1850 __ jmp(adaptor, RelocInfo::CODE_TARGET);
1851 }
1852
1853 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
1854 // of the original receiver from the call site).
1855 __ bind(non_function);
1856 __ movp(args->GetReceiverOperand(), rdi);
1857 __ Set(rax, argc);
1858 __ Set(rbx, 0);
1859 __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
1860 Handle<Code> adaptor =
1861 isolate->builtins()->ArgumentsAdaptorTrampoline();
1862 __ Jump(adaptor, RelocInfo::CODE_TARGET);
1863 }
1864
1865
1866 static void EmitWrapCase(MacroAssembler* masm,
1867 StackArgumentsAccessor* args,
1868 Label* cont) {
1869 // Wrap the receiver and patch it back onto the stack.
1870 { FrameScope frame_scope(masm, StackFrame::INTERNAL);
1871 __ Push(rdi);
1872 __ Push(rax);
1873 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
1874 __ Pop(rdi);
1875 }
1876 __ movp(args->GetReceiverOperand(), rax);
1877 __ jmp(cont);
1878 }
1879
1880
1881 static void CallFunctionNoFeedback(MacroAssembler* masm,
1882 int argc, bool needs_checks,
1883 bool call_as_method) {
1884 // rdi : the function to call
1885
1886 // call_as_method can only be true if we are compiling a monomorphic method.
1887 Isolate* isolate = masm->isolate();
1888 Label slow, non_function, wrap, cont;
1889 StackArgumentsAccessor args(rsp, argc);
1890
1891 if (needs_checks) {
1892 // Check that the function really is a JavaScript function.
1893 __ JumpIfSmi(rdi, &non_function);
1894
1895 // Goto slow case if we do not have a function.
1896 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
1897 __ j(not_equal, &slow);
1898 }
1899
1900 // Fast-case: Just invoke the function.
1901 ParameterCount actual(argc);
1902
1903 if (call_as_method) {
1904 if (needs_checks) {
1905 EmitContinueIfStrictOrNative(masm, &cont);
1906 }
1907
1908 // Load the receiver from the stack.
1909 __ movp(rax, args.GetReceiverOperand());
1910
1911 if (needs_checks) {
1912 __ JumpIfSmi(rax, &wrap);
1913
1914 __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
1915 __ j(below, &wrap);
1916 } else {
1917 __ jmp(&wrap);
1918 }
1919
1920 __ bind(&cont);
1921 }
1922
1923 __ InvokeFunction(rdi, actual, JUMP_FUNCTION, NullCallWrapper());
1924
1925 if (needs_checks) {
1926 // Slow-case: Non-function called.
1927 __ bind(&slow);
1928 EmitSlowCase(isolate, masm, &args, argc, &non_function);
1929 }
1930
1931 if (call_as_method) {
1932 __ bind(&wrap);
1933 EmitWrapCase(masm, &args, &cont);
1934 }
1935 }
1936
1937
1938 void CallFunctionStub::Generate(MacroAssembler* masm) {
1939 CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
1940 }
1941
1942
1943 void CallConstructStub::Generate(MacroAssembler* masm) {
1944 // rax : number of arguments
1945 // rbx : feedback vector
1946 // rdx : (only if rbx is not the megamorphic symbol) slot in feedback
1947 // vector (Smi)
1948 // rdi : constructor function
1949 Label slow, non_function_call;
1950
1951 // Check that function is not a smi.
1952 __ JumpIfSmi(rdi, &non_function_call);
1953 // Check that function is a JSFunction.
1954 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
1955 __ j(not_equal, &slow);
1956
1957 if (RecordCallTarget()) {
1958 GenerateRecordCallTarget(masm);
1959
1960 __ SmiToInteger32(rdx, rdx);
1961 if (FLAG_pretenuring_call_new) {
1962 // Put the AllocationSite from the feedback vector into rbx.
1963 // By adding kPointerSize we encode that we know the AllocationSite
1964 // entry is at the feedback vector slot given by rdx + 1.
1965 __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
1966 FixedArray::kHeaderSize + kPointerSize));
1967 } else {
1968 Label feedback_register_initialized;
1969 // Put the AllocationSite from the feedback vector into rbx, or undefined.
1970 __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
1971 FixedArray::kHeaderSize));
1972 __ CompareRoot(FieldOperand(rbx, 0), Heap::kAllocationSiteMapRootIndex);
1973 __ j(equal, &feedback_register_initialized);
1974 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
1975 __ bind(&feedback_register_initialized);
1976 }
1977
1978 __ AssertUndefinedOrAllocationSite(rbx);
1979 }
1980
1981 // Jump to the function-specific construct stub.
1982 Register jmp_reg = rcx;
1983 __ movp(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
1984 __ movp(jmp_reg, FieldOperand(jmp_reg,
1985 SharedFunctionInfo::kConstructStubOffset));
1986 __ leap(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
1987 __ jmp(jmp_reg);
1988
1989 // rdi: called object
1990 // rax: number of arguments
1991 // rcx: object map
1992 Label do_call;
1993 __ bind(&slow);
1994 __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
1995 __ j(not_equal, &non_function_call);
1996 __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
1997 __ jmp(&do_call);
1998
1999 __ bind(&non_function_call);
2000 __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
2001 __ bind(&do_call);
2002 // Set expected number of arguments to zero (not changing rax).
2003 __ Set(rbx, 0);
2004 __ Jump(isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2005 RelocInfo::CODE_TARGET);
2006 }
2007
2008
2009 static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
2010 __ movp(vector, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
2011 __ movp(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
2012 __ movp(vector, FieldOperand(vector,
2013 SharedFunctionInfo::kFeedbackVectorOffset));
2014 }
2015
2016
2017 void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
2018 // rdi - function
2019 // rdx - slot id (as Smi)
2020 Label miss;
2021 int argc = arg_count();
2022 ParameterCount actual(argc);
2023
2024 EmitLoadTypeFeedbackVector(masm, rbx);
2025 __ SmiToInteger32(rdx, rdx);
2026
2027 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
2028 __ cmpp(rdi, rcx);
2029 __ j(not_equal, &miss);
2030
2031 __ movp(rax, Immediate(arg_count()));
2032 __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
2033 FixedArray::kHeaderSize));
2034 // Verify that rcx contains an AllocationSite
2035 Factory* factory = masm->isolate()->factory();
2036 __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
2037 factory->allocation_site_map());
2038 __ j(not_equal, &miss);
2039
2040 __ movp(rbx, rcx);
2041 ArrayConstructorStub stub(masm->isolate(), arg_count());
2042 __ TailCallStub(&stub);
2043
2044 __ bind(&miss);
2045 GenerateMiss(masm);
2046
2047 // The slow case: we need this no matter what to complete a call after a miss.
2048 CallFunctionNoFeedback(masm,
2049 arg_count(),
2050 true,
2051 CallAsMethod());
2052
2053 // Unreachable.
2054 __ int3();
2055 }
2056
2057
2058 void CallICStub::Generate(MacroAssembler* masm) {
2059 // rdi - function
2060 // rdx - slot id
2061 Isolate* isolate = masm->isolate();
2062 Label extra_checks_or_miss, slow_start;
2063 Label slow, non_function, wrap, cont;
2064 Label have_js_function;
2065 int argc = arg_count();
2066 StackArgumentsAccessor args(rsp, argc);
2067 ParameterCount actual(argc);
2068
2069 EmitLoadTypeFeedbackVector(masm, rbx);
2070
2071 // The checks. First, does rdi match the recorded monomorphic target?
2072 __ SmiToInteger32(rdx, rdx);
2073 __ cmpp(rdi, FieldOperand(rbx, rdx, times_pointer_size,
2074 FixedArray::kHeaderSize));
2075 __ j(not_equal, &extra_checks_or_miss);
2076
2077 __ bind(&have_js_function);
2078 if (CallAsMethod()) {
2079 EmitContinueIfStrictOrNative(masm, &cont);
2080
2081 // Load the receiver from the stack.
2082 __ movp(rax, args.GetReceiverOperand());
2083
2084 __ JumpIfSmi(rax, &wrap);
2085
2086 __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
2087 __ j(below, &wrap);
2088
2089 __ bind(&cont);
2090 }
2091
2092 __ InvokeFunction(rdi, actual, JUMP_FUNCTION, NullCallWrapper());
2093
2094 __ bind(&slow);
2095 EmitSlowCase(isolate, masm, &args, argc, &non_function);
2096
2097 if (CallAsMethod()) {
2098 __ bind(&wrap);
2099 EmitWrapCase(masm, &args, &cont);
2100 }
2101
2102 __ bind(&extra_checks_or_miss);
2103 Label miss;
2104
2105 __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
2106 FixedArray::kHeaderSize));
2107 __ Cmp(rcx, TypeFeedbackVector::MegamorphicSentinel(isolate));
2108 __ j(equal, &slow_start);
2109 __ Cmp(rcx, TypeFeedbackVector::UninitializedSentinel(isolate));
2110 __ j(equal, &miss);
2111
2112 if (!FLAG_trace_ic) {
2113 // We are going megamorphic. If the feedback is a JSFunction, it is fine
2114 // to handle it here. More complex cases are dealt with in the runtime.
2115 __ AssertNotSmi(rcx);
2116 __ CmpObjectType(rcx, JS_FUNCTION_TYPE, rcx);
2117 __ j(not_equal, &miss);
2118 __ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
2119 TypeFeedbackVector::MegamorphicSentinel(isolate));
2120 __ jmp(&slow_start);
2121 }
2122
2123 // We are here because tracing is on or we are going monomorphic.
2124 __ bind(&miss);
2125 GenerateMiss(masm);
2126
2127 // The slow case.
2128 __ bind(&slow_start);
2129 // Check that function is not a smi.
2130 __ JumpIfSmi(rdi, &non_function);
2131 // Check that function is a JSFunction.
2132 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
2133 __ j(not_equal, &slow);
2134 __ jmp(&have_js_function);
2135
2136 // Unreachable
2137 __ int3();
2138 }
2139
2140
2141 void CallICStub::GenerateMiss(MacroAssembler* masm) {
2142 // Get the receiver of the function from the stack; 1 ~ return address.
2143 __ movp(rcx, Operand(rsp, (arg_count() + 1) * kPointerSize));
2144
2145 {
2146 FrameScope scope(masm, StackFrame::INTERNAL);
2147
2148 // Push the receiver and the function and feedback info.
2149 __ Push(rcx);
2150 __ Push(rdi);
2151 __ Push(rbx);
2152 __ Integer32ToSmi(rdx, rdx);
2153 __ Push(rdx);
2154
2155 // Call the entry.
2156 IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
2157 : IC::kCallIC_Customization_Miss;
2158
2159 ExternalReference miss = ExternalReference(IC_Utility(id),
2160 masm->isolate());
2161 __ CallExternalReference(miss, 4);
2162
2163 // Move result to rdi and exit the internal frame.
2164 __ movp(rdi, rax);
2165 }
2166 }
2167
2168
2169 bool CEntryStub::NeedsImmovableCode() {
2170 return false;
2171 }
2172
2173
2174 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
2175 CEntryStub::GenerateAheadOfTime(isolate);
2176 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
2177 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
2178 // It is important that the store buffer overflow stubs are generated first.
2179 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
2180 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
2181 BinaryOpICStub::GenerateAheadOfTime(isolate);
2182 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
2183 }
2184
2185
2186 void CodeStub::GenerateFPStubs(Isolate* isolate) {
2187 }
2188
2189
2190 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
2191 CEntryStub stub(isolate, 1, kDontSaveFPRegs);
2192 stub.GetCode();
2193 CEntryStub save_doubles(isolate, 1, kSaveFPRegs);
2194 save_doubles.GetCode();
2195 }
2196
2197
2198 void CEntryStub::Generate(MacroAssembler* masm) {
2199 // rax: number of arguments including receiver
2200 // rbx: pointer to C function (C callee-saved)
2201 // rbp: frame pointer of calling JS frame (restored after C call)
2202 // rsp: stack pointer (restored after C call)
2203 // rsi: current context (restored)
2204
2205 ProfileEntryHookStub::MaybeCallEntryHook(masm);
2206
2207 // Enter the exit frame that transitions from JavaScript to C++.
2208 #ifdef _WIN64
2209 int arg_stack_space = (result_size() < 2 ? 2 : 4);
2210 #else // _WIN64
2211 int arg_stack_space = 0;
2212 #endif // _WIN64
2213 __ EnterExitFrame(arg_stack_space, save_doubles());
2214
2215 // rbx: pointer to builtin function (C callee-saved).
2216 // rbp: frame pointer of exit frame (restored after C call).
2217 // rsp: stack pointer (restored after C call).
2218 // r14: number of arguments including receiver (C callee-saved).
2219 // r15: argv pointer (C callee-saved).
2220
2221 // Simple results returned in rax (both AMD64 and Win64 calling conventions).
2222 // Complex results must be written to address passed as first argument.
2223 // AMD64 calling convention: a struct of two pointers in rax+rdx
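// For exposition, the two-pointer case corresponds to a C signature such as
//   struct ObjectPair { void* first; void* second; };
// which the System V AMD64 ABI returns in rax/rdx, while the Win64 ABI
// returns it through a hidden pointer argument; hence the extra stack space
// reserved in the _WIN64 branch above.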
2224
2225 // Check stack alignment.
2226 if (FLAG_debug_code) {
2227 __ CheckStackAlignment();
2228 }
2229
2230 // Call C function.
2231 #ifdef _WIN64
2232 // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9.
2233 // Pass argv and argc as two parameters. The arguments object will
2234 // be created by stubs declared by DECLARE_RUNTIME_FUNCTION().
2235 if (result_size() < 2) {
2236 // Pass a pointer to the Arguments object as the first argument.
2237 // Return result in single register (rax).
2238 __ movp(rcx, r14); // argc.
2239 __ movp(rdx, r15); // argv.
2240 __ Move(r8, ExternalReference::isolate_address(isolate()));
2241 } else {
2242 DCHECK_EQ(2, result_size());
2243 // Pass a pointer to the result location as the first argument.
2244 __ leap(rcx, StackSpaceOperand(2));
2245 // Pass a pointer to the Arguments object as the second argument.
2246 __ movp(rdx, r14); // argc.
2247 __ movp(r8, r15); // argv.
2248 __ Move(r9, ExternalReference::isolate_address(isolate()));
2249 }
2250
2251 #else // _WIN64
2252 // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
2253 __ movp(rdi, r14); // argc.
2254 __ movp(rsi, r15); // argv.
2255 __ Move(rdx, ExternalReference::isolate_address(isolate()));
2256 #endif // _WIN64
2257 __ call(rbx);
2258 // Result is in rax - do not destroy this register!
2259
2260 #ifdef _WIN64
2261 // If return value is on the stack, pop it to registers.
2262 if (result_size() > 1) {
2263 DCHECK_EQ(2, result_size());
2264 // Read result values stored on stack. Result is stored
2265 // above the four argument mirror slots and the two
2266 // Arguments object slots.
2267 __ movq(rax, Operand(rsp, 6 * kRegisterSize));
2268 __ movq(rdx, Operand(rsp, 7 * kRegisterSize));
2269 }
2270 #endif // _WIN64
2271
2272 // Runtime functions should not return 'the hole'. Allowing it to escape may
2273 // lead to crashes in the IC code later.
2274 if (FLAG_debug_code) {
2275 Label okay;
2276 __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
2277 __ j(not_equal, &okay, Label::kNear);
2278 __ int3();
2279 __ bind(&okay);
2280 }
2281
2282 // Check result for exception sentinel.
2283 Label exception_returned;
2284 __ CompareRoot(rax, Heap::kExceptionRootIndex);
2285 __ j(equal, &exception_returned);
2286
2287 ExternalReference pending_exception_address(
2288 Isolate::kPendingExceptionAddress, isolate());
2289
2290 // Check that there is no pending exception, otherwise we
2291 // should have returned the exception sentinel.
2292 if (FLAG_debug_code) {
2293 Label okay;
2294 __ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
2295 Operand pending_exception_operand =
2296 masm->ExternalOperand(pending_exception_address);
2297 __ cmpp(r14, pending_exception_operand);
2298 __ j(equal, &okay, Label::kNear);
2299 __ int3();
2300 __ bind(&okay);
2301 }
2302
2303 // Exit the JavaScript to C++ exit frame.
2304 __ LeaveExitFrame(save_doubles());
2305 __ ret(0);
2306
2307 // Handling of exception.
2308 __ bind(&exception_returned);
2309
2310 // Retrieve the pending exception.
2311 Operand pending_exception_operand =
2312 masm->ExternalOperand(pending_exception_address);
2313 __ movp(rax, pending_exception_operand);
2314
2315 // Clear the pending exception.
2316 __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
2317 __ movp(pending_exception_operand, rdx);
2318
2319 // Special handling of termination exceptions, which are uncatchable
2320 // by JavaScript code.
2321 Label throw_termination_exception;
2322 __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
2323 __ j(equal, &throw_termination_exception);
2324
2325 // Handle normal exception.
2326 __ Throw(rax);
2327
2328 __ bind(&throw_termination_exception);
2329 __ ThrowUncatchable(rax);
2330 }
2331
2332
2333 void JSEntryStub::Generate(MacroAssembler* masm) {
2334 Label invoke, handler_entry, exit;
2335 Label not_outermost_js, not_outermost_js_2;
2336
2337 ProfileEntryHookStub::MaybeCallEntryHook(masm);
2338
2339 { // NOLINT. Scope block confuses linter.
2340 MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
2341 // Set up frame.
2342 __ pushq(rbp);
2343 __ movp(rbp, rsp);
2344
2345 // Push the stack frame type marker twice.
2346 int marker = type();
2347 // Scratch register is neither callee-save, nor an argument register on any
2348 // platform. It's free to use at this point.
2349 // Cannot use smi-register for loading yet.
2350 __ Move(kScratchRegister, Smi::FromInt(marker), Assembler::RelocInfoNone());
2351 __ Push(kScratchRegister); // context slot
2352 __ Push(kScratchRegister); // function slot
2353 // Save callee-saved registers (X64/X32/Win64 calling conventions).
2354 __ pushq(r12);
2355 __ pushq(r13);
2356 __ pushq(r14);
2357 __ pushq(r15);
2358 #ifdef _WIN64
2359 __ pushq(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
2360 __ pushq(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
2361 #endif
2362 __ pushq(rbx);
2363
2364 #ifdef _WIN64
2365 // On Win64 XMM6-XMM15 are callee-save
2366 __ subp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
2367 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0), xmm6);
2368 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1), xmm7);
2369 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2), xmm8);
2370 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3), xmm9);
2371 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4), xmm10);
2372 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5), xmm11);
2373 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 6), xmm12);
2374 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7), xmm13);
2375 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8), xmm14);
2376 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9), xmm15);
2377 #endif
2378
2379 // Set up the roots and smi constant registers.
2380 // Needs to be done before any further smi loads.
2381 __ InitializeSmiConstantRegister();
2382 __ InitializeRootRegister();
2383 }
2384
2385 // Save copies of the top frame descriptor on the stack.
2386 ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate());
2387 {
2388 Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
2389 __ Push(c_entry_fp_operand);
2390 }
2391
2392 // If this is the outermost JS call, set js_entry_sp value.
2393 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
2394 __ Load(rax, js_entry_sp);
2395 __ testp(rax, rax);
2396 __ j(not_zero, &not_outermost_js);
2397 __ Push(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
2398 __ movp(rax, rbp);
2399 __ Store(js_entry_sp, rax);
2400 Label cont;
2401 __ jmp(&cont);
2402 __ bind(&not_outermost_js);
2403 __ Push(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
2404 __ bind(&cont);
2405
2406 // Jump to a faked try block that does the invoke, with a faked catch
2407 // block that sets the pending exception.
2408 __ jmp(&invoke);
2409 __ bind(&handler_entry);
2410 handler_offset_ = handler_entry.pos();
2411 // Caught exception: Store result (exception) in the pending exception
2412 // field in the JSEnv and return a failure sentinel.
2413 ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
2414 isolate());
2415 __ Store(pending_exception, rax);
2416 __ LoadRoot(rax, Heap::kExceptionRootIndex);
2417 __ jmp(&exit);
2418
2419 // Invoke: Link this frame into the handler chain. There's only one
2420 // handler block in this code object, so its index is 0.
2421 __ bind(&invoke);
2422 __ PushTryHandler(StackHandler::JS_ENTRY, 0);
2423
2424 // Clear any pending exceptions.
2425 __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
2426 __ Store(pending_exception, rax);
2427
2428 // Fake a receiver (NULL).
2429 __ Push(Immediate(0)); // receiver
2430
2431 // Invoke the function by calling through JS entry trampoline builtin and
2432 // pop the faked function when we return. We load the address from an
2433 // external reference instead of inlining the call target address directly
2434 // in the code, because the builtin stubs may not have been generated yet
2435 // at the time this code is generated.
2436 if (type() == StackFrame::ENTRY_CONSTRUCT) {
2437 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
2438 isolate());
2439 __ Load(rax, construct_entry);
2440 } else {
2441 ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
2442 __ Load(rax, entry);
2443 }
2444 __ leap(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
2445 __ call(kScratchRegister);
2446
2447 // Unlink this frame from the handler chain.
2448 __ PopTryHandler();
2449
2450 __ bind(&exit);
2451 // Check if the current stack frame is marked as the outermost JS frame.
2452 __ Pop(rbx);
2453 __ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
2454 __ j(not_equal, &not_outermost_js_2);
2455 __ Move(kScratchRegister, js_entry_sp);
2456 __ movp(Operand(kScratchRegister, 0), Immediate(0));
2457 __ bind(&not_outermost_js_2);
2458
2459 // Restore the top frame descriptor from the stack.
2460 { Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
2461 __ Pop(c_entry_fp_operand);
2462 }
2463
2464 // Restore callee-saved registers (X64 conventions).
2465 #ifdef _WIN64
2466 // On Win64 XMM6-XMM15 are callee-save
2467 __ movdqu(xmm6, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0));
2468 __ movdqu(xmm7, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1));
2469 __ movdqu(xmm8, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2));
2470 __ movdqu(xmm9, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3));
2471 __ movdqu(xmm10, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4));
2472 __ movdqu(xmm11, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5));
2473 __ movdqu(xmm12, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 6));
2474 __ movdqu(xmm13, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7));
2475 __ movdqu(xmm14, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8));
2476 __ movdqu(xmm15, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9));
2477 __ addp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
2478 #endif
2479
2480 __ popq(rbx);
2481 #ifdef _WIN64
2482 // Callee-saved in the Win64 ABI, arguments/volatile in the AMD64 ABI.
2483 __ popq(rsi);
2484 __ popq(rdi);
2485 #endif
2486 __ popq(r15);
2487 __ popq(r14);
2488 __ popq(r13);
2489 __ popq(r12);
2490 __ addp(rsp, Immediate(2 * kPointerSize)); // remove markers
2491
2492 // Restore frame pointer and return.
2493 __ popq(rbp);
2494 __ ret(0);
2495 }
2496
2497
2498 void InstanceofStub::Generate(MacroAssembler* masm) {
2499 // Implements "value instanceof function" operator.
2500 // Expected input state with no inline cache:
2501 // rsp[0] : return address
2502 // rsp[8] : function pointer
2503 // rsp[16] : value
2504 // Expected input state with an inline one-element cache:
2505 // rsp[0] : return address
2506 // rsp[8] : offset from return address to location of inline cache
2507 // rsp[16] : function pointer
2508 // rsp[24] : value
2509 // Returns a bitwise zero to indicate that the value
2510 // is an instance of the function, and anything else to
2511 // indicate that the value is not an instance.
2512
2513 // Fixed register usage throughout the stub.
2514 Register object = rax; // Object (lhs).
2515 Register map = rbx; // Map of the object.
2516 Register function = rdx; // Function (rhs).
2517 Register prototype = rdi; // Prototype of the function.
2518 Register scratch = rcx;
2519
2520 static const int kOffsetToMapCheckValue = 2;
2521 static const int kOffsetToResultValue = kPointerSize == kInt64Size ? 18 : 14;
2522 // The last 4 bytes of the instruction sequence
2523 // movp(rdi, FieldOperand(rax, HeapObject::kMapOffset))
2524 // Move(kScratchRegister, Factory::the_hole_value())
2525 // in front of the hole value address.
2526 static const unsigned int kWordBeforeMapCheckValue =
2527 kPointerSize == kInt64Size ? 0xBA49FF78 : 0xBA41FF78;
2528 // The last 4 bytes of the instruction sequence
2529 // __ j(not_equal, &cache_miss);
2530 // __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
2531 // before the offset of the hole value in the root array.
2532 static const unsigned int kWordBeforeResultValue =
2533 kPointerSize == kInt64Size ? 0x458B4906 : 0x458B4106;
2534
2535 int extra_argument_offset = HasCallSiteInlineCheck() ? 1 : 0;
2536
2537 DCHECK_EQ(object.code(), InstanceofStub::left().code());
2538 DCHECK_EQ(function.code(), InstanceofStub::right().code());
2539
2540 // Get the object and function - they are always both needed.
2541 // Go slow case if the object is a smi.
2542 Label slow;
2543 StackArgumentsAccessor args(rsp, 2 + extra_argument_offset,
2544 ARGUMENTS_DONT_CONTAIN_RECEIVER);
2545 if (!HasArgsInRegisters()) {
2546 __ movp(object, args.GetArgumentOperand(0));
2547 __ movp(function, args.GetArgumentOperand(1));
2548 }
2549 __ JumpIfSmi(object, &slow);
2550
2551 // Check that the left hand is a JS object. Leave its map in rax.
2552 __ CmpObjectType(object, FIRST_SPEC_OBJECT_TYPE, map);
2553 __ j(below, &slow);
2554 __ CmpInstanceType(map, LAST_SPEC_OBJECT_TYPE);
2555 __ j(above, &slow);
2556
2557 // If there is a call site cache, don't look in the global cache, but do the
2558 // real lookup and update the call site cache.
2559 if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
2560 // Look up the function and the map in the instanceof cache.
2561 Label miss;
2562 __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
2563 __ j(not_equal, &miss, Label::kNear);
2564 __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
2565 __ j(not_equal, &miss, Label::kNear);
2566 __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
2567 __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
2568 __ bind(&miss);
2569 }
2570
2571 // Get the prototype of the function.
2572 __ TryGetFunctionPrototype(function, prototype, &slow, true);
2573
2574 // Check that the function prototype is a JS object.
2575 __ JumpIfSmi(prototype, &slow);
2576 __ CmpObjectType(prototype, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
2577 __ j(below, &slow);
2578 __ CmpInstanceType(kScratchRegister, LAST_SPEC_OBJECT_TYPE);
2579 __ j(above, &slow);
2580
2581 // Update the global instanceof or call site inlined cache with the current
2582 // map and function. The cached answer will be set when it is known below.
2583 if (!HasCallSiteInlineCheck()) {
2584 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
2585 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
2586 } else {
2587 // The constants for the code patching are based on push instructions
2588 // at the call site.
2589 DCHECK(!HasArgsInRegisters());
2590 // Get return address and delta to inlined map check.
2591 __ movq(kScratchRegister, StackOperandForReturnAddress(0));
2592 __ subp(kScratchRegister, args.GetArgumentOperand(2));
2593 if (FLAG_debug_code) {
2594 __ movl(scratch, Immediate(kWordBeforeMapCheckValue));
2595 __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), scratch);
2596 __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCheck);
2597 }
2598 __ movp(kScratchRegister,
2599 Operand(kScratchRegister, kOffsetToMapCheckValue));
2600 __ movp(Operand(kScratchRegister, 0), map);
2601 }
2602
2603 // Loop through the prototype chain looking for the function prototype.
2604 __ movp(scratch, FieldOperand(map, Map::kPrototypeOffset));
2605 Label loop, is_instance, is_not_instance;
2606 __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
2607 __ bind(&loop);
2608 __ cmpp(scratch, prototype);
2609 __ j(equal, &is_instance, Label::kNear);
2610 __ cmpp(scratch, kScratchRegister);
2611 // The code at is_not_instance assumes that kScratchRegister contains a
2612 // non-zero GCable value (the null object in this case).
2613 __ j(equal, &is_not_instance, Label::kNear);
2614 __ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
2615 __ movp(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
2616 __ jmp(&loop);
2617
2618 __ bind(&is_instance);
2619 if (!HasCallSiteInlineCheck()) {
2620 __ xorl(rax, rax);
2621 // Store bitwise zero in the cache. This is a Smi in GC terms.
2622 STATIC_ASSERT(kSmiTag == 0);
2623 __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
2624 if (ReturnTrueFalseObject()) {
2625 __ LoadRoot(rax, Heap::kTrueValueRootIndex);
2626 }
2627 } else {
2628 // Store offset of true in the root array at the inline check site.
2629 int true_offset = 0x100 +
2630 (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
2631 // Assert it fits in a single byte.
2632 DCHECK(true_offset >= 0 && true_offset < 0x100);
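// How the patched byte decodes (editorial sketch): it is consumed as a
// signed 8-bit displacement off the root register, so adding 0x100 merely
// keeps the stored value inside the DCHECK'd 0..0xFF range; reinterpreted
// as a signed byte it still equals
// (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias.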
2633 __ movl(rax, Immediate(true_offset));
2634 __ movq(kScratchRegister, StackOperandForReturnAddress(0));
2635 __ subp(kScratchRegister, args.GetArgumentOperand(2));
2636 __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
2637 if (FLAG_debug_code) {
2638 __ movl(rax, Immediate(kWordBeforeResultValue));
2639 __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
2640 __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
2641 }
2642 if (!ReturnTrueFalseObject()) {
2643 __ Set(rax, 0);
2644 }
2645 }
2646 __ ret(((HasArgsInRegisters() ? 0 : 2) + extra_argument_offset) *
2647 kPointerSize);
2648
2649 __ bind(&is_not_instance);
2650 if (!HasCallSiteInlineCheck()) {
2651 // We have to store a non-zero value in the cache.
2652 __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
2653 if (ReturnTrueFalseObject()) {
2654 __ LoadRoot(rax, Heap::kFalseValueRootIndex);
2655 }
2656 } else {
2657 // Store offset of false in the root array at the inline check site.
2658 int false_offset = 0x100 +
2659 (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
2660 // Assert it fits in a single byte.
2661 DCHECK(false_offset >= 0 && false_offset < 0x100);
2662 __ movl(rax, Immediate(false_offset));
2663 __ movq(kScratchRegister, StackOperandForReturnAddress(0));
2664 __ subp(kScratchRegister, args.GetArgumentOperand(2));
2665 __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
2666 if (FLAG_debug_code) {
2667 __ movl(rax, Immediate(kWordBeforeResultValue));
2668 __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
2669 __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
2670 }
2671 }
2672 __ ret(((HasArgsInRegisters() ? 0 : 2) + extra_argument_offset) *
2673 kPointerSize);
2674
2675 // Slow-case: Go through the JavaScript implementation.
2676 __ bind(&slow);
2677 if (!ReturnTrueFalseObject()) {
2678 // Tail call the builtin which returns 0 or 1.
2679 DCHECK(!HasArgsInRegisters());
2680 if (HasCallSiteInlineCheck()) {
2681 // Remove extra value from the stack.
2682 __ PopReturnAddressTo(rcx);
2683 __ Pop(rax);
2684 __ PushReturnAddressFrom(rcx);
2685 }
2686 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
2687 } else {
2688 // Call the builtin and convert 0/1 to true/false.
2689 {
2690 FrameScope scope(masm, StackFrame::INTERNAL);
2691 __ Push(object);
2692 __ Push(function);
2693 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
2694 }
2695 Label true_value, done;
2696 __ testq(rax, rax);
2697 __ j(zero, &true_value, Label::kNear);
2698 __ LoadRoot(rax, Heap::kFalseValueRootIndex);
2699 __ jmp(&done, Label::kNear);
2700 __ bind(&true_value);
2701 __ LoadRoot(rax, Heap::kTrueValueRootIndex);
2702 __ bind(&done);
2703 __ ret(((HasArgsInRegisters() ? 0 : 2) + extra_argument_offset) *
2704 kPointerSize);
2705 }
2706 }
2707
2708
2709 // -------------------------------------------------------------------------
2710 // StringCharCodeAtGenerator
2711
2712 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
2713 // If the receiver is a smi trigger the non-string case.
2714 __ JumpIfSmi(object_, receiver_not_string_);
2715
2716 // Fetch the instance type of the receiver into result register.
2717 __ movp(result_, FieldOperand(object_, HeapObject::kMapOffset));
2718 __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
2719 // If the receiver is not a string trigger the non-string case.
2720 __ testb(result_, Immediate(kIsNotStringMask));
2721 __ j(not_zero, receiver_not_string_);
2722
2723 // If the index is non-smi trigger the non-smi case.
2724 __ JumpIfNotSmi(index_, &index_not_smi_);
2725 __ bind(&got_smi_index_);
2726
2727 // Check for index out of range.
2728 __ SmiCompare(index_, FieldOperand(object_, String::kLengthOffset));
2729 __ j(above_equal, index_out_of_range_);
2730
2731 __ SmiToInteger32(index_, index_);
2732
2733 StringCharLoadGenerator::Generate(
2734 masm, object_, index_, result_, &call_runtime_);
2735
2736 __ Integer32ToSmi(result_, result_);
2737 __ bind(&exit_);
2738 }
2739
2740
2741 void StringCharCodeAtGenerator::GenerateSlow(
2742 MacroAssembler* masm,
2743 const RuntimeCallHelper& call_helper) {
2744 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
2745
2746 Factory* factory = masm->isolate()->factory();
2747 // Index is not a smi.
2748 __ bind(&index_not_smi_);
2749 // If index is a heap number, try converting it to an integer.
2750 __ CheckMap(index_,
2751 factory->heap_number_map(),
2752 index_not_number_,
2753 DONT_DO_SMI_CHECK);
2754 call_helper.BeforeCall(masm);
2755 __ Push(object_);
2756 __ Push(index_); // Consumed by runtime conversion function.
2757 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
2758 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
2759 } else {
2760 DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
2761 // NumberToSmi discards numbers that are not exact integers.
2762 __ CallRuntime(Runtime::kNumberToSmi, 1);
2763 }
2764 if (!index_.is(rax)) {
2765 // Save the conversion result before the pop instructions below
2766 // have a chance to overwrite it.
2767 __ movp(index_, rax);
2768 }
2769 __ Pop(object_);
2770 // Reload the instance type.
2771 __ movp(result_, FieldOperand(object_, HeapObject::kMapOffset));
2772 __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
2773 call_helper.AfterCall(masm);
2774 // If index is still not a smi, it must be out of range.
2775 __ JumpIfNotSmi(index_, index_out_of_range_);
2776 // Otherwise, return to the fast path.
2777 __ jmp(&got_smi_index_);
2778
2779 // Call runtime. We get here when the receiver is a string and the
2780 // index is a number, but the code of getting the actual character
2781 // is too complex (e.g., when the string needs to be flattened).
2782 __ bind(&call_runtime_);
2783 call_helper.BeforeCall(masm);
2784 __ Push(object_);
2785 __ Integer32ToSmi(index_, index_);
2786 __ Push(index_);
2787 __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
2788 if (!result_.is(rax)) {
2789 __ movp(result_, rax);
2790 }
2791 call_helper.AfterCall(masm);
2792 __ jmp(&exit_);
2793
2794 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
2795 }
2796
2797
2798 // -------------------------------------------------------------------------
2799 // StringCharFromCodeGenerator
2800
2801 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
2802 // Fast case of Heap::LookupSingleCharacterStringFromCode.
2803 __ JumpIfNotSmi(code_, &slow_case_);
2804 __ SmiCompare(code_, Smi::FromInt(String::kMaxOneByteCharCode));
2805 __ j(above, &slow_case_);
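// Illustration (assumed values): a smi code such as 65 ('A') is at most
// String::kMaxOneByteCharCode, so the cache lookup below yields the cached
// one-character string (or falls back to &slow_case_ if the cache holds
// undefined); larger codes and non-smi inputs take &slow_case_ directly.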
2806
2807 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
2808 SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
2809 __ movp(result_, FieldOperand(result_, index.reg, index.scale,
2810 FixedArray::kHeaderSize));
2811 __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
2812 __ j(equal, &slow_case_);
2813 __ bind(&exit_);
2814 }
2815
2816
2817 void StringCharFromCodeGenerator::GenerateSlow(
2818 MacroAssembler* masm,
2819 const RuntimeCallHelper& call_helper) {
2820 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
2821
2822 __ bind(&slow_case_);
2823 call_helper.BeforeCall(masm);
2824 __ Push(code_);
2825 __ CallRuntime(Runtime::kCharFromCode, 1);
2826 if (!result_.is(rax)) {
2827 __ movp(result_, rax);
2828 }
2829 call_helper.AfterCall(masm);
2830 __ jmp(&exit_);
2831
2832 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
2833 }
2834
2835
2836 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
2837 Register dest,
2838 Register src,
2839 Register count,
2840 String::Encoding encoding) {
2841 // Nothing to do for zero characters.
2842 Label done;
2843 __ testl(count, count);
2844 __ j(zero, &done, Label::kNear);
2845
2846 // Make count the number of bytes to copy.
2847 if (encoding == String::TWO_BYTE_ENCODING) {
2848 STATIC_ASSERT(2 == sizeof(uc16));
2849 __ addl(count, count);
2850 }
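// E.g. (illustrative): three two-byte characters become count == 6 bytes,
// so the byte-wise loop below runs six times.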
2851
2852 // Copy remaining characters.
2853 Label loop;
2854 __ bind(&loop);
2855 __ movb(kScratchRegister, Operand(src, 0));
2856 __ movb(Operand(dest, 0), kScratchRegister);
2857 __ incp(src);
2858 __ incp(dest);
2859 __ decl(count);
2860 __ j(not_zero, &loop);
2861
2862 __ bind(&done);
2863 }
2864
2865
2866 void SubStringStub::Generate(MacroAssembler* masm) {
2867 Label runtime;
2868
2869 // Stack frame on entry.
2870 // rsp[0] : return address
2871 // rsp[8] : to
2872 // rsp[16] : from
2873 // rsp[24] : string
2874
2875 enum SubStringStubArgumentIndices {
2876 STRING_ARGUMENT_INDEX,
2877 FROM_ARGUMENT_INDEX,
2878 TO_ARGUMENT_INDEX,
2879 SUB_STRING_ARGUMENT_COUNT
2880 };
2881
2882 StackArgumentsAccessor args(rsp, SUB_STRING_ARGUMENT_COUNT,
2883 ARGUMENTS_DONT_CONTAIN_RECEIVER);
2884
2885 // Make sure first argument is a string.
2886 __ movp(rax, args.GetArgumentOperand(STRING_ARGUMENT_INDEX));
2887 STATIC_ASSERT(kSmiTag == 0);
2888 __ testl(rax, Immediate(kSmiTagMask));
2889 __ j(zero, &runtime);
2890 Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
2891 __ j(NegateCondition(is_string), &runtime);
2892
2893 // rax: string
2894 // rbx: instance type
2895 // Calculate length of sub string using the smi values.
2896 __ movp(rcx, args.GetArgumentOperand(TO_ARGUMENT_INDEX));
2897 __ movp(rdx, args.GetArgumentOperand(FROM_ARGUMENT_INDEX));
2898 __ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
2899
2900 __ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
2901 __ cmpp(rcx, FieldOperand(rax, String::kLengthOffset));
2902 Label not_original_string;
2903 // Shorter than original string's length: an actual substring.
2904 __ j(below, &not_original_string, Label::kNear);
2905 // Longer than original string's length or negative: unsafe arguments.
2906 __ j(above, &runtime);
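// Illustration (JS semantics): for a six-character string, substring(2, 6)
// gives to - from == 4 < 6 and takes the 'below' branch above; substring(0, 6)
// compares equal to the length and falls through to return the original
// string here.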
2907 // Return original string.
2908 Counters* counters = isolate()->counters();
2909 __ IncrementCounter(counters->sub_string_native(), 1);
2910 __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
2911 __ bind(&not_original_string);
2912
2913 Label single_char;
2914 __ SmiCompare(rcx, Smi::FromInt(1));
2915 __ j(equal, &single_char);
2916
2917 __ SmiToInteger32(rcx, rcx);
2918
2919 // rax: string
2920 // rbx: instance type
2921 // rcx: sub string length
2922 // rdx: from index (smi)
2923 // Deal with different string types: update the index if necessary
2924 // and put the underlying string into rdi.
2925 Label underlying_unpacked, sliced_string, seq_or_external_string;
2926 // If the string is not indirect, it can only be sequential or external.
2927 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
2928 STATIC_ASSERT(kIsIndirectStringMask != 0);
2929 __ testb(rbx, Immediate(kIsIndirectStringMask));
2930 __ j(zero, &seq_or_external_string, Label::kNear);
2931
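  // Both indirect representations (cons and sliced) reach this point; the
  // kSlicedNotConsMask bit distinguishes the two.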
2932 __ testb(rbx, Immediate(kSlicedNotConsMask));
2933 __ j(not_zero, &sliced_string, Label::kNear);
2934 // Cons string. Check whether it is flat, then fetch first part.
2935 // Flat cons strings have an empty second part.
2936 __ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
2937 Heap::kempty_stringRootIndex);
2938 __ j(not_equal, &runtime);
2939 __ movp(rdi, FieldOperand(rax, ConsString::kFirstOffset));
2940 // Update instance type.
2941 __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
2942 __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
2943 __ jmp(&underlying_unpacked, Label::kNear);
2944
2945 __ bind(&sliced_string);
2946 // Sliced string. Fetch parent and correct start index by offset.
2947 __ addp(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
2948 __ movp(rdi, FieldOperand(rax, SlicedString::kParentOffset));
2949 // Update instance type.
2950 __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
2951 __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
2952 __ jmp(&underlying_unpacked, Label::kNear);
2953
2954 __ bind(&seq_or_external_string);
2955 // Sequential or external string. Just move string to the correct register.
2956 __ movp(rdi, rax);
2957
2958 __ bind(&underlying_unpacked);
2959
2960 if (FLAG_string_slices) {
2961 Label copy_routine;
2962 // rdi: underlying subject string
2963 // rbx: instance type of underlying subject string
2964 // rdx: adjusted start index (smi)
2965 // rcx: length
2966 // If coming from the make_two_character_string path, the string
2967 // is too short to be sliced anyway.
2968 __ cmpp(rcx, Immediate(SlicedString::kMinLength));
2969 // Short slice. Copy instead of slicing.
2970 __ j(less, &copy_routine);
2971 // Allocate a new sliced string. We do not reload the instance type
2972 // (including the string encoding) here; we simply rely on the info
2973 // provided by the original string. A wrong encoding does not matter,
2974 // because we always have to recheck the encoding of the newly created
2975 // string's parent anyway, due to externalized strings.
2976 Label two_byte_slice, set_slice_header;
2977 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
2978 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
2979 __ testb(rbx, Immediate(kStringEncodingMask));
2980 __ j(zero, &two_byte_slice, Label::kNear);
2981 __ AllocateOneByteSlicedString(rax, rbx, r14, &runtime);
2982 __ jmp(&set_slice_header, Label::kNear);
2983 __ bind(&two_byte_slice);
2984 __ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
2985 __ bind(&set_slice_header);
2986 __ Integer32ToSmi(rcx, rcx);
2987 __ movp(FieldOperand(rax, SlicedString::kLengthOffset), rcx);
2988 __ movp(FieldOperand(rax, SlicedString::kHashFieldOffset),
2989 Immediate(String::kEmptyHashField));
2990 __ movp(FieldOperand(rax, SlicedString::kParentOffset), rdi);
2991 __ movp(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
2992 __ IncrementCounter(counters->sub_string_native(), 1);
2993 __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
2994
2995 __ bind(&copy_routine);
2996 }
2997
2998 // rdi: underlying subject string
2999 // rbx: instance type of underlying subject string
3000 // rdx: adjusted start index (smi)
3001 // rcx: length
3002 // The subject string can only be an external or a sequential string of
3003 // either encoding at this point.
3004 Label two_byte_sequential, sequential_string;
3005 STATIC_ASSERT(kExternalStringTag != 0);
3006 STATIC_ASSERT(kSeqStringTag == 0);
3007 __ testb(rbx, Immediate(kExternalStringTag));
3008 __ j(zero, &sequential_string);
3009
3010 // Handle external string.
3011 // Rule out short external strings.
3012 STATIC_ASSERT(kShortExternalStringTag != 0);
3013 __ testb(rbx, Immediate(kShortExternalStringMask));
3014 __ j(not_zero, &runtime);
3015 __ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
3016 // Move the pointer so that offset-wise, it looks like a sequential string.
3017 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3018 __ subp(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3019
3020 __ bind(&sequential_string);
3021 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
3022 __ testb(rbx, Immediate(kStringEncodingMask));
3023 __ j(zero, &two_byte_sequential);
3024
3025 // Allocate the result.
3026 __ AllocateOneByteString(rax, rcx, r11, r14, r15, &runtime);
3027
3028 // rax: result string
3029 // rcx: result string length
3030 { // Locate character of sub string start.
3031 SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
3032 __ leap(r14, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
3033 SeqOneByteString::kHeaderSize - kHeapObjectTag));
3034 }
3035 // Locate first character of result.
3036 __ leap(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize));
3037
3038 // rax: result string
3039 // rcx: result length
3040 // rdi: first character of result
3041 // r14: character of sub string start
3042 StringHelper::GenerateCopyCharacters(
3043 masm, rdi, r14, rcx, String::ONE_BYTE_ENCODING);
3044 __ IncrementCounter(counters->sub_string_native(), 1);
3045 __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
3046
3047 __ bind(&two_byte_sequential);
3048 // Allocate the result.
3049 __ AllocateTwoByteString(rax, rcx, r11, r14, r15, &runtime);
3050
3051 // rax: result string
3052 // rcx: result string length
3053 { // Locate character of sub string start.
3054 SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
3055 __ leap(r14, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
3056 SeqOneByteString::kHeaderSize - kHeapObjectTag));
3057 }
3058 // Locate first character of result.
3059 __ leap(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
3060
3061 // rax: result string
3062 // rcx: result length
3063 // rdi: first character of result
3064 // r14: character of sub string start
3065 StringHelper::GenerateCopyCharacters(
3066 masm, rdi, r14, rcx, String::TWO_BYTE_ENCODING);
3067 __ IncrementCounter(counters->sub_string_native(), 1);
3068 __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
3069
3070 // Just jump to runtime to create the sub string.
3071 __ bind(&runtime);
3072 __ TailCallRuntime(Runtime::kSubString, 3, 1);
3073
3074 __ bind(&single_char);
3075 // rax: string
3076 // rbx: instance type
3077 // rcx: sub string length (smi)
3078 // rdx: from index (smi)
3079 StringCharAtGenerator generator(
3080 rax, rdx, rcx, rax, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
3081 generator.GenerateFast(masm);
3082 __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
3083 generator.SkipSlow(masm, &runtime);
3084 }
3085
3086
3087 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
3088 Register left,
3089 Register right,
3090 Register scratch1,
3091 Register scratch2) {
3092 Register length = scratch1;
3093
3094 // Compare lengths.
3095 Label check_zero_length;
3096 __ movp(length, FieldOperand(left, String::kLengthOffset));
3097 __ SmiCompare(length, FieldOperand(right, String::kLengthOffset));
3098 __ j(equal, &check_zero_length, Label::kNear);
3099 __ Move(rax, Smi::FromInt(NOT_EQUAL));
3100 __ ret(0);
3101
3102 // Check if the length is zero.
3103 Label compare_chars;
3104 __ bind(&check_zero_length);
3105 STATIC_ASSERT(kSmiTag == 0);
3106 __ SmiTest(length);
3107 __ j(not_zero, &compare_chars, Label::kNear);
3108 __ Move(rax, Smi::FromInt(EQUAL));
3109 __ ret(0);
3110
3111 // Compare characters.
3112 __ bind(&compare_chars);
3113 Label strings_not_equal;
3114 GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
3115 &strings_not_equal, Label::kNear);
3116
3117 // Characters are equal.
3118 __ Move(rax, Smi::FromInt(EQUAL));
3119 __ ret(0);
3120
3121 // Characters are not equal.
3122 __ bind(&strings_not_equal);
3123 __ Move(rax, Smi::FromInt(NOT_EQUAL));
3124 __ ret(0);
3125 }
3126
3127
3128 void StringHelper::GenerateCompareFlatOneByteStrings(
3129 MacroAssembler* masm, Register left, Register right, Register scratch1,
3130 Register scratch2, Register scratch3, Register scratch4) {
3131 // Ensure that you can always subtract a string length from a non-negative
3132 // number (e.g. another length).
3133 STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
3134
3135 // Find minimum length and length difference.
3136 __ movp(scratch1, FieldOperand(left, String::kLengthOffset));
3137 __ movp(scratch4, scratch1);
3138 __ SmiSub(scratch4,
3139 scratch4,
3140 FieldOperand(right, String::kLengthOffset));
3141 // Register scratch4 now holds left.length - right.length.
3142 const Register length_difference = scratch4;
3143 Label left_shorter;
3144 __ j(less, &left_shorter, Label::kNear);
3145 // The right string isn't longer than the left one.
3146 // Get the right string's length by subtracting the (non-negative) difference
3147 // from the left string's length.
3148 __ SmiSub(scratch1, scratch1, length_difference);
3149 __ bind(&left_shorter);
3150 // Register scratch1 now holds Min(left.length, right.length).
3151 const Register min_length = scratch1;
3152
3153 Label compare_lengths;
3154 // If min-length is zero, go directly to comparing lengths.
3155 __ SmiTest(min_length);
3156 __ j(zero, &compare_lengths, Label::kNear);
3157
3158 // Compare loop.
3159 Label result_not_equal;
3160 GenerateOneByteCharsCompareLoop(
3161 masm, left, right, min_length, scratch2, &result_not_equal,
3162 // In debug-code mode, SmiTest below might push
3163 // the target label outside the near range.
3164 Label::kFar);
3165
3166 // Completed loop without finding different characters.
3167 // Compare lengths (precomputed).
3168 __ bind(&compare_lengths);
3169 __ SmiTest(length_difference);
3170 Label length_not_equal;
3171 __ j(not_zero, &length_not_equal, Label::kNear);
3172
3173 // Result is EQUAL.
3174 __ Move(rax, Smi::FromInt(EQUAL));
3175 __ ret(0);
3176
3177 Label result_greater;
3178 Label result_less;
3179 __ bind(&length_not_equal);
3180 __ j(greater, &result_greater, Label::kNear);
3181 __ jmp(&result_less, Label::kNear);
3182 __ bind(&result_not_equal);
3183 // Unequal comparison of left to right, either character or length.
3184 __ j(above, &result_greater, Label::kNear);
3185 __ bind(&result_less);
3186
3187 // Result is LESS.
3188 __ Move(rax, Smi::FromInt(LESS));
3189 __ ret(0);
3190
3191 // Result is GREATER.
3192 __ bind(&result_greater);
3193 __ Move(rax, Smi::FromInt(GREATER));
3194 __ ret(0);
3195 }
3196
3197
3198 void StringHelper::GenerateOneByteCharsCompareLoop(
3199 MacroAssembler* masm, Register left, Register right, Register length,
3200 Register scratch, Label* chars_not_equal, Label::Distance near_jump) {
3201 // Change index to run from -length to -1 by adding length to string
3202 // start. This means that loop ends when index reaches zero, which
3203 // doesn't need an additional compare.
3204 __ SmiToInteger32(length, length);
3205 __ leap(left,
3206 FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
3207 __ leap(right,
3208 FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
3209 __ negq(length);
3210 Register index = length; // index = -length;
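  // Example with length == 3: left and right now point one past their last
  // character, so Operand(left, index, times_1, 0) addresses characters 0, 1
  // and 2 as index runs -3, -2, -1; when incq brings index to zero the loop
  // falls through without needing an explicit compare.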
3211
3212 // Compare loop.
3213 Label loop;
3214 __ bind(&loop);
3215 __ movb(scratch, Operand(left, index, times_1, 0));
3216 __ cmpb(scratch, Operand(right, index, times_1, 0));
3217 __ j(not_equal, chars_not_equal, near_jump);
3218 __ incq(index);
3219 __ j(not_zero, &loop);
3220 }
3221
3222
3223 void StringCompareStub::Generate(MacroAssembler* masm) {
3224 Label runtime;
3225
3226 // Stack frame on entry.
3227 // rsp[0] : return address
3228 // rsp[8] : right string
3229 // rsp[16] : left string
3230
3231 StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
3232 __ movp(rdx, args.GetArgumentOperand(0)); // left
3233 __ movp(rax, args.GetArgumentOperand(1)); // right
3234
3235 // Check for identity.
3236 Label not_same;
3237 __ cmpp(rdx, rax);
3238 __ j(not_equal, &not_same, Label::kNear);
3239 __ Move(rax, Smi::FromInt(EQUAL));
3240 Counters* counters = isolate()->counters();
3241 __ IncrementCounter(counters->string_compare_native(), 1);
3242 __ ret(2 * kPointerSize);
3243
3244 __ bind(&not_same);
3245
3246 // Check that both are sequential one-byte strings.
3247 __ JumpIfNotBothSequentialOneByteStrings(rdx, rax, rcx, rbx, &runtime);
3248
3249 // Inline comparison of one-byte strings.
3250 __ IncrementCounter(counters->string_compare_native(), 1);
3251 // Drop arguments from the stack
3252 __ PopReturnAddressTo(rcx);
3253 __ addp(rsp, Immediate(2 * kPointerSize));
3254 __ PushReturnAddressFrom(rcx);
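  // The pop/addp/push sequence above removes the two string arguments while
  // keeping the return address on top of the stack, so the compare helper
  // can simply ret(0).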
3255 StringHelper::GenerateCompareFlatOneByteStrings(masm, rdx, rax, rcx, rbx, rdi,
3256 r8);
3257
3258 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
3259 // tagged as a small integer.
3260 __ bind(&runtime);
3261 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3262 }
3263
3264
3265 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
3266 // ----------- S t a t e -------------
3267 // -- rdx : left
3268 // -- rax : right
3269 // -- rsp[0] : return address
3270 // -----------------------------------
3271
3272 // Load rcx with the allocation site. We stick an undefined dummy value here
3273 // and replace it with the real allocation site later when we instantiate this
3274 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
3275 __ Move(rcx, handle(isolate()->heap()->undefined_value()));
3276
3277 // Make sure that we actually patched the allocation site.
3278 if (FLAG_debug_code) {
3279 __ testb(rcx, Immediate(kSmiTagMask));
3280 __ Assert(not_equal, kExpectedAllocationSite);
3281 __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
3282 isolate()->factory()->allocation_site_map());
3283 __ Assert(equal, kExpectedAllocationSite);
3284 }
3285
3286 // Tail call into the stub that handles binary operations with allocation
3287 // sites.
3288 BinaryOpWithAllocationSiteStub stub(isolate(), state());
3289 __ TailCallStub(&stub);
3290 }
3291
3292
3293 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
3294 DCHECK(state() == CompareICState::SMI);
3295 Label miss;
3296 __ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear);
3297
3298 if (GetCondition() == equal) {
3299 // For equality we do not care about the sign of the result.
3300 __ subp(rax, rdx);
3301 } else {
3302 Label done;
3303 __ subp(rdx, rax);
3304 __ j(no_overflow, &done, Label::kNear);
3305 // Correct sign of result in case of overflow.
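  // On signed overflow the sign of rdx is the opposite of the true result's
  // sign; notp flips the sign bit (and keeps the value non-zero), which is
  // all the comparison result needs.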
3306 __ notp(rdx);
3307 __ bind(&done);
3308 __ movp(rax, rdx);
3309 }
3310 __ ret(0);
3311
3312 __ bind(&miss);
3313 GenerateMiss(masm);
3314 }
3315
3316
3317 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
3318 DCHECK(state() == CompareICState::NUMBER);
3319
3320 Label generic_stub;
3321 Label unordered, maybe_undefined1, maybe_undefined2;
3322 Label miss;
3323
3324 if (left() == CompareICState::SMI) {
3325 __ JumpIfNotSmi(rdx, &miss);
3326 }
3327 if (right() == CompareICState::SMI) {
3328 __ JumpIfNotSmi(rax, &miss);
3329 }
3330
3331 // Load left and right operand.
3332 Label done, left, left_smi, right_smi;
3333 __ JumpIfSmi(rax, &right_smi, Label::kNear);
3334 __ CompareMap(rax, isolate()->factory()->heap_number_map());
3335 __ j(not_equal, &maybe_undefined1, Label::kNear);
3336 __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
3337 __ jmp(&left, Label::kNear);
3338 __ bind(&right_smi);
3339 __ SmiToInteger32(rcx, rax); // Can't clobber rax yet.
3340 __ Cvtlsi2sd(xmm1, rcx);
3341
3342 __ bind(&left);
3343 __ JumpIfSmi(rdx, &left_smi, Label::kNear);
3344 __ CompareMap(rdx, isolate()->factory()->heap_number_map());
3345 __ j(not_equal, &maybe_undefined2, Label::kNear);
3346 __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
3347 __ jmp(&done);
3348 __ bind(&left_smi);
3349 __ SmiToInteger32(rcx, rdx); // Can't clobber rdx yet.
3350 __ Cvtlsi2sd(xmm0, rcx);
3351
3352 __ bind(&done);
3353 // Compare operands
3354 __ ucomisd(xmm0, xmm1);
3355
3356 // Don't base result on EFLAGS when a NaN is involved.
3357 __ j(parity_even, &unordered, Label::kNear);
3358
3359 // Return a result of -1, 0, or 1, based on EFLAGS.
3360 // Performing mov, because xor would destroy the flag register.
3361 __ movl(rax, Immediate(0));
3362 __ movl(rcx, Immediate(0));
3363 __ setcc(above, rax); // Add one to zero if carry clear and not equal.
3364 __ sbbp(rax, rcx); // Subtract one if below (aka. carry set).
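  // Outcomes: above -> setcc wrote 1 and CF is clear, leaving 1; below ->
  // CF is set, so 0 - 0 - 1 == -1; equal -> both stay 0. This yields the
  // required -1, 0 or 1 directly.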
3365 __ ret(0);
3366
3367 __ bind(&unordered);
3368 __ bind(&generic_stub);
3369 CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
3370 CompareICState::GENERIC, CompareICState::GENERIC);
3371 __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
3372
3373 __ bind(&maybe_undefined1);
3374 if (Token::IsOrderedRelationalCompareOp(op())) {
3375 __ Cmp(rax, isolate()->factory()->undefined_value());
3376 __ j(not_equal, &miss);
3377 __ JumpIfSmi(rdx, &unordered);
3378 __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
3379 __ j(not_equal, &maybe_undefined2, Label::kNear);
3380 __ jmp(&unordered);
3381 }
3382
3383 __ bind(&maybe_undefined2);
3384 if (Token::IsOrderedRelationalCompareOp(op())) {
3385 __ Cmp(rdx, isolate()->factory()->undefined_value());
3386 __ j(equal, &unordered);
3387 }
3388
3389 __ bind(&miss);
3390 GenerateMiss(masm);
3391 }
3392
3393
3394 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
3395 DCHECK(state() == CompareICState::INTERNALIZED_STRING);
3396 DCHECK(GetCondition() == equal);
3397
3398 // Registers containing left and right operands respectively.
3399 Register left = rdx;
3400 Register right = rax;
3401 Register tmp1 = rcx;
3402 Register tmp2 = rbx;
3403
3404 // Check that both operands are heap objects.
3405 Label miss;
3406 Condition cond = masm->CheckEitherSmi(left, right, tmp1);
3407 __ j(cond, &miss, Label::kNear);
3408
3409 // Check that both operands are internalized strings.
3410 __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
3411 __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
3412 __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
3413 __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
3414 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3415 __ orp(tmp1, tmp2);
3416 __ testb(tmp1, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
3417 __ j(not_zero, &miss, Label::kNear);
3418
3419 // Internalized strings are compared by identity.
3420 Label done;
3421 __ cmpp(left, right);
3422 // Make sure rax is non-zero. At this point input operands are
3423 // guaranteed to be non-zero.
3424 DCHECK(right.is(rax));
3425 __ j(not_equal, &done, Label::kNear);
3426 STATIC_ASSERT(EQUAL == 0);
3427 STATIC_ASSERT(kSmiTag == 0);
3428 __ Move(rax, Smi::FromInt(EQUAL));
3429 __ bind(&done);
3430 __ ret(0);
3431
3432 __ bind(&miss);
3433 GenerateMiss(masm);
3434 }
3435
3436
3437 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
3438 DCHECK(state() == CompareICState::UNIQUE_NAME);
3439 DCHECK(GetCondition() == equal);
3440
3441 // Registers containing left and right operands respectively.
3442 Register left = rdx;
3443 Register right = rax;
3444 Register tmp1 = rcx;
3445 Register tmp2 = rbx;
3446
3447 // Check that both operands are heap objects.
3448 Label miss;
3449 Condition cond = masm->CheckEitherSmi(left, right, tmp1);
3450 __ j(cond, &miss, Label::kNear);
3451
3452 // Check that both operands are unique names. This leaves the instance
3453 // types loaded in tmp1 and tmp2.
3454 __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
3455 __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
3456 __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
3457 __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
3458
3459 __ JumpIfNotUniqueNameInstanceType(tmp1, &miss, Label::kNear);
3460 __ JumpIfNotUniqueNameInstanceType(tmp2, &miss, Label::kNear);
3461
3462 // Unique names are compared by identity.
3463 Label done;
3464 __ cmpp(left, right);
3465 // Make sure rax is non-zero. At this point input operands are
3466 // guaranteed to be non-zero.
3467 DCHECK(right.is(rax));
3468 __ j(not_equal, &done, Label::kNear);
3469 STATIC_ASSERT(EQUAL == 0);
3470 STATIC_ASSERT(kSmiTag == 0);
3471 __ Move(rax, Smi::FromInt(EQUAL));
3472 __ bind(&done);
3473 __ ret(0);
3474
3475 __ bind(&miss);
3476 GenerateMiss(masm);
3477 }
3478
3479
3480 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
3481 DCHECK(state() == CompareICState::STRING);
3482 Label miss;
3483
3484 bool equality = Token::IsEqualityOp(op());
3485
3486 // Registers containing left and right operands respectively.
3487 Register left = rdx;
3488 Register right = rax;
3489 Register tmp1 = rcx;
3490 Register tmp2 = rbx;
3491 Register tmp3 = rdi;
3492
3493 // Check that both operands are heap objects.
3494 Condition cond = masm->CheckEitherSmi(left, right, tmp1);
3495 __ j(cond, &miss);
3496
3497 // Check that both operands are strings. This leaves the instance
3498 // types loaded in tmp1 and tmp2.
3499 __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
3500 __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
3501 __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
3502 __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
3503 __ movp(tmp3, tmp1);
3504 STATIC_ASSERT(kNotStringTag != 0);
3505 __ orp(tmp3, tmp2);
3506 __ testb(tmp3, Immediate(kIsNotStringMask));
3507 __ j(not_zero, &miss);
3508
3509 // Fast check for identical strings.
3510 Label not_same;
3511 __ cmpp(left, right);
3512 __ j(not_equal, &not_same, Label::kNear);
3513 STATIC_ASSERT(EQUAL == 0);
3514 STATIC_ASSERT(kSmiTag == 0);
3515 __ Move(rax, Smi::FromInt(EQUAL));
3516 __ ret(0);
3517
3518 // Handle not identical strings.
3519 __ bind(&not_same);
3520
3521 // Check that both strings are internalized strings. If they are, we're done
3522 // because we already know they are not identical. We also know they are both
3523 // strings.
3524 if (equality) {
3525 Label do_compare;
3526 STATIC_ASSERT(kInternalizedTag == 0);
3527 __ orp(tmp1, tmp2);
3528 __ testb(tmp1, Immediate(kIsNotInternalizedMask));
3529 __ j(not_zero, &do_compare, Label::kNear);
3530 // Make sure rax is non-zero. At this point input operands are
3531 // guaranteed to be non-zero.
3532 DCHECK(right.is(rax));
3533 __ ret(0);
3534 __ bind(&do_compare);
3535 }
3536
3537 // Check that both strings are sequential one-byte.
3538 Label runtime;
3539 __ JumpIfNotBothSequentialOneByteStrings(left, right, tmp1, tmp2, &runtime);
3540
3541 // Compare flat one-byte strings. Returns when done.
3542 if (equality) {
3543 StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1,
3544 tmp2);
3545 } else {
3546 StringHelper::GenerateCompareFlatOneByteStrings(
3547 masm, left, right, tmp1, tmp2, tmp3, kScratchRegister);
3548 }
3549
3550 // Handle more complex cases in runtime.
3551 __ bind(&runtime);
3552 __ PopReturnAddressTo(tmp1);
3553 __ Push(left);
3554 __ Push(right);
3555 __ PushReturnAddressFrom(tmp1);
3556 if (equality) {
3557 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
3558 } else {
3559 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3560 }
3561
3562 __ bind(&miss);
3563 GenerateMiss(masm);
3564 }
3565
3566
3567 void CompareICStub::GenerateObjects(MacroAssembler* masm) {
3568 DCHECK(state() == CompareICState::OBJECT);
3569 Label miss;
3570 Condition either_smi = masm->CheckEitherSmi(rdx, rax);
3571 __ j(either_smi, &miss, Label::kNear);
3572
3573 __ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
3574 __ j(not_equal, &miss, Label::kNear);
3575 __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
3576 __ j(not_equal, &miss, Label::kNear);
3577
3578 DCHECK(GetCondition() == equal);
3579 __ subp(rax, rdx);
3580 __ ret(0);
3581
3582 __ bind(&miss);
3583 GenerateMiss(masm);
3584 }
3585
3586
3587 void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
3588 Label miss;
3589 Condition either_smi = masm->CheckEitherSmi(rdx, rax);
3590 __ j(either_smi, &miss, Label::kNear);
3591
3592 __ movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
3593 __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
3594 __ Cmp(rcx, known_map_);
3595 __ j(not_equal, &miss, Label::kNear);
3596 __ Cmp(rbx, known_map_);
3597 __ j(not_equal, &miss, Label::kNear);
3598
3599 __ subp(rax, rdx);
3600 __ ret(0);
3601
3602 __ bind(&miss);
3603 GenerateMiss(masm);
3604 }
3605
3606
3607 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
3608 {
3609 // Call the runtime system in a fresh internal frame.
3610 ExternalReference miss =
3611 ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
3612
3613 FrameScope scope(masm, StackFrame::INTERNAL);
3614 __ Push(rdx);
3615 __ Push(rax);
3616 __ Push(rdx);
3617 __ Push(rax);
3618 __ Push(Smi::FromInt(op()));
3619 __ CallExternalReference(miss, 3);
3620
3621 // Compute the entry point of the rewritten stub.
3622 __ leap(rdi, FieldOperand(rax, Code::kHeaderSize));
3623 __ Pop(rax);
3624 __ Pop(rdx);
3625 }
3626
3627 // Do a tail call to the rewritten stub.
3628 __ jmp(rdi);
3629 }
3630
3631
3632 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
3633 Label* miss,
3634 Label* done,
3635 Register properties,
3636 Handle<Name> name,
3637 Register r0) {
3638 DCHECK(name->IsUniqueName());
3639 // If the names in slots 1 through kProbes - 1 for this hash value are
3640 // all different from the given name, and the kProbes-th slot is unused
3641 // (its name is the undefined value), the hash table is guaranteed not to
3642 // contain the property. This holds even if some slots hold deleted
3643 // properties (their names are the hole value).
3644 for (int i = 0; i < kInlinedProbes; i++) {
3645 // r0 points to properties hash.
3646 // Compute the masked index: (hash + i + i * i) & mask.
3647 Register index = r0;
3648 // Capacity is smi 2^n.
3649 __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
3650 __ decl(index);
3651 __ andp(index,
3652 Immediate(name->Hash() + NameDictionary::GetProbeOffset(i)));
3653
3654 // Scale the index by multiplying by the entry size.
3655 DCHECK(NameDictionary::kEntrySize == 3);
3656 __ leap(index, Operand(index, index, times_2, 0)); // index *= 3.
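  // Each NameDictionary entry spans kEntrySize == 3 slots (key, value and
  // property details), hence the scaling by three.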
3657
3658 Register entity_name = r0;
3659 // Having undefined at this place means the name is not contained.
3660 DCHECK_EQ(kSmiTagSize, 1);
3661 __ movp(entity_name, Operand(properties,
3662 index,
3663 times_pointer_size,
3664 kElementsStartOffset - kHeapObjectTag));
3665 __ Cmp(entity_name, masm->isolate()->factory()->undefined_value());
3666 __ j(equal, done);
3667
3668 // Stop if found the property.
3669 __ Cmp(entity_name, Handle<Name>(name));
3670 __ j(equal, miss);
3671
3672 Label good;
3673 // Check for the hole and skip.
3674 __ CompareRoot(entity_name, Heap::kTheHoleValueRootIndex);
3675 __ j(equal, &good, Label::kNear);
3676
3677 // Check if the entry name is not a unique name.
3678 __ movp(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
3679 __ JumpIfNotUniqueNameInstanceType(
3680 FieldOperand(entity_name, Map::kInstanceTypeOffset), miss);
3681 __ bind(&good);
3682 }
3683
3684 NameDictionaryLookupStub stub(masm->isolate(), properties, r0, r0,
3685 NEGATIVE_LOOKUP);
3686 __ Push(Handle<Object>(name));
3687 __ Push(Immediate(name->Hash()));
3688 __ CallStub(&stub);
3689 __ testp(r0, r0);
3690 __ j(not_zero, miss);
3691 __ jmp(done);
3692 }
3693
3694
3695 // Probe the name dictionary in the |elements| register. Jump to the
3696 // |done| label if a property with the given name is found leaving the
3697 // index into the dictionary in |r1|. Jump to the |miss| label
3698 // otherwise.
3699 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
3700 Label* miss,
3701 Label* done,
3702 Register elements,
3703 Register name,
3704 Register r0,
3705 Register r1) {
3706 DCHECK(!elements.is(r0));
3707 DCHECK(!elements.is(r1));
3708 DCHECK(!name.is(r0));
3709 DCHECK(!name.is(r1));
3710
3711 __ AssertName(name);
3712
3713 __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
3714 __ decl(r0);
3715
3716 for (int i = 0; i < kInlinedProbes; i++) {
3717 // Compute the masked index: (hash + i + i * i) & mask.
3718 __ movl(r1, FieldOperand(name, Name::kHashFieldOffset));
3719 __ shrl(r1, Immediate(Name::kHashShift));
3720 if (i > 0) {
3721 __ addl(r1, Immediate(NameDictionary::GetProbeOffset(i)));
3722 }
3723 __ andp(r1, r0);
3724
3725 // Scale the index by multiplying by the entry size.
3726 DCHECK(NameDictionary::kEntrySize == 3);
3727 __ leap(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
3728
3729 // Check if the key is identical to the name.
3730 __ cmpp(name, Operand(elements, r1, times_pointer_size,
3731 kElementsStartOffset - kHeapObjectTag));
3732 __ j(equal, done);
3733 }
3734
3735 NameDictionaryLookupStub stub(masm->isolate(), elements, r0, r1,
3736 POSITIVE_LOOKUP);
3737 __ Push(name);
3738 __ movl(r0, FieldOperand(name, Name::kHashFieldOffset));
3739 __ shrl(r0, Immediate(Name::kHashShift));
3740 __ Push(r0);
3741 __ CallStub(&stub);
3742
3743 __ testp(r0, r0);
3744 __ j(zero, miss);
3745 __ jmp(done);
3746 }
3747
3748
3749 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
3750 // This stub overrides SometimesSetsUpAFrame() to return false. That means
3751 // we cannot call anything that could cause a GC from this stub.
3752 // Stack frame on entry:
3753 // rsp[0 * kPointerSize] : return address.
3754 // rsp[1 * kPointerSize] : key's hash.
3755 // rsp[2 * kPointerSize] : key.
3756 // Registers:
3757 // dictionary_: NameDictionary to probe.
3758 // result_: used as scratch.
3759 // index_: will hold the index of the entry if the lookup succeeds;
3760 // might alias with result_.
3761 // Returns:
3762 // result_ is zero if the lookup failed, non-zero otherwise.
3763
3764 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
3765
3766 Register scratch = result();
3767
3768 __ SmiToInteger32(scratch, FieldOperand(dictionary(), kCapacityOffset));
3769 __ decl(scratch);
3770 __ Push(scratch);
3771
3772 // If the names in slots 1 through kProbes - 1 for this hash value are
3773 // all different from the given name, and the kProbes-th slot is unused
3774 // (its name is the undefined value), the hash table is guaranteed not to
3775 // contain the property. This holds even if some slots hold deleted
3776 // properties (their names are the hole value).
3777 StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER,
3778 kPointerSize);
3779 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
3780 // Compute the masked index: (hash + i + i * i) & mask.
3781 __ movp(scratch, args.GetArgumentOperand(1));
3782 if (i > 0) {
3783 __ addl(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
3784 }
3785 __ andp(scratch, Operand(rsp, 0));
3786
3787 // Scale the index by multiplying by the entry size.
3788 DCHECK(NameDictionary::kEntrySize == 3);
3789 __ leap(index(), Operand(scratch, scratch, times_2, 0)); // index *= 3.
3790
3791 // Having undefined at this place means the name is not contained.
3792 __ movp(scratch, Operand(dictionary(), index(), times_pointer_size,
3793 kElementsStartOffset - kHeapObjectTag));
3794
3795 __ Cmp(scratch, isolate()->factory()->undefined_value());
3796 __ j(equal, &not_in_dictionary);
3797
3798 // Stop if found the property.
3799 __ cmpp(scratch, args.GetArgumentOperand(0));
3800 __ j(equal, &in_dictionary);
3801
3802 if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
3803 // If we hit a key that is not a unique name during negative
3804 // lookup we have to bailout as this key might be equal to the
3805 // key we are looking for.
3806
3807 // Check if the entry name is not a unique name.
3808 __ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
3809 __ JumpIfNotUniqueNameInstanceType(
3810 FieldOperand(scratch, Map::kInstanceTypeOffset),
3811 &maybe_in_dictionary);
3812 }
3813 }
3814
3815 __ bind(&maybe_in_dictionary);
3816 // If we are doing negative lookup then probing failure should be
3817 // treated as a lookup success. For positive lookup probing failure
3818 // should be treated as lookup failure.
3819 if (mode() == POSITIVE_LOOKUP) {
3820 __ movp(scratch, Immediate(0));
3821 __ Drop(1);
3822 __ ret(2 * kPointerSize);
3823 }
3824
3825 __ bind(&in_dictionary);
3826 __ movp(scratch, Immediate(1));
3827 __ Drop(1);
3828 __ ret(2 * kPointerSize);
3829
3830 __ bind(&not_in_dictionary);
3831 __ movp(scratch, Immediate(0));
3832 __ Drop(1);
3833 __ ret(2 * kPointerSize);
3834 }
3835
3836
3837 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
3838 Isolate* isolate) {
3839 StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
3840 stub1.GetCode();
3841 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
3842 stub2.GetCode();
3843 }
3844
3845
3846 // Takes the input in 3 registers: address_, value_, and object_. A pointer to
3847 // the value has just been written into the object, now this stub makes sure
3848 // we keep the GC informed. The word in the object where the value has been
3849 // written is in the address register.
3850 void RecordWriteStub::Generate(MacroAssembler* masm) {
3851 Label skip_to_incremental_noncompacting;
3852 Label skip_to_incremental_compacting;
3853
3854 // The first two instructions are generated with labels so as to get the
3855 // offset fixed up correctly by the bind(Label*) call. We patch it back and
3856 // forth between a compare instructions (a nop in this position) and the
3857 // real branch when we start and stop incremental heap marking.
3858 // See RecordWriteStub::Patch for details.
3859 __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
3860 __ jmp(&skip_to_incremental_compacting, Label::kFar);
3861
3862 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
3863 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
3864 MacroAssembler::kReturnAtEnd);
3865 } else {
3866 __ ret(0);
3867 }
3868
3869 __ bind(&skip_to_incremental_noncompacting);
3870 GenerateIncremental(masm, INCREMENTAL);
3871
3872 __ bind(&skip_to_incremental_compacting);
3873 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
3874
3875 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
3876 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
3877 masm->set_byte_at(0, kTwoByteNopInstruction);
3878 masm->set_byte_at(2, kFiveByteNopInstruction);
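  // Patching the first byte of each jmp above turns it into a compare that
  // acts as a nop here (see the comment at the top of this function): two
  // bytes for the near jump at offset 0, five for the far jump at offset 2,
  // so a freshly generated stub starts out in STORE_BUFFER_ONLY mode.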
3879 }
3880
3881
3882 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
3883 regs_.Save(masm);
3884
3885 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
3886 Label dont_need_remembered_set;
3887
3888 __ movp(regs_.scratch0(), Operand(regs_.address(), 0));
3889 __ JumpIfNotInNewSpace(regs_.scratch0(),
3890 regs_.scratch0(),
3891 &dont_need_remembered_set);
3892
3893 __ CheckPageFlag(regs_.object(),
3894 regs_.scratch0(),
3895 1 << MemoryChunk::SCAN_ON_SCAVENGE,
3896 not_zero,
3897 &dont_need_remembered_set);
3898
3899 // First notify the incremental marker if necessary, then update the
3900 // remembered set.
3901 CheckNeedsToInformIncrementalMarker(
3902 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
3903 InformIncrementalMarker(masm);
3904 regs_.Restore(masm);
3905 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
3906 MacroAssembler::kReturnAtEnd);
3907
3908 __ bind(&dont_need_remembered_set);
3909 }
3910
3911 CheckNeedsToInformIncrementalMarker(
3912 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
3913 InformIncrementalMarker(masm);
3914 regs_.Restore(masm);
3915 __ ret(0);
3916 }
3917
3918
3919 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
3920 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
3921 Register address =
3922 arg_reg_1.is(regs_.address()) ? kScratchRegister : regs_.address();
3923 DCHECK(!address.is(regs_.object()));
3924 DCHECK(!address.is(arg_reg_1));
3925 __ Move(address, regs_.address());
3926 __ Move(arg_reg_1, regs_.object());
3927 // TODO(gc) Can we just set address arg2 in the beginning?
3928 __ Move(arg_reg_2, address);
3929 __ LoadAddress(arg_reg_3,
3930 ExternalReference::isolate_address(isolate()));
3931 int argument_count = 3;
3932
3933 AllowExternalCallThatCantCauseGC scope(masm);
3934 __ PrepareCallCFunction(argument_count);
3935 __ CallCFunction(
3936 ExternalReference::incremental_marking_record_write_function(isolate()),
3937 argument_count);
3938 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
3939 }
3940
3941
3942 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
3943 MacroAssembler* masm,
3944 OnNoNeedToInformIncrementalMarker on_no_need,
3945 Mode mode) {
3946 Label on_black;
3947 Label need_incremental;
3948 Label need_incremental_pop_object;
3949
3950 __ movp(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
3951 __ andp(regs_.scratch0(), regs_.object());
3952 __ movp(regs_.scratch1(),
3953 Operand(regs_.scratch0(),
3954 MemoryChunk::kWriteBarrierCounterOffset));
3955 __ subp(regs_.scratch1(), Immediate(1));
3956 __ movp(Operand(regs_.scratch0(),
3957 MemoryChunk::kWriteBarrierCounterOffset),
3958 regs_.scratch1());
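  // The page's write barrier counter is decremented on every recorded write;
  // once it goes negative we take the need_incremental path below
  // (presumably to bound the per-page filtering work between notifications).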
3959 __ j(negative, &need_incremental);
3960
3961 // Let's look at the color of the object: If it is not black we don't have
3962 // to inform the incremental marker.
3963 __ JumpIfBlack(regs_.object(),
3964 regs_.scratch0(),
3965 regs_.scratch1(),
3966 &on_black,
3967 Label::kNear);
3968
3969 regs_.Restore(masm);
3970 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
3971 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
3972 MacroAssembler::kReturnAtEnd);
3973 } else {
3974 __ ret(0);
3975 }
3976
3977 __ bind(&on_black);
3978
3979 // Get the value from the slot.
3980 __ movp(regs_.scratch0(), Operand(regs_.address(), 0));
3981
3982 if (mode == INCREMENTAL_COMPACTION) {
3983 Label ensure_not_white;
3984
3985 __ CheckPageFlag(regs_.scratch0(), // Contains value.
3986 regs_.scratch1(), // Scratch.
3987 MemoryChunk::kEvacuationCandidateMask,
3988 zero,
3989 &ensure_not_white,
3990 Label::kNear);
3991
3992 __ CheckPageFlag(regs_.object(),
3993 regs_.scratch1(), // Scratch.
3994 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
3995 zero,
3996 &need_incremental);
3997
3998 __ bind(&ensure_not_white);
3999 }
4000
4001 // We need an extra register for this, so we push the object register
4002 // temporarily.
4003 __ Push(regs_.object());
4004 __ EnsureNotWhite(regs_.scratch0(), // The value.
4005 regs_.scratch1(), // Scratch.
4006 regs_.object(), // Scratch.
4007 &need_incremental_pop_object,
4008 Label::kNear);
4009 __ Pop(regs_.object());
4010
4011 regs_.Restore(masm);
4012 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4013 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
4014 MacroAssembler::kReturnAtEnd);
4015 } else {
4016 __ ret(0);
4017 }
4018
4019 __ bind(&need_incremental_pop_object);
4020 __ Pop(regs_.object());
4021
4022 __ bind(&need_incremental);
4023
4024 // Fall through when we need to inform the incremental marker.
4025 }
4026
4027
4028 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
4029 // ----------- S t a t e -------------
4030 // -- rax : element value to store
4031 // -- rcx : element index as smi
4032 // -- rsp[0] : return address
4033 // -- rsp[8] : array literal index in function
4034 // -- rsp[16] : array literal
4035 // clobbers rbx, rdx, rdi
4036 // -----------------------------------
4037
4038 Label element_done;
4039 Label double_elements;
4040 Label smi_element;
4041 Label slow_elements;
4042 Label fast_elements;
4043
4044 // Get array literal index, array literal and its map.
4045 StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
4046 __ movp(rdx, args.GetArgumentOperand(1));
4047 __ movp(rbx, args.GetArgumentOperand(0));
4048 __ movp(rdi, FieldOperand(rbx, JSObject::kMapOffset));
4049
4050 __ CheckFastElements(rdi, &double_elements);
4051
4052 // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
4053 __ JumpIfSmi(rax, &smi_element);
4054 __ CheckFastSmiElements(rdi, &fast_elements);
4055
4056 // Storing into the array literal requires an elements transition. Call
4057 // into the runtime.
4058
4059 __ bind(&slow_elements);
4060 __ PopReturnAddressTo(rdi);
4061 __ Push(rbx);
4062 __ Push(rcx);
4063 __ Push(rax);
4064 __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
4065 __ Push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
4066 __ Push(rdx);
4067 __ PushReturnAddressFrom(rdi);
4068 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
4069
4070 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
4071 __ bind(&fast_elements);
4072 __ SmiToInteger32(kScratchRegister, rcx);
4073 __ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
4074 __ leap(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
4075 FixedArrayBase::kHeaderSize));
4076 __ movp(Operand(rcx, 0), rax);
4077 // Update the write barrier for the array store.
4078 __ RecordWrite(rbx, rcx, rax,
4079 kDontSaveFPRegs,
4080 EMIT_REMEMBERED_SET,
4081 OMIT_SMI_CHECK);
4082 __ ret(0);
4083
4084 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or
4085 // FAST_*_ELEMENTS, and value is Smi.
4086 __ bind(&smi_element);
4087 __ SmiToInteger32(kScratchRegister, rcx);
4088 __ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
4089 __ movp(FieldOperand(rbx, kScratchRegister, times_pointer_size,
4090 FixedArrayBase::kHeaderSize), rax);
4091 __ ret(0);
4092
4093 // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
4094 __ bind(&double_elements);
4095
4096 __ movp(r9, FieldOperand(rbx, JSObject::kElementsOffset));
4097 __ SmiToInteger32(r11, rcx);
4098 __ StoreNumberToDoubleElements(rax,
4099 r9,
4100 r11,
4101 xmm0,
4102 &slow_elements);
4103 __ ret(0);
4104 }
4105
4106
4107 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4108 CEntryStub ces(isolate(), 1, kSaveFPRegs);
4109 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
4110 int parameter_count_offset =
4111 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4112 __ movp(rbx, MemOperand(rbp, parameter_count_offset));
4113 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4114 __ PopReturnAddressTo(rcx);
4115 int additional_offset =
4116 function_mode() == JS_FUNCTION_STUB_MODE ? kPointerSize : 0;
4117 __ leap(rsp, MemOperand(rsp, rbx, times_pointer_size, additional_offset));
4118 __ jmp(rcx); // Return to IC Miss stub, continuation still on stack.
4119 }
4120
4121
4122 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
4123 EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4124 VectorLoadStub stub(isolate(), state());
4125 __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
4126 }
4127
4128
4129 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
4130 EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4131 VectorKeyedLoadStub stub(isolate());
4132 __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
4133 }
4134
4135
4136 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4137 if (masm->isolate()->function_entry_hook() != NULL) {
4138 ProfileEntryHookStub stub(masm->isolate());
4139 masm->CallStub(&stub);
4140 }
4141 }
4142
4143
4144 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4145 // This stub can be called from essentially anywhere, so it needs to save
4146 // all volatile and callee-save registers.
4147 const size_t kNumSavedRegisters = 2;
4148 __ pushq(arg_reg_1);
4149 __ pushq(arg_reg_2);
4150
4151 // Calculate the original stack pointer and store it in the second arg.
4152 __ leap(arg_reg_2,
4153 Operand(rsp, kNumSavedRegisters * kRegisterSize + kPCOnStackSize));
4154
4155 // Calculate the function address into the first arg.
4156 __ movp(arg_reg_1, Operand(rsp, kNumSavedRegisters * kRegisterSize));
4157 __ subp(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
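  // The stack slot read above holds the return address pushed by the short
  // call into this stub; subtracting the call's length recovers the address
  // of the call site, which serves as the instrumented function's address
  // since the hook call is emitted at function entry.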
4158
4159 // Save the remainder of the volatile registers.
4160 masm->PushCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
4161
4162 // Call the entry hook function.
4163 __ Move(rax, FUNCTION_ADDR(isolate()->function_entry_hook()),
4164 Assembler::RelocInfoNone());
4165
4166 AllowExternalCallThatCantCauseGC scope(masm);
4167
4168 const int kArgumentCount = 2;
4169 __ PrepareCallCFunction(kArgumentCount);
4170 __ CallCFunction(rax, kArgumentCount);
4171
4172 // Restore volatile regs.
4173 masm->PopCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
4174 __ popq(arg_reg_2);
4175 __ popq(arg_reg_1);
4176
4177 __ Ret();
4178 }
4179
4180
4181 template<class T>
4182 static void CreateArrayDispatch(MacroAssembler* masm,
4183 AllocationSiteOverrideMode mode) {
4184 if (mode == DISABLE_ALLOCATION_SITES) {
4185 T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
4186 __ TailCallStub(&stub);
4187 } else if (mode == DONT_OVERRIDE) {
4188 int last_index = GetSequenceIndexFromFastElementsKind(
4189 TERMINAL_FAST_ELEMENTS_KIND);
4190 for (int i = 0; i <= last_index; ++i) {
4191 Label next;
4192 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4193 __ cmpl(rdx, Immediate(kind));
4194 __ j(not_equal, &next);
4195 T stub(masm->isolate(), kind);
4196 __ TailCallStub(&stub);
4197 __ bind(&next);
4198 }
4199
4200 // If we reached this point there is a problem.
4201 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4202 } else {
4203 UNREACHABLE();
4204 }
4205 }
4206
4207
4208 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
4209 AllocationSiteOverrideMode mode) {
4210 // rbx - allocation site (if mode != DISABLE_ALLOCATION_SITES)
4211 // rdx - kind (if mode != DISABLE_ALLOCATION_SITES)
4212 // rax - number of arguments
4213 // rdi - constructor?
4214 // rsp[0] - return address
4215 // rsp[8] - last argument
4216 Handle<Object> undefined_sentinel(
4217 masm->isolate()->heap()->undefined_value(),
4218 masm->isolate());
4219
4220 Label normal_sequence;
4221 if (mode == DONT_OVERRIDE) {
4222 DCHECK(FAST_SMI_ELEMENTS == 0);
4223 DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
4224 DCHECK(FAST_ELEMENTS == 2);
4225 DCHECK(FAST_HOLEY_ELEMENTS == 3);
4226 DCHECK(FAST_DOUBLE_ELEMENTS == 4);
4227 DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
4228
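  // Packed elements kinds are even and their holey counterparts odd (see
  // the DCHECKs above), so the low bit of the kind doubles as a holeyness
  // flag.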
4229 // is the low bit set? If so, we are holey and that is good.
4230 __ testb(rdx, Immediate(1));
4231 __ j(not_zero, &normal_sequence);
4232 }
4233
4234 // look at the first argument
4235 StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
4236 __ movp(rcx, args.GetArgumentOperand(0));
4237 __ testp(rcx, rcx);
4238 __ j(zero, &normal_sequence);
4239
4240 if (mode == DISABLE_ALLOCATION_SITES) {
4241 ElementsKind initial = GetInitialFastElementsKind();
4242 ElementsKind holey_initial = GetHoleyElementsKind(initial);
4243
4244 ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
4245 holey_initial,
4246 DISABLE_ALLOCATION_SITES);
4247 __ TailCallStub(&stub_holey);
4248
4249 __ bind(&normal_sequence);
4250 ArraySingleArgumentConstructorStub stub(masm->isolate(),
4251 initial,
4252 DISABLE_ALLOCATION_SITES);
4253 __ TailCallStub(&stub);
4254 } else if (mode == DONT_OVERRIDE) {
4255 // We are going to create a holey array, but our kind is non-holey.
4256 // Fix kind and retry (only if we have an allocation site in the slot).
4257 __ incl(rdx);
4258
4259 if (FLAG_debug_code) {
4260 Handle<Map> allocation_site_map =
4261 masm->isolate()->factory()->allocation_site_map();
4262 __ Cmp(FieldOperand(rbx, 0), allocation_site_map);
4263 __ Assert(equal, kExpectedAllocationSite);
4264 }
4265
4266 // Save the resulting elements kind in type info. We can't just store the
4267 // kind in the AllocationSite::transition_info field, because the elements
4268 // kind is restricted to a portion of the field; the upper bits must be left alone.
4269 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4270 __ SmiAddConstant(FieldOperand(rbx, AllocationSite::kTransitionInfoOffset),
4271 Smi::FromInt(kFastElementsKindPackedToHoley));
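  // kFastElementsKindPackedToHoley is the delta between a packed kind and
  // its holey counterpart, so this smi addition flips only the holeyness of
  // the kind stored in the low bits of transition_info.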
4272
4273 __ bind(&normal_sequence);
4274 int last_index = GetSequenceIndexFromFastElementsKind(
4275 TERMINAL_FAST_ELEMENTS_KIND);
4276 for (int i = 0; i <= last_index; ++i) {
4277 Label next;
4278 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4279 __ cmpl(rdx, Immediate(kind));
4280 __ j(not_equal, &next);
4281 ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
4282 __ TailCallStub(&stub);
4283 __ bind(&next);
4284 }
4285
4286 // If we reached this point there is a problem.
4287 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4288 } else {
4289 UNREACHABLE();
4290 }
4291 }
4292
4293
4294 template<class T>
4295 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
4296 int to_index = GetSequenceIndexFromFastElementsKind(
4297 TERMINAL_FAST_ELEMENTS_KIND);
4298 for (int i = 0; i <= to_index; ++i) {
4299 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4300 T stub(isolate, kind);
4301 stub.GetCode();
4302 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
4303 T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
4304 stub1.GetCode();
4305 }
4306 }
4307 }
4308
4309
4310 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
4311 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
4312 isolate);
4313 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
4314 isolate);
4315 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
4316 isolate);
4317 }
4318
4319
4320 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
4321 Isolate* isolate) {
4322 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
4323 for (int i = 0; i < 2; i++) {
4324 // For internal arrays we only need a few things.
4325 InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
4326 stubh1.GetCode();
4327 InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
4328 stubh2.GetCode();
4329 InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
4330 stubh3.GetCode();
4331 }
4332 }
4333
4334
4335 void ArrayConstructorStub::GenerateDispatchToArrayStub(
4336 MacroAssembler* masm,
4337 AllocationSiteOverrideMode mode) {
4338 if (argument_count() == ANY) {
4339 Label not_zero_case, not_one_case;
4340 __ testp(rax, rax);
4341 __ j(not_zero, &not_zero_case);
4342 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4343
4344 __ bind(&not_zero_case);
4345 __ cmpl(rax, Immediate(1));
4346 __ j(greater, &not_one_case);
4347 CreateArrayDispatchOneArgument(masm, mode);
4348
4349 __ bind(&not_one_case);
4350 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4351 } else if (argument_count() == NONE) {
4352 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4353 } else if (argument_count() == ONE) {
4354 CreateArrayDispatchOneArgument(masm, mode);
4355 } else if (argument_count() == MORE_THAN_ONE) {
4356 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4357 } else {
4358 UNREACHABLE();
4359 }
4360 }
4361
4362
4363 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
4364 // ----------- S t a t e -------------
4365 // -- rax : argc
4366 // -- rbx : AllocationSite or undefined
4367 // -- rdi : constructor
4368 // -- rsp[0] : return address
4369 // -- rsp[8] : last argument
4370 // -----------------------------------
4371 if (FLAG_debug_code) {
4372 // The array construct code is only set for the global and natives
4373 // builtin Array functions, which always have maps.
4374
4375 // Initial map for the builtin Array function should be a map.
4376 __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
4377 // A Smi check catches both a NULL pointer and a Smi.
4378 STATIC_ASSERT(kSmiTag == 0);
4379 Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
4380 __ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
4381 __ CmpObjectType(rcx, MAP_TYPE, rcx);
4382 __ Check(equal, kUnexpectedInitialMapForArrayFunction);
4383
4384 // We should either have undefined in rbx or a valid AllocationSite
4385 __ AssertUndefinedOrAllocationSite(rbx);
4386 }
4387
4388 Label no_info;
4389 // If the feedback vector is the undefined value, call an array constructor
4390 // that doesn't use AllocationSites.
4391 __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
4392 __ j(equal, &no_info);
4393
4394 // Only look at the lower 16 bits of the transition info.
4395 __ movp(rdx, FieldOperand(rbx, AllocationSite::kTransitionInfoOffset));
4396 __ SmiToInteger32(rdx, rdx);
4397 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4398 __ andp(rdx, Immediate(AllocationSite::ElementsKindBits::kMask));
4399 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
4400
4401 __ bind(&no_info);
4402 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
4403 }
4404
4405
4406 void InternalArrayConstructorStub::GenerateCase(
4407 MacroAssembler* masm, ElementsKind kind) {
4408 Label not_zero_case, not_one_case;
4409 Label normal_sequence;
4410
4411 __ testp(rax, rax);
4412 __ j(not_zero, &not_zero_case);
4413 InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
4414 __ TailCallStub(&stub0);
4415
4416 __ bind(&not_zero_case);
4417 __ cmpl(rax, Immediate(1));
4418 __ j(greater, &not_one_case);
4419
4420 if (IsFastPackedElementsKind(kind)) {
4421 // We might need to create a holey array:
4422 // look at the first argument.
4423 StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
4424 __ movp(rcx, args.GetArgumentOperand(0));
4425 __ testp(rcx, rcx);
4426 __ j(zero, &normal_sequence);
4427
4428 InternalArraySingleArgumentConstructorStub
4429 stub1_holey(isolate(), GetHoleyElementsKind(kind));
4430 __ TailCallStub(&stub1_holey);
4431 }
4432
4433 __ bind(&normal_sequence);
4434 InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
4435 __ TailCallStub(&stub1);
4436
4437 __ bind(&not_one_case);
4438 InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
4439 __ TailCallStub(&stubN);
4440 }
4441
4442
4443 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
4444 // ----------- S t a t e -------------
4445 // -- rax : argc
4446 // -- rdi : constructor
4447 // -- rsp[0] : return address
4448 // -- rsp[8] : last argument
4449 // -----------------------------------
4450
4451 if (FLAG_debug_code) {
4452 // The array construct code is only set for the global and natives
4453 // builtin Array functions which always have maps.
4454
4455 // Initial map for the builtin Array function should be a map.
4456 __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
4457 // Will both indicate a NULL and a Smi.
4458 STATIC_ASSERT(kSmiTag == 0);
4459 Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
4460 __ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
4461 __ CmpObjectType(rcx, MAP_TYPE, rcx);
4462 __ Check(equal, kUnexpectedInitialMapForArrayFunction);
4463 }
4464
4465 // Figure out the right elements kind
4466 __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
4467
4468 // Load the map's "bit field 2" into |result|. We only need the first byte,
4469 // but the following masking takes care of that anyway.
4470 __ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
4471 // Retrieve elements_kind from bit field 2.
4472 __ DecodeField<Map::ElementsKindBits>(rcx);
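  // rcx now holds the ElementsKind of the constructor's initial map;
  // internal arrays only use FAST_ELEMENTS or FAST_HOLEY_ELEMENTS, which
  // the debug check below verifies.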

  if (FLAG_debug_code) {
    Label done;
    __ cmpl(rcx, Immediate(FAST_ELEMENTS));
    __ j(equal, &done);
    __ cmpl(rcx, Immediate(FAST_HOLEY_ELEMENTS));
    __ Assert(equal,
              kInvalidElementsKindForInternalArrayOrInternalPackedArray);
    __ bind(&done);
  }

  Label fast_elements_case;
  __ cmpl(rcx, Immediate(FAST_ELEMENTS));
  __ j(equal, &fast_elements_case);
  GenerateCase(masm, FAST_HOLEY_ELEMENTS);

  __ bind(&fast_elements_case);
  GenerateCase(masm, FAST_ELEMENTS);
}


void CallApiFunctionStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  // -- rax : callee
  // -- rbx : call_data
  // -- rcx : holder
  // -- rdx : api_function_address
  // -- rsi : context
  // --
  // -- rsp[0] : return address
  // -- rsp[8] : last argument
  // -- ...
  // -- rsp[argc * 8] : first argument
  // -- rsp[(argc + 1) * 8] : receiver
  // -----------------------------------

  Register callee = rax;
  Register call_data = rbx;
  Register holder = rcx;
  Register api_function_address = rdx;
  Register return_address = rdi;
  Register context = rsi;

  int argc = this->argc();
  bool is_store = this->is_store();
  bool call_data_undefined = this->call_data_undefined();

  typedef FunctionCallbackArguments FCA;

  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
  STATIC_ASSERT(FCA::kCalleeIndex == 5);
  STATIC_ASSERT(FCA::kDataIndex == 4);
  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
  STATIC_ASSERT(FCA::kIsolateIndex == 1);
  STATIC_ASSERT(FCA::kHolderIndex == 0);
  STATIC_ASSERT(FCA::kArgsLength == 7);
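  // The pushes below lay out the FunctionCallbackArguments array on the
  // stack in reverse index order, so the holder ends up at the lowest
  // address (index 0) and the saved context at the highest (index 6),
  // matching the indices asserted above.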

  __ PopReturnAddressTo(return_address);

  // context save
  __ Push(context);
  // load context from callee
  __ movp(context, FieldOperand(callee, JSFunction::kContextOffset));

  // callee
  __ Push(callee);

  // call data
  __ Push(call_data);
  // call_data has now been pushed, so its register can serve as a scratch
  // register; it already holds undefined when call_data_undefined is set.
  Register scratch = call_data;
  if (!call_data_undefined) {
    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  }
  // return value
  __ Push(scratch);
  // return value default
  __ Push(scratch);
  // isolate
  __ Move(scratch,
          ExternalReference::isolate_address(isolate()));
  __ Push(scratch);
  // holder
  __ Push(holder);

  __ movp(scratch, rsp);
  // Push return address back on stack.
  __ PushReturnAddressFrom(return_address);

  // Allocate the FunctionCallbackInfo structure in the arguments' space,
  // since it's not controlled by GC.
  const int kApiStackSpace = 4;

  __ PrepareCallApiFunction(kApiStackSpace);

  // FunctionCallbackInfo::implicit_args_.
  __ movp(StackSpaceOperand(0), scratch);
  __ addp(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize));
  __ movp(StackSpaceOperand(1), scratch);  // FunctionCallbackInfo::values_.
  __ Set(StackSpaceOperand(2), argc);  // FunctionCallbackInfo::length_.
  // FunctionCallbackInfo::is_construct_call_.
  __ Set(StackSpaceOperand(3), 0);
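  // At this point implicit_args_ points at the holder slot (index 0) and
  // values_ points at the first JS argument, just below the receiver;
  // the callback reads arguments at decreasing addresses from values_.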

#if defined(__MINGW64__) || defined(_WIN64)
  Register arguments_arg = rcx;
  Register callback_arg = rdx;
#else
  Register arguments_arg = rdi;
  Register callback_arg = rsi;
#endif

  // It's okay if api_function_address aliases callback_arg, but it must
  // not alias arguments_arg.
  DCHECK(!api_function_address.is(arguments_arg));

  // v8::InvocationCallback's argument.
  __ leap(arguments_arg, StackSpaceOperand(0));

  ExternalReference thunk_ref =
      ExternalReference::invoke_function_callback(isolate());

  // Accessor for FunctionCallbackInfo and first js arg.
  StackArgumentsAccessor args_from_rbp(rbp, FCA::kArgsLength + 1,
                                       ARGUMENTS_DONT_CONTAIN_RECEIVER);
  Operand context_restore_operand = args_from_rbp.GetArgumentOperand(
      FCA::kArgsLength - FCA::kContextSaveIndex);
  // Stores return the first JS argument.
  Operand return_value_operand = args_from_rbp.GetArgumentOperand(
      is_store ? 0 : FCA::kArgsLength - FCA::kReturnValueOffset);
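  // The stack-space argument below (argc JS arguments plus the
  // FCA::kArgsLength implicit arguments plus the receiver) is the number
  // of stack slots dropped when the API call returns.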
  __ CallApiFunctionAndReturn(
      api_function_address,
      thunk_ref,
      callback_arg,
      argc + FCA::kArgsLength + 1,
      return_value_operand,
      &context_restore_operand);
}


void CallApiGetterStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  // -- rsp[0] : return address
  // -- rsp[8] : name
  // -- rsp[16 .. (16 + kArgsLength*8 - 8)] : PropertyCallbackArguments object
  // -- ...
  // -- r8 : api_function_address
  // -----------------------------------

#if defined(__MINGW64__) || defined(_WIN64)
  Register getter_arg = r8;
  Register accessor_info_arg = rdx;
  Register name_arg = rcx;
#else
  Register getter_arg = rdx;
  Register accessor_info_arg = rsi;
  Register name_arg = rdi;
#endif
  Register api_function_address = ApiGetterDescriptor::function_address();
  DCHECK(api_function_address.is(r8));
  Register scratch = rax;

  // v8::Arguments::values_ and the handle for the name.
  const int kStackSpace = PropertyCallbackArguments::kArgsLength + 1;

  // Allocate v8::AccessorInfo in non-GCed stack space.
  const int kArgStackSpace = 1;

  __ leap(name_arg, Operand(rsp, kPCOnStackSize));
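  // name_arg now points at the name handle, which sits just above the
  // return address on the stack.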

  __ PrepareCallApiFunction(kArgStackSpace);
  __ leap(scratch, Operand(name_arg, 1 * kPointerSize));

  // v8::PropertyAccessorInfo::args_.
  __ movp(StackSpaceOperand(0), scratch);

  // The context register (rsi) has been saved in PrepareCallApiFunction and
  // could be used to pass arguments.
  __ leap(accessor_info_arg, StackSpaceOperand(0));

  ExternalReference thunk_ref =
      ExternalReference::invoke_accessor_getter_callback(isolate());

  // It's okay if api_function_address aliases getter_arg, but it must not
  // alias accessor_info_arg or name_arg.
  DCHECK(!api_function_address.is(accessor_info_arg) &&
         !api_function_address.is(name_arg));

  // The name handle is counted as an argument.
  StackArgumentsAccessor args(rbp, PropertyCallbackArguments::kArgsLength);
  Operand return_value_operand = args.GetArgumentOperand(
      PropertyCallbackArguments::kArgsLength - 1 -
      PropertyCallbackArguments::kReturnValueOffset);
  __ CallApiFunctionAndReturn(api_function_address,
                              thunk_ref,
                              getter_arg,
                              kStackSpace,
                              return_value_operand,
                              NULL);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64