1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/code-stubs.h"
6
7 #include <sstream>
8
9 #include "src/arguments.h"
10 #include "src/ast/ast.h"
11 #include "src/bootstrapper.h"
12 #include "src/code-factory.h"
13 #include "src/code-stub-assembler.h"
14 #include "src/counters.h"
15 #include "src/factory.h"
16 #include "src/gdb-jit.h"
17 #include "src/heap/heap-inl.h"
18 #include "src/ic/ic-stats.h"
19 #include "src/ic/ic.h"
20 #include "src/macro-assembler.h"
21 #include "src/objects-inl.h"
22 #include "src/tracing/tracing-category-observer.h"
23
24 namespace v8 {
25 namespace internal {
26
27 using compiler::CodeAssemblerState;
28
// Runtime entry installed as the deopt handler for stubs that must never
// deoptimize; reaching it aborts the process via FATAL. The return statement
// only satisfies the RUNTIME_FUNCTION signature and is unreachable.
RUNTIME_FUNCTION(UnexpectedStubMiss) {
  FATAL("Unexpected deopt of a stub");
  return Smi::kZero;
}
33
CodeStubDescriptor(CodeStub * stub)34 CodeStubDescriptor::CodeStubDescriptor(CodeStub* stub)
35 : isolate_(stub->isolate()),
36 call_descriptor_(stub->GetCallInterfaceDescriptor()),
37 stack_parameter_count_(no_reg),
38 hint_stack_parameter_count_(-1),
39 function_mode_(NOT_JS_FUNCTION_STUB_MODE),
40 deoptimization_handler_(NULL),
41 miss_handler_(),
42 has_miss_handler_(false) {
43 stub->InitializeDescriptor(this);
44 }
45
CodeStubDescriptor(Isolate * isolate,uint32_t stub_key)46 CodeStubDescriptor::CodeStubDescriptor(Isolate* isolate, uint32_t stub_key)
47 : isolate_(isolate),
48 stack_parameter_count_(no_reg),
49 hint_stack_parameter_count_(-1),
50 function_mode_(NOT_JS_FUNCTION_STUB_MODE),
51 deoptimization_handler_(NULL),
52 miss_handler_(),
53 has_miss_handler_(false) {
54 CodeStub::InitializeDescriptor(isolate, stub_key, this);
55 }
56
57
// Records the stub's deoptimization entry point and optional hints. Used by
// stubs that do not pass an explicit stack-parameter-count register.
void CodeStubDescriptor::Initialize(Address deoptimization_handler,
                                    int hint_stack_parameter_count,
                                    StubFunctionMode function_mode) {
  deoptimization_handler_ = deoptimization_handler;
  hint_stack_parameter_count_ = hint_stack_parameter_count;
  function_mode_ = function_mode;
}
65
66
// Same as the three-argument overload, but additionally records the register
// that holds the dynamic stack parameter count.
void CodeStubDescriptor::Initialize(Register stack_parameter_count,
                                    Address deoptimization_handler,
                                    int hint_stack_parameter_count,
                                    StubFunctionMode function_mode) {
  Initialize(deoptimization_handler, hint_stack_parameter_count, function_mode);
  stack_parameter_count_ = stack_parameter_count;
}
74
75
FindCodeInCache(Code ** code_out)76 bool CodeStub::FindCodeInCache(Code** code_out) {
77 UnseededNumberDictionary* stubs = isolate()->heap()->code_stubs();
78 int index = stubs->FindEntry(isolate(), GetKey());
79 if (index != UnseededNumberDictionary::kNotFound) {
80 *code_out = Code::cast(stubs->ValueAt(index));
81 return true;
82 }
83 return false;
84 }
85
86
// Logs and accounts for a freshly generated stub: emits a code-creation
// profiler event (named via operator<< on the stub) and bumps the total
// stub code size counter. Debug builds also verify embedded objects.
void CodeStub::RecordCodeGeneration(Handle<Code> code) {
  // Render the stub's printable name (PrintName via operator<<).
  std::ostringstream os;
  os << *this;
  PROFILE(isolate(),
          CodeCreateEvent(CodeEventListener::STUB_TAG,
                          AbstractCode::cast(*code), os.str().c_str()));
  Counters* counters = isolate()->counters();
  counters->total_stubs_code_size()->Increment(code->instruction_size());
#ifdef DEBUG
  code->VerifyEmbeddedObjects();
#endif
}
99
100
// Default code kind for stubs; subclasses may override (not visible here).
Code::Kind CodeStub::GetCodeKind() const {
  return Code::STUB;
}
104
105
// Combines the stub's code kind and extra IC state into the Code flags word.
Code::Flags CodeStub::GetCodeFlags() const {
  return Code::ComputeFlags(GetCodeKind(), GetExtraICState());
}
109
GetCodeCopy(const FindAndReplacePattern & pattern)110 Handle<Code> CodeStub::GetCodeCopy(const FindAndReplacePattern& pattern) {
111 Handle<Code> ic = GetCode();
112 ic = isolate()->factory()->CopyCode(ic);
113 ic->FindAndReplace(pattern);
114 RecordCodeGeneration(ic);
115 return ic;
116 }
117
DeleteStubFromCacheForTesting()118 void CodeStub::DeleteStubFromCacheForTesting() {
119 Heap* heap = isolate_->heap();
120 Handle<UnseededNumberDictionary> dict(heap->code_stubs());
121 dict = UnseededNumberDictionary::DeleteKey(dict, GetKey());
122 heap->SetRootCodeStubs(*dict);
123 }
124
GenerateCode()125 Handle<Code> PlatformCodeStub::GenerateCode() {
126 Factory* factory = isolate()->factory();
127
128 // Generate the new code.
129 MacroAssembler masm(isolate(), NULL, 256, CodeObjectRequired::kYes);
130
131 {
132 // Update the static counter each time a new code stub is generated.
133 isolate()->counters()->code_stubs()->Increment();
134
135 // Generate the code for the stub.
136 masm.set_generating_stub(true);
137 // TODO(yangguo): remove this once we can serialize IC stubs.
138 masm.enable_serializer();
139 NoCurrentFrameScope scope(&masm);
140 Generate(&masm);
141 }
142
143 // Create the code object.
144 CodeDesc desc;
145 masm.GetCode(&desc);
146 // Copy the generated code into a heap object.
147 Code::Flags flags = Code::ComputeFlags(GetCodeKind(), GetExtraICState());
148 Handle<Code> new_object = factory->NewCode(
149 desc, flags, masm.CodeObject(), NeedsImmovableCode());
150 return new_object;
151 }
152
153
// Returns the code object for this stub, generating and caching it on first
// use. Lookup goes through either the stub-specific special cache or the
// isolate-wide code-stubs dictionary keyed by GetKey().
Handle<Code> CodeStub::GetCode() {
  Heap* heap = isolate()->heap();
  Code* code;
  if (UseSpecialCache() ? FindCodeInSpecialCache(&code)
                        : FindCodeInCache(&code)) {
    DCHECK(GetCodeKind() == code->kind());
    return Handle<Code>(code);
  }

  {
    HandleScope scope(isolate());

    // Generate, key, finalize and account for the new code object.
    Handle<Code> new_object = GenerateCode();
    new_object->set_stub_key(GetKey());
    FinishCode(new_object);
    RecordCodeGeneration(new_object);

#ifdef ENABLE_DISASSEMBLER
    if (FLAG_print_code_stubs) {
      CodeTracer::Scope trace_scope(isolate()->GetCodeTracer());
      OFStream os(trace_scope.file());
      std::ostringstream name;
      name << *this;
      new_object->Disassemble(name.str().c_str(), os);
      os << "\n";
    }
#endif

    if (UseSpecialCache()) {
      AddToSpecialCache(new_object);
    } else {
      // Update the dictionary and the root in Heap.
      Handle<UnseededNumberDictionary> dict =
          UnseededNumberDictionary::AtNumberPut(
              Handle<UnseededNumberDictionary>(heap->code_stubs()),
              GetKey(),
              new_object);
      heap->SetRootCodeStubs(*dict);
    }
    // NOTE(review): the raw Code* deliberately outlives the inner
    // HandleScope; a fresh handle is created below — presumably no GC can
    // move/collect it in between. Confirm before touching this ordering.
    code = *new_object;
  }

  Activate(code);
  DCHECK(!NeedsImmovableCode() || Heap::IsImmovable(code) ||
         heap->code_space()->FirstPage()->Contains(code->address()));
  return Handle<Code>(code, isolate());
}
201
202
// Maps a major key to a static, human-readable stub name ("FooStub").
// The per-stub cases are expanded from CODE_STUB_LIST via DEF_CASE.
const char* CodeStub::MajorName(CodeStub::Major major_key) {
  switch (major_key) {
#define DEF_CASE(name) case name: return #name "Stub";
    CODE_STUB_LIST(DEF_CASE)
#undef DEF_CASE
    case NoCache:
      return "<NoCache>Stub";
    case NUMBER_OF_IDS:
      // NUMBER_OF_IDS is a count, never a valid key.
      UNREACHABLE();
      return NULL;
  }
  return NULL;
}
216
217
// Prints the stub's base name (from its major key); subclasses append
// distinguishing suffixes by overriding this.
void CodeStub::PrintBaseName(std::ostream& os) const {  // NOLINT
  os << MajorName(MajorKey());
}
221
222
// Full printable name: base name followed by any state-dependent suffix.
void CodeStub::PrintName(std::ostream& os) const {  // NOLINT
  PrintBaseName(os);
  PrintState(os);
}
227
228
// Decodes the major key from {key}, instantiates the matching stub class on
// the stack (cases expanded from CODE_STUB_LIST) and invokes {call} on it,
// passing {value_out} through for the callee to fill in.
void CodeStub::Dispatch(Isolate* isolate, uint32_t key, void** value_out,
                        DispatchedCall call) {
  switch (MajorKeyFromKey(key)) {
#define DEF_CASE(NAME)         \
  case NAME: {                 \
    NAME##Stub stub(key, isolate); \
    CodeStub* pstub = &stub;   \
    call(pstub, value_out);    \
    break;                     \
  }
    CODE_STUB_LIST(DEF_CASE)
#undef DEF_CASE
    case NUMBER_OF_IDS:
    case NoCache:
      // Neither is a dispatchable stub kind.
      UNREACHABLE();
      break;
  }
}
247
248
InitializeDescriptorDispatchedCall(CodeStub * stub,void ** value_out)249 static void InitializeDescriptorDispatchedCall(CodeStub* stub,
250 void** value_out) {
251 CodeStubDescriptor* descriptor_out =
252 reinterpret_cast<CodeStubDescriptor*>(value_out);
253 stub->InitializeDescriptor(descriptor_out);
254 descriptor_out->set_call_descriptor(stub->GetCallInterfaceDescriptor());
255 }
256
257
// Initializes {desc} for the stub identified by {key} by dispatching to the
// matching stub class (see InitializeDescriptorDispatchedCall above).
void CodeStub::InitializeDescriptor(Isolate* isolate, uint32_t key,
                                    CodeStubDescriptor* desc) {
  void** value_out = reinterpret_cast<void**>(desc);
  Dispatch(isolate, key, value_out, &InitializeDescriptorDispatchedCall);
}
263
264
GetCodeDispatchCall(CodeStub * stub,void ** value_out)265 void CodeStub::GetCodeDispatchCall(CodeStub* stub, void** value_out) {
266 Handle<Code>* code_out = reinterpret_cast<Handle<Code>*>(value_out);
267 // Code stubs with special cache cannot be recreated from stub key.
268 *code_out = stub->UseSpecialCache() ? Handle<Code>() : stub->GetCode();
269 }
270
271
// Recreates a stub from its key and returns its code; the handle is empty
// for stubs that use a special cache (see GetCodeDispatchCall).
MaybeHandle<Code> CodeStub::GetCode(Isolate* isolate, uint32_t key) {
  HandleScope scope(isolate);
  Handle<Code> code;
  void** value_out = reinterpret_cast<void**>(&code);
  Dispatch(isolate, key, value_out, &GetCodeDispatchCall);
  return scope.CloseAndEscape(code);
}
279
280
// static
// Pre-generates BinaryOpIC stubs: one uninitialized stub per binary token
// in [BIT_OR, MOD], plus special state versions via BinaryOpICState.
// Skipped entirely in --minimal builds.
void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate) {
  if (FLAG_minimal) return;
  // Generate the uninitialized versions of the stub.
  for (int op = Token::BIT_OR; op <= Token::MOD; ++op) {
    BinaryOpICStub stub(isolate, static_cast<Token::Value>(op));
    stub.GetCode();
  }

  // Generate special versions of the stub.
  BinaryOpICState::GenerateAheadOfTime(isolate, &GenerateAheadOfTime);
}
293
294
// Appends the IC state to the stub's printable name.
void BinaryOpICStub::PrintState(std::ostream& os) const {  // NOLINT
  os << state();
}
298
299
// static
// Pre-generates the BinaryOpIC stub for one specific state (callback target
// for BinaryOpICState::GenerateAheadOfTime). No-op in --minimal builds.
void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate,
                                         const BinaryOpICState& state) {
  if (FLAG_minimal) return;
  BinaryOpICStub stub(isolate, state);
  stub.GetCode();
}
307
308
// static
// Pre-generates the allocation-site-tracking variants for all special
// BinaryOpIC states.
void BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
  // Generate special versions of the stub.
  BinaryOpICState::GenerateAheadOfTime(isolate, &GenerateAheadOfTime);
}
314
315
// Appends the IC state to the stub's printable name.
void BinaryOpICWithAllocationSiteStub::PrintState(
    std::ostream& os) const {  // NOLINT
  os << state();
}
320
321
322 // static
GenerateAheadOfTime(Isolate * isolate,const BinaryOpICState & state)323 void BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(
324 Isolate* isolate, const BinaryOpICState& state) {
325 if (state.CouldCreateAllocationMementos()) {
326 BinaryOpICWithAllocationSiteStub stub(isolate, state);
327 stub.GetCode();
328 }
329 }
330
// Name includes the conversion flags and pretenuring mode so distinct
// configurations are distinguishable in logs.
void StringAddStub::PrintBaseName(std::ostream& os) const {  // NOLINT
  os << "StringAddStub_" << flags() << "_" << pretenure_flag();
}
334
GenerateAssembly(compiler::CodeAssemblerState * state) const335 void StringAddStub::GenerateAssembly(
336 compiler::CodeAssemblerState* state) const {
337 typedef compiler::Node Node;
338 CodeStubAssembler assembler(state);
339 Node* left = assembler.Parameter(Descriptor::kLeft);
340 Node* right = assembler.Parameter(Descriptor::kRight);
341 Node* context = assembler.Parameter(Descriptor::kContext);
342
343 if ((flags() & STRING_ADD_CHECK_LEFT) != 0) {
344 DCHECK((flags() & STRING_ADD_CONVERT) != 0);
345 // TODO(danno): The ToString and JSReceiverToPrimitive below could be
346 // combined to avoid duplicate smi and instance type checks.
347 left = assembler.ToString(context,
348 assembler.JSReceiverToPrimitive(context, left));
349 }
350 if ((flags() & STRING_ADD_CHECK_RIGHT) != 0) {
351 DCHECK((flags() & STRING_ADD_CONVERT) != 0);
352 // TODO(danno): The ToString and JSReceiverToPrimitive below could be
353 // combined to avoid duplicate smi and instance type checks.
354 right = assembler.ToString(context,
355 assembler.JSReceiverToPrimitive(context, right));
356 }
357
358 if ((flags() & STRING_ADD_CHECK_BOTH) == 0) {
359 CodeStubAssembler::AllocationFlag flags =
360 (pretenure_flag() == TENURED) ? CodeStubAssembler::kPretenured
361 : CodeStubAssembler::kNone;
362 assembler.Return(assembler.StringAdd(context, left, right, flags));
363 } else {
364 Callable callable = CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE,
365 pretenure_flag());
366 assembler.TailCallStub(callable, context, left, right);
367 }
368 }
369
// Collapses the per-side CompareIC states into a generic InlineCacheState:
// the more advanced of the two sides (Max) determines the result.
InlineCacheState CompareICStub::GetICState() const {
  CompareICState::State state = Max(left(), right());
  switch (state) {
    case CompareICState::UNINITIALIZED:
      return ::v8::internal::UNINITIALIZED;
    case CompareICState::BOOLEAN:
    case CompareICState::SMI:
    case CompareICState::NUMBER:
    case CompareICState::INTERNALIZED_STRING:
    case CompareICState::STRING:
    case CompareICState::UNIQUE_NAME:
    case CompareICState::RECEIVER:
    case CompareICState::KNOWN_RECEIVER:
      // Any single concrete type maps to MONOMORPHIC.
      return MONOMORPHIC;
    case CompareICState::GENERIC:
      return ::v8::internal::GENERIC;
  }
  UNREACHABLE();
  return ::v8::internal::UNINITIALIZED;
}
390
391
// The machine condition corresponding to this stub's comparison operator.
Condition CompareICStub::GetCondition() const {
  return CompareIC::ComputeCondition(op());
}
395
396
// Dispatches code generation to the platform-specific Generate* helper that
// matches this compare IC's current state.
void CompareICStub::Generate(MacroAssembler* masm) {
  switch (state()) {
    case CompareICState::UNINITIALIZED:
      GenerateMiss(masm);
      break;
    case CompareICState::BOOLEAN:
      GenerateBooleans(masm);
      break;
    case CompareICState::SMI:
      GenerateSmis(masm);
      break;
    case CompareICState::NUMBER:
      GenerateNumbers(masm);
      break;
    case CompareICState::STRING:
      GenerateStrings(masm);
      break;
    case CompareICState::INTERNALIZED_STRING:
      GenerateInternalizedStrings(masm);
      break;
    case CompareICState::UNIQUE_NAME:
      GenerateUniqueNames(masm);
      break;
    case CompareICState::RECEIVER:
      GenerateReceivers(masm);
      break;
    case CompareICState::KNOWN_RECEIVER:
      // KNOWN_RECEIVER requires a concrete map to specialize against.
      DCHECK(*known_map_ != NULL);
      GenerateKnownReceivers(masm);
      break;
    case CompareICState::GENERIC:
      GenerateGeneric(masm);
      break;
  }
}
432
// Generates code for a TurboFan (CodeStubAssembler based) stub: sets up a
// CodeAssemblerState with this stub's descriptor and flags, runs the stub's
// GenerateAssembly, and compiles the result to a Code object.
Handle<Code> TurboFanCodeStub::GenerateCode() {
  const char* name = CodeStub::MajorName(MajorKey());
  Zone zone(isolate()->allocator(), ZONE_NAME);
  CallInterfaceDescriptor descriptor(GetCallInterfaceDescriptor());
  compiler::CodeAssemblerState state(isolate(), &zone, descriptor,
                                     GetCodeFlags(), name);
  GenerateAssembly(&state);
  return compiler::CodeAssembler::GenerateCode(&state);
}
442
// Emits a combined elements-kind transition + keyed element store. On any
// failure (or when transition tracing is on) it falls through to the
// ElementsTransitionAndStoreIC_Miss runtime function.
void ElementsTransitionAndStoreStub::GenerateAssembly(
    compiler::CodeAssemblerState* state) const {
  typedef CodeStubAssembler::Label Label;
  typedef compiler::Node Node;
  CodeStubAssembler assembler(state);

  Node* receiver = assembler.Parameter(Descriptor::kReceiver);
  Node* key = assembler.Parameter(Descriptor::kName);
  Node* value = assembler.Parameter(Descriptor::kValue);
  Node* map = assembler.Parameter(Descriptor::kMap);
  Node* slot = assembler.Parameter(Descriptor::kSlot);
  Node* vector = assembler.Parameter(Descriptor::kVector);
  Node* context = assembler.Parameter(Descriptor::kContext);

  assembler.Comment(
      "ElementsTransitionAndStoreStub: from_kind=%s, to_kind=%s,"
      " is_jsarray=%d, store_mode=%d",
      ElementsKindToString(from_kind()), ElementsKindToString(to_kind()),
      is_jsarray(), store_mode());

  Label miss(&assembler);

  if (FLAG_trace_elements_transitions) {
    // Tracing elements transitions is the job of the runtime.
    assembler.Goto(&miss);
  } else {
    // Transition the backing store, then perform the store itself; either
    // step may bail out to {miss}.
    assembler.TransitionElementsKind(receiver, map, from_kind(), to_kind(),
                                     is_jsarray(), &miss);
    assembler.EmitElementStore(receiver, key, value, is_jsarray(), to_kind(),
                               store_mode(), &miss);
    assembler.Return(value);
  }

  assembler.Bind(&miss);
  {
    assembler.Comment("Miss");
    assembler.TailCallRuntime(Runtime::kElementsTransitionAndStoreIC_Miss,
                              context, receiver, key, value, map, slot, vector);
  }
}
483
GenerateAssembly(compiler::CodeAssemblerState * state) const484 void AllocateHeapNumberStub::GenerateAssembly(
485 compiler::CodeAssemblerState* state) const {
486 typedef compiler::Node Node;
487 CodeStubAssembler assembler(state);
488
489 Node* result = assembler.AllocateHeapNumber();
490 assembler.Return(result);
491 }
492
GenerateAssembly(compiler::CodeAssemblerState * state) const493 void StringLengthStub::GenerateAssembly(
494 compiler::CodeAssemblerState* state) const {
495 CodeStubAssembler assembler(state);
496 compiler::Node* value = assembler.Parameter(0);
497 compiler::Node* string = assembler.LoadJSValueValue(value);
498 compiler::Node* result = assembler.LoadStringLength(string);
499 assembler.Return(result);
500 }
501
// Expands the GenerateAssembly body for each binary-op-with-feedback stub:
// it simply delegates to the stub's static Generate() with the parameters
// taken from BinaryOpWithVectorDescriptor (slot widened to a word).
#define BINARY_OP_STUB(Name)                                                  \
  void Name::GenerateAssembly(compiler::CodeAssemblerState* state) const {    \
    typedef BinaryOpWithVectorDescriptor Descriptor;                          \
    CodeStubAssembler assembler(state);                                       \
    assembler.Return(Generate(                                                \
        &assembler, assembler.Parameter(Descriptor::kLeft),                   \
        assembler.Parameter(Descriptor::kRight),                              \
        assembler.ChangeUint32ToWord(assembler.Parameter(Descriptor::kSlot)), \
        assembler.Parameter(Descriptor::kVector),                             \
        assembler.Parameter(Descriptor::kContext)));                          \
  }
BINARY_OP_STUB(AddWithFeedbackStub)
BINARY_OP_STUB(SubtractWithFeedbackStub)
BINARY_OP_STUB(MultiplyWithFeedbackStub)
BINARY_OP_STUB(DivideWithFeedbackStub)
BINARY_OP_STUB(ModulusWithFeedbackStub)
#undef BINARY_OP_STUB
519
// static
// Emits "+" with type-feedback collection: fast paths for Smi and double
// addition and for string concatenation, falling back to the generic Add
// stub otherwise. The observed operand kinds are folded into
// {var_type_feedback} and written to {feedback_vector} at {slot_id} before
// the result node is returned.
compiler::Node* AddWithFeedbackStub::Generate(CodeStubAssembler* assembler,
                                              compiler::Node* lhs,
                                              compiler::Node* rhs,
                                              compiler::Node* slot_id,
                                              compiler::Node* feedback_vector,
                                              compiler::Node* context) {
  typedef CodeStubAssembler::Label Label;
  typedef compiler::Node Node;
  typedef CodeStubAssembler::Variable Variable;

  // Shared entry for floating point addition.
  Label do_fadd(assembler), if_lhsisnotnumber(assembler, Label::kDeferred),
      check_rhsisoddball(assembler, Label::kDeferred),
      call_with_oddball_feedback(assembler), call_with_any_feedback(assembler),
      call_add_stub(assembler), end(assembler);
  Variable var_fadd_lhs(assembler, MachineRepresentation::kFloat64),
      var_fadd_rhs(assembler, MachineRepresentation::kFloat64),
      var_type_feedback(assembler, MachineRepresentation::kTaggedSigned),
      var_result(assembler, MachineRepresentation::kTagged);

  // Check if the {lhs} is a Smi or a HeapObject.
  Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
  assembler->Branch(assembler->TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);

  assembler->Bind(&if_lhsissmi);
  {
    // Check if the {rhs} is also a Smi.
    Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
    assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
                      &if_rhsisnotsmi);

    assembler->Bind(&if_rhsissmi);
    {
      // Try fast Smi addition first.
      Node* pair =
          assembler->IntPtrAddWithOverflow(assembler->BitcastTaggedToWord(lhs),
                                           assembler->BitcastTaggedToWord(rhs));
      Node* overflow = assembler->Projection(1, pair);

      // Check if the Smi addition overflowed.
      Label if_overflow(assembler), if_notoverflow(assembler);
      assembler->Branch(overflow, &if_overflow, &if_notoverflow);

      assembler->Bind(&if_overflow);
      {
        // Overflowed: redo the addition as float64.
        var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
        var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
        assembler->Goto(&do_fadd);
      }

      assembler->Bind(&if_notoverflow);
      {
        var_type_feedback.Bind(
            assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall));
        var_result.Bind(assembler->BitcastWordToTaggedSigned(
            assembler->Projection(0, pair)));
        assembler->Goto(&end);
      }
    }

    assembler->Bind(&if_rhsisnotsmi);
    {
      // Load the map of {rhs}.
      Node* rhs_map = assembler->LoadMap(rhs);

      // Check if the {rhs} is a HeapNumber.
      assembler->GotoIfNot(assembler->IsHeapNumberMap(rhs_map),
                           &check_rhsisoddball);

      var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
      var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
      assembler->Goto(&do_fadd);
    }
  }

  assembler->Bind(&if_lhsisnotsmi);
  {
    // Load the map of {lhs}.
    Node* lhs_map = assembler->LoadMap(lhs);

    // Check if {lhs} is a HeapNumber.
    assembler->GotoIfNot(assembler->IsHeapNumberMap(lhs_map),
                         &if_lhsisnotnumber);

    // Check if the {rhs} is Smi.
    Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
    assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
                      &if_rhsisnotsmi);

    assembler->Bind(&if_rhsissmi);
    {
      var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
      var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
      assembler->Goto(&do_fadd);
    }

    assembler->Bind(&if_rhsisnotsmi);
    {
      // Load the map of {rhs}.
      Node* rhs_map = assembler->LoadMap(rhs);

      // Check if the {rhs} is a HeapNumber.
      assembler->GotoIfNot(assembler->IsHeapNumberMap(rhs_map),
                           &check_rhsisoddball);

      var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
      var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
      assembler->Goto(&do_fadd);
    }
  }

  assembler->Bind(&do_fadd);
  {
    var_type_feedback.Bind(
        assembler->SmiConstant(BinaryOperationFeedback::kNumber));
    Node* value =
        assembler->Float64Add(var_fadd_lhs.value(), var_fadd_rhs.value());
    Node* result = assembler->AllocateHeapNumberWithValue(value);
    var_result.Bind(result);
    assembler->Goto(&end);
  }

  assembler->Bind(&if_lhsisnotnumber);
  {
    // No checks on rhs are done yet. We just know lhs is not a number or Smi.
    Label if_lhsisoddball(assembler), if_lhsisnotoddball(assembler);
    Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
    Node* lhs_is_oddball = assembler->Word32Equal(
        lhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
    assembler->Branch(lhs_is_oddball, &if_lhsisoddball, &if_lhsisnotoddball);

    assembler->Bind(&if_lhsisoddball);
    {
      assembler->GotoIf(assembler->TaggedIsSmi(rhs),
                        &call_with_oddball_feedback);

      // Load the map of the {rhs}.
      Node* rhs_map = assembler->LoadMap(rhs);

      // Check if {rhs} is a HeapNumber.
      assembler->Branch(assembler->IsHeapNumberMap(rhs_map),
                        &call_with_oddball_feedback, &check_rhsisoddball);
    }

    assembler->Bind(&if_lhsisnotoddball);
    {
      // Exit unless {lhs} is a string.
      assembler->GotoIfNot(assembler->IsStringInstanceType(lhs_instance_type),
                           &call_with_any_feedback);

      // Check if the {rhs} is a smi, and exit the string check early if it is.
      assembler->GotoIf(assembler->TaggedIsSmi(rhs), &call_with_any_feedback);

      Node* rhs_instance_type = assembler->LoadInstanceType(rhs);

      // Exit unless {rhs} is a string. Since {lhs} is a string we no longer
      // need an Oddball check.
      assembler->GotoIfNot(assembler->IsStringInstanceType(rhs_instance_type),
                           &call_with_any_feedback);

      // string + string: record string feedback and concatenate directly.
      var_type_feedback.Bind(
          assembler->SmiConstant(BinaryOperationFeedback::kString));
      Callable callable = CodeFactory::StringAdd(
          assembler->isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
      var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));

      assembler->Goto(&end);
    }
  }

  assembler->Bind(&check_rhsisoddball);
  {
    // Check if rhs is an oddball. At this point we know lhs is either a
    // Smi or number or oddball and rhs is not a number or Smi.
    Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
    Node* rhs_is_oddball = assembler->Word32Equal(
        rhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
    assembler->Branch(rhs_is_oddball, &call_with_oddball_feedback,
                      &call_with_any_feedback);
  }

  assembler->Bind(&call_with_oddball_feedback);
  {
    var_type_feedback.Bind(
        assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
    assembler->Goto(&call_add_stub);
  }

  assembler->Bind(&call_with_any_feedback);
  {
    var_type_feedback.Bind(
        assembler->SmiConstant(BinaryOperationFeedback::kAny));
    assembler->Goto(&call_add_stub);
  }

  assembler->Bind(&call_add_stub);
  {
    // Generic fallback: the full Add stub handles everything else.
    Callable callable = CodeFactory::Add(assembler->isolate());
    var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));
    assembler->Goto(&end);
  }

  assembler->Bind(&end);
  assembler->UpdateFeedback(var_type_feedback.value(), feedback_vector,
                            slot_id);
  return var_result.value();
}
728
// static
// Emits "-" with type-feedback collection: fast paths for Smi and double
// subtraction, oddball handling, and a fallback to the generic Subtract
// stub. Feedback is written to {feedback_vector} at {slot_id} before the
// result node is returned. Structure mirrors AddWithFeedbackStub::Generate,
// minus the string path (subtraction has none).
compiler::Node* SubtractWithFeedbackStub::Generate(
    CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* rhs,
    compiler::Node* slot_id, compiler::Node* feedback_vector,
    compiler::Node* context) {
  typedef CodeStubAssembler::Label Label;
  typedef compiler::Node Node;
  typedef CodeStubAssembler::Variable Variable;

  // Shared entry for floating point subtraction.
  Label do_fsub(assembler), end(assembler), call_subtract_stub(assembler),
      if_lhsisnotnumber(assembler), check_rhsisoddball(assembler),
      call_with_any_feedback(assembler);
  Variable var_fsub_lhs(assembler, MachineRepresentation::kFloat64),
      var_fsub_rhs(assembler, MachineRepresentation::kFloat64),
      var_type_feedback(assembler, MachineRepresentation::kTaggedSigned),
      var_result(assembler, MachineRepresentation::kTagged);

  // Check if the {lhs} is a Smi or a HeapObject.
  Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
  assembler->Branch(assembler->TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);

  assembler->Bind(&if_lhsissmi);
  {
    // Check if the {rhs} is also a Smi.
    Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
    assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
                      &if_rhsisnotsmi);

    assembler->Bind(&if_rhsissmi);
    {
      // Try a fast Smi subtraction first.
      Node* pair =
          assembler->IntPtrSubWithOverflow(assembler->BitcastTaggedToWord(lhs),
                                           assembler->BitcastTaggedToWord(rhs));
      Node* overflow = assembler->Projection(1, pair);

      // Check if the Smi subtraction overflowed.
      Label if_overflow(assembler), if_notoverflow(assembler);
      assembler->Branch(overflow, &if_overflow, &if_notoverflow);

      assembler->Bind(&if_overflow);
      {
        // lhs, rhs - smi and result - number. combined - number.
        // The result doesn't fit into Smi range.
        var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
        var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
        assembler->Goto(&do_fsub);
      }

      assembler->Bind(&if_notoverflow);
      // lhs, rhs, result smi. combined - smi.
      var_type_feedback.Bind(
          assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall));
      var_result.Bind(
          assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
      assembler->Goto(&end);
    }

    assembler->Bind(&if_rhsisnotsmi);
    {
      // Load the map of the {rhs}.
      Node* rhs_map = assembler->LoadMap(rhs);

      // Check if {rhs} is a HeapNumber.
      assembler->GotoIfNot(assembler->IsHeapNumberMap(rhs_map),
                           &check_rhsisoddball);

      // Perform a floating point subtraction.
      var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
      var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
      assembler->Goto(&do_fsub);
    }
  }

  assembler->Bind(&if_lhsisnotsmi);
  {
    // Load the map of the {lhs}.
    Node* lhs_map = assembler->LoadMap(lhs);

    // Check if the {lhs} is a HeapNumber.
    assembler->GotoIfNot(assembler->IsHeapNumberMap(lhs_map),
                         &if_lhsisnotnumber);

    // Check if the {rhs} is a Smi.
    Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
    assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
                      &if_rhsisnotsmi);

    assembler->Bind(&if_rhsissmi);
    {
      // Perform a floating point subtraction.
      var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
      var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
      assembler->Goto(&do_fsub);
    }

    assembler->Bind(&if_rhsisnotsmi);
    {
      // Load the map of the {rhs}.
      Node* rhs_map = assembler->LoadMap(rhs);

      // Check if the {rhs} is a HeapNumber.
      assembler->GotoIfNot(assembler->IsHeapNumberMap(rhs_map),
                           &check_rhsisoddball);

      // Perform a floating point subtraction.
      var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
      var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
      assembler->Goto(&do_fsub);
    }
  }

  assembler->Bind(&do_fsub);
  {
    var_type_feedback.Bind(
        assembler->SmiConstant(BinaryOperationFeedback::kNumber));
    Node* lhs_value = var_fsub_lhs.value();
    Node* rhs_value = var_fsub_rhs.value();
    Node* value = assembler->Float64Sub(lhs_value, rhs_value);
    var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
    assembler->Goto(&end);
  }

  assembler->Bind(&if_lhsisnotnumber);
  {
    // No checks on rhs are done yet. We just know lhs is not a number or Smi.
    // Check if lhs is an oddball.
    Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
    Node* lhs_is_oddball = assembler->Word32Equal(
        lhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
    assembler->GotoIfNot(lhs_is_oddball, &call_with_any_feedback);

    Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
    assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
                      &if_rhsisnotsmi);

    assembler->Bind(&if_rhsissmi);
    {
      // oddball - smi: number-or-oddball feedback, generic stub does the work.
      var_type_feedback.Bind(
          assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
      assembler->Goto(&call_subtract_stub);
    }

    assembler->Bind(&if_rhsisnotsmi);
    {
      // Load the map of the {rhs}.
      Node* rhs_map = assembler->LoadMap(rhs);

      // Check if {rhs} is a HeapNumber.
      assembler->GotoIfNot(assembler->IsHeapNumberMap(rhs_map),
                           &check_rhsisoddball);

      var_type_feedback.Bind(
          assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
      assembler->Goto(&call_subtract_stub);
    }
  }

  assembler->Bind(&check_rhsisoddball);
  {
    // Check if rhs is an oddball. At this point we know lhs is either a
    // Smi or number or oddball and rhs is not a number or Smi.
    Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
    Node* rhs_is_oddball = assembler->Word32Equal(
        rhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
    assembler->GotoIfNot(rhs_is_oddball, &call_with_any_feedback);

    var_type_feedback.Bind(
        assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
    assembler->Goto(&call_subtract_stub);
  }

  assembler->Bind(&call_with_any_feedback);
  {
    var_type_feedback.Bind(
        assembler->SmiConstant(BinaryOperationFeedback::kAny));
    assembler->Goto(&call_subtract_stub);
  }

  assembler->Bind(&call_subtract_stub);
  {
    // Generic fallback: the full Subtract stub handles everything else.
    Callable callable = CodeFactory::Subtract(assembler->isolate());
    var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));
    assembler->Goto(&end);
  }

  assembler->Bind(&end);
  assembler->UpdateFeedback(var_type_feedback.value(), feedback_vector,
                            slot_id);
  return var_result.value();
}
921
922
// static
// Builds the code graph for the '*' operator with type-feedback collection:
// computes {lhs} * {rhs}, records BinaryOperationFeedback for {slot_id} in
// {feedback_vector} via UpdateFeedback, and returns the tagged result node.
// Fast paths cover Smi*Smi and any combination involving HeapNumbers; oddball
// operands and everything else fall back to the generic Multiply stub with
// kNumberOrOddball resp. kAny feedback.
compiler::Node* MultiplyWithFeedbackStub::Generate(
    CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* rhs,
    compiler::Node* slot_id, compiler::Node* feedback_vector,
    compiler::Node* context) {
  using compiler::Node;
  typedef CodeStubAssembler::Label Label;
  typedef CodeStubAssembler::Variable Variable;

  // Shared entry point for floating point multiplication. The deferred labels
  // are the slow paths; keeping them deferred keeps the fast path compact.
  Label do_fmul(assembler), if_lhsisnotnumber(assembler, Label::kDeferred),
      check_rhsisoddball(assembler, Label::kDeferred),
      call_with_oddball_feedback(assembler), call_with_any_feedback(assembler),
      call_multiply_stub(assembler), end(assembler);
  Variable var_lhs_float64(assembler, MachineRepresentation::kFloat64),
      var_rhs_float64(assembler, MachineRepresentation::kFloat64),
      var_result(assembler, MachineRepresentation::kTagged),
      var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);

  // Dispatch on the tag of {lhs} first.
  Label lhs_is_smi(assembler), lhs_is_not_smi(assembler);
  assembler->Branch(assembler->TaggedIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);

  assembler->Bind(&lhs_is_smi);
  {
    Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
    assembler->Branch(assembler->TaggedIsSmi(rhs), &rhs_is_smi,
                      &rhs_is_not_smi);

    assembler->Bind(&rhs_is_smi);
    {
      // Both {lhs} and {rhs} are Smis. The result is not necessarily a smi,
      // in case of overflow.
      var_result.Bind(assembler->SmiMul(lhs, rhs));
      // Record kSignedSmall feedback only if the product is still a Smi;
      // otherwise the result escaped the Smi range and we record kNumber.
      var_type_feedback.Bind(assembler->SelectSmiConstant(
          assembler->TaggedIsSmi(var_result.value()),
          BinaryOperationFeedback::kSignedSmall,
          BinaryOperationFeedback::kNumber));
      assembler->Goto(&end);
    }

    assembler->Bind(&rhs_is_not_smi);
    {
      Node* rhs_map = assembler->LoadMap(rhs);

      // Check if {rhs} is a HeapNumber.
      assembler->GotoIfNot(assembler->IsHeapNumberMap(rhs_map),
                           &check_rhsisoddball);

      // Convert {lhs} to a double and multiply it with the value of {rhs}.
      var_lhs_float64.Bind(assembler->SmiToFloat64(lhs));
      var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
      assembler->Goto(&do_fmul);
    }
  }

  assembler->Bind(&lhs_is_not_smi);
  {
    Node* lhs_map = assembler->LoadMap(lhs);

    // Check if {lhs} is a HeapNumber.
    assembler->GotoIfNot(assembler->IsHeapNumberMap(lhs_map),
                         &if_lhsisnotnumber);

    // Check if {rhs} is a Smi.
    Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
    assembler->Branch(assembler->TaggedIsSmi(rhs), &rhs_is_smi,
                      &rhs_is_not_smi);

    assembler->Bind(&rhs_is_smi);
    {
      // Convert {rhs} to a double and multiply it with the value of {lhs}.
      var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
      var_rhs_float64.Bind(assembler->SmiToFloat64(rhs));
      assembler->Goto(&do_fmul);
    }

    assembler->Bind(&rhs_is_not_smi);
    {
      Node* rhs_map = assembler->LoadMap(rhs);

      // Check if {rhs} is a HeapNumber.
      assembler->GotoIfNot(assembler->IsHeapNumberMap(rhs_map),
                           &check_rhsisoddball);

      // Both {lhs} and {rhs} are HeapNumbers. Load their values and
      // multiply them.
      var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
      var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
      assembler->Goto(&do_fmul);
    }
  }

  // Floating-point path: multiply the two doubles, box the result, and record
  // kNumber feedback.
  assembler->Bind(&do_fmul);
  {
    var_type_feedback.Bind(
        assembler->SmiConstant(BinaryOperationFeedback::kNumber));
    Node* value =
        assembler->Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
    Node* result = assembler->AllocateHeapNumberWithValue(value);
    var_result.Bind(result);
    assembler->Goto(&end);
  }

  assembler->Bind(&if_lhsisnotnumber);
  {
    // No checks on rhs are done yet. We just know lhs is not a number or Smi.
    // Check if lhs is an oddball.
    Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
    Node* lhs_is_oddball = assembler->Word32Equal(
        lhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
    assembler->GotoIfNot(lhs_is_oddball, &call_with_any_feedback);

    // {lhs} is an oddball; a Smi or HeapNumber {rhs} keeps the feedback at
    // kNumberOrOddball.
    assembler->GotoIf(assembler->TaggedIsSmi(rhs), &call_with_oddball_feedback);

    // Load the map of the {rhs}.
    Node* rhs_map = assembler->LoadMap(rhs);

    // Check if {rhs} is a HeapNumber.
    assembler->Branch(assembler->IsHeapNumberMap(rhs_map),
                      &call_with_oddball_feedback, &check_rhsisoddball);
  }

  assembler->Bind(&check_rhsisoddball);
  {
    // Check if rhs is an oddball. At this point we know lhs is either a
    // Smi or number or oddball and rhs is not a number or Smi.
    Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
    Node* rhs_is_oddball = assembler->Word32Equal(
        rhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
    assembler->Branch(rhs_is_oddball, &call_with_oddball_feedback,
                      &call_with_any_feedback);
  }

  assembler->Bind(&call_with_oddball_feedback);
  {
    var_type_feedback.Bind(
        assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
    assembler->Goto(&call_multiply_stub);
  }

  assembler->Bind(&call_with_any_feedback);
  {
    var_type_feedback.Bind(
        assembler->SmiConstant(BinaryOperationFeedback::kAny));
    assembler->Goto(&call_multiply_stub);
  }

  // Slow path: delegate to the generic Multiply stub; the feedback was chosen
  // by whichever label jumped here.
  assembler->Bind(&call_multiply_stub);
  {
    Callable callable = CodeFactory::Multiply(assembler->isolate());
    var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));
    assembler->Goto(&end);
  }

  assembler->Bind(&end);
  assembler->UpdateFeedback(var_type_feedback.value(), feedback_vector,
                            slot_id);
  return var_result.value();
}
1082
1083
// static
// Builds the code graph for the '/' operator with type-feedback collection:
// computes {dividend} / {divisor}, records BinaryOperationFeedback for
// {slot_id} in {feedback_vector}, and returns the tagged result. Smi/Smi
// division is attempted as an exact integer division and bails out to the
// floating point path whenever the result cannot be a Smi.
compiler::Node* DivideWithFeedbackStub::Generate(
    CodeStubAssembler* assembler, compiler::Node* dividend,
    compiler::Node* divisor, compiler::Node* slot_id,
    compiler::Node* feedback_vector, compiler::Node* context) {
  using compiler::Node;
  typedef CodeStubAssembler::Label Label;
  typedef CodeStubAssembler::Variable Variable;

  // Shared entry point for floating point division.
  Label do_fdiv(assembler), dividend_is_not_number(assembler, Label::kDeferred),
      check_divisor_for_oddball(assembler, Label::kDeferred),
      call_with_oddball_feedback(assembler), call_with_any_feedback(assembler),
      call_divide_stub(assembler), end(assembler);
  Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
      var_divisor_float64(assembler, MachineRepresentation::kFloat64),
      var_result(assembler, MachineRepresentation::kTagged),
      var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);

  Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
  assembler->Branch(assembler->TaggedIsSmi(dividend), &dividend_is_smi,
                    &dividend_is_not_smi);

  assembler->Bind(&dividend_is_smi);
  {
    Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
    assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
                      &divisor_is_not_smi);

    assembler->Bind(&divisor_is_smi);
    {
      // Every bailout below exists because the true JS result would not be
      // representable as a Smi (Infinity, NaN, -0, fraction, or overflow).
      Label bailout(assembler);

      // Do floating point division if {divisor} is zero.
      assembler->GotoIf(
          assembler->WordEqual(divisor, assembler->SmiConstant(0)), &bailout);

      // Do floating point division if {dividend} is zero and {divisor} is
      // negative, since 0 / negative is -0, which a Smi cannot represent.
      Label dividend_is_zero(assembler), dividend_is_not_zero(assembler);
      assembler->Branch(
          assembler->WordEqual(dividend, assembler->SmiConstant(0)),
          &dividend_is_zero, &dividend_is_not_zero);

      assembler->Bind(&dividend_is_zero);
      {
        assembler->GotoIf(
            assembler->SmiLessThan(divisor, assembler->SmiConstant(0)),
            &bailout);
        assembler->Goto(&dividend_is_not_zero);
      }
      assembler->Bind(&dividend_is_not_zero);

      Node* untagged_divisor = assembler->SmiToWord32(divisor);
      Node* untagged_dividend = assembler->SmiToWord32(dividend);

      // Do floating point division if {dividend} is kMinInt (or kMinInt - 1
      // if the Smi size is 31) and {divisor} is -1: that quotient overflows
      // the Smi range.
      Label divisor_is_minus_one(assembler),
          divisor_is_not_minus_one(assembler);
      assembler->Branch(assembler->Word32Equal(untagged_divisor,
                                               assembler->Int32Constant(-1)),
                        &divisor_is_minus_one, &divisor_is_not_minus_one);

      assembler->Bind(&divisor_is_minus_one);
      {
        assembler->GotoIf(
            assembler->Word32Equal(
                untagged_dividend,
                assembler->Int32Constant(kSmiValueSize == 32 ? kMinInt
                                                             : (kMinInt >> 1))),
            &bailout);
        assembler->Goto(&divisor_is_not_minus_one);
      }
      assembler->Bind(&divisor_is_not_minus_one);

      // Verify the division was exact: re-multiply the truncated quotient and
      // compare against the dividend.
      Node* untagged_result =
          assembler->Int32Div(untagged_dividend, untagged_divisor);
      Node* truncated = assembler->Int32Mul(untagged_result, untagged_divisor);
      // Do floating point division if the remainder is not 0.
      assembler->GotoIf(assembler->Word32NotEqual(untagged_dividend, truncated),
                        &bailout);
      var_type_feedback.Bind(
          assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall));
      var_result.Bind(assembler->SmiFromWord32(untagged_result));
      assembler->Goto(&end);

      // Bailout: convert {dividend} and {divisor} to double and do double
      // division.
      assembler->Bind(&bailout);
      {
        var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
        var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
        assembler->Goto(&do_fdiv);
      }
    }

    assembler->Bind(&divisor_is_not_smi);
    {
      Node* divisor_map = assembler->LoadMap(divisor);

      // Check if {divisor} is a HeapNumber.
      assembler->GotoIfNot(assembler->IsHeapNumberMap(divisor_map),
                           &check_divisor_for_oddball);

      // Convert {dividend} to a double and divide it with the value of
      // {divisor}.
      var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
      var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
      assembler->Goto(&do_fdiv);
    }

    assembler->Bind(&dividend_is_not_smi);
    {
      Node* dividend_map = assembler->LoadMap(dividend);

      // Check if {dividend} is a HeapNumber.
      assembler->GotoIfNot(assembler->IsHeapNumberMap(dividend_map),
                           &dividend_is_not_number);

      // Check if {divisor} is a Smi.
      Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
      assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
                        &divisor_is_not_smi);

      assembler->Bind(&divisor_is_smi);
      {
        // Convert {divisor} to a double and use it for a floating point
        // division.
        var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
        var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
        assembler->Goto(&do_fdiv);
      }

      assembler->Bind(&divisor_is_not_smi);
      {
        Node* divisor_map = assembler->LoadMap(divisor);

        // Check if {divisor} is a HeapNumber.
        assembler->GotoIfNot(assembler->IsHeapNumberMap(divisor_map),
                             &check_divisor_for_oddball);

        // Both {dividend} and {divisor} are HeapNumbers. Load their values
        // and divide them.
        var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
        var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
        assembler->Goto(&do_fdiv);
      }
    }
  }

  // Floating-point path: divide the doubles, box the result, record kNumber.
  assembler->Bind(&do_fdiv);
  {
    var_type_feedback.Bind(
        assembler->SmiConstant(BinaryOperationFeedback::kNumber));
    Node* value = assembler->Float64Div(var_dividend_float64.value(),
                                        var_divisor_float64.value());
    var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
    assembler->Goto(&end);
  }

  assembler->Bind(&dividend_is_not_number);
  {
    // We just know dividend is not a number or Smi. No checks on divisor yet.
    // Check if dividend is an oddball.
    Node* dividend_instance_type = assembler->LoadInstanceType(dividend);
    Node* dividend_is_oddball = assembler->Word32Equal(
        dividend_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
    assembler->GotoIfNot(dividend_is_oddball, &call_with_any_feedback);

    assembler->GotoIf(assembler->TaggedIsSmi(divisor),
                      &call_with_oddball_feedback);

    // Load the map of the {divisor}.
    Node* divisor_map = assembler->LoadMap(divisor);

    // Check if {divisor} is a HeapNumber.
    assembler->Branch(assembler->IsHeapNumberMap(divisor_map),
                      &call_with_oddball_feedback, &check_divisor_for_oddball);
  }

  assembler->Bind(&check_divisor_for_oddball);
  {
    // Check if divisor is an oddball. At this point we know dividend is either
    // a Smi or number or oddball and divisor is not a number or Smi.
    Node* divisor_instance_type = assembler->LoadInstanceType(divisor);
    Node* divisor_is_oddball = assembler->Word32Equal(
        divisor_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
    assembler->Branch(divisor_is_oddball, &call_with_oddball_feedback,
                      &call_with_any_feedback);
  }

  assembler->Bind(&call_with_oddball_feedback);
  {
    var_type_feedback.Bind(
        assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
    assembler->Goto(&call_divide_stub);
  }

  assembler->Bind(&call_with_any_feedback);
  {
    var_type_feedback.Bind(
        assembler->SmiConstant(BinaryOperationFeedback::kAny));
    assembler->Goto(&call_divide_stub);
  }

  // Slow path: delegate to the generic Divide stub with the feedback chosen
  // by whichever label jumped here.
  assembler->Bind(&call_divide_stub);
  {
    Callable callable = CodeFactory::Divide(assembler->isolate());
    var_result.Bind(assembler->CallStub(callable, context, dividend, divisor));
    assembler->Goto(&end);
  }

  assembler->Bind(&end);
  assembler->UpdateFeedback(var_type_feedback.value(), feedback_vector,
                            slot_id);
  return var_result.value();
}
1302
// static
// Builds the code graph for the '%' operator with type-feedback collection:
// computes {dividend} % {divisor}, records BinaryOperationFeedback for
// {slot_id} in {feedback_vector}, and returns the tagged result. Structure
// mirrors DivideWithFeedbackStub::Generate, with SmiMod/Float64Mod doing the
// actual arithmetic.
compiler::Node* ModulusWithFeedbackStub::Generate(
    CodeStubAssembler* assembler, compiler::Node* dividend,
    compiler::Node* divisor, compiler::Node* slot_id,
    compiler::Node* feedback_vector, compiler::Node* context) {
  using compiler::Node;
  typedef CodeStubAssembler::Label Label;
  typedef CodeStubAssembler::Variable Variable;

  // Shared entry point for floating point modulus.
  Label do_fmod(assembler), dividend_is_not_number(assembler, Label::kDeferred),
      check_divisor_for_oddball(assembler, Label::kDeferred),
      call_with_oddball_feedback(assembler), call_with_any_feedback(assembler),
      call_modulus_stub(assembler), end(assembler);
  Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
      var_divisor_float64(assembler, MachineRepresentation::kFloat64),
      var_result(assembler, MachineRepresentation::kTagged),
      var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);

  Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
  assembler->Branch(assembler->TaggedIsSmi(dividend), &dividend_is_smi,
                    &dividend_is_not_smi);

  assembler->Bind(&dividend_is_smi);
  {
    Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
    assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
                      &divisor_is_not_smi);

    assembler->Bind(&divisor_is_smi);
    {
      // Both operands are Smis. SmiMod handles the corner cases itself; the
      // result may still be a HeapNumber (e.g. -0), so select the feedback
      // based on the representation of the result.
      var_result.Bind(assembler->SmiMod(dividend, divisor));
      var_type_feedback.Bind(assembler->SelectSmiConstant(
          assembler->TaggedIsSmi(var_result.value()),
          BinaryOperationFeedback::kSignedSmall,
          BinaryOperationFeedback::kNumber));
      assembler->Goto(&end);
    }

    assembler->Bind(&divisor_is_not_smi);
    {
      Node* divisor_map = assembler->LoadMap(divisor);

      // Check if {divisor} is a HeapNumber.
      assembler->GotoIfNot(assembler->IsHeapNumberMap(divisor_map),
                           &check_divisor_for_oddball);

      // Convert {dividend} to a double and compute its modulus with the
      // value of {divisor}.
      var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
      var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
      assembler->Goto(&do_fmod);
    }
  }

  assembler->Bind(&dividend_is_not_smi);
  {
    Node* dividend_map = assembler->LoadMap(dividend);

    // Check if {dividend} is a HeapNumber.
    assembler->GotoIfNot(assembler->IsHeapNumberMap(dividend_map),
                         &dividend_is_not_number);

    // Check if {divisor} is a Smi.
    Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
    assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
                      &divisor_is_not_smi);

    assembler->Bind(&divisor_is_smi);
    {
      // Convert {divisor} to a double and use it for a floating point
      // modulus.
      var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
      var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
      assembler->Goto(&do_fmod);
    }

    assembler->Bind(&divisor_is_not_smi);
    {
      Node* divisor_map = assembler->LoadMap(divisor);

      // Check if {divisor} is a HeapNumber.
      assembler->GotoIfNot(assembler->IsHeapNumberMap(divisor_map),
                           &check_divisor_for_oddball);

      // Both {dividend} and {divisor} are HeapNumbers. Load their values
      // and compute the modulus.
      var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
      var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
      assembler->Goto(&do_fmod);
    }
  }

  // Floating-point path: Float64Mod, box the result, record kNumber.
  assembler->Bind(&do_fmod);
  {
    var_type_feedback.Bind(
        assembler->SmiConstant(BinaryOperationFeedback::kNumber));
    Node* value = assembler->Float64Mod(var_dividend_float64.value(),
                                        var_divisor_float64.value());
    var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
    assembler->Goto(&end);
  }

  assembler->Bind(&dividend_is_not_number);
  {
    // No checks on divisor yet. We just know dividend is not a number or Smi.
    // Check if dividend is an oddball.
    Node* dividend_instance_type = assembler->LoadInstanceType(dividend);
    Node* dividend_is_oddball = assembler->Word32Equal(
        dividend_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
    assembler->GotoIfNot(dividend_is_oddball, &call_with_any_feedback);

    assembler->GotoIf(assembler->TaggedIsSmi(divisor),
                      &call_with_oddball_feedback);

    // Load the map of the {divisor}.
    Node* divisor_map = assembler->LoadMap(divisor);

    // Check if {divisor} is a HeapNumber.
    assembler->Branch(assembler->IsHeapNumberMap(divisor_map),
                      &call_with_oddball_feedback, &check_divisor_for_oddball);
  }

  assembler->Bind(&check_divisor_for_oddball);
  {
    // Check if divisor is an oddball. At this point we know dividend is either
    // a Smi or number or oddball and divisor is not a number or Smi.
    Node* divisor_instance_type = assembler->LoadInstanceType(divisor);
    Node* divisor_is_oddball = assembler->Word32Equal(
        divisor_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
    assembler->Branch(divisor_is_oddball, &call_with_oddball_feedback,
                      &call_with_any_feedback);
  }

  assembler->Bind(&call_with_oddball_feedback);
  {
    var_type_feedback.Bind(
        assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
    assembler->Goto(&call_modulus_stub);
  }

  assembler->Bind(&call_with_any_feedback);
  {
    var_type_feedback.Bind(
        assembler->SmiConstant(BinaryOperationFeedback::kAny));
    assembler->Goto(&call_modulus_stub);
  }

  // Slow path: delegate to the generic Modulus stub with the feedback chosen
  // by whichever label jumped here.
  assembler->Bind(&call_modulus_stub);
  {
    Callable callable = CodeFactory::Modulus(assembler->isolate());
    var_result.Bind(assembler->CallStub(callable, context, dividend, divisor));
    assembler->Goto(&end);
  }

  assembler->Bind(&end);
  assembler->UpdateFeedback(var_type_feedback.value(), feedback_vector,
                            slot_id);
  return var_result.value();
}
1463
GenerateAssembly(compiler::CodeAssemblerState * state) const1464 void NumberToStringStub::GenerateAssembly(
1465 compiler::CodeAssemblerState* state) const {
1466 typedef compiler::Node Node;
1467 CodeStubAssembler assembler(state);
1468 Node* argument = assembler.Parameter(Descriptor::kArgument);
1469 Node* context = assembler.Parameter(Descriptor::kContext);
1470 assembler.Return(assembler.NumberToString(context, argument));
1471 }
1472
1473 // ES6 section 21.1.3.19 String.prototype.substring ( start, end )
Generate(CodeStubAssembler * assembler,compiler::Node * string,compiler::Node * from,compiler::Node * to,compiler::Node * context)1474 compiler::Node* SubStringStub::Generate(CodeStubAssembler* assembler,
1475 compiler::Node* string,
1476 compiler::Node* from,
1477 compiler::Node* to,
1478 compiler::Node* context) {
1479 return assembler->SubString(context, string, from, to);
1480 }
1481
GenerateAssembly(compiler::CodeAssemblerState * state) const1482 void SubStringStub::GenerateAssembly(
1483 compiler::CodeAssemblerState* state) const {
1484 CodeStubAssembler assembler(state);
1485 assembler.Return(Generate(&assembler,
1486 assembler.Parameter(Descriptor::kString),
1487 assembler.Parameter(Descriptor::kFrom),
1488 assembler.Parameter(Descriptor::kTo),
1489 assembler.Parameter(Descriptor::kContext)));
1490 }
1491
// Handler for stores to a global property backed by a PropertyCell. The stub
// is compiled against placeholder objects (global map / property cell) that
// are replaced later with the concrete ones; see the placeholder helpers
// below. Any situation the fast path cannot prove safe jumps to {miss},
// which tail-calls the StoreIC_Miss runtime function.
void StoreGlobalStub::GenerateAssembly(
    compiler::CodeAssemblerState* state) const {
  typedef CodeStubAssembler::Label Label;
  typedef compiler::Node Node;
  CodeStubAssembler assembler(state);

  assembler.Comment(
      "StoreGlobalStub: cell_type=%d, constant_type=%d, check_global=%d",
      cell_type(), PropertyCellType::kConstantType == cell_type()
                       ? static_cast<int>(constant_type())
                       : -1,
      check_global());

  Node* receiver = assembler.Parameter(Descriptor::kReceiver);
  Node* name = assembler.Parameter(Descriptor::kName);
  Node* value = assembler.Parameter(Descriptor::kValue);
  Node* slot = assembler.Parameter(Descriptor::kSlot);
  Node* vector = assembler.Parameter(Descriptor::kVector);
  Node* context = assembler.Parameter(Descriptor::kContext);

  Label miss(&assembler);

  if (check_global()) {
    // Check that the map of the global has not changed: use a placeholder map
    // that will be replaced later with the global object's map.
    // The receiver is the global proxy; the actual global object is reached
    // through the proxy map's prototype slot.
    Node* proxy_map = assembler.LoadMap(receiver);
    Node* global = assembler.LoadObjectField(proxy_map, Map::kPrototypeOffset);
    Node* map_cell = assembler.HeapConstant(isolate()->factory()->NewWeakCell(
        StoreGlobalStub::global_map_placeholder(isolate())));
    Node* expected_map = assembler.LoadWeakCellValueUnchecked(map_cell);
    Node* map = assembler.LoadMap(global);
    assembler.GotoIf(assembler.WordNotEqual(expected_map, map), &miss);
  }

  // Fetch the property cell through a weak cell; a cleared weak cell reads as
  // Smi 0, which sends us to the miss path.
  Node* weak_cell = assembler.HeapConstant(isolate()->factory()->NewWeakCell(
      StoreGlobalStub::property_cell_placeholder(isolate())));
  Node* cell = assembler.LoadWeakCellValue(weak_cell);
  assembler.GotoIf(assembler.TaggedIsSmi(cell), &miss);

  // Load the payload of the global parameter cell. A hole indicates that the
  // cell has been invalidated and that the store must be handled by the
  // runtime.
  Node* cell_contents =
      assembler.LoadObjectField(cell, PropertyCell::kValueOffset);

  PropertyCellType cell_type = this->cell_type();
  if (cell_type == PropertyCellType::kConstant ||
      cell_type == PropertyCellType::kUndefined) {
    // This is always valid for all states a cell can be in.
    // Only a store of the exact same value keeps the cell's constness; any
    // other value must be handled by the runtime.
    assembler.GotoIf(assembler.WordNotEqual(cell_contents, value), &miss);
  } else {
    assembler.GotoIf(assembler.IsTheHole(cell_contents), &miss);

    // When dealing with constant types, the type may be allowed to change, as
    // long as optimized code remains valid.
    bool value_is_smi = false;
    if (cell_type == PropertyCellType::kConstantType) {
      switch (constant_type()) {
        case PropertyCellConstantType::kSmi:
          assembler.GotoIfNot(assembler.TaggedIsSmi(value), &miss);
          value_is_smi = true;
          break;
        case PropertyCellConstantType::kStableMap: {
          // It is sufficient here to check that the value and cell contents
          // have identical maps, no matter if they are stable or not or if they
          // are the maps that were originally in the cell or not. If optimized
          // code will deopt when a cell has a unstable map and if it has a
          // dependency on a stable map, it will deopt if the map destabilizes.
          assembler.GotoIf(assembler.TaggedIsSmi(value), &miss);
          assembler.GotoIf(assembler.TaggedIsSmi(cell_contents), &miss);
          Node* expected_map = assembler.LoadMap(cell_contents);
          Node* map = assembler.LoadMap(value);
          assembler.GotoIf(assembler.WordNotEqual(expected_map, map), &miss);
          break;
        }
      }
    }
    // A Smi payload needs no write barrier; everything else does.
    if (value_is_smi) {
      assembler.StoreObjectFieldNoWriteBarrier(cell, PropertyCell::kValueOffset,
                                               value);
    } else {
      assembler.StoreObjectField(cell, PropertyCell::kValueOffset, value);
    }
  }

  assembler.Return(value);

  assembler.Bind(&miss);
  {
    assembler.Comment("Miss");
    assembler.TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
                              vector, receiver, name);
  }
}
1586
GenerateAssembly(compiler::CodeAssemblerState * state) const1587 void KeyedLoadSloppyArgumentsStub::GenerateAssembly(
1588 compiler::CodeAssemblerState* state) const {
1589 typedef CodeStubAssembler::Label Label;
1590 typedef compiler::Node Node;
1591 CodeStubAssembler assembler(state);
1592
1593 Node* receiver = assembler.Parameter(Descriptor::kReceiver);
1594 Node* key = assembler.Parameter(Descriptor::kName);
1595 Node* slot = assembler.Parameter(Descriptor::kSlot);
1596 Node* vector = assembler.Parameter(Descriptor::kVector);
1597 Node* context = assembler.Parameter(Descriptor::kContext);
1598
1599 Label miss(&assembler);
1600
1601 Node* result = assembler.LoadKeyedSloppyArguments(receiver, key, &miss);
1602 assembler.Return(result);
1603
1604 assembler.Bind(&miss);
1605 {
1606 assembler.Comment("Miss");
1607 assembler.TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver,
1608 key, slot, vector);
1609 }
1610 }
1611
GenerateAssembly(compiler::CodeAssemblerState * state) const1612 void KeyedStoreSloppyArgumentsStub::GenerateAssembly(
1613 compiler::CodeAssemblerState* state) const {
1614 typedef CodeStubAssembler::Label Label;
1615 typedef compiler::Node Node;
1616 CodeStubAssembler assembler(state);
1617
1618 Node* receiver = assembler.Parameter(Descriptor::kReceiver);
1619 Node* key = assembler.Parameter(Descriptor::kName);
1620 Node* value = assembler.Parameter(Descriptor::kValue);
1621 Node* slot = assembler.Parameter(Descriptor::kSlot);
1622 Node* vector = assembler.Parameter(Descriptor::kVector);
1623 Node* context = assembler.Parameter(Descriptor::kContext);
1624
1625 Label miss(&assembler);
1626
1627 assembler.StoreKeyedSloppyArguments(receiver, key, value, &miss);
1628 assembler.Return(value);
1629
1630 assembler.Bind(&miss);
1631 {
1632 assembler.Comment("Miss");
1633 assembler.TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value, slot,
1634 vector, receiver, key);
1635 }
1636 }
1637
GenerateAssembly(compiler::CodeAssemblerState * state) const1638 void LoadScriptContextFieldStub::GenerateAssembly(
1639 compiler::CodeAssemblerState* state) const {
1640 typedef compiler::Node Node;
1641 CodeStubAssembler assembler(state);
1642
1643 assembler.Comment("LoadScriptContextFieldStub: context_index=%d, slot=%d",
1644 context_index(), slot_index());
1645
1646 Node* context = assembler.Parameter(Descriptor::kContext);
1647
1648 Node* script_context = assembler.LoadScriptContext(context, context_index());
1649 Node* result = assembler.LoadFixedArrayElement(script_context, slot_index());
1650 assembler.Return(result);
1651 }
1652
GenerateAssembly(compiler::CodeAssemblerState * state) const1653 void StoreScriptContextFieldStub::GenerateAssembly(
1654 compiler::CodeAssemblerState* state) const {
1655 typedef compiler::Node Node;
1656 CodeStubAssembler assembler(state);
1657
1658 assembler.Comment("StoreScriptContextFieldStub: context_index=%d, slot=%d",
1659 context_index(), slot_index());
1660
1661 Node* value = assembler.Parameter(Descriptor::kValue);
1662 Node* context = assembler.Parameter(Descriptor::kContext);
1663
1664 Node* script_context = assembler.LoadScriptContext(context, context_index());
1665 assembler.StoreFixedArrayElement(
1666 script_context, assembler.IntPtrConstant(slot_index()), value);
1667 assembler.Return(value);
1668 }
1669
GenerateAssembly(compiler::CodeAssemblerState * state) const1670 void StoreInterceptorStub::GenerateAssembly(
1671 compiler::CodeAssemblerState* state) const {
1672 typedef compiler::Node Node;
1673 CodeStubAssembler assembler(state);
1674
1675 Node* receiver = assembler.Parameter(Descriptor::kReceiver);
1676 Node* name = assembler.Parameter(Descriptor::kName);
1677 Node* value = assembler.Parameter(Descriptor::kValue);
1678 Node* slot = assembler.Parameter(Descriptor::kSlot);
1679 Node* vector = assembler.Parameter(Descriptor::kVector);
1680 Node* context = assembler.Parameter(Descriptor::kContext);
1681 assembler.TailCallRuntime(Runtime::kStorePropertyWithInterceptor, context,
1682 value, slot, vector, receiver, name);
1683 }
1684
GenerateAssembly(compiler::CodeAssemblerState * state) const1685 void LoadIndexedInterceptorStub::GenerateAssembly(
1686 compiler::CodeAssemblerState* state) const {
1687 typedef compiler::Node Node;
1688 typedef CodeStubAssembler::Label Label;
1689 CodeStubAssembler assembler(state);
1690
1691 Node* receiver = assembler.Parameter(Descriptor::kReceiver);
1692 Node* key = assembler.Parameter(Descriptor::kName);
1693 Node* slot = assembler.Parameter(Descriptor::kSlot);
1694 Node* vector = assembler.Parameter(Descriptor::kVector);
1695 Node* context = assembler.Parameter(Descriptor::kContext);
1696
1697 Label if_keyispositivesmi(&assembler), if_keyisinvalid(&assembler);
1698 assembler.Branch(assembler.TaggedIsPositiveSmi(key), &if_keyispositivesmi,
1699 &if_keyisinvalid);
1700 assembler.Bind(&if_keyispositivesmi);
1701 assembler.TailCallRuntime(Runtime::kLoadElementWithInterceptor, context,
1702 receiver, key);
1703
1704 assembler.Bind(&if_keyisinvalid);
1705 assembler.TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, key,
1706 slot, vector);
1707 }
1708
PrintState(std::ostream & os) const1709 void CallICStub::PrintState(std::ostream& os) const { // NOLINT
1710 os << convert_mode() << ", " << tail_call_mode();
1711 }
1712
GenerateAssembly(compiler::CodeAssemblerState * state) const1713 void CallICStub::GenerateAssembly(compiler::CodeAssemblerState* state) const {
1714 typedef CodeStubAssembler::Label Label;
1715 typedef compiler::Node Node;
1716 CodeStubAssembler assembler(state);
1717
1718 Node* context = assembler.Parameter(Descriptor::kContext);
1719 Node* target = assembler.Parameter(Descriptor::kTarget);
1720 Node* argc = assembler.Parameter(Descriptor::kActualArgumentsCount);
1721 Node* slot = assembler.Parameter(Descriptor::kSlot);
1722 Node* vector = assembler.Parameter(Descriptor::kVector);
1723
1724 // TODO(bmeurer): The slot should actually be an IntPtr, but TurboFan's
1725 // SimplifiedLowering cannot deal with IntPtr machine type properly yet.
1726 slot = assembler.ChangeInt32ToIntPtr(slot);
1727
1728 // Static checks to assert it is safe to examine the type feedback element.
1729 // We don't know that we have a weak cell. We might have a private symbol
1730 // or an AllocationSite, but the memory is safe to examine.
1731 // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
1732 // FixedArray.
1733 // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
1734 // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
1735 // computed, meaning that it can't appear to be a pointer. If the low bit is
1736 // 0, then hash is computed, but the 0 bit prevents the field from appearing
1737 // to be a pointer.
1738 STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
1739 STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
1740 WeakCell::kValueOffset &&
1741 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
1742
1743 // Increment the call count.
1744 // TODO(bmeurer): Would it be beneficial to use Int32Add on 64-bit?
1745 assembler.Comment("increment call count");
1746 Node* call_count =
1747 assembler.LoadFixedArrayElement(vector, slot, 1 * kPointerSize);
1748 Node* new_count = assembler.SmiAdd(call_count, assembler.SmiConstant(1));
1749 // Count is Smi, so we don't need a write barrier.
1750 assembler.StoreFixedArrayElement(vector, slot, new_count, SKIP_WRITE_BARRIER,
1751 1 * kPointerSize);
1752
1753 Label call_function(&assembler), extra_checks(&assembler), call(&assembler);
1754
1755 // The checks. First, does function match the recorded monomorphic target?
1756 Node* feedback_element = assembler.LoadFixedArrayElement(vector, slot);
1757 Node* feedback_value = assembler.LoadWeakCellValueUnchecked(feedback_element);
1758 Node* is_monomorphic = assembler.WordEqual(target, feedback_value);
1759 assembler.GotoIfNot(is_monomorphic, &extra_checks);
1760
1761 // The compare above could have been a SMI/SMI comparison. Guard against
1762 // this convincing us that we have a monomorphic JSFunction.
1763 Node* is_smi = assembler.TaggedIsSmi(target);
1764 assembler.Branch(is_smi, &extra_checks, &call_function);
1765
1766 assembler.Bind(&call_function);
1767 {
1768 // Call using CallFunction builtin.
1769 Callable callable =
1770 CodeFactory::CallFunction(isolate(), convert_mode(), tail_call_mode());
1771 assembler.TailCallStub(callable, context, target, argc);
1772 }
1773
1774 assembler.Bind(&extra_checks);
1775 {
1776 Label check_initialized(&assembler), mark_megamorphic(&assembler),
1777 create_allocation_site(&assembler, Label::kDeferred),
1778 create_weak_cell(&assembler, Label::kDeferred);
1779
1780 assembler.Comment("check if megamorphic");
1781 // Check if it is a megamorphic target.
1782 Node* is_megamorphic = assembler.WordEqual(
1783 feedback_element,
1784 assembler.HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
1785 assembler.GotoIf(is_megamorphic, &call);
1786
1787 assembler.Comment("check if it is an allocation site");
1788 assembler.GotoIfNot(
1789 assembler.IsAllocationSiteMap(assembler.LoadMap(feedback_element)),
1790 &check_initialized);
1791
1792 // If it is not the Array() function, mark megamorphic.
1793 Node* context_slot = assembler.LoadContextElement(
1794 assembler.LoadNativeContext(context), Context::ARRAY_FUNCTION_INDEX);
1795 Node* is_array_function = assembler.WordEqual(context_slot, target);
1796 assembler.GotoIfNot(is_array_function, &mark_megamorphic);
1797
1798 // Call ArrayConstructorStub.
1799 Callable callable = CodeFactory::ArrayConstructor(isolate());
1800 assembler.TailCallStub(callable, context, target, target, argc,
1801 feedback_element);
1802
1803 assembler.Bind(&check_initialized);
1804 {
1805 assembler.Comment("check if uninitialized");
1806 // Check if it is uninitialized target first.
1807 Node* is_uninitialized = assembler.WordEqual(
1808 feedback_element,
1809 assembler.HeapConstant(
1810 FeedbackVector::UninitializedSentinel(isolate())));
1811 assembler.GotoIfNot(is_uninitialized, &mark_megamorphic);
1812
1813 assembler.Comment("handle unitinitialized");
1814 // If it is not a JSFunction mark it as megamorphic.
1815 Node* is_smi = assembler.TaggedIsSmi(target);
1816 assembler.GotoIf(is_smi, &mark_megamorphic);
1817
1818 // Check if function is an object of JSFunction type.
1819 Node* is_js_function = assembler.IsJSFunction(target);
1820 assembler.GotoIfNot(is_js_function, &mark_megamorphic);
1821
1822 // Check if it is the Array() function.
1823 Node* context_slot = assembler.LoadContextElement(
1824 assembler.LoadNativeContext(context), Context::ARRAY_FUNCTION_INDEX);
1825 Node* is_array_function = assembler.WordEqual(context_slot, target);
1826 assembler.GotoIf(is_array_function, &create_allocation_site);
1827
1828 // Check if the function belongs to the same native context.
1829 Node* native_context = assembler.LoadNativeContext(
1830 assembler.LoadObjectField(target, JSFunction::kContextOffset));
1831 Node* is_same_native_context = assembler.WordEqual(
1832 native_context, assembler.LoadNativeContext(context));
1833 assembler.Branch(is_same_native_context, &create_weak_cell,
1834 &mark_megamorphic);
1835 }
1836
1837 assembler.Bind(&create_weak_cell);
1838 {
1839 // Wrap the {target} in a WeakCell and remember it.
1840 assembler.Comment("create weak cell");
1841 assembler.CreateWeakCellInFeedbackVector(vector, assembler.SmiTag(slot),
1842 target);
1843
1844 // Call using CallFunction builtin.
1845 assembler.Goto(&call_function);
1846 }
1847
1848 assembler.Bind(&create_allocation_site);
1849 {
1850 // Create an AllocationSite for the {target}.
1851 assembler.Comment("create allocation site");
1852 assembler.CreateAllocationSiteInFeedbackVector(vector,
1853 assembler.SmiTag(slot));
1854
1855 // Call using CallFunction builtin. CallICs have a PREMONOMORPHIC state.
1856 // They start collecting feedback only when a call is executed the second
1857 // time. So, do not pass any feedback here.
1858 assembler.Goto(&call_function);
1859 }
1860
1861 assembler.Bind(&mark_megamorphic);
1862 {
1863 // Mark it as a megamorphic.
1864 // MegamorphicSentinel is created as a part of Heap::InitialObjects
1865 // and will not move during a GC. So it is safe to skip write barrier.
1866 DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
1867 assembler.StoreFixedArrayElement(
1868 vector, slot, assembler.HeapConstant(
1869 FeedbackVector::MegamorphicSentinel(isolate())),
1870 SKIP_WRITE_BARRIER);
1871 assembler.Goto(&call);
1872 }
1873 }
1874
1875 assembler.Bind(&call);
1876 {
1877 // Call using call builtin.
1878 assembler.Comment("call using Call builtin");
1879 Callable callable_call =
1880 CodeFactory::Call(isolate(), convert_mode(), tail_call_mode());
1881 assembler.TailCallStub(callable_call, context, target, argc);
1882 }
1883 }
1884
PrintState(std::ostream & os) const1885 void CallICTrampolineStub::PrintState(std::ostream& os) const { // NOLINT
1886 os << convert_mode() << ", " << tail_call_mode();
1887 }
1888
GenerateAssembly(compiler::CodeAssemblerState * state) const1889 void CallICTrampolineStub::GenerateAssembly(
1890 compiler::CodeAssemblerState* state) const {
1891 typedef compiler::Node Node;
1892 CodeStubAssembler assembler(state);
1893
1894 Node* context = assembler.Parameter(Descriptor::kContext);
1895 Node* target = assembler.Parameter(Descriptor::kTarget);
1896 Node* argc = assembler.Parameter(Descriptor::kActualArgumentsCount);
1897 Node* slot = assembler.Parameter(Descriptor::kSlot);
1898 Node* vector = assembler.LoadFeedbackVectorForStub();
1899
1900 Callable callable =
1901 CodeFactory::CallIC(isolate(), convert_mode(), tail_call_mode());
1902 assembler.TailCallStub(callable, context, target, argc, slot, vector);
1903 }
1904
FinishCode(Handle<Code> code)1905 void JSEntryStub::FinishCode(Handle<Code> code) {
1906 Handle<FixedArray> handler_table =
1907 code->GetIsolate()->factory()->NewFixedArray(1, TENURED);
1908 handler_table->set(0, Smi::FromInt(handler_offset_));
1909 code->set_handler_table(*handler_table);
1910 }
1911
// Deopts of this stub re-enter through the TransitionElementsKind runtime
// function (installed as the descriptor's deoptimization handler).
void TransitionElementsKindStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  descriptor->Initialize(
      Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry);
}
1917
1918
// Deopts of this stub re-enter through the AllocateHeapNumber runtime
// function (installed as the descriptor's deoptimization handler).
void AllocateHeapNumberStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  descriptor->Initialize(
      Runtime::FunctionForId(Runtime::kAllocateHeapNumber)->entry);
}
1924
1925
// Wires the ToBooleanIC miss path: the deoptimization handler address and
// the runtime miss handler both point at Runtime_ToBooleanIC_Miss.
void ToBooleanICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
  descriptor->Initialize(FUNCTION_ADDR(Runtime_ToBooleanIC_Miss));
  descriptor->SetMissHandler(Runtime::kToBooleanIC_Miss);
}
1930
1931
// Wires the BinaryOpIC miss path: the deoptimization handler address and
// the runtime miss handler both point at Runtime_BinaryOpIC_Miss.
void BinaryOpICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
  descriptor->Initialize(FUNCTION_ADDR(Runtime_BinaryOpIC_Miss));
  descriptor->SetMissHandler(Runtime::kBinaryOpIC_Miss);
}
1936
1937
// Deopts of this stub re-enter through the allocation-site-aware BinaryOpIC
// miss handler. Note: unlike BinaryOpICStub above, no SetMissHandler call —
// only the deoptimization handler is installed.
void BinaryOpWithAllocationSiteStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  descriptor->Initialize(
      FUNCTION_ADDR(Runtime_BinaryOpIC_MissWithAllocationSite));
}
1943
// Generates a generic property load: walks the receiver's prototype chain
// looking for {key} as an own property of each holder; returns the value if
// found, undefined if the chain is exhausted, and falls back to the
// %GetProperty runtime function for anything the fast path cannot handle
// (including all element/indexed accesses, which are not supported yet).
void GetPropertyStub::GenerateAssembly(
    compiler::CodeAssemblerState* state) const {
  typedef compiler::Node Node;
  typedef CodeStubAssembler::Label Label;
  typedef CodeStubAssembler::Variable Variable;
  CodeStubAssembler assembler(state);

  Label call_runtime(&assembler, Label::kDeferred),
      return_undefined(&assembler), end(&assembler);

  // NOTE(review): parameters are positional here (no Descriptor enum used);
  // assumed order is (object, key, context) — confirm against the stub's
  // call interface descriptor.
  Node* object = assembler.Parameter(0);
  Node* key = assembler.Parameter(1);
  Node* context = assembler.Parameter(2);
  Variable var_result(&assembler, MachineRepresentation::kTagged);

  // Invoked by TryPrototypeChainLookup for each holder on the chain when the
  // key is a unique name; a successful own-property load jumps to |end| with
  // the value bound in |var_result|.
  CodeStubAssembler::LookupInHolder lookup_property_in_holder =
      [&assembler, context, &var_result, &end](
          Node* receiver, Node* holder, Node* holder_map,
          Node* holder_instance_type, Node* unique_name, Label* next_holder,
          Label* if_bailout) {
        Variable var_value(&assembler, MachineRepresentation::kTagged);
        Label if_found(&assembler);
        assembler.TryGetOwnProperty(
            context, receiver, holder, holder_map, holder_instance_type,
            unique_name, &if_found, &var_value, next_holder, if_bailout);
        assembler.Bind(&if_found);
        {
          var_result.Bind(var_value.value());
          assembler.Goto(&end);
        }
      };

  // Element (indexed) lookups always bail out to the runtime.
  CodeStubAssembler::LookupInHolder lookup_element_in_holder =
      [&assembler](
          Node* receiver, Node* holder, Node* holder_map,
          Node* holder_instance_type, Node* index, Label* next_holder,
          Label* if_bailout) {
        // Not supported yet.
        assembler.Use(next_holder);
        assembler.Goto(if_bailout);
      };

  assembler.TryPrototypeChainLookup(object, key, lookup_property_in_holder,
                                    lookup_element_in_holder, &return_undefined,
                                    &call_runtime);

  // Prototype chain exhausted without finding the property.
  assembler.Bind(&return_undefined);
  {
    var_result.Bind(assembler.UndefinedConstant());
    assembler.Goto(&end);
  }

  // Slow path: defer to the full %GetProperty runtime implementation.
  assembler.Bind(&call_runtime);
  {
    var_result.Bind(
        assembler.CallRuntime(Runtime::kGetProperty, context, object, key));
    assembler.Goto(&end);
  }

  assembler.Bind(&end);
  assembler.Return(var_result.value());
}
2006
GenerateAheadOfTime(Isolate * isolate)2007 void CreateAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
2008 CreateAllocationSiteStub stub(isolate);
2009 stub.GetCode();
2010 }
2011
2012
GenerateAheadOfTime(Isolate * isolate)2013 void CreateWeakCellStub::GenerateAheadOfTime(Isolate* isolate) {
2014 CreateWeakCellStub stub(isolate);
2015 stub.GetCode();
2016 }
2017
GenerateAssembly(compiler::CodeAssemblerState * state) const2018 void StoreSlowElementStub::GenerateAssembly(
2019 compiler::CodeAssemblerState* state) const {
2020 typedef compiler::Node Node;
2021 CodeStubAssembler assembler(state);
2022
2023 Node* receiver = assembler.Parameter(Descriptor::kReceiver);
2024 Node* name = assembler.Parameter(Descriptor::kName);
2025 Node* value = assembler.Parameter(Descriptor::kValue);
2026 Node* slot = assembler.Parameter(Descriptor::kSlot);
2027 Node* vector = assembler.Parameter(Descriptor::kVector);
2028 Node* context = assembler.Parameter(Descriptor::kContext);
2029
2030 assembler.TailCallRuntime(Runtime::kKeyedStoreIC_Slow, context, value, slot,
2031 vector, receiver, name);
2032 }
2033
GenerateAssembly(compiler::CodeAssemblerState * state) const2034 void StoreFastElementStub::GenerateAssembly(
2035 compiler::CodeAssemblerState* state) const {
2036 typedef CodeStubAssembler::Label Label;
2037 typedef compiler::Node Node;
2038 CodeStubAssembler assembler(state);
2039
2040 assembler.Comment(
2041 "StoreFastElementStub: js_array=%d, elements_kind=%s, store_mode=%d",
2042 is_js_array(), ElementsKindToString(elements_kind()), store_mode());
2043
2044 Node* receiver = assembler.Parameter(Descriptor::kReceiver);
2045 Node* key = assembler.Parameter(Descriptor::kName);
2046 Node* value = assembler.Parameter(Descriptor::kValue);
2047 Node* slot = assembler.Parameter(Descriptor::kSlot);
2048 Node* vector = assembler.Parameter(Descriptor::kVector);
2049 Node* context = assembler.Parameter(Descriptor::kContext);
2050
2051 Label miss(&assembler);
2052
2053 assembler.EmitElementStore(receiver, key, value, is_js_array(),
2054 elements_kind(), store_mode(), &miss);
2055 assembler.Return(value);
2056
2057 assembler.Bind(&miss);
2058 {
2059 assembler.Comment("Miss");
2060 assembler.TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value, slot,
2061 vector, receiver, key);
2062 }
2063 }
2064
2065 // static
GenerateAheadOfTime(Isolate * isolate)2066 void StoreFastElementStub::GenerateAheadOfTime(Isolate* isolate) {
2067 if (FLAG_minimal) return;
2068 StoreFastElementStub(isolate, false, FAST_HOLEY_ELEMENTS, STANDARD_STORE)
2069 .GetCode();
2070 StoreFastElementStub(isolate, false, FAST_HOLEY_ELEMENTS,
2071 STORE_AND_GROW_NO_TRANSITION).GetCode();
2072 for (int i = FIRST_FAST_ELEMENTS_KIND; i <= LAST_FAST_ELEMENTS_KIND; i++) {
2073 ElementsKind kind = static_cast<ElementsKind>(i);
2074 StoreFastElementStub(isolate, true, kind, STANDARD_STORE).GetCode();
2075 StoreFastElementStub(isolate, true, kind, STORE_AND_GROW_NO_TRANSITION)
2076 .GetCode();
2077 }
2078 }
2079
// Records the ToBoolean hint for {object}'s type in this IC's sub-minor key
// and returns the ToBoolean value of {object}. The order of the checks
// matters: e.g. Smi must be ruled out before any HeapObject check.
bool ToBooleanICStub::UpdateStatus(Handle<Object> object) {
  ToBooleanHints old_hints = hints();
  ToBooleanHints new_hints = old_hints;
  bool to_boolean_value = false;  // Dummy initialization.
  if (object->IsUndefined(isolate())) {
    new_hints |= ToBooleanHint::kUndefined;
    to_boolean_value = false;
  } else if (object->IsBoolean()) {
    new_hints |= ToBooleanHint::kBoolean;
    to_boolean_value = object->IsTrue(isolate());
  } else if (object->IsNull(isolate())) {
    new_hints |= ToBooleanHint::kNull;
    to_boolean_value = false;
  } else if (object->IsSmi()) {
    // Any non-zero Smi is truthy.
    new_hints |= ToBooleanHint::kSmallInteger;
    to_boolean_value = Smi::cast(*object)->value() != 0;
  } else if (object->IsJSReceiver()) {
    // Receivers are truthy unless undetectable (e.g. document.all).
    new_hints |= ToBooleanHint::kReceiver;
    to_boolean_value = !object->IsUndetectable();
  } else if (object->IsString()) {
    DCHECK(!object->IsUndetectable());
    // Only the empty string is falsy.
    new_hints |= ToBooleanHint::kString;
    to_boolean_value = String::cast(*object)->length() != 0;
  } else if (object->IsSymbol()) {
    new_hints |= ToBooleanHint::kSymbol;
    to_boolean_value = true;
  } else if (object->IsHeapNumber()) {
    DCHECK(!object->IsUndetectable());
    // +0, -0 and NaN are falsy; everything else is truthy.
    new_hints |= ToBooleanHint::kHeapNumber;
    double value = HeapNumber::cast(*object)->value();
    to_boolean_value = value != 0 && !std::isnan(value);
  } else {
    // We should never see an internal object at runtime here!
    UNREACHABLE();
    to_boolean_value = true;
  }

  set_sub_minor_key(HintsBits::update(sub_minor_key(), new_hints));
  return to_boolean_value;
}
2120
// Prints the accumulated ToBoolean hints for --print-code style tracing.
void ToBooleanICStub::PrintState(std::ostream& os) const {  // NOLINT
  os << hints();
}
2124
GenerateAheadOfTime(Isolate * isolate)2125 void StubFailureTrampolineStub::GenerateAheadOfTime(Isolate* isolate) {
2126 StubFailureTrampolineStub stub1(isolate, NOT_JS_FUNCTION_STUB_MODE);
2127 StubFailureTrampolineStub stub2(isolate, JS_FUNCTION_STUB_MODE);
2128 stub1.GetCode();
2129 stub2.GetCode();
2130 }
2131
2132
EntryHookTrampoline(intptr_t function,intptr_t stack_pointer,Isolate * isolate)2133 void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
2134 intptr_t stack_pointer,
2135 Isolate* isolate) {
2136 FunctionEntryHook entry_hook = isolate->function_entry_hook();
2137 DCHECK(entry_hook != NULL);
2138 entry_hook(function, stack_pointer);
2139 }
2140
GenerateAssembly(compiler::CodeAssemblerState * state) const2141 void CreateAllocationSiteStub::GenerateAssembly(
2142 compiler::CodeAssemblerState* state) const {
2143 CodeStubAssembler assembler(state);
2144 assembler.Return(assembler.CreateAllocationSiteInFeedbackVector(
2145 assembler.Parameter(Descriptor::kVector),
2146 assembler.Parameter(Descriptor::kSlot)));
2147 }
2148
GenerateAssembly(compiler::CodeAssemblerState * state) const2149 void CreateWeakCellStub::GenerateAssembly(
2150 compiler::CodeAssemblerState* state) const {
2151 CodeStubAssembler assembler(state);
2152 assembler.Return(assembler.CreateWeakCellInFeedbackVector(
2153 assembler.Parameter(Descriptor::kVector),
2154 assembler.Parameter(Descriptor::kSlot),
2155 assembler.Parameter(Descriptor::kValue)));
2156 }
2157
GenerateAssembly(compiler::CodeAssemblerState * state) const2158 void ArrayNoArgumentConstructorStub::GenerateAssembly(
2159 compiler::CodeAssemblerState* state) const {
2160 typedef compiler::Node Node;
2161 CodeStubAssembler assembler(state);
2162 Node* native_context = assembler.LoadObjectField(
2163 assembler.Parameter(Descriptor::kFunction), JSFunction::kContextOffset);
2164 bool track_allocation_site =
2165 AllocationSite::GetMode(elements_kind()) == TRACK_ALLOCATION_SITE &&
2166 override_mode() != DISABLE_ALLOCATION_SITES;
2167 Node* allocation_site = track_allocation_site
2168 ? assembler.Parameter(Descriptor::kAllocationSite)
2169 : nullptr;
2170 Node* array_map =
2171 assembler.LoadJSArrayElementsMap(elements_kind(), native_context);
2172 Node* array = assembler.AllocateJSArray(
2173 elements_kind(), array_map,
2174 assembler.IntPtrConstant(JSArray::kPreallocatedArrayElements),
2175 assembler.SmiConstant(Smi::kZero), allocation_site);
2176 assembler.Return(array);
2177 }
2178
GenerateAssembly(compiler::CodeAssemblerState * state) const2179 void InternalArrayNoArgumentConstructorStub::GenerateAssembly(
2180 compiler::CodeAssemblerState* state) const {
2181 typedef compiler::Node Node;
2182 CodeStubAssembler assembler(state);
2183 Node* array_map =
2184 assembler.LoadObjectField(assembler.Parameter(Descriptor::kFunction),
2185 JSFunction::kPrototypeOrInitialMapOffset);
2186 Node* array = assembler.AllocateJSArray(
2187 elements_kind(), array_map,
2188 assembler.IntPtrConstant(JSArray::kPreallocatedArrayElements),
2189 assembler.SmiConstant(Smi::kZero));
2190 assembler.Return(array);
2191 }
2192
namespace {

// Shared body for the single-argument Array/InternalArray constructor stubs:
// validates the length argument and either allocates the JSArray inline or
// tail-calls %NewArray. Packed kinds only allow an inline allocation for
// length zero (a non-empty packed allocation aborts); holey/double kinds
// allow any Smi length below the max-regular-heap-object bound.
template <typename Descriptor>
void SingleArgumentConstructorCommon(CodeStubAssembler* assembler,
                                     ElementsKind elements_kind,
                                     compiler::Node* array_map,
                                     compiler::Node* allocation_site,
                                     AllocationSiteMode mode) {
  typedef compiler::Node Node;
  typedef CodeStubAssembler::Label Label;

  Label ok(assembler);
  Label smi_size(assembler);
  Label small_smi_size(assembler);
  Label call_runtime(assembler, Label::kDeferred);

  // Non-Smi lengths always go to the runtime.
  Node* size = assembler->Parameter(Descriptor::kArraySizeSmiParameter);
  assembler->Branch(assembler->TaggedIsSmi(size), &smi_size, &call_runtime);

  assembler->Bind(&smi_size);

  if (IsFastPackedElementsKind(elements_kind)) {
    // A packed array can only be stub-allocated when empty; a non-zero
    // length here is a bug, so abort rather than produce holes.
    Label abort(assembler, Label::kDeferred);
    assembler->Branch(
        assembler->SmiEqual(size, assembler->SmiConstant(Smi::kZero)),
        &small_smi_size, &abort);

    assembler->Bind(&abort);
    Node* reason =
        assembler->SmiConstant(Smi::FromInt(kAllocatingNonEmptyPackedArray));
    Node* context = assembler->Parameter(Descriptor::kContext);
    assembler->TailCallRuntime(Runtime::kAbort, context, reason);
  } else {
    // Largest length whose backing store (plus JSArray header and a
    // possible AllocationMemento) still fits in a regular heap object.
    int element_size =
        IsFastDoubleElementsKind(elements_kind) ? kDoubleSize : kPointerSize;
    int max_fast_elements =
        (kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - JSArray::kSize -
         AllocationMemento::kSize) /
        element_size;
    assembler->Branch(
        assembler->SmiAboveOrEqual(
            size, assembler->SmiConstant(Smi::FromInt(max_fast_elements))),
        &call_runtime, &small_smi_size);
  }

  // Fast path: allocate the array inline with length == capacity == size.
  assembler->Bind(&small_smi_size);
  {
    Node* array = assembler->AllocateJSArray(
        elements_kind, array_map, size, size,
        mode == DONT_TRACK_ALLOCATION_SITE ? nullptr : allocation_site,
        CodeStubAssembler::SMI_PARAMETERS);
    assembler->Return(array);
  }

  // Slow path: let %NewArray handle oversized or non-Smi lengths.
  assembler->Bind(&call_runtime);
  {
    Node* context = assembler->Parameter(Descriptor::kContext);
    Node* function = assembler->Parameter(Descriptor::kFunction);
    Node* array_size = assembler->Parameter(Descriptor::kArraySizeSmiParameter);
    // NOTE(review): this local shadows the function's |allocation_site|
    // parameter — the runtime call always uses the descriptor's site slot
    // regardless of |mode|; confirm that is intentional.
    Node* allocation_site = assembler->Parameter(Descriptor::kAllocationSite);
    // |function| is passed twice — presumably as both the constructor and
    // the new_target argument of %NewArray; verify against runtime-array.cc.
    assembler->TailCallRuntime(Runtime::kNewArray, context, function,
                               array_size, function, allocation_site);
  }
}
}  // namespace
2258
GenerateAssembly(compiler::CodeAssemblerState * state) const2259 void ArraySingleArgumentConstructorStub::GenerateAssembly(
2260 compiler::CodeAssemblerState* state) const {
2261 typedef compiler::Node Node;
2262 CodeStubAssembler assembler(state);
2263 Node* function = assembler.Parameter(Descriptor::kFunction);
2264 Node* native_context =
2265 assembler.LoadObjectField(function, JSFunction::kContextOffset);
2266 Node* array_map =
2267 assembler.LoadJSArrayElementsMap(elements_kind(), native_context);
2268 AllocationSiteMode mode = override_mode() == DISABLE_ALLOCATION_SITES
2269 ? DONT_TRACK_ALLOCATION_SITE
2270 : AllocationSite::GetMode(elements_kind());
2271 Node* allocation_site = assembler.Parameter(Descriptor::kAllocationSite);
2272 SingleArgumentConstructorCommon<Descriptor>(&assembler, elements_kind(),
2273 array_map, allocation_site, mode);
2274 }
2275
GenerateAssembly(compiler::CodeAssemblerState * state) const2276 void InternalArraySingleArgumentConstructorStub::GenerateAssembly(
2277 compiler::CodeAssemblerState* state) const {
2278 typedef compiler::Node Node;
2279 CodeStubAssembler assembler(state);
2280 Node* function = assembler.Parameter(Descriptor::kFunction);
2281 Node* array_map = assembler.LoadObjectField(
2282 function, JSFunction::kPrototypeOrInitialMapOffset);
2283 SingleArgumentConstructorCommon<Descriptor>(
2284 &assembler, elements_kind(), array_map, assembler.UndefinedConstant(),
2285 DONT_TRACK_ALLOCATION_SITE);
2286 }
2287
// Generates the elements-growing stub: tries to grow {object}'s backing
// store in place so that {key} fits, returning the new elements array;
// falls back to %GrowArrayElements when the inline growth bails out.
void GrowArrayElementsStub::GenerateAssembly(
    compiler::CodeAssemblerState* state) const {
  typedef compiler::Node Node;
  CodeStubAssembler assembler(state);
  CodeStubAssembler::Label runtime(&assembler,
                                   CodeStubAssembler::Label::kDeferred);

  Node* object = assembler.Parameter(Descriptor::kObject);
  Node* key = assembler.Parameter(Descriptor::kKey);
  Node* context = assembler.Parameter(Descriptor::kContext);
  ElementsKind kind = elements_kind();

  Node* elements = assembler.LoadElements(object);
  Node* new_elements =
      assembler.TryGrowElementsCapacity(object, elements, kind, key, &runtime);
  assembler.Return(new_elements);

  assembler.Bind(&runtime);
  // TODO(danno): Make this a tail call when the stub is only used from TurboFan
  // code. This mustn't be a tail call for now, since the caller site in lithium
  // creates a safepoint. This safepoint mustn't have a different number of
  // arguments on the stack in the case that a GC happens from the slow-case
  // allocation path (zero, since all the stubs inputs are in registers) and
  // when the call happens (it would be two in the tail call case due to the
  // tail call pushing the arguments on the stack for the runtime call). By not
  // tail-calling, the runtime call case also has zero arguments on the stack
  // for the stub frame.
  assembler.Return(
      assembler.CallRuntime(Runtime::kGrowArrayElements, context, object, key));
}
2318
// Platform-specific stub; the body lives in the per-architecture files.
ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
    : PlatformCodeStub(isolate) {}
2321
// Platform-specific stub; the body lives in the per-architecture files.
InternalArrayConstructorStub::InternalArrayConstructorStub(Isolate* isolate)
    : PlatformCodeStub(isolate) {}
2324
2325 } // namespace internal
2326 } // namespace v8
2327