1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/code-stubs.h"
6
7 #include <sstream>
8
9 #include "src/ast/ast.h"
10 #include "src/bootstrapper.h"
11 #include "src/code-factory.h"
12 #include "src/code-stub-assembler.h"
13 #include "src/factory.h"
14 #include "src/gdb-jit.h"
15 #include "src/ic/handler-compiler.h"
16 #include "src/ic/ic.h"
17 #include "src/macro-assembler.h"
18
19 namespace v8 {
20 namespace internal {
21
22
// Deopt handler installed for stubs that are never expected to deoptimize;
// reaching it indicates a code-generation bug, so abort hard.
RUNTIME_FUNCTION(UnexpectedStubMiss) {
  FATAL("Unexpected deopt of a stub");
  return Smi::kZero;  // Unreachable; satisfies the runtime-function signature.
}
27
CodeStubDescriptor(CodeStub * stub)28 CodeStubDescriptor::CodeStubDescriptor(CodeStub* stub)
29 : isolate_(stub->isolate()),
30 call_descriptor_(stub->GetCallInterfaceDescriptor()),
31 stack_parameter_count_(no_reg),
32 hint_stack_parameter_count_(-1),
33 function_mode_(NOT_JS_FUNCTION_STUB_MODE),
34 deoptimization_handler_(NULL),
35 miss_handler_(),
36 has_miss_handler_(false) {
37 stub->InitializeDescriptor(this);
38 }
39
CodeStubDescriptor(Isolate * isolate,uint32_t stub_key)40 CodeStubDescriptor::CodeStubDescriptor(Isolate* isolate, uint32_t stub_key)
41 : isolate_(isolate),
42 stack_parameter_count_(no_reg),
43 hint_stack_parameter_count_(-1),
44 function_mode_(NOT_JS_FUNCTION_STUB_MODE),
45 deoptimization_handler_(NULL),
46 miss_handler_(),
47 has_miss_handler_(false) {
48 CodeStub::InitializeDescriptor(isolate, stub_key, this);
49 }
50
51
Initialize(Address deoptimization_handler,int hint_stack_parameter_count,StubFunctionMode function_mode)52 void CodeStubDescriptor::Initialize(Address deoptimization_handler,
53 int hint_stack_parameter_count,
54 StubFunctionMode function_mode) {
55 deoptimization_handler_ = deoptimization_handler;
56 hint_stack_parameter_count_ = hint_stack_parameter_count;
57 function_mode_ = function_mode;
58 }
59
60
Initialize(Register stack_parameter_count,Address deoptimization_handler,int hint_stack_parameter_count,StubFunctionMode function_mode)61 void CodeStubDescriptor::Initialize(Register stack_parameter_count,
62 Address deoptimization_handler,
63 int hint_stack_parameter_count,
64 StubFunctionMode function_mode) {
65 Initialize(deoptimization_handler, hint_stack_parameter_count, function_mode);
66 stack_parameter_count_ = stack_parameter_count;
67 }
68
69
FindCodeInCache(Code ** code_out)70 bool CodeStub::FindCodeInCache(Code** code_out) {
71 UnseededNumberDictionary* stubs = isolate()->heap()->code_stubs();
72 int index = stubs->FindEntry(GetKey());
73 if (index != UnseededNumberDictionary::kNotFound) {
74 *code_out = Code::cast(stubs->ValueAt(index));
75 return true;
76 }
77 return false;
78 }
79
80
// Bookkeeping for freshly generated stub code: emits a profiler code-create
// event (named after the stub), bumps the total-stub-size counter, and in
// debug builds verifies the embedded objects in the code.
void CodeStub::RecordCodeGeneration(Handle<Code> code) {
  std::ostringstream os;
  os << *this;  // Stub name/state; used as the profiler event name.
  PROFILE(isolate(),
          CodeCreateEvent(CodeEventListener::STUB_TAG,
                          AbstractCode::cast(*code), os.str().c_str()));
  Counters* counters = isolate()->counters();
  counters->total_stubs_code_size()->Increment(code->instruction_size());
#ifdef DEBUG
  code->VerifyEmbeddedObjects();
#endif
}
93
94
GetCodeKind() const95 Code::Kind CodeStub::GetCodeKind() const {
96 return Code::STUB;
97 }
98
99
GetCodeFlags() const100 Code::Flags CodeStub::GetCodeFlags() const {
101 return Code::ComputeFlags(GetCodeKind(), GetExtraICState());
102 }
103
104
GetCodeCopy(const Code::FindAndReplacePattern & pattern)105 Handle<Code> CodeStub::GetCodeCopy(const Code::FindAndReplacePattern& pattern) {
106 Handle<Code> ic = GetCode();
107 ic = isolate()->factory()->CopyCode(ic);
108 ic->FindAndReplace(pattern);
109 RecordCodeGeneration(ic);
110 return ic;
111 }
112
113
GenerateCode()114 Handle<Code> PlatformCodeStub::GenerateCode() {
115 Factory* factory = isolate()->factory();
116
117 // Generate the new code.
118 MacroAssembler masm(isolate(), NULL, 256, CodeObjectRequired::kYes);
119
120 {
121 // Update the static counter each time a new code stub is generated.
122 isolate()->counters()->code_stubs()->Increment();
123
124 // Generate the code for the stub.
125 masm.set_generating_stub(true);
126 // TODO(yangguo): remove this once we can serialize IC stubs.
127 masm.enable_serializer();
128 NoCurrentFrameScope scope(&masm);
129 Generate(&masm);
130 }
131
132 // Create the code object.
133 CodeDesc desc;
134 masm.GetCode(&desc);
135 // Copy the generated code into a heap object.
136 Code::Flags flags = Code::ComputeFlags(GetCodeKind(), GetExtraICState());
137 Handle<Code> new_object = factory->NewCode(
138 desc, flags, masm.CodeObject(), NeedsImmovableCode());
139 return new_object;
140 }
141
142
// Returns the code object for this stub, generating and caching it on first
// use. Stubs with a special cache (UseSpecialCache()) bypass the isolate-wide
// stub dictionary, which is keyed by GetKey().
Handle<Code> CodeStub::GetCode() {
  Heap* heap = isolate()->heap();
  Code* code;
  // Fast path: already generated and cached.
  if (UseSpecialCache() ? FindCodeInSpecialCache(&code)
                        : FindCodeInCache(&code)) {
    DCHECK(GetCodeKind() == code->kind());
    return Handle<Code>(code);
  }

  {
    HandleScope scope(isolate());

    Handle<Code> new_object = GenerateCode();
    new_object->set_stub_key(GetKey());
    FinishCode(new_object);
    RecordCodeGeneration(new_object);

#ifdef ENABLE_DISASSEMBLER
    if (FLAG_print_code_stubs) {
      CodeTracer::Scope trace_scope(isolate()->GetCodeTracer());
      OFStream os(trace_scope.file());
      std::ostringstream name;
      name << *this;
      new_object->Disassemble(name.str().c_str(), os);
      os << "\n";
    }
#endif

    if (UseSpecialCache()) {
      AddToSpecialCache(new_object);
    } else {
      // Update the dictionary and the root in Heap.
      Handle<UnseededNumberDictionary> dict =
          UnseededNumberDictionary::AtNumberPut(
              Handle<UnseededNumberDictionary>(heap->code_stubs()),
              GetKey(),
              new_object);
      heap->SetRootCodeStubs(*dict);
    }
    // Extract the raw pointer before the inner HandleScope closes.
    code = *new_object;
  }

  Activate(code);
  DCHECK(!NeedsImmovableCode() ||
         heap->lo_space()->Contains(code) ||
         heap->code_space()->FirstPage()->Contains(code->address()));
  return Handle<Code>(code, isolate());
}
191
192
MajorName(CodeStub::Major major_key)193 const char* CodeStub::MajorName(CodeStub::Major major_key) {
194 switch (major_key) {
195 #define DEF_CASE(name) case name: return #name "Stub";
196 CODE_STUB_LIST(DEF_CASE)
197 #undef DEF_CASE
198 case NoCache:
199 return "<NoCache>Stub";
200 case NUMBER_OF_IDS:
201 UNREACHABLE();
202 return NULL;
203 }
204 return NULL;
205 }
206
207
PrintBaseName(std::ostream & os) const208 void CodeStub::PrintBaseName(std::ostream& os) const { // NOLINT
209 os << MajorName(MajorKey());
210 }
211
212
PrintName(std::ostream & os) const213 void CodeStub::PrintName(std::ostream& os) const { // NOLINT
214 PrintBaseName(os);
215 PrintState(os);
216 }
217
218
// Reconstructs the stub identified by |key| on the stack and invokes |call|
// on it with |value_out|. The switch cases are generated from
// CODE_STUB_LIST; NoCache stubs cannot be reconstructed from a key.
void CodeStub::Dispatch(Isolate* isolate, uint32_t key, void** value_out,
                        DispatchedCall call) {
  switch (MajorKeyFromKey(key)) {
#define DEF_CASE(NAME)         \
  case NAME: {                 \
    NAME##Stub stub(key, isolate); \
    CodeStub* pstub = &stub;   \
    call(pstub, value_out);    \
    break;                     \
  }
    CODE_STUB_LIST(DEF_CASE)
#undef DEF_CASE
    case NUMBER_OF_IDS:
    case NoCache:
      UNREACHABLE();
      break;
  }
}
237
238
InitializeDescriptorDispatchedCall(CodeStub * stub,void ** value_out)239 static void InitializeDescriptorDispatchedCall(CodeStub* stub,
240 void** value_out) {
241 CodeStubDescriptor* descriptor_out =
242 reinterpret_cast<CodeStubDescriptor*>(value_out);
243 stub->InitializeDescriptor(descriptor_out);
244 descriptor_out->set_call_descriptor(stub->GetCallInterfaceDescriptor());
245 }
246
247
InitializeDescriptor(Isolate * isolate,uint32_t key,CodeStubDescriptor * desc)248 void CodeStub::InitializeDescriptor(Isolate* isolate, uint32_t key,
249 CodeStubDescriptor* desc) {
250 void** value_out = reinterpret_cast<void**>(desc);
251 Dispatch(isolate, key, value_out, &InitializeDescriptorDispatchedCall);
252 }
253
254
GetCodeDispatchCall(CodeStub * stub,void ** value_out)255 void CodeStub::GetCodeDispatchCall(CodeStub* stub, void** value_out) {
256 Handle<Code>* code_out = reinterpret_cast<Handle<Code>*>(value_out);
257 // Code stubs with special cache cannot be recreated from stub key.
258 *code_out = stub->UseSpecialCache() ? Handle<Code>() : stub->GetCode();
259 }
260
261
GetCode(Isolate * isolate,uint32_t key)262 MaybeHandle<Code> CodeStub::GetCode(Isolate* isolate, uint32_t key) {
263 HandleScope scope(isolate);
264 Handle<Code> code;
265 void** value_out = reinterpret_cast<void**>(&code);
266 Dispatch(isolate, key, value_out, &GetCodeDispatchCall);
267 return scope.CloseAndEscape(code);
268 }
269
270
271 // static
GenerateAheadOfTime(Isolate * isolate)272 void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate) {
273 if (FLAG_minimal) return;
274 // Generate the uninitialized versions of the stub.
275 for (int op = Token::BIT_OR; op <= Token::MOD; ++op) {
276 BinaryOpICStub stub(isolate, static_cast<Token::Value>(op));
277 stub.GetCode();
278 }
279
280 // Generate special versions of the stub.
281 BinaryOpICState::GenerateAheadOfTime(isolate, &GenerateAheadOfTime);
282 }
283
284
PrintState(std::ostream & os) const285 void BinaryOpICStub::PrintState(std::ostream& os) const { // NOLINT
286 os << state();
287 }
288
289
290 // static
GenerateAheadOfTime(Isolate * isolate,const BinaryOpICState & state)291 void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate,
292 const BinaryOpICState& state) {
293 if (FLAG_minimal) return;
294 BinaryOpICStub stub(isolate, state);
295 stub.GetCode();
296 }
297
298
299 // static
GenerateAheadOfTime(Isolate * isolate)300 void BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
301 // Generate special versions of the stub.
302 BinaryOpICState::GenerateAheadOfTime(isolate, &GenerateAheadOfTime);
303 }
304
305
PrintState(std::ostream & os) const306 void BinaryOpICWithAllocationSiteStub::PrintState(
307 std::ostream& os) const { // NOLINT
308 os << state();
309 }
310
311
312 // static
GenerateAheadOfTime(Isolate * isolate,const BinaryOpICState & state)313 void BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(
314 Isolate* isolate, const BinaryOpICState& state) {
315 if (state.CouldCreateAllocationMementos()) {
316 BinaryOpICWithAllocationSiteStub stub(isolate, state);
317 stub.GetCode();
318 }
319 }
320
PrintBaseName(std::ostream & os) const321 void StringAddStub::PrintBaseName(std::ostream& os) const { // NOLINT
322 os << "StringAddStub_" << flags() << "_" << pretenure_flag();
323 }
324
GenerateAssembly(CodeStubAssembler * assembler) const325 void StringAddStub::GenerateAssembly(CodeStubAssembler* assembler) const {
326 typedef compiler::Node Node;
327 Node* left = assembler->Parameter(Descriptor::kLeft);
328 Node* right = assembler->Parameter(Descriptor::kRight);
329 Node* context = assembler->Parameter(Descriptor::kContext);
330
331 if ((flags() & STRING_ADD_CHECK_LEFT) != 0) {
332 DCHECK((flags() & STRING_ADD_CONVERT) != 0);
333 // TODO(danno): The ToString and JSReceiverToPrimitive below could be
334 // combined to avoid duplicate smi and instance type checks.
335 left = assembler->ToString(context,
336 assembler->JSReceiverToPrimitive(context, left));
337 }
338 if ((flags() & STRING_ADD_CHECK_RIGHT) != 0) {
339 DCHECK((flags() & STRING_ADD_CONVERT) != 0);
340 // TODO(danno): The ToString and JSReceiverToPrimitive below could be
341 // combined to avoid duplicate smi and instance type checks.
342 right = assembler->ToString(
343 context, assembler->JSReceiverToPrimitive(context, right));
344 }
345
346 if ((flags() & STRING_ADD_CHECK_BOTH) == 0) {
347 CodeStubAssembler::AllocationFlag flags =
348 (pretenure_flag() == TENURED) ? CodeStubAssembler::kPretenured
349 : CodeStubAssembler::kNone;
350 assembler->Return(assembler->StringAdd(context, left, right, flags));
351 } else {
352 Callable callable = CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE,
353 pretenure_flag());
354 assembler->TailCallStub(callable, context, left, right);
355 }
356 }
357
GetICState() const358 InlineCacheState CompareICStub::GetICState() const {
359 CompareICState::State state = Max(left(), right());
360 switch (state) {
361 case CompareICState::UNINITIALIZED:
362 return ::v8::internal::UNINITIALIZED;
363 case CompareICState::BOOLEAN:
364 case CompareICState::SMI:
365 case CompareICState::NUMBER:
366 case CompareICState::INTERNALIZED_STRING:
367 case CompareICState::STRING:
368 case CompareICState::UNIQUE_NAME:
369 case CompareICState::RECEIVER:
370 case CompareICState::KNOWN_RECEIVER:
371 return MONOMORPHIC;
372 case CompareICState::GENERIC:
373 return ::v8::internal::GENERIC;
374 }
375 UNREACHABLE();
376 return ::v8::internal::UNINITIALIZED;
377 }
378
379
GetCondition() const380 Condition CompareICStub::GetCondition() const {
381 return CompareIC::ComputeCondition(op());
382 }
383
384
// Dispatches code generation to the Generate* helper that matches the
// stub's current CompareICState.
void CompareICStub::Generate(MacroAssembler* masm) {
  switch (state()) {
    case CompareICState::UNINITIALIZED:
      GenerateMiss(masm);
      break;
    case CompareICState::BOOLEAN:
      GenerateBooleans(masm);
      break;
    case CompareICState::SMI:
      GenerateSmis(masm);
      break;
    case CompareICState::NUMBER:
      GenerateNumbers(masm);
      break;
    case CompareICState::STRING:
      GenerateStrings(masm);
      break;
    case CompareICState::INTERNALIZED_STRING:
      GenerateInternalizedStrings(masm);
      break;
    case CompareICState::UNIQUE_NAME:
      GenerateUniqueNames(masm);
      break;
    case CompareICState::RECEIVER:
      GenerateReceivers(masm);
      break;
    case CompareICState::KNOWN_RECEIVER:
      // KNOWN_RECEIVER requires a concrete map to specialize against.
      DCHECK(*known_map_ != NULL);
      GenerateKnownReceivers(masm);
      break;
    case CompareICState::GENERIC:
      GenerateGeneric(masm);
      break;
  }
}
420
GenerateCode()421 Handle<Code> TurboFanCodeStub::GenerateCode() {
422 const char* name = CodeStub::MajorName(MajorKey());
423 Zone zone(isolate()->allocator(), ZONE_NAME);
424 CallInterfaceDescriptor descriptor(GetCallInterfaceDescriptor());
425 CodeStubAssembler assembler(isolate(), &zone, descriptor, GetCodeFlags(),
426 name);
427 GenerateAssembly(&assembler);
428 return assembler.GenerateCode();
429 }
430
GenerateAssembly(CodeStubAssembler * assembler) const431 void LoadICTrampolineStub::GenerateAssembly(
432 CodeStubAssembler* assembler) const {
433 typedef compiler::Node Node;
434
435 Node* receiver = assembler->Parameter(Descriptor::kReceiver);
436 Node* name = assembler->Parameter(Descriptor::kName);
437 Node* slot = assembler->Parameter(Descriptor::kSlot);
438 Node* context = assembler->Parameter(Descriptor::kContext);
439 Node* vector = assembler->LoadTypeFeedbackVectorForStub();
440
441 CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
442 assembler->LoadIC(&p);
443 }
444
GenerateAssembly(CodeStubAssembler * assembler) const445 void LoadICStub::GenerateAssembly(CodeStubAssembler* assembler) const {
446 typedef compiler::Node Node;
447
448 Node* receiver = assembler->Parameter(Descriptor::kReceiver);
449 Node* name = assembler->Parameter(Descriptor::kName);
450 Node* slot = assembler->Parameter(Descriptor::kSlot);
451 Node* vector = assembler->Parameter(Descriptor::kVector);
452 Node* context = assembler->Parameter(Descriptor::kContext);
453
454 CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
455 assembler->LoadIC(&p);
456 }
457
GenerateAssembly(CodeStubAssembler * assembler) const458 void LoadICProtoArrayStub::GenerateAssembly(
459 CodeStubAssembler* assembler) const {
460 typedef compiler::Node Node;
461
462 Node* receiver = assembler->Parameter(Descriptor::kReceiver);
463 Node* name = assembler->Parameter(Descriptor::kName);
464 Node* slot = assembler->Parameter(Descriptor::kSlot);
465 Node* vector = assembler->Parameter(Descriptor::kVector);
466 Node* handler = assembler->Parameter(Descriptor::kHandler);
467 Node* context = assembler->Parameter(Descriptor::kContext);
468
469 CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
470 assembler->LoadICProtoArray(&p, handler);
471 }
472
GenerateAssembly(CodeStubAssembler * assembler) const473 void LoadGlobalICTrampolineStub::GenerateAssembly(
474 CodeStubAssembler* assembler) const {
475 typedef compiler::Node Node;
476
477 Node* slot = assembler->Parameter(Descriptor::kSlot);
478 Node* context = assembler->Parameter(Descriptor::kContext);
479 Node* vector = assembler->LoadTypeFeedbackVectorForStub();
480
481 CodeStubAssembler::LoadICParameters p(context, nullptr, nullptr, slot,
482 vector);
483 assembler->LoadGlobalIC(&p);
484 }
485
GenerateAssembly(CodeStubAssembler * assembler) const486 void LoadGlobalICStub::GenerateAssembly(CodeStubAssembler* assembler) const {
487 typedef compiler::Node Node;
488
489 Node* slot = assembler->Parameter(Descriptor::kSlot);
490 Node* vector = assembler->Parameter(Descriptor::kVector);
491 Node* context = assembler->Parameter(Descriptor::kContext);
492
493 CodeStubAssembler::LoadICParameters p(context, nullptr, nullptr, slot,
494 vector);
495 assembler->LoadGlobalIC(&p);
496 }
497
GenerateAssembly(CodeStubAssembler * assembler) const498 void KeyedLoadICTrampolineTFStub::GenerateAssembly(
499 CodeStubAssembler* assembler) const {
500 typedef compiler::Node Node;
501
502 Node* receiver = assembler->Parameter(Descriptor::kReceiver);
503 Node* name = assembler->Parameter(Descriptor::kName);
504 Node* slot = assembler->Parameter(Descriptor::kSlot);
505 Node* context = assembler->Parameter(Descriptor::kContext);
506 Node* vector = assembler->LoadTypeFeedbackVectorForStub();
507
508 CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
509 assembler->KeyedLoadIC(&p);
510 }
511
GenerateAssembly(CodeStubAssembler * assembler) const512 void KeyedLoadICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
513 typedef compiler::Node Node;
514
515 Node* receiver = assembler->Parameter(Descriptor::kReceiver);
516 Node* name = assembler->Parameter(Descriptor::kName);
517 Node* slot = assembler->Parameter(Descriptor::kSlot);
518 Node* vector = assembler->Parameter(Descriptor::kVector);
519 Node* context = assembler->Parameter(Descriptor::kContext);
520
521 CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
522 assembler->KeyedLoadIC(&p);
523 }
524
GenerateAssembly(CodeStubAssembler * assembler) const525 void StoreICTrampolineStub::GenerateAssembly(
526 CodeStubAssembler* assembler) const {
527 typedef compiler::Node Node;
528
529 Node* receiver = assembler->Parameter(Descriptor::kReceiver);
530 Node* name = assembler->Parameter(Descriptor::kName);
531 Node* value = assembler->Parameter(Descriptor::kValue);
532 Node* slot = assembler->Parameter(Descriptor::kSlot);
533 Node* context = assembler->Parameter(Descriptor::kContext);
534 Node* vector = assembler->LoadTypeFeedbackVectorForStub();
535
536 CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
537 vector);
538 assembler->StoreIC(&p);
539 }
540
GenerateAssembly(CodeStubAssembler * assembler) const541 void StoreICStub::GenerateAssembly(CodeStubAssembler* assembler) const {
542 typedef compiler::Node Node;
543
544 Node* receiver = assembler->Parameter(Descriptor::kReceiver);
545 Node* name = assembler->Parameter(Descriptor::kName);
546 Node* value = assembler->Parameter(Descriptor::kValue);
547 Node* slot = assembler->Parameter(Descriptor::kSlot);
548 Node* vector = assembler->Parameter(Descriptor::kVector);
549 Node* context = assembler->Parameter(Descriptor::kContext);
550
551 CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
552 vector);
553 assembler->StoreIC(&p);
554 }
555
GenerateAssembly(CodeStubAssembler * assembler) const556 void KeyedStoreICTrampolineTFStub::GenerateAssembly(
557 CodeStubAssembler* assembler) const {
558 typedef compiler::Node Node;
559
560 Node* receiver = assembler->Parameter(Descriptor::kReceiver);
561 Node* name = assembler->Parameter(Descriptor::kName);
562 Node* value = assembler->Parameter(Descriptor::kValue);
563 Node* slot = assembler->Parameter(Descriptor::kSlot);
564 Node* context = assembler->Parameter(Descriptor::kContext);
565 Node* vector = assembler->LoadTypeFeedbackVectorForStub();
566
567 CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
568 vector);
569 assembler->KeyedStoreIC(&p, StoreICState::GetLanguageMode(GetExtraICState()));
570 }
571
GenerateAssembly(CodeStubAssembler * assembler) const572 void KeyedStoreICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
573 typedef compiler::Node Node;
574
575 Node* receiver = assembler->Parameter(Descriptor::kReceiver);
576 Node* name = assembler->Parameter(Descriptor::kName);
577 Node* value = assembler->Parameter(Descriptor::kValue);
578 Node* slot = assembler->Parameter(Descriptor::kSlot);
579 Node* vector = assembler->Parameter(Descriptor::kVector);
580 Node* context = assembler->Parameter(Descriptor::kContext);
581
582 CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
583 vector);
584 assembler->KeyedStoreIC(&p, StoreICState::GetLanguageMode(GetExtraICState()));
585 }
586
GenerateAssembly(CodeStubAssembler * assembler) const587 void StoreMapStub::GenerateAssembly(CodeStubAssembler* assembler) const {
588 typedef compiler::Node Node;
589
590 Node* receiver = assembler->Parameter(Descriptor::kReceiver);
591 Node* map = assembler->Parameter(Descriptor::kMap);
592 Node* value = assembler->Parameter(Descriptor::kValue);
593
594 assembler->StoreObjectField(receiver, JSObject::kMapOffset, map);
595 assembler->Return(value);
596 }
597
// Stores a value into a property slot (optionally after extending the
// properties backing store) and then transitions the receiver to |map|.
// Falls back to the StoreIC_Miss runtime entry when the value cannot be
// prepared for the field representation.
void StoreTransitionStub::GenerateAssembly(CodeStubAssembler* assembler) const {
  typedef CodeStubAssembler::Label Label;
  typedef compiler::Node Node;

  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
  Node* name = assembler->Parameter(Descriptor::kName);
  Node* offset =
      assembler->SmiUntag(assembler->Parameter(Descriptor::kFieldOffset));
  Node* value = assembler->Parameter(Descriptor::kValue);
  Node* map = assembler->Parameter(Descriptor::kMap);
  Node* slot = assembler->Parameter(Descriptor::kSlot);
  Node* vector = assembler->Parameter(Descriptor::kVector);
  Node* context = assembler->Parameter(Descriptor::kContext);

  Label miss(assembler);

  Representation representation = this->representation();
  assembler->Comment("StoreTransitionStub: is_inobject: %d: representation: %s",
                     is_inobject(), representation.Mnemonic());

  // May jump to |miss| if |value| does not fit |representation|.
  Node* prepared_value =
      assembler->PrepareValueForWrite(value, representation, &miss);

  if (store_mode() == StoreTransitionStub::ExtendStorageAndStoreMapAndValue) {
    assembler->Comment("Extend storage");
    assembler->ExtendPropertiesBackingStore(receiver);
  } else {
    DCHECK(store_mode() == StoreTransitionStub::StoreMapAndValue);
  }

  // Store the new value into the "extended" object.
  assembler->Comment("Store value");
  assembler->StoreNamedField(receiver, offset, is_inobject(), representation,
                             prepared_value, true);

  // And finally update the map.
  assembler->Comment("Store map");
  assembler->StoreObjectField(receiver, JSObject::kMapOffset, map);
  assembler->Return(value);

  // Only store to tagged field never bails out, so the miss label is only
  // bound (and reachable) for non-tagged representations.
  if (!representation.IsTagged()) {
    assembler->Bind(&miss);
    {
      assembler->Comment("Miss");
      assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
                                 vector, receiver, name);
    }
  }
}
648
// Transitions the receiver's elements kind from from_kind() to to_kind()
// and then stores |value| at |key|; any failure (or active transition
// tracing) falls through to the runtime miss handler.
void ElementsTransitionAndStoreStub::GenerateAssembly(
    CodeStubAssembler* assembler) const {
  typedef CodeStubAssembler::Label Label;
  typedef compiler::Node Node;

  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
  Node* key = assembler->Parameter(Descriptor::kName);
  Node* value = assembler->Parameter(Descriptor::kValue);
  Node* map = assembler->Parameter(Descriptor::kMap);
  Node* slot = assembler->Parameter(Descriptor::kSlot);
  Node* vector = assembler->Parameter(Descriptor::kVector);
  Node* context = assembler->Parameter(Descriptor::kContext);

  assembler->Comment(
      "ElementsTransitionAndStoreStub: from_kind=%s, to_kind=%s,"
      " is_jsarray=%d, store_mode=%d",
      ElementsKindToString(from_kind()), ElementsKindToString(to_kind()),
      is_jsarray(), store_mode());

  Label miss(assembler);

  if (FLAG_trace_elements_transitions) {
    // Tracing elements transitions is the job of the runtime.
    assembler->Goto(&miss);
  } else {
    assembler->TransitionElementsKind(receiver, map, from_kind(), to_kind(),
                                      is_jsarray(), &miss);
    assembler->EmitElementStore(receiver, key, value, is_jsarray(), to_kind(),
                                store_mode(), &miss);
    assembler->Return(value);
  }

  assembler->Bind(&miss);
  {
    assembler->Comment("Miss");
    assembler->TailCallRuntime(Runtime::kElementsTransitionAndStoreIC_Miss,
                               context, receiver, key, value, map, slot,
                               vector);
  }
}
689
GenerateAssembly(CodeStubAssembler * assembler) const690 void AllocateHeapNumberStub::GenerateAssembly(
691 CodeStubAssembler* assembler) const {
692 typedef compiler::Node Node;
693
694 Node* result = assembler->AllocateHeapNumber();
695 assembler->Return(result);
696 }
697
// For each SIMD128 type, defines Allocate<Type>Stub::GenerateAssembly:
// allocates a Simd128Value-sized object and overwrites its map with the
// type-specific map. NOTE(review): the map store uses no write barrier —
// presumably safe because these maps are immortal roots; confirm.
#define SIMD128_GEN_ASM(TYPE, Type, type, lane_count, lane_type) \
  void Allocate##Type##Stub::GenerateAssembly(CodeStubAssembler* assembler) \
      const { \
    compiler::Node* result = \
        assembler->Allocate(Simd128Value::kSize, CodeStubAssembler::kNone); \
    compiler::Node* map = assembler->LoadMap(result); \
    assembler->StoreNoWriteBarrier( \
        MachineRepresentation::kTagged, map, \
        assembler->HeapConstant(isolate()->factory()->type##_map())); \
    assembler->Return(result); \
  }
SIMD128_TYPES(SIMD128_GEN_ASM)
#undef SIMD128_GEN_ASM
711
712 void StringLengthStub::GenerateAssembly(CodeStubAssembler* assembler) const {
713 compiler::Node* value = assembler->Parameter(0);
714 compiler::Node* string = assembler->LoadJSValueValue(value);
715 compiler::Node* result = assembler->LoadStringLength(string);
716 assembler->Return(result);
717 }
718
719 // static
// Speculative addition with type-feedback collection. Tries Smi and float64
// paths inline, handles string concatenation via the StringAdd builtin, and
// falls back to the generic Add stub otherwise. The observed operand kinds
// are folded into |var_type_feedback| and recorded into
// |type_feedback_vector| at |slot_id| before the result is returned.
compiler::Node* AddWithFeedbackStub::Generate(
    CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* rhs,
    compiler::Node* slot_id, compiler::Node* type_feedback_vector,
    compiler::Node* context) {
  typedef CodeStubAssembler::Label Label;
  typedef compiler::Node Node;
  typedef CodeStubAssembler::Variable Variable;

  // Shared entry for floating point addition.
  Label do_fadd(assembler), if_lhsisnotnumber(assembler, Label::kDeferred),
      check_rhsisoddball(assembler, Label::kDeferred),
      call_with_oddball_feedback(assembler), call_with_any_feedback(assembler),
      call_add_stub(assembler), end(assembler);
  Variable var_fadd_lhs(assembler, MachineRepresentation::kFloat64),
      var_fadd_rhs(assembler, MachineRepresentation::kFloat64),
      var_type_feedback(assembler, MachineRepresentation::kWord32),
      var_result(assembler, MachineRepresentation::kTagged);

  // Check if the {lhs} is a Smi or a HeapObject.
  Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
  assembler->Branch(assembler->TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);

  assembler->Bind(&if_lhsissmi);
  {
    // Check if the {rhs} is also a Smi.
    Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
    assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
                      &if_rhsisnotsmi);

    assembler->Bind(&if_rhsissmi);
    {
      // Try fast Smi addition first.
      Node* pair =
          assembler->IntPtrAddWithOverflow(assembler->BitcastTaggedToWord(lhs),
                                           assembler->BitcastTaggedToWord(rhs));
      Node* overflow = assembler->Projection(1, pair);

      // Check if the Smi addition overflowed.
      Label if_overflow(assembler), if_notoverflow(assembler);
      assembler->Branch(overflow, &if_overflow, &if_notoverflow);

      assembler->Bind(&if_overflow);
      {
        // Overflowed Smi + Smi: redo the addition as float64.
        var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
        var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
        assembler->Goto(&do_fadd);
      }

      assembler->Bind(&if_notoverflow);
      {
        var_type_feedback.Bind(
            assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall));
        var_result.Bind(assembler->BitcastWordToTaggedSigned(
            assembler->Projection(0, pair)));
        assembler->Goto(&end);
      }
    }

    assembler->Bind(&if_rhsisnotsmi);
    {
      // Load the map of {rhs}.
      Node* rhs_map = assembler->LoadMap(rhs);

      // Check if the {rhs} is a HeapNumber.
      assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
                            &check_rhsisoddball);

      var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
      var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
      assembler->Goto(&do_fadd);
    }
  }

  assembler->Bind(&if_lhsisnotsmi);
  {
    // Load the map of {lhs}.
    Node* lhs_map = assembler->LoadMap(lhs);

    // Check if {lhs} is a HeapNumber.
    assembler->GotoUnless(assembler->IsHeapNumberMap(lhs_map),
                          &if_lhsisnotnumber);

    // Check if the {rhs} is Smi.
    Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
    assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
                      &if_rhsisnotsmi);

    assembler->Bind(&if_rhsissmi);
    {
      var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
      var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
      assembler->Goto(&do_fadd);
    }

    assembler->Bind(&if_rhsisnotsmi);
    {
      // Load the map of {rhs}.
      Node* rhs_map = assembler->LoadMap(rhs);

      // Check if the {rhs} is a HeapNumber.
      assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
                            &check_rhsisoddball);

      var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
      var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
      assembler->Goto(&do_fadd);
    }
  }

  assembler->Bind(&do_fadd);
  {
    // Shared float64 path: add and box the result in a new HeapNumber.
    var_type_feedback.Bind(
        assembler->Int32Constant(BinaryOperationFeedback::kNumber));
    Node* value =
        assembler->Float64Add(var_fadd_lhs.value(), var_fadd_rhs.value());
    Node* result = assembler->AllocateHeapNumberWithValue(value);
    var_result.Bind(result);
    assembler->Goto(&end);
  }

  assembler->Bind(&if_lhsisnotnumber);
  {
    // No checks on rhs are done yet. We just know lhs is not a number or Smi.
    Label if_lhsisoddball(assembler), if_lhsisnotoddball(assembler);
    Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
    Node* lhs_is_oddball = assembler->Word32Equal(
        lhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
    assembler->Branch(lhs_is_oddball, &if_lhsisoddball, &if_lhsisnotoddball);

    assembler->Bind(&if_lhsisoddball);
    {
      assembler->GotoIf(assembler->TaggedIsSmi(rhs),
                        &call_with_oddball_feedback);

      // Load the map of the {rhs}.
      Node* rhs_map = assembler->LoadMap(rhs);

      // Check if {rhs} is a HeapNumber.
      assembler->Branch(assembler->IsHeapNumberMap(rhs_map),
                        &call_with_oddball_feedback, &check_rhsisoddball);
    }

    assembler->Bind(&if_lhsisnotoddball);
    {
      // Exit unless {lhs} is a string
      assembler->GotoUnless(assembler->IsStringInstanceType(lhs_instance_type),
                            &call_with_any_feedback);

      // Check if the {rhs} is a smi, and exit the string check early if it is.
      assembler->GotoIf(assembler->TaggedIsSmi(rhs), &call_with_any_feedback);

      Node* rhs_instance_type = assembler->LoadInstanceType(rhs);

      // Exit unless {rhs} is a string. Since {lhs} is a string we no longer
      // need an Oddball check.
      assembler->GotoUnless(assembler->IsStringInstanceType(rhs_instance_type),
                            &call_with_any_feedback);

      // String + String: concatenate via the check-free StringAdd builtin.
      var_type_feedback.Bind(
          assembler->Int32Constant(BinaryOperationFeedback::kString));
      Callable callable = CodeFactory::StringAdd(
          assembler->isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
      var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));

      assembler->Goto(&end);
    }
  }

  assembler->Bind(&check_rhsisoddball);
  {
    // Check if rhs is an oddball. At this point we know lhs is either a
    // Smi or number or oddball and rhs is not a number or Smi.
    Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
    Node* rhs_is_oddball = assembler->Word32Equal(
        rhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
    assembler->Branch(rhs_is_oddball, &call_with_oddball_feedback,
                      &call_with_any_feedback);
  }

  assembler->Bind(&call_with_oddball_feedback);
  {
    var_type_feedback.Bind(
        assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
    assembler->Goto(&call_add_stub);
  }

  assembler->Bind(&call_with_any_feedback);
  {
    var_type_feedback.Bind(
        assembler->Int32Constant(BinaryOperationFeedback::kAny));
    assembler->Goto(&call_add_stub);
  }

  assembler->Bind(&call_add_stub);
  {
    // Generic fallback: call the full Add stub.
    Callable callable = CodeFactory::Add(assembler->isolate());
    var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));
    assembler->Goto(&end);
  }

  assembler->Bind(&end);
  // Record the collected feedback before returning the result.
  assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
                            slot_id);
  return var_result.value();
}
925
926 // static
Generate(CodeStubAssembler * assembler,compiler::Node * lhs,compiler::Node * rhs,compiler::Node * slot_id,compiler::Node * type_feedback_vector,compiler::Node * context)927 compiler::Node* SubtractWithFeedbackStub::Generate(
928 CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* rhs,
929 compiler::Node* slot_id, compiler::Node* type_feedback_vector,
930 compiler::Node* context) {
931 typedef CodeStubAssembler::Label Label;
932 typedef compiler::Node Node;
933 typedef CodeStubAssembler::Variable Variable;
934
935 // Shared entry for floating point subtraction.
936 Label do_fsub(assembler), end(assembler), call_subtract_stub(assembler),
937 if_lhsisnotnumber(assembler), check_rhsisoddball(assembler),
938 call_with_any_feedback(assembler);
939 Variable var_fsub_lhs(assembler, MachineRepresentation::kFloat64),
940 var_fsub_rhs(assembler, MachineRepresentation::kFloat64),
941 var_type_feedback(assembler, MachineRepresentation::kWord32),
942 var_result(assembler, MachineRepresentation::kTagged);
943
944 // Check if the {lhs} is a Smi or a HeapObject.
945 Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
946 assembler->Branch(assembler->TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
947
948 assembler->Bind(&if_lhsissmi);
949 {
950 // Check if the {rhs} is also a Smi.
951 Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
952 assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
953 &if_rhsisnotsmi);
954
955 assembler->Bind(&if_rhsissmi);
956 {
957 // Try a fast Smi subtraction first.
958 Node* pair =
959 assembler->IntPtrSubWithOverflow(assembler->BitcastTaggedToWord(lhs),
960 assembler->BitcastTaggedToWord(rhs));
961 Node* overflow = assembler->Projection(1, pair);
962
963 // Check if the Smi subtraction overflowed.
964 Label if_overflow(assembler), if_notoverflow(assembler);
965 assembler->Branch(overflow, &if_overflow, &if_notoverflow);
966
967 assembler->Bind(&if_overflow);
968 {
969 // lhs, rhs - smi and result - number. combined - number.
970 // The result doesn't fit into Smi range.
971 var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
972 var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
973 assembler->Goto(&do_fsub);
974 }
975
976 assembler->Bind(&if_notoverflow);
977 // lhs, rhs, result smi. combined - smi.
978 var_type_feedback.Bind(
979 assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall));
980 var_result.Bind(
981 assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
982 assembler->Goto(&end);
983 }
984
985 assembler->Bind(&if_rhsisnotsmi);
986 {
987 // Load the map of the {rhs}.
988 Node* rhs_map = assembler->LoadMap(rhs);
989
990 // Check if {rhs} is a HeapNumber.
991 assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
992 &check_rhsisoddball);
993
994 // Perform a floating point subtraction.
995 var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
996 var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
997 assembler->Goto(&do_fsub);
998 }
999 }
1000
1001 assembler->Bind(&if_lhsisnotsmi);
1002 {
1003 // Load the map of the {lhs}.
1004 Node* lhs_map = assembler->LoadMap(lhs);
1005
1006 // Check if the {lhs} is a HeapNumber.
1007 assembler->GotoUnless(assembler->IsHeapNumberMap(lhs_map),
1008 &if_lhsisnotnumber);
1009
1010 // Check if the {rhs} is a Smi.
1011 Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
1012 assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
1013 &if_rhsisnotsmi);
1014
1015 assembler->Bind(&if_rhsissmi);
1016 {
1017 // Perform a floating point subtraction.
1018 var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
1019 var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
1020 assembler->Goto(&do_fsub);
1021 }
1022
1023 assembler->Bind(&if_rhsisnotsmi);
1024 {
1025 // Load the map of the {rhs}.
1026 Node* rhs_map = assembler->LoadMap(rhs);
1027
1028 // Check if the {rhs} is a HeapNumber.
1029 assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
1030 &check_rhsisoddball);
1031
1032 // Perform a floating point subtraction.
1033 var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
1034 var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
1035 assembler->Goto(&do_fsub);
1036 }
1037 }
1038
1039 assembler->Bind(&do_fsub);
1040 {
1041 var_type_feedback.Bind(
1042 assembler->Int32Constant(BinaryOperationFeedback::kNumber));
1043 Node* lhs_value = var_fsub_lhs.value();
1044 Node* rhs_value = var_fsub_rhs.value();
1045 Node* value = assembler->Float64Sub(lhs_value, rhs_value);
1046 var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
1047 assembler->Goto(&end);
1048 }
1049
1050 assembler->Bind(&if_lhsisnotnumber);
1051 {
1052 // No checks on rhs are done yet. We just know lhs is not a number or Smi.
1053 // Check if lhs is an oddball.
1054 Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
1055 Node* lhs_is_oddball = assembler->Word32Equal(
1056 lhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
1057 assembler->GotoUnless(lhs_is_oddball, &call_with_any_feedback);
1058
1059 Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
1060 assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
1061 &if_rhsisnotsmi);
1062
1063 assembler->Bind(&if_rhsissmi);
1064 {
1065 var_type_feedback.Bind(
1066 assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
1067 assembler->Goto(&call_subtract_stub);
1068 }
1069
1070 assembler->Bind(&if_rhsisnotsmi);
1071 {
1072 // Load the map of the {rhs}.
1073 Node* rhs_map = assembler->LoadMap(rhs);
1074
1075 // Check if {rhs} is a HeapNumber.
1076 assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
1077 &check_rhsisoddball);
1078
1079 var_type_feedback.Bind(
1080 assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
1081 assembler->Goto(&call_subtract_stub);
1082 }
1083 }
1084
1085 assembler->Bind(&check_rhsisoddball);
1086 {
1087 // Check if rhs is an oddball. At this point we know lhs is either a
1088 // Smi or number or oddball and rhs is not a number or Smi.
1089 Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
1090 Node* rhs_is_oddball = assembler->Word32Equal(
1091 rhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
1092 assembler->GotoUnless(rhs_is_oddball, &call_with_any_feedback);
1093
1094 var_type_feedback.Bind(
1095 assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
1096 assembler->Goto(&call_subtract_stub);
1097 }
1098
1099 assembler->Bind(&call_with_any_feedback);
1100 {
1101 var_type_feedback.Bind(
1102 assembler->Int32Constant(BinaryOperationFeedback::kAny));
1103 assembler->Goto(&call_subtract_stub);
1104 }
1105
1106 assembler->Bind(&call_subtract_stub);
1107 {
1108 Callable callable = CodeFactory::Subtract(assembler->isolate());
1109 var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));
1110 assembler->Goto(&end);
1111 }
1112
1113 assembler->Bind(&end);
1114 assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
1115 slot_id);
1116 return var_result.value();
1117 }
1118
1119
1120 // static
Generate(CodeStubAssembler * assembler,compiler::Node * lhs,compiler::Node * rhs,compiler::Node * slot_id,compiler::Node * type_feedback_vector,compiler::Node * context)1121 compiler::Node* MultiplyWithFeedbackStub::Generate(
1122 CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* rhs,
1123 compiler::Node* slot_id, compiler::Node* type_feedback_vector,
1124 compiler::Node* context) {
1125 using compiler::Node;
1126 typedef CodeStubAssembler::Label Label;
1127 typedef CodeStubAssembler::Variable Variable;
1128
1129 // Shared entry point for floating point multiplication.
1130 Label do_fmul(assembler), if_lhsisnotnumber(assembler, Label::kDeferred),
1131 check_rhsisoddball(assembler, Label::kDeferred),
1132 call_with_oddball_feedback(assembler), call_with_any_feedback(assembler),
1133 call_multiply_stub(assembler), end(assembler);
1134 Variable var_lhs_float64(assembler, MachineRepresentation::kFloat64),
1135 var_rhs_float64(assembler, MachineRepresentation::kFloat64),
1136 var_result(assembler, MachineRepresentation::kTagged),
1137 var_type_feedback(assembler, MachineRepresentation::kWord32);
1138
1139 Node* number_map = assembler->HeapNumberMapConstant();
1140
1141 Label lhs_is_smi(assembler), lhs_is_not_smi(assembler);
1142 assembler->Branch(assembler->TaggedIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
1143
1144 assembler->Bind(&lhs_is_smi);
1145 {
1146 Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
1147 assembler->Branch(assembler->TaggedIsSmi(rhs), &rhs_is_smi,
1148 &rhs_is_not_smi);
1149
1150 assembler->Bind(&rhs_is_smi);
1151 {
1152 // Both {lhs} and {rhs} are Smis. The result is not necessarily a smi,
1153 // in case of overflow.
1154 var_result.Bind(assembler->SmiMul(lhs, rhs));
1155 var_type_feedback.Bind(assembler->Select(
1156 assembler->TaggedIsSmi(var_result.value()),
1157 assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall),
1158 assembler->Int32Constant(BinaryOperationFeedback::kNumber),
1159 MachineRepresentation::kWord32));
1160 assembler->Goto(&end);
1161 }
1162
1163 assembler->Bind(&rhs_is_not_smi);
1164 {
1165 Node* rhs_map = assembler->LoadMap(rhs);
1166
1167 // Check if {rhs} is a HeapNumber.
1168 assembler->GotoUnless(assembler->WordEqual(rhs_map, number_map),
1169 &check_rhsisoddball);
1170
1171 // Convert {lhs} to a double and multiply it with the value of {rhs}.
1172 var_lhs_float64.Bind(assembler->SmiToFloat64(lhs));
1173 var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
1174 assembler->Goto(&do_fmul);
1175 }
1176 }
1177
1178 assembler->Bind(&lhs_is_not_smi);
1179 {
1180 Node* lhs_map = assembler->LoadMap(lhs);
1181
1182 // Check if {lhs} is a HeapNumber.
1183 assembler->GotoUnless(assembler->WordEqual(lhs_map, number_map),
1184 &if_lhsisnotnumber);
1185
1186 // Check if {rhs} is a Smi.
1187 Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
1188 assembler->Branch(assembler->TaggedIsSmi(rhs), &rhs_is_smi,
1189 &rhs_is_not_smi);
1190
1191 assembler->Bind(&rhs_is_smi);
1192 {
1193 // Convert {rhs} to a double and multiply it with the value of {lhs}.
1194 var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
1195 var_rhs_float64.Bind(assembler->SmiToFloat64(rhs));
1196 assembler->Goto(&do_fmul);
1197 }
1198
1199 assembler->Bind(&rhs_is_not_smi);
1200 {
1201 Node* rhs_map = assembler->LoadMap(rhs);
1202
1203 // Check if {rhs} is a HeapNumber.
1204 assembler->GotoUnless(assembler->WordEqual(rhs_map, number_map),
1205 &check_rhsisoddball);
1206
1207 // Both {lhs} and {rhs} are HeapNumbers. Load their values and
1208 // multiply them.
1209 var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
1210 var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
1211 assembler->Goto(&do_fmul);
1212 }
1213 }
1214
1215 assembler->Bind(&do_fmul);
1216 {
1217 var_type_feedback.Bind(
1218 assembler->Int32Constant(BinaryOperationFeedback::kNumber));
1219 Node* value =
1220 assembler->Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
1221 Node* result = assembler->AllocateHeapNumberWithValue(value);
1222 var_result.Bind(result);
1223 assembler->Goto(&end);
1224 }
1225
1226 assembler->Bind(&if_lhsisnotnumber);
1227 {
1228 // No checks on rhs are done yet. We just know lhs is not a number or Smi.
1229 // Check if lhs is an oddball.
1230 Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
1231 Node* lhs_is_oddball = assembler->Word32Equal(
1232 lhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
1233 assembler->GotoUnless(lhs_is_oddball, &call_with_any_feedback);
1234
1235 assembler->GotoIf(assembler->TaggedIsSmi(rhs), &call_with_oddball_feedback);
1236
1237 // Load the map of the {rhs}.
1238 Node* rhs_map = assembler->LoadMap(rhs);
1239
1240 // Check if {rhs} is a HeapNumber.
1241 assembler->Branch(assembler->IsHeapNumberMap(rhs_map),
1242 &call_with_oddball_feedback, &check_rhsisoddball);
1243 }
1244
1245 assembler->Bind(&check_rhsisoddball);
1246 {
1247 // Check if rhs is an oddball. At this point we know lhs is either a
1248 // Smi or number or oddball and rhs is not a number or Smi.
1249 Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
1250 Node* rhs_is_oddball = assembler->Word32Equal(
1251 rhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
1252 assembler->Branch(rhs_is_oddball, &call_with_oddball_feedback,
1253 &call_with_any_feedback);
1254 }
1255
1256 assembler->Bind(&call_with_oddball_feedback);
1257 {
1258 var_type_feedback.Bind(
1259 assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
1260 assembler->Goto(&call_multiply_stub);
1261 }
1262
1263 assembler->Bind(&call_with_any_feedback);
1264 {
1265 var_type_feedback.Bind(
1266 assembler->Int32Constant(BinaryOperationFeedback::kAny));
1267 assembler->Goto(&call_multiply_stub);
1268 }
1269
1270 assembler->Bind(&call_multiply_stub);
1271 {
1272 Callable callable = CodeFactory::Multiply(assembler->isolate());
1273 var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));
1274 assembler->Goto(&end);
1275 }
1276
1277 assembler->Bind(&end);
1278 assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
1279 slot_id);
1280 return var_result.value();
1281 }
1282
1283
1284 // static
Generate(CodeStubAssembler * assembler,compiler::Node * dividend,compiler::Node * divisor,compiler::Node * slot_id,compiler::Node * type_feedback_vector,compiler::Node * context)1285 compiler::Node* DivideWithFeedbackStub::Generate(
1286 CodeStubAssembler* assembler, compiler::Node* dividend,
1287 compiler::Node* divisor, compiler::Node* slot_id,
1288 compiler::Node* type_feedback_vector, compiler::Node* context) {
1289 using compiler::Node;
1290 typedef CodeStubAssembler::Label Label;
1291 typedef CodeStubAssembler::Variable Variable;
1292
1293 // Shared entry point for floating point division.
1294 Label do_fdiv(assembler), dividend_is_not_number(assembler, Label::kDeferred),
1295 check_divisor_for_oddball(assembler, Label::kDeferred),
1296 call_with_oddball_feedback(assembler), call_with_any_feedback(assembler),
1297 call_divide_stub(assembler), end(assembler);
1298 Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
1299 var_divisor_float64(assembler, MachineRepresentation::kFloat64),
1300 var_result(assembler, MachineRepresentation::kTagged),
1301 var_type_feedback(assembler, MachineRepresentation::kWord32);
1302
1303 Node* number_map = assembler->HeapNumberMapConstant();
1304
1305 Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
1306 assembler->Branch(assembler->TaggedIsSmi(dividend), ÷nd_is_smi,
1307 ÷nd_is_not_smi);
1308
1309 assembler->Bind(÷nd_is_smi);
1310 {
1311 Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
1312 assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
1313 &divisor_is_not_smi);
1314
1315 assembler->Bind(&divisor_is_smi);
1316 {
1317 Label bailout(assembler);
1318
1319 // Do floating point division if {divisor} is zero.
1320 assembler->GotoIf(
1321 assembler->WordEqual(divisor, assembler->IntPtrConstant(0)),
1322 &bailout);
1323
1324 // Do floating point division {dividend} is zero and {divisor} is
1325 // negative.
1326 Label dividend_is_zero(assembler), dividend_is_not_zero(assembler);
1327 assembler->Branch(
1328 assembler->WordEqual(dividend, assembler->IntPtrConstant(0)),
1329 ÷nd_is_zero, ÷nd_is_not_zero);
1330
1331 assembler->Bind(÷nd_is_zero);
1332 {
1333 assembler->GotoIf(
1334 assembler->IntPtrLessThan(divisor, assembler->IntPtrConstant(0)),
1335 &bailout);
1336 assembler->Goto(÷nd_is_not_zero);
1337 }
1338 assembler->Bind(÷nd_is_not_zero);
1339
1340 Node* untagged_divisor = assembler->SmiUntag(divisor);
1341 Node* untagged_dividend = assembler->SmiUntag(dividend);
1342
1343 // Do floating point division if {dividend} is kMinInt (or kMinInt - 1
1344 // if the Smi size is 31) and {divisor} is -1.
1345 Label divisor_is_minus_one(assembler),
1346 divisor_is_not_minus_one(assembler);
1347 assembler->Branch(assembler->Word32Equal(untagged_divisor,
1348 assembler->Int32Constant(-1)),
1349 &divisor_is_minus_one, &divisor_is_not_minus_one);
1350
1351 assembler->Bind(&divisor_is_minus_one);
1352 {
1353 assembler->GotoIf(
1354 assembler->Word32Equal(
1355 untagged_dividend,
1356 assembler->Int32Constant(kSmiValueSize == 32 ? kMinInt
1357 : (kMinInt >> 1))),
1358 &bailout);
1359 assembler->Goto(&divisor_is_not_minus_one);
1360 }
1361 assembler->Bind(&divisor_is_not_minus_one);
1362
1363 Node* untagged_result =
1364 assembler->Int32Div(untagged_dividend, untagged_divisor);
1365 Node* truncated = assembler->Int32Mul(untagged_result, untagged_divisor);
1366 // Do floating point division if the remainder is not 0.
1367 assembler->GotoIf(assembler->Word32NotEqual(untagged_dividend, truncated),
1368 &bailout);
1369 var_type_feedback.Bind(
1370 assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall));
1371 var_result.Bind(assembler->SmiTag(untagged_result));
1372 assembler->Goto(&end);
1373
1374 // Bailout: convert {dividend} and {divisor} to double and do double
1375 // division.
1376 assembler->Bind(&bailout);
1377 {
1378 var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
1379 var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
1380 assembler->Goto(&do_fdiv);
1381 }
1382 }
1383
1384 assembler->Bind(&divisor_is_not_smi);
1385 {
1386 Node* divisor_map = assembler->LoadMap(divisor);
1387
1388 // Check if {divisor} is a HeapNumber.
1389 assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
1390 &check_divisor_for_oddball);
1391
1392 // Convert {dividend} to a double and divide it with the value of
1393 // {divisor}.
1394 var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
1395 var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
1396 assembler->Goto(&do_fdiv);
1397 }
1398
1399 assembler->Bind(÷nd_is_not_smi);
1400 {
1401 Node* dividend_map = assembler->LoadMap(dividend);
1402
1403 // Check if {dividend} is a HeapNumber.
1404 assembler->GotoUnless(assembler->WordEqual(dividend_map, number_map),
1405 ÷nd_is_not_number);
1406
1407 // Check if {divisor} is a Smi.
1408 Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
1409 assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
1410 &divisor_is_not_smi);
1411
1412 assembler->Bind(&divisor_is_smi);
1413 {
1414 // Convert {divisor} to a double and use it for a floating point
1415 // division.
1416 var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
1417 var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
1418 assembler->Goto(&do_fdiv);
1419 }
1420
1421 assembler->Bind(&divisor_is_not_smi);
1422 {
1423 Node* divisor_map = assembler->LoadMap(divisor);
1424
1425 // Check if {divisor} is a HeapNumber.
1426 assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
1427 &check_divisor_for_oddball);
1428
1429 // Both {dividend} and {divisor} are HeapNumbers. Load their values
1430 // and divide them.
1431 var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
1432 var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
1433 assembler->Goto(&do_fdiv);
1434 }
1435 }
1436 }
1437
1438 assembler->Bind(&do_fdiv);
1439 {
1440 var_type_feedback.Bind(
1441 assembler->Int32Constant(BinaryOperationFeedback::kNumber));
1442 Node* value = assembler->Float64Div(var_dividend_float64.value(),
1443 var_divisor_float64.value());
1444 var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
1445 assembler->Goto(&end);
1446 }
1447
1448 assembler->Bind(÷nd_is_not_number);
1449 {
1450 // We just know dividend is not a number or Smi. No checks on divisor yet.
1451 // Check if dividend is an oddball.
1452 Node* dividend_instance_type = assembler->LoadInstanceType(dividend);
1453 Node* dividend_is_oddball = assembler->Word32Equal(
1454 dividend_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
1455 assembler->GotoUnless(dividend_is_oddball, &call_with_any_feedback);
1456
1457 assembler->GotoIf(assembler->TaggedIsSmi(divisor),
1458 &call_with_oddball_feedback);
1459
1460 // Load the map of the {divisor}.
1461 Node* divisor_map = assembler->LoadMap(divisor);
1462
1463 // Check if {divisor} is a HeapNumber.
1464 assembler->Branch(assembler->IsHeapNumberMap(divisor_map),
1465 &call_with_oddball_feedback, &check_divisor_for_oddball);
1466 }
1467
1468 assembler->Bind(&check_divisor_for_oddball);
1469 {
1470 // Check if divisor is an oddball. At this point we know dividend is either
1471 // a Smi or number or oddball and divisor is not a number or Smi.
1472 Node* divisor_instance_type = assembler->LoadInstanceType(divisor);
1473 Node* divisor_is_oddball = assembler->Word32Equal(
1474 divisor_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
1475 assembler->Branch(divisor_is_oddball, &call_with_oddball_feedback,
1476 &call_with_any_feedback);
1477 }
1478
1479 assembler->Bind(&call_with_oddball_feedback);
1480 {
1481 var_type_feedback.Bind(
1482 assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
1483 assembler->Goto(&call_divide_stub);
1484 }
1485
1486 assembler->Bind(&call_with_any_feedback);
1487 {
1488 var_type_feedback.Bind(
1489 assembler->Int32Constant(BinaryOperationFeedback::kAny));
1490 assembler->Goto(&call_divide_stub);
1491 }
1492
1493 assembler->Bind(&call_divide_stub);
1494 {
1495 Callable callable = CodeFactory::Divide(assembler->isolate());
1496 var_result.Bind(assembler->CallStub(callable, context, dividend, divisor));
1497 assembler->Goto(&end);
1498 }
1499
1500 assembler->Bind(&end);
1501 assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
1502 slot_id);
1503 return var_result.value();
1504 }
1505
1506 // static
Generate(CodeStubAssembler * assembler,compiler::Node * dividend,compiler::Node * divisor,compiler::Node * slot_id,compiler::Node * type_feedback_vector,compiler::Node * context)1507 compiler::Node* ModulusWithFeedbackStub::Generate(
1508 CodeStubAssembler* assembler, compiler::Node* dividend,
1509 compiler::Node* divisor, compiler::Node* slot_id,
1510 compiler::Node* type_feedback_vector, compiler::Node* context) {
1511 using compiler::Node;
1512 typedef CodeStubAssembler::Label Label;
1513 typedef CodeStubAssembler::Variable Variable;
1514
1515 // Shared entry point for floating point division.
1516 Label do_fmod(assembler), dividend_is_not_number(assembler, Label::kDeferred),
1517 check_divisor_for_oddball(assembler, Label::kDeferred),
1518 call_with_oddball_feedback(assembler), call_with_any_feedback(assembler),
1519 call_modulus_stub(assembler), end(assembler);
1520 Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
1521 var_divisor_float64(assembler, MachineRepresentation::kFloat64),
1522 var_result(assembler, MachineRepresentation::kTagged),
1523 var_type_feedback(assembler, MachineRepresentation::kWord32);
1524
1525 Node* number_map = assembler->HeapNumberMapConstant();
1526
1527 Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
1528 assembler->Branch(assembler->TaggedIsSmi(dividend), ÷nd_is_smi,
1529 ÷nd_is_not_smi);
1530
1531 assembler->Bind(÷nd_is_smi);
1532 {
1533 Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
1534 assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
1535 &divisor_is_not_smi);
1536
1537 assembler->Bind(&divisor_is_smi);
1538 {
1539 var_result.Bind(assembler->SmiMod(dividend, divisor));
1540 var_type_feedback.Bind(assembler->Select(
1541 assembler->TaggedIsSmi(var_result.value()),
1542 assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall),
1543 assembler->Int32Constant(BinaryOperationFeedback::kNumber)));
1544 assembler->Goto(&end);
1545 }
1546
1547 assembler->Bind(&divisor_is_not_smi);
1548 {
1549 Node* divisor_map = assembler->LoadMap(divisor);
1550
1551 // Check if {divisor} is a HeapNumber.
1552 assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
1553 &check_divisor_for_oddball);
1554
1555 // Convert {dividend} to a double and divide it with the value of
1556 // {divisor}.
1557 var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
1558 var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
1559 assembler->Goto(&do_fmod);
1560 }
1561 }
1562
1563 assembler->Bind(÷nd_is_not_smi);
1564 {
1565 Node* dividend_map = assembler->LoadMap(dividend);
1566
1567 // Check if {dividend} is a HeapNumber.
1568 assembler->GotoUnless(assembler->WordEqual(dividend_map, number_map),
1569 ÷nd_is_not_number);
1570
1571 // Check if {divisor} is a Smi.
1572 Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
1573 assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
1574 &divisor_is_not_smi);
1575
1576 assembler->Bind(&divisor_is_smi);
1577 {
1578 // Convert {divisor} to a double and use it for a floating point
1579 // division.
1580 var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
1581 var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
1582 assembler->Goto(&do_fmod);
1583 }
1584
1585 assembler->Bind(&divisor_is_not_smi);
1586 {
1587 Node* divisor_map = assembler->LoadMap(divisor);
1588
1589 // Check if {divisor} is a HeapNumber.
1590 assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
1591 &check_divisor_for_oddball);
1592
1593 // Both {dividend} and {divisor} are HeapNumbers. Load their values
1594 // and divide them.
1595 var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
1596 var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
1597 assembler->Goto(&do_fmod);
1598 }
1599 }
1600
1601 assembler->Bind(&do_fmod);
1602 {
1603 var_type_feedback.Bind(
1604 assembler->Int32Constant(BinaryOperationFeedback::kNumber));
1605 Node* value = assembler->Float64Mod(var_dividend_float64.value(),
1606 var_divisor_float64.value());
1607 var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
1608 assembler->Goto(&end);
1609 }
1610
1611 assembler->Bind(÷nd_is_not_number);
1612 {
1613 // No checks on divisor yet. We just know dividend is not a number or Smi.
1614 // Check if dividend is an oddball.
1615 Node* dividend_instance_type = assembler->LoadInstanceType(dividend);
1616 Node* dividend_is_oddball = assembler->Word32Equal(
1617 dividend_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
1618 assembler->GotoUnless(dividend_is_oddball, &call_with_any_feedback);
1619
1620 assembler->GotoIf(assembler->TaggedIsSmi(divisor),
1621 &call_with_oddball_feedback);
1622
1623 // Load the map of the {divisor}.
1624 Node* divisor_map = assembler->LoadMap(divisor);
1625
1626 // Check if {divisor} is a HeapNumber.
1627 assembler->Branch(assembler->IsHeapNumberMap(divisor_map),
1628 &call_with_oddball_feedback, &check_divisor_for_oddball);
1629 }
1630
1631 assembler->Bind(&check_divisor_for_oddball);
1632 {
1633 // Check if divisor is an oddball. At this point we know dividend is either
1634 // a Smi or number or oddball and divisor is not a number or Smi.
1635 Node* divisor_instance_type = assembler->LoadInstanceType(divisor);
1636 Node* divisor_is_oddball = assembler->Word32Equal(
1637 divisor_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
1638 assembler->Branch(divisor_is_oddball, &call_with_oddball_feedback,
1639 &call_with_any_feedback);
1640 }
1641
1642 assembler->Bind(&call_with_oddball_feedback);
1643 {
1644 var_type_feedback.Bind(
1645 assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
1646 assembler->Goto(&call_modulus_stub);
1647 }
1648
1649 assembler->Bind(&call_with_any_feedback);
1650 {
1651 var_type_feedback.Bind(
1652 assembler->Int32Constant(BinaryOperationFeedback::kAny));
1653 assembler->Goto(&call_modulus_stub);
1654 }
1655
1656 assembler->Bind(&call_modulus_stub);
1657 {
1658 Callable callable = CodeFactory::Modulus(assembler->isolate());
1659 var_result.Bind(assembler->CallStub(callable, context, dividend, divisor));
1660 assembler->Goto(&end);
1661 }
1662
1663 assembler->Bind(&end);
1664 assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
1665 slot_id);
1666 return var_result.value();
1667 }
1668
1669 // static
// Generates CSA code for unary increment (value + 1) with type-feedback
// collection. Returns the tagged incremented value; the gathered
// BinaryOperationFeedback bits are recorded into |type_feedback_vector| at
// |slot_id| before returning.
compiler::Node* IncStub::Generate(CodeStubAssembler* assembler,
                                  compiler::Node* value,
                                  compiler::Node* context,
                                  compiler::Node* type_feedback_vector,
                                  compiler::Node* slot_id) {
  typedef CodeStubAssembler::Label Label;
  typedef compiler::Node Node;
  typedef CodeStubAssembler::Variable Variable;

  // Shared entry for floating point increment.
  Label do_finc(assembler), end(assembler);
  Variable var_finc_value(assembler, MachineRepresentation::kFloat64);

  // We might need to try again due to ToNumber conversion.
  Variable value_var(assembler, MachineRepresentation::kTagged);
  Variable result_var(assembler, MachineRepresentation::kTagged);
  Variable var_type_feedback(assembler, MachineRepresentation::kWord32);
  Variable* loop_vars[] = {&value_var, &var_type_feedback};
  Label start(assembler, 2, loop_vars);
  value_var.Bind(value);
  var_type_feedback.Bind(
      assembler->Int32Constant(BinaryOperationFeedback::kNone));
  assembler->Goto(&start);
  assembler->Bind(&start);
  {
    value = value_var.value();

    Label if_issmi(assembler), if_isnotsmi(assembler);
    assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi);

    assembler->Bind(&if_issmi);
    {
      // Try fast Smi addition first.
      Node* one = assembler->SmiConstant(Smi::FromInt(1));
      Node* pair = assembler->IntPtrAddWithOverflow(
          assembler->BitcastTaggedToWord(value),
          assembler->BitcastTaggedToWord(one));
      Node* overflow = assembler->Projection(1, pair);

      // Check if the Smi addition overflowed.
      Label if_overflow(assembler), if_notoverflow(assembler);
      assembler->Branch(overflow, &if_overflow, &if_notoverflow);

      assembler->Bind(&if_notoverflow);
      // Or into existing feedback: after an oddball round trip the feedback
      // may already hold kNumberOrOddball.
      var_type_feedback.Bind(assembler->Word32Or(
          var_type_feedback.value(),
          assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall)));
      result_var.Bind(
          assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
      assembler->Goto(&end);

      assembler->Bind(&if_overflow);
      {
        // Smi addition overflowed; redo the increment in float64.
        var_finc_value.Bind(assembler->SmiToFloat64(value));
        assembler->Goto(&do_finc);
      }
    }

    assembler->Bind(&if_isnotsmi);
    {
      // Check if the value is a HeapNumber.
      Label if_valueisnumber(assembler),
          if_valuenotnumber(assembler, Label::kDeferred);
      Node* value_map = assembler->LoadMap(value);
      assembler->Branch(assembler->IsHeapNumberMap(value_map),
                        &if_valueisnumber, &if_valuenotnumber);

      assembler->Bind(&if_valueisnumber);
      {
        // Load the HeapNumber value.
        var_finc_value.Bind(assembler->LoadHeapNumberValue(value));
        assembler->Goto(&do_finc);
      }

      assembler->Bind(&if_valuenotnumber);
      {
        // We do not require an Or with earlier feedback here because once we
        // convert the value to a number, we cannot reach this path. We can
        // only reach this path on the first pass when the feedback is kNone.
        CSA_ASSERT(assembler,
                   assembler->Word32Equal(var_type_feedback.value(),
                                          assembler->Int32Constant(
                                              BinaryOperationFeedback::kNone)));

        Label if_valueisoddball(assembler), if_valuenotoddball(assembler);
        Node* instance_type = assembler->LoadMapInstanceType(value_map);
        Node* is_oddball = assembler->Word32Equal(
            instance_type, assembler->Int32Constant(ODDBALL_TYPE));
        assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);

        assembler->Bind(&if_valueisoddball);
        {
          // Convert Oddball to Number and check again.
          value_var.Bind(
              assembler->LoadObjectField(value, Oddball::kToNumberOffset));
          var_type_feedback.Bind(assembler->Int32Constant(
              BinaryOperationFeedback::kNumberOrOddball));
          assembler->Goto(&start);
        }

        assembler->Bind(&if_valuenotoddball);
        {
          // Convert to a Number first and try again.
          Callable callable =
              CodeFactory::NonNumberToNumber(assembler->isolate());
          var_type_feedback.Bind(
              assembler->Int32Constant(BinaryOperationFeedback::kAny));
          value_var.Bind(assembler->CallStub(callable, context, value));
          assembler->Goto(&start);
        }
      }
    }
  }

  assembler->Bind(&do_finc);
  {
    // Float64 path: add 1.0 and box the result in a fresh HeapNumber.
    Node* finc_value = var_finc_value.value();
    Node* one = assembler->Float64Constant(1.0);
    Node* finc_result = assembler->Float64Add(finc_value, one);
    var_type_feedback.Bind(assembler->Word32Or(
        var_type_feedback.value(),
        assembler->Int32Constant(BinaryOperationFeedback::kNumber)));
    result_var.Bind(assembler->AllocateHeapNumberWithValue(finc_result));
    assembler->Goto(&end);
  }

  assembler->Bind(&end);
  // Record the collected feedback before handing back the result.
  assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
                            slot_id);
  return result_var.value();
}
1801
GenerateAssembly(CodeStubAssembler * assembler) const1802 void NumberToStringStub::GenerateAssembly(CodeStubAssembler* assembler) const {
1803 typedef compiler::Node Node;
1804 Node* argument = assembler->Parameter(Descriptor::kArgument);
1805 Node* context = assembler->Parameter(Descriptor::kContext);
1806 assembler->Return(assembler->NumberToString(context, argument));
1807 }
1808
// static
// Generates CSA code for unary decrement (value - 1) with type-feedback
// collection; structural mirror of IncStub::Generate. Returns the tagged
// decremented value and records feedback into |type_feedback_vector| at
// |slot_id|.
compiler::Node* DecStub::Generate(CodeStubAssembler* assembler,
                                  compiler::Node* value,
                                  compiler::Node* context,
                                  compiler::Node* type_feedback_vector,
                                  compiler::Node* slot_id) {
  typedef CodeStubAssembler::Label Label;
  typedef compiler::Node Node;
  typedef CodeStubAssembler::Variable Variable;

  // Shared entry for floating point decrement.
  Label do_fdec(assembler), end(assembler);
  Variable var_fdec_value(assembler, MachineRepresentation::kFloat64);

  // We might need to try again due to ToNumber conversion.
  Variable value_var(assembler, MachineRepresentation::kTagged);
  Variable result_var(assembler, MachineRepresentation::kTagged);
  Variable var_type_feedback(assembler, MachineRepresentation::kWord32);
  Variable* loop_vars[] = {&value_var, &var_type_feedback};
  Label start(assembler, 2, loop_vars);
  var_type_feedback.Bind(
      assembler->Int32Constant(BinaryOperationFeedback::kNone));
  value_var.Bind(value);
  assembler->Goto(&start);
  assembler->Bind(&start);
  {
    value = value_var.value();

    Label if_issmi(assembler), if_isnotsmi(assembler);
    assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi);

    assembler->Bind(&if_issmi);
    {
      // Try fast Smi subtraction first.
      Node* one = assembler->SmiConstant(Smi::FromInt(1));
      Node* pair = assembler->IntPtrSubWithOverflow(
          assembler->BitcastTaggedToWord(value),
          assembler->BitcastTaggedToWord(one));
      Node* overflow = assembler->Projection(1, pair);

      // Check if the Smi subtraction overflowed.
      Label if_overflow(assembler), if_notoverflow(assembler);
      assembler->Branch(overflow, &if_overflow, &if_notoverflow);

      assembler->Bind(&if_notoverflow);
      // Or into existing feedback: after an oddball round trip the feedback
      // may already hold kNumberOrOddball.
      var_type_feedback.Bind(assembler->Word32Or(
          var_type_feedback.value(),
          assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall)));
      result_var.Bind(
          assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
      assembler->Goto(&end);

      assembler->Bind(&if_overflow);
      {
        // Smi subtraction overflowed; redo the decrement in float64.
        var_fdec_value.Bind(assembler->SmiToFloat64(value));
        assembler->Goto(&do_fdec);
      }
    }

    assembler->Bind(&if_isnotsmi);
    {
      // Check if the value is a HeapNumber.
      Label if_valueisnumber(assembler),
          if_valuenotnumber(assembler, Label::kDeferred);
      Node* value_map = assembler->LoadMap(value);
      assembler->Branch(assembler->IsHeapNumberMap(value_map),
                        &if_valueisnumber, &if_valuenotnumber);

      assembler->Bind(&if_valueisnumber);
      {
        // Load the HeapNumber value.
        var_fdec_value.Bind(assembler->LoadHeapNumberValue(value));
        assembler->Goto(&do_fdec);
      }

      assembler->Bind(&if_valuenotnumber);
      {
        // We do not require an Or with earlier feedback here because once we
        // convert the value to a number, we cannot reach this path. We can
        // only reach this path on the first pass when the feedback is kNone.
        CSA_ASSERT(assembler,
                   assembler->Word32Equal(var_type_feedback.value(),
                                          assembler->Int32Constant(
                                              BinaryOperationFeedback::kNone)));

        Label if_valueisoddball(assembler), if_valuenotoddball(assembler);
        Node* instance_type = assembler->LoadMapInstanceType(value_map);
        Node* is_oddball = assembler->Word32Equal(
            instance_type, assembler->Int32Constant(ODDBALL_TYPE));
        assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);

        assembler->Bind(&if_valueisoddball);
        {
          // Convert Oddball to Number and check again.
          value_var.Bind(
              assembler->LoadObjectField(value, Oddball::kToNumberOffset));
          var_type_feedback.Bind(assembler->Int32Constant(
              BinaryOperationFeedback::kNumberOrOddball));
          assembler->Goto(&start);
        }

        assembler->Bind(&if_valuenotoddball);
        {
          // Convert to a Number first and try again.
          Callable callable =
              CodeFactory::NonNumberToNumber(assembler->isolate());
          var_type_feedback.Bind(
              assembler->Int32Constant(BinaryOperationFeedback::kAny));
          value_var.Bind(assembler->CallStub(callable, context, value));
          assembler->Goto(&start);
        }
      }
    }
  }

  assembler->Bind(&do_fdec);
  {
    // Float64 path: subtract 1.0 and box the result in a fresh HeapNumber.
    Node* fdec_value = var_fdec_value.value();
    Node* one = assembler->Float64Constant(1.0);
    Node* fdec_result = assembler->Float64Sub(fdec_value, one);
    var_type_feedback.Bind(assembler->Word32Or(
        var_type_feedback.value(),
        assembler->Int32Constant(BinaryOperationFeedback::kNumber)));
    result_var.Bind(assembler->AllocateHeapNumberWithValue(fdec_result));
    assembler->Goto(&end);
  }

  assembler->Bind(&end);
  // Record the collected feedback before handing back the result.
  assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
                            slot_id);
  return result_var.value();
}
1941
1942 // ES6 section 21.1.3.19 String.prototype.substring ( start, end )
Generate(CodeStubAssembler * assembler,compiler::Node * string,compiler::Node * from,compiler::Node * to,compiler::Node * context)1943 compiler::Node* SubStringStub::Generate(CodeStubAssembler* assembler,
1944 compiler::Node* string,
1945 compiler::Node* from,
1946 compiler::Node* to,
1947 compiler::Node* context) {
1948 return assembler->SubString(context, string, from, to);
1949 }
1950
GenerateAssembly(CodeStubAssembler * assembler) const1951 void LoadApiGetterStub::GenerateAssembly(CodeStubAssembler* assembler) const {
1952 typedef compiler::Node Node;
1953 Node* context = assembler->Parameter(Descriptor::kContext);
1954 Node* receiver = assembler->Parameter(Descriptor::kReceiver);
1955 // For now we only support receiver_is_holder.
1956 DCHECK(receiver_is_holder());
1957 Node* holder = receiver;
1958 Node* map = assembler->LoadMap(receiver);
1959 Node* descriptors = assembler->LoadMapDescriptors(map);
1960 Node* value_index =
1961 assembler->IntPtrConstant(DescriptorArray::ToValueIndex(index()));
1962 Node* callback = assembler->LoadFixedArrayElement(
1963 descriptors, value_index, 0, CodeStubAssembler::INTPTR_PARAMETERS);
1964 assembler->TailCallStub(CodeFactory::ApiGetter(isolate()), context, receiver,
1965 holder, callback);
1966 }
1967
// Stores |value| into the field of |receiver| described by this stub's
// FieldIndex and Representation. Non-tagged representations can fail value
// preparation and bail out to the StoreIC miss handler.
void StoreFieldStub::GenerateAssembly(CodeStubAssembler* assembler) const {
  typedef CodeStubAssembler::Label Label;
  typedef compiler::Node Node;

  FieldIndex index = this->index();
  Representation representation = this->representation();

  assembler->Comment("StoreFieldStub: inobject=%d, offset=%d, rep=%s",
                     index.is_inobject(), index.offset(),
                     representation.Mnemonic());

  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
  Node* name = assembler->Parameter(Descriptor::kName);
  Node* value = assembler->Parameter(Descriptor::kValue);
  Node* slot = assembler->Parameter(Descriptor::kSlot);
  Node* vector = assembler->Parameter(Descriptor::kVector);
  Node* context = assembler->Parameter(Descriptor::kContext);

  Label miss(assembler);

  // PrepareValueForWrite jumps to |miss| when the value cannot be represented
  // in the field's representation.
  Node* prepared_value =
      assembler->PrepareValueForWrite(value, representation, &miss);
  assembler->StoreNamedField(receiver, index, representation, prepared_value,
                             false);
  // The caller receives the original (unprepared) value back.
  assembler->Return(value);

  // Only stores to tagged field can't bailout.
  if (!representation.IsTagged()) {
    assembler->Bind(&miss);
    {
      assembler->Comment("Miss");
      assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
                                 vector, receiver, name);
    }
  }
}
2004
// Stores |value| into a global property cell. The cell (and, when
// check_global() is set, the expected global map) are embedded via
// placeholder WeakCells that are patched later. Any state mismatch bails out
// to the StoreIC miss handler.
void StoreGlobalStub::GenerateAssembly(CodeStubAssembler* assembler) const {
  typedef CodeStubAssembler::Label Label;
  typedef compiler::Node Node;

  assembler->Comment(
      "StoreGlobalStub: cell_type=%d, constant_type=%d, check_global=%d",
      cell_type(), PropertyCellType::kConstantType == cell_type()
                       ? static_cast<int>(constant_type())
                       : -1,
      check_global());

  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
  Node* name = assembler->Parameter(Descriptor::kName);
  Node* value = assembler->Parameter(Descriptor::kValue);
  Node* slot = assembler->Parameter(Descriptor::kSlot);
  Node* vector = assembler->Parameter(Descriptor::kVector);
  Node* context = assembler->Parameter(Descriptor::kContext);

  Label miss(assembler);

  if (check_global()) {
    // Check that the map of the global has not changed: use a placeholder map
    // that will be replaced later with the global object's map.
    Node* proxy_map = assembler->LoadMap(receiver);
    Node* global = assembler->LoadObjectField(proxy_map, Map::kPrototypeOffset);
    Node* map_cell = assembler->HeapConstant(isolate()->factory()->NewWeakCell(
        StoreGlobalStub::global_map_placeholder(isolate())));
    Node* expected_map = assembler->LoadWeakCellValueUnchecked(map_cell);
    Node* map = assembler->LoadMap(global);
    assembler->GotoIf(assembler->WordNotEqual(expected_map, map), &miss);
  }

  // The property cell is also embedded via a placeholder WeakCell; a cleared
  // cell (Smi) means the property cell died and we must miss.
  Node* weak_cell = assembler->HeapConstant(isolate()->factory()->NewWeakCell(
      StoreGlobalStub::property_cell_placeholder(isolate())));
  Node* cell = assembler->LoadWeakCellValue(weak_cell);
  assembler->GotoIf(assembler->TaggedIsSmi(cell), &miss);

  // Load the payload of the global parameter cell. A hole indicates that the
  // cell has been invalidated and that the store must be handled by the
  // runtime.
  Node* cell_contents =
      assembler->LoadObjectField(cell, PropertyCell::kValueOffset);

  PropertyCellType cell_type = this->cell_type();
  if (cell_type == PropertyCellType::kConstant ||
      cell_type == PropertyCellType::kUndefined) {
    // This is always valid for all states a cell can be in.
    assembler->GotoIf(assembler->WordNotEqual(cell_contents, value), &miss);
  } else {
    assembler->GotoIf(assembler->IsTheHole(cell_contents), &miss);

    // When dealing with constant types, the type may be allowed to change, as
    // long as optimized code remains valid.
    bool value_is_smi = false;
    if (cell_type == PropertyCellType::kConstantType) {
      switch (constant_type()) {
        case PropertyCellConstantType::kSmi:
          assembler->GotoUnless(assembler->TaggedIsSmi(value), &miss);
          value_is_smi = true;
          break;
        case PropertyCellConstantType::kStableMap: {
          // It is sufficient here to check that the value and cell contents
          // have identical maps, no matter if they are stable or not or if they
          // are the maps that were originally in the cell or not. If optimized
          // code will deopt when a cell has a unstable map and if it has a
          // dependency on a stable map, it will deopt if the map destabilizes.
          assembler->GotoIf(assembler->TaggedIsSmi(value), &miss);
          assembler->GotoIf(assembler->TaggedIsSmi(cell_contents), &miss);
          Node* expected_map = assembler->LoadMap(cell_contents);
          Node* map = assembler->LoadMap(value);
          assembler->GotoIf(assembler->WordNotEqual(expected_map, map), &miss);
          break;
        }
      }
    }
    // A Smi store can skip the write barrier.
    if (value_is_smi) {
      assembler->StoreObjectFieldNoWriteBarrier(
          cell, PropertyCell::kValueOffset, value);
    } else {
      assembler->StoreObjectField(cell, PropertyCell::kValueOffset, value);
    }
  }

  assembler->Return(value);

  assembler->Bind(&miss);
  {
    assembler->Comment("Miss");
    assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
                               vector, receiver, name);
  }
}
2097
GenerateAssembly(CodeStubAssembler * assembler) const2098 void KeyedLoadSloppyArgumentsStub::GenerateAssembly(
2099 CodeStubAssembler* assembler) const {
2100 typedef CodeStubAssembler::Label Label;
2101 typedef compiler::Node Node;
2102
2103 Node* receiver = assembler->Parameter(Descriptor::kReceiver);
2104 Node* key = assembler->Parameter(Descriptor::kName);
2105 Node* slot = assembler->Parameter(Descriptor::kSlot);
2106 Node* vector = assembler->Parameter(Descriptor::kVector);
2107 Node* context = assembler->Parameter(Descriptor::kContext);
2108
2109 Label miss(assembler);
2110
2111 Node* result = assembler->LoadKeyedSloppyArguments(receiver, key, &miss);
2112 assembler->Return(result);
2113
2114 assembler->Bind(&miss);
2115 {
2116 assembler->Comment("Miss");
2117 assembler->TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver,
2118 key, slot, vector);
2119 }
2120 }
2121
GenerateAssembly(CodeStubAssembler * assembler) const2122 void KeyedStoreSloppyArgumentsStub::GenerateAssembly(
2123 CodeStubAssembler* assembler) const {
2124 typedef CodeStubAssembler::Label Label;
2125 typedef compiler::Node Node;
2126
2127 Node* receiver = assembler->Parameter(Descriptor::kReceiver);
2128 Node* key = assembler->Parameter(Descriptor::kName);
2129 Node* value = assembler->Parameter(Descriptor::kValue);
2130 Node* slot = assembler->Parameter(Descriptor::kSlot);
2131 Node* vector = assembler->Parameter(Descriptor::kVector);
2132 Node* context = assembler->Parameter(Descriptor::kContext);
2133
2134 Label miss(assembler);
2135
2136 assembler->StoreKeyedSloppyArguments(receiver, key, value, &miss);
2137 assembler->Return(value);
2138
2139 assembler->Bind(&miss);
2140 {
2141 assembler->Comment("Miss");
2142 assembler->TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value,
2143 slot, vector, receiver, key);
2144 }
2145 }
2146
GenerateAssembly(CodeStubAssembler * assembler) const2147 void LoadScriptContextFieldStub::GenerateAssembly(
2148 CodeStubAssembler* assembler) const {
2149 typedef compiler::Node Node;
2150
2151 assembler->Comment("LoadScriptContextFieldStub: context_index=%d, slot=%d",
2152 context_index(), slot_index());
2153
2154 Node* context = assembler->Parameter(Descriptor::kContext);
2155
2156 Node* script_context = assembler->LoadScriptContext(context, context_index());
2157 Node* result = assembler->LoadFixedArrayElement(
2158 script_context, assembler->IntPtrConstant(slot_index()), 0,
2159 CodeStubAssembler::INTPTR_PARAMETERS);
2160 assembler->Return(result);
2161 }
2162
GenerateAssembly(CodeStubAssembler * assembler) const2163 void StoreScriptContextFieldStub::GenerateAssembly(
2164 CodeStubAssembler* assembler) const {
2165 typedef compiler::Node Node;
2166
2167 assembler->Comment("StoreScriptContextFieldStub: context_index=%d, slot=%d",
2168 context_index(), slot_index());
2169
2170 Node* value = assembler->Parameter(Descriptor::kValue);
2171 Node* context = assembler->Parameter(Descriptor::kContext);
2172
2173 Node* script_context = assembler->LoadScriptContext(context, context_index());
2174 assembler->StoreFixedArrayElement(
2175 script_context, assembler->IntPtrConstant(slot_index()), value,
2176 UPDATE_WRITE_BARRIER, CodeStubAssembler::INTPTR_PARAMETERS);
2177 assembler->Return(value);
2178 }
2179
GenerateAssembly(CodeStubAssembler * assembler) const2180 void StoreInterceptorStub::GenerateAssembly(
2181 CodeStubAssembler* assembler) const {
2182 typedef compiler::Node Node;
2183
2184 Node* receiver = assembler->Parameter(Descriptor::kReceiver);
2185 Node* name = assembler->Parameter(Descriptor::kName);
2186 Node* value = assembler->Parameter(Descriptor::kValue);
2187 Node* context = assembler->Parameter(Descriptor::kContext);
2188 assembler->TailCallRuntime(Runtime::kStorePropertyWithInterceptor, context,
2189 receiver, name, value);
2190 }
2191
// Loads an indexed property through the receiver's interceptor. Positive Smi
// keys go to the element-interceptor runtime call; any other key (negative
// Smi or non-Smi) falls back to the KeyedLoadIC miss handler.
void LoadIndexedInterceptorStub::GenerateAssembly(
    CodeStubAssembler* assembler) const {
  typedef compiler::Node Node;
  typedef CodeStubAssembler::Label Label;

  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
  Node* key = assembler->Parameter(Descriptor::kName);
  Node* slot = assembler->Parameter(Descriptor::kSlot);
  Node* vector = assembler->Parameter(Descriptor::kVector);
  Node* context = assembler->Parameter(Descriptor::kContext);

  Label if_keyispositivesmi(assembler), if_keyisinvalid(assembler);
  assembler->Branch(assembler->WordIsPositiveSmi(key), &if_keyispositivesmi,
                    &if_keyisinvalid);
  assembler->Bind(&if_keyispositivesmi);
  assembler->TailCallRuntime(Runtime::kLoadElementWithInterceptor, context,
                             receiver, key);

  assembler->Bind(&if_keyisinvalid);
  assembler->TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, key,
                             slot, vector);
}
2214
2215 // static
IsSupported(ObjectLiteral * expr)2216 bool FastCloneShallowObjectStub::IsSupported(ObjectLiteral* expr) {
2217 // FastCloneShallowObjectStub doesn't copy elements, and object literals don't
2218 // support copy-on-write (COW) elements for now.
2219 // TODO(mvstanton): make object literals support COW elements.
2220 return expr->fast_elements() && expr->has_shallow_properties() &&
2221 expr->properties_count() <= kMaximumClonedProperties;
2222 }
2223
2224 // static
PropertiesCount(int literal_length)2225 int FastCloneShallowObjectStub::PropertiesCount(int literal_length) {
2226 // This heuristic of setting empty literals to have
2227 // kInitialGlobalObjectUnusedPropertiesCount must remain in-sync with the
2228 // runtime.
2229 // TODO(verwaest): Unify this with the heuristic in the runtime.
2230 return literal_length == 0
2231 ? JSObject::kInitialGlobalObjectUnusedPropertiesCount
2232 : literal_length;
2233 }
2234
// static
// Fast path for cloning a shallow object literal: copies the boilerplate
// object word-by-word from the AllocationSite stashed in the closure's
// literals array. Jumps to |call_runtime| when no boilerplate exists yet or
// its size does not match |properties_count|. Returns the freshly allocated
// copy.
compiler::Node* FastCloneShallowObjectStub::GenerateFastPath(
    CodeStubAssembler* assembler, compiler::CodeAssembler::Label* call_runtime,
    compiler::Node* closure, compiler::Node* literals_index,
    compiler::Node* properties_count) {
  typedef compiler::Node Node;
  typedef compiler::CodeAssembler::Label Label;
  typedef compiler::CodeAssembler::Variable Variable;

  // An undefined literal slot means no AllocationSite/boilerplate has been
  // created yet; defer to the runtime.
  Node* literals_array =
      assembler->LoadObjectField(closure, JSFunction::kLiteralsOffset);
  Node* allocation_site = assembler->LoadFixedArrayElement(
      literals_array, literals_index,
      LiteralsArray::kFirstLiteralIndex * kPointerSize,
      CodeStubAssembler::SMI_PARAMETERS);
  assembler->GotoIf(assembler->IsUndefined(allocation_site), call_runtime);

  // Calculate the object and allocation size based on the properties count.
  Node* object_size = assembler->IntPtrAdd(
      assembler->WordShl(properties_count, kPointerSizeLog2),
      assembler->IntPtrConstant(JSObject::kHeaderSize));
  Node* allocation_size = object_size;
  if (FLAG_allocation_site_pretenuring) {
    // Reserve extra space for the AllocationMemento trailing the object.
    allocation_size = assembler->IntPtrAdd(
        object_size, assembler->IntPtrConstant(AllocationMemento::kSize));
  }
  // Bail out if the boilerplate's instance size disagrees with the size we
  // computed from |properties_count|.
  Node* boilerplate = assembler->LoadObjectField(
      allocation_site, AllocationSite::kTransitionInfoOffset);
  Node* boilerplate_map = assembler->LoadMap(boilerplate);
  Node* instance_size = assembler->LoadMapInstanceSize(boilerplate_map);
  Node* size_in_words = assembler->WordShr(object_size, kPointerSizeLog2);
  assembler->GotoUnless(assembler->Word32Equal(instance_size, size_in_words),
                        call_runtime);

  Node* copy = assembler->Allocate(allocation_size);

  // Copy boilerplate elements.
  Variable offset(assembler, MachineType::PointerRepresentation());
  // Start at -kHeapObjectTag so that tagged-pointer loads/stores hit the
  // object's first word.
  offset.Bind(assembler->IntPtrConstant(-kHeapObjectTag));
  Node* end_offset = assembler->IntPtrAdd(object_size, offset.value());
  Label loop_body(assembler, &offset), loop_check(assembler, &offset);
  // We should always have an object size greater than zero.
  assembler->Goto(&loop_body);
  assembler->Bind(&loop_body);
  {
    // The Allocate above guarantees that the copy lies in new space. This
    // allows us to skip write barriers. This is necessary since we may also be
    // copying unboxed doubles.
    Node* field =
        assembler->Load(MachineType::IntPtr(), boilerplate, offset.value());
    assembler->StoreNoWriteBarrier(MachineType::PointerRepresentation(), copy,
                                   offset.value(), field);
    assembler->Goto(&loop_check);
  }
  assembler->Bind(&loop_check);
  {
    offset.Bind(assembler->IntPtrAdd(offset.value(),
                                     assembler->IntPtrConstant(kPointerSize)));
    assembler->GotoUnless(
        assembler->IntPtrGreaterThanOrEqual(offset.value(), end_offset),
        &loop_body);
  }

  if (FLAG_allocation_site_pretenuring) {
    // Install an AllocationMemento right after the object and bump the
    // site's pretenure create count.
    Node* memento = assembler->InnerAllocate(copy, object_size);
    assembler->StoreObjectFieldNoWriteBarrier(
        memento, HeapObject::kMapOffset,
        assembler->LoadRoot(Heap::kAllocationMementoMapRootIndex));
    assembler->StoreObjectFieldNoWriteBarrier(
        memento, AllocationMemento::kAllocationSiteOffset, allocation_site);
    Node* memento_create_count = assembler->LoadObjectField(
        allocation_site, AllocationSite::kPretenureCreateCountOffset);
    memento_create_count = assembler->SmiAdd(
        memento_create_count, assembler->SmiConstant(Smi::FromInt(1)));
    assembler->StoreObjectFieldNoWriteBarrier(
        allocation_site, AllocationSite::kPretenureCreateCountOffset,
        memento_create_count);
  }

  // TODO(verwaest): Allocate and fill in double boxes.
  return copy;
}
2317
// Tries the inline fast path for cloning a shallow object literal and falls
// back to Runtime::kCreateObjectLiteral when the fast path bails out.
void FastCloneShallowObjectStub::GenerateAssembly(
    CodeStubAssembler* assembler) const {
  typedef CodeStubAssembler::Label Label;
  typedef compiler::Node Node;
  Label call_runtime(assembler);
  Node* closure = assembler->Parameter(0);
  Node* literals_index = assembler->Parameter(1);

  Node* properties_count =
      assembler->IntPtrConstant(PropertiesCount(this->length()));
  Node* copy = GenerateFastPath(assembler, &call_runtime, closure,
                                literals_index, properties_count);
  assembler->Return(copy);

  assembler->Bind(&call_runtime);
  // Slow path: the remaining parameters are only needed for the runtime call.
  Node* constant_properties = assembler->Parameter(2);
  Node* flags = assembler->Parameter(3);
  Node* context = assembler->Parameter(4);
  assembler->TailCallRuntime(Runtime::kCreateObjectLiteral, context, closure,
                             literals_index, constant_properties, flags);
}
2339
2340 template<class StateType>
TraceTransition(StateType from,StateType to)2341 void HydrogenCodeStub::TraceTransition(StateType from, StateType to) {
2342 // Note: Although a no-op transition is semantically OK, it is hinting at a
2343 // bug somewhere in our state transition machinery.
2344 DCHECK(from != to);
2345 if (!FLAG_trace_ic) return;
2346 OFStream os(stdout);
2347 os << "[";
2348 PrintBaseName(os);
2349 os << ": " << from << "=>" << to << "]" << std::endl;
2350 }
2351
// Streams the stub's IC state, e.g. for --trace-ic output.
void CallICStub::PrintState(std::ostream& os) const {  // NOLINT
  os << state();
}
2355
2356
FinishCode(Handle<Code> code)2357 void JSEntryStub::FinishCode(Handle<Code> code) {
2358 Handle<FixedArray> handler_table =
2359 code->GetIsolate()->factory()->NewFixedArray(1, TENURED);
2360 handler_table->set(0, Smi::FromInt(handler_offset_));
2361 code->set_handler_table(*handler_table);
2362 }
2363
2364
// Routes stub-failure deopts of dictionary-element loads to the KeyedLoadIC
// miss handler.
void LoadDictionaryElementStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  descriptor->Initialize(
      FUNCTION_ADDR(Runtime_KeyedLoadIC_MissFromStubFailure));
}
2370
InitializeDescriptor(CodeStubDescriptor * descriptor)2371 void HandlerStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
2372 DCHECK(kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC);
2373 if (kind() == Code::KEYED_LOAD_IC) {
2374 descriptor->Initialize(
2375 FUNCTION_ADDR(Runtime_KeyedLoadIC_MissFromStubFailure));
2376 }
2377 }
2378
2379
GetCallInterfaceDescriptor() const2380 CallInterfaceDescriptor HandlerStub::GetCallInterfaceDescriptor() const {
2381 if (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC) {
2382 return LoadWithVectorDescriptor(isolate());
2383 } else {
2384 DCHECK(kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC);
2385 return StoreWithVectorDescriptor(isolate());
2386 }
2387 }
2388
// Stub failures re-enter the runtime's TransitionElementsKind function.
void TransitionElementsKindStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  descriptor->Initialize(
      Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry);
}
2394
2395
// Stub failures re-enter the runtime's AllocateHeapNumber function.
void AllocateHeapNumberStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  descriptor->Initialize(
      Runtime::FunctionForId(Runtime::kAllocateHeapNumber)->entry);
}
2401
2402
// Defines Allocate<Type>Stub::InitializeDescriptor for every SIMD128 type;
// each routes stub failures to the matching Runtime::kCreate<Type> function.
#define SIMD128_INIT_DESC(TYPE, Type, type, lane_count, lane_type) \
  void Allocate##Type##Stub::InitializeDescriptor(                 \
      CodeStubDescriptor* descriptor) {                            \
    descriptor->Initialize(                                        \
        Runtime::FunctionForId(Runtime::kCreate##Type)->entry);    \
  }
SIMD128_TYPES(SIMD128_INIT_DESC)
#undef SIMD128_INIT_DESC
2411
void ToBooleanICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
  // Both the deopt handler and the explicit miss handler target the
  // ToBooleanIC miss runtime entry.
  descriptor->Initialize(FUNCTION_ADDR(Runtime_ToBooleanIC_Miss));
  descriptor->SetMissHandler(Runtime::kToBooleanIC_Miss);
}
2416
2417
void BinaryOpICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
  // Both the deopt handler and the explicit miss handler target the
  // BinaryOpIC miss runtime entry.
  descriptor->Initialize(FUNCTION_ADDR(Runtime_BinaryOpIC_Miss));
  descriptor->SetMissHandler(Runtime::kBinaryOpIC_Miss);
}
2422
2423
// Miss handling for binary ops that additionally carry an AllocationSite.
void BinaryOpWithAllocationSiteStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  descriptor->Initialize(
      FUNCTION_ADDR(Runtime_BinaryOpIC_MissWithAllocationSite));
}
2429
// Generic property load: walks the prototype chain looking up |key| on
// |object|. Own-property hits return the value, an exhausted chain returns
// undefined, and anything unsupported (including element/index keys) defers
// to Runtime::kGetProperty.
void GetPropertyStub::GenerateAssembly(CodeStubAssembler* assembler) const {
  typedef compiler::Node Node;
  typedef CodeStubAssembler::Label Label;
  typedef CodeStubAssembler::Variable Variable;

  Label call_runtime(assembler, Label::kDeferred), return_undefined(assembler),
      end(assembler);

  Node* object = assembler->Parameter(0);
  Node* key = assembler->Parameter(1);
  Node* context = assembler->Parameter(2);
  Variable var_result(assembler, MachineRepresentation::kTagged);

  // Invoked by TryPrototypeChainLookup for each holder on the chain; a found
  // own property ends the walk with its value.
  CodeStubAssembler::LookupInHolder lookup_property_in_holder =
      [assembler, context, &var_result, &end](
          Node* receiver, Node* holder, Node* holder_map,
          Node* holder_instance_type, Node* unique_name, Label* next_holder,
          Label* if_bailout) {
        Variable var_value(assembler, MachineRepresentation::kTagged);
        Label if_found(assembler);
        assembler->TryGetOwnProperty(
            context, receiver, holder, holder_map, holder_instance_type,
            unique_name, &if_found, &var_value, next_holder, if_bailout);
        assembler->Bind(&if_found);
        {
          var_result.Bind(var_value.value());
          assembler->Goto(&end);
        }
      };

  // Element (indexed) lookups always bail out to the runtime.
  CodeStubAssembler::LookupInHolder lookup_element_in_holder =
      [assembler, context, &var_result, &end](
          Node* receiver, Node* holder, Node* holder_map,
          Node* holder_instance_type, Node* index, Label* next_holder,
          Label* if_bailout) {
        // Not supported yet.
        assembler->Use(next_holder);
        assembler->Goto(if_bailout);
      };

  assembler->TryPrototypeChainLookup(object, key, lookup_property_in_holder,
                                     lookup_element_in_holder,
                                     &return_undefined, &call_runtime);

  assembler->Bind(&return_undefined);
  {
    // Property not found anywhere on the chain.
    var_result.Bind(assembler->UndefinedConstant());
    assembler->Goto(&end);
  }

  assembler->Bind(&call_runtime);
  {
    // Unsupported case (e.g. element key): fall back to the full runtime.
    var_result.Bind(
        assembler->CallRuntime(Runtime::kGetProperty, context, object, key));
    assembler->Goto(&end);
  }

  assembler->Bind(&end);
  assembler->Return(var_result.value());
}
2490
2491 // static
// Allocates and initializes a JSFunction for |shared_info| in new space.
// The function's map is chosen from the native context based on the
// FunctionKind and LanguageMode bits in the SharedFunctionInfo, and its code
// entry is pointed at the CompileLazy builtin.
compiler::Node* FastNewClosureStub::Generate(CodeStubAssembler* assembler,
                                             compiler::Node* shared_info,
                                             compiler::Node* context) {
  typedef compiler::Node Node;
  typedef compiler::CodeAssembler::Label Label;
  typedef compiler::CodeAssembler::Variable Variable;

  Isolate* isolate = assembler->isolate();
  Factory* factory = assembler->isolate()->factory();
  assembler->IncrementCounter(isolate->counters()->fast_new_closure_total(), 1);

  // Create a new closure from the given function info in new space
  Node* result = assembler->Allocate(JSFunction::kSize);

  // Calculate the index of the map we should install on the function based on
  // the FunctionKind and LanguageMode of the function.
  // Note: Must be kept in sync with Context::FunctionMapIndex
  Node* compiler_hints = assembler->LoadObjectField(
      shared_info, SharedFunctionInfo::kCompilerHintsOffset,
      MachineType::Uint32());
  Node* is_strict = assembler->Word32And(
      compiler_hints,
      assembler->Int32Constant(1 << SharedFunctionInfo::kStrictModeBit));

  Label if_normal(assembler), if_generator(assembler), if_async(assembler),
      if_class_constructor(assembler), if_function_without_prototype(assembler),
      load_map(assembler);
  Variable map_index(assembler, MachineType::PointerRepresentation());

  // A normal function has no FunctionKind bits set at all, so testing the
  // whole kind mask distinguishes "normal" from every other kind.
  STATIC_ASSERT(FunctionKind::kNormalFunction == 0);
  Node* is_not_normal = assembler->Word32And(
      compiler_hints,
      assembler->Int32Constant(SharedFunctionInfo::kAllFunctionKindBitsMask));
  assembler->GotoUnless(is_not_normal, &if_normal);

  Node* is_generator = assembler->Word32And(
      compiler_hints,
      assembler->Int32Constant(FunctionKind::kGeneratorFunction
                               << SharedFunctionInfo::kFunctionKindShift));
  assembler->GotoIf(is_generator, &if_generator);

  Node* is_async = assembler->Word32And(
      compiler_hints,
      assembler->Int32Constant(FunctionKind::kAsyncFunction
                               << SharedFunctionInfo::kFunctionKindShift));
  assembler->GotoIf(is_async, &if_async);

  Node* is_class_constructor = assembler->Word32And(
      compiler_hints,
      assembler->Int32Constant(FunctionKind::kClassConstructor
                               << SharedFunctionInfo::kFunctionKindShift));
  assembler->GotoIf(is_class_constructor, &if_class_constructor);

  if (FLAG_debug_code) {
    // Function must be a function without a prototype.
    CSA_ASSERT(assembler, assembler->Word32And(
                              compiler_hints,
                              assembler->Int32Constant(
                                  (FunctionKind::kAccessorFunction |
                                   FunctionKind::kArrowFunction |
                                   FunctionKind::kConciseMethod)
                                  << SharedFunctionInfo::kFunctionKindShift)));
  }
  assembler->Goto(&if_function_without_prototype);

  assembler->Bind(&if_normal);
  {
    map_index.Bind(assembler->Select(
        is_strict,
        assembler->IntPtrConstant(Context::STRICT_FUNCTION_MAP_INDEX),
        assembler->IntPtrConstant(Context::SLOPPY_FUNCTION_MAP_INDEX)));
    assembler->Goto(&load_map);
  }

  assembler->Bind(&if_generator);
  {
    map_index.Bind(assembler->Select(
        is_strict,
        assembler->IntPtrConstant(Context::STRICT_GENERATOR_FUNCTION_MAP_INDEX),
        assembler->IntPtrConstant(
            Context::SLOPPY_GENERATOR_FUNCTION_MAP_INDEX)));
    assembler->Goto(&load_map);
  }

  assembler->Bind(&if_async);
  {
    map_index.Bind(assembler->Select(
        is_strict,
        assembler->IntPtrConstant(Context::STRICT_ASYNC_FUNCTION_MAP_INDEX),
        assembler->IntPtrConstant(Context::SLOPPY_ASYNC_FUNCTION_MAP_INDEX)));
    assembler->Goto(&load_map);
  }

  assembler->Bind(&if_class_constructor);
  {
    // Class constructors are always strict.
    map_index.Bind(
        assembler->IntPtrConstant(Context::STRICT_FUNCTION_MAP_INDEX));
    assembler->Goto(&load_map);
  }

  assembler->Bind(&if_function_without_prototype);
  {
    map_index.Bind(assembler->IntPtrConstant(
        Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
    assembler->Goto(&load_map);
  }

  assembler->Bind(&load_map);

  // Get the function map in the current native context and set that
  // as the map of the allocated object.
  Node* native_context = assembler->LoadNativeContext(context);
  Node* map_slot_value =
      assembler->LoadFixedArrayElement(native_context, map_index.value(), 0,
                                       CodeStubAssembler::INTPTR_PARAMETERS);
  assembler->StoreMapNoWriteBarrier(result, map_slot_value);

  // Initialize the rest of the function.
  // Write barriers are skipped throughout because |result| is freshly
  // allocated and the stored values are immortal or also in new space.
  Node* empty_fixed_array =
      assembler->HeapConstant(factory->empty_fixed_array());
  Node* empty_literals_array =
      assembler->HeapConstant(factory->empty_literals_array());
  assembler->StoreObjectFieldNoWriteBarrier(result, JSObject::kPropertiesOffset,
                                            empty_fixed_array);
  assembler->StoreObjectFieldNoWriteBarrier(result, JSObject::kElementsOffset,
                                            empty_fixed_array);
  assembler->StoreObjectFieldNoWriteBarrier(result, JSFunction::kLiteralsOffset,
                                            empty_literals_array);
  assembler->StoreObjectFieldNoWriteBarrier(
      result, JSFunction::kPrototypeOrInitialMapOffset,
      assembler->TheHoleConstant());
  assembler->StoreObjectFieldNoWriteBarrier(
      result, JSFunction::kSharedFunctionInfoOffset, shared_info);
  assembler->StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset,
                                            context);
  // Point the code entry at the CompileLazy builtin so the function is
  // compiled on first call. The entry is the address past the Code header.
  Handle<Code> lazy_builtin_handle(
      assembler->isolate()->builtins()->builtin(Builtins::kCompileLazy));
  Node* lazy_builtin = assembler->HeapConstant(lazy_builtin_handle);
  Node* lazy_builtin_entry = assembler->IntPtrAdd(
      lazy_builtin,
      assembler->IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
  assembler->StoreObjectFieldNoWriteBarrier(
      result, JSFunction::kCodeEntryOffset, lazy_builtin_entry);
  assembler->StoreObjectFieldNoWriteBarrier(result,
                                            JSFunction::kNextFunctionLinkOffset,
                                            assembler->UndefinedConstant());

  return result;
}
2641
GenerateAssembly(CodeStubAssembler * assembler) const2642 void FastNewClosureStub::GenerateAssembly(CodeStubAssembler* assembler) const {
2643 assembler->Return(
2644 Generate(assembler, assembler->Parameter(0), assembler->Parameter(1)));
2645 }
2646
2647 // static
// Allocates and initializes a function context for |function| with |slots|
// user slots (plus the fixed header slots), chained to |context|.
compiler::Node* FastNewFunctionContextStub::Generate(
    CodeStubAssembler* assembler, compiler::Node* function,
    compiler::Node* slots, compiler::Node* context) {
  typedef compiler::Node Node;

  // Total length = requested slots + the fixed context header slots.
  Node* min_context_slots =
      assembler->Int32Constant(Context::MIN_CONTEXT_SLOTS);
  Node* length = assembler->Int32Add(slots, min_context_slots);
  // Allocation size in bytes: length * kPointerSize + FixedArray header.
  Node* size = assembler->Int32Add(
      assembler->Word32Shl(length, assembler->Int32Constant(kPointerSizeLog2)),
      assembler->Int32Constant(FixedArray::kHeaderSize));

  // Allocate the new function context in new space.
  Node* function_context = assembler->Allocate(size);

  Isolate* isolate = assembler->isolate();
  assembler->StoreMapNoWriteBarrier(
      function_context,
      assembler->HeapConstant(isolate->factory()->function_context_map()));
  assembler->StoreObjectFieldNoWriteBarrier(function_context,
                                            Context::kLengthOffset,
                                            assembler->SmiFromWord32(length));

  // Set up the fixed slots.
  assembler->StoreFixedArrayElement(
      function_context, assembler->Int32Constant(Context::CLOSURE_INDEX),
      function, SKIP_WRITE_BARRIER);
  assembler->StoreFixedArrayElement(
      function_context, assembler->Int32Constant(Context::PREVIOUS_INDEX),
      context, SKIP_WRITE_BARRIER);
  assembler->StoreFixedArrayElement(
      function_context, assembler->Int32Constant(Context::EXTENSION_INDEX),
      assembler->TheHoleConstant(), SKIP_WRITE_BARRIER);

  // Copy the native context from the previous context.
  Node* native_context = assembler->LoadNativeContext(context);
  assembler->StoreFixedArrayElement(
      function_context, assembler->Int32Constant(Context::NATIVE_CONTEXT_INDEX),
      native_context, SKIP_WRITE_BARRIER);

  // Initialize the rest of the slots to undefined.
  Node* undefined = assembler->UndefinedConstant();
  assembler->BuildFastFixedArrayForEach(
      function_context, FAST_ELEMENTS, min_context_slots, length,
      [undefined](CodeStubAssembler* assembler, Node* context, Node* offset) {
        assembler->StoreNoWriteBarrier(MachineType::PointerRepresentation(),
                                       context, offset, undefined);
      });

  return function_context;
}
2699
GenerateAssembly(CodeStubAssembler * assembler) const2700 void FastNewFunctionContextStub::GenerateAssembly(
2701 CodeStubAssembler* assembler) const {
2702 typedef compiler::Node Node;
2703 Node* function = assembler->Parameter(Descriptor::kFunction);
2704 Node* slots = assembler->Parameter(FastNewFunctionContextDescriptor::kSlots);
2705 Node* context = assembler->Parameter(Descriptor::kContext);
2706
2707 assembler->Return(Generate(assembler, function, slots, context));
2708 }
2709
2710 // static
// Clones the JSRegExp boilerplate stored in |closure|'s literals array at
// |literal_index|. Falls back to Runtime::kCreateRegExpLiteral when no
// boilerplate exists yet (slot holds undefined).
compiler::Node* FastCloneRegExpStub::Generate(CodeStubAssembler* assembler,
                                              compiler::Node* closure,
                                              compiler::Node* literal_index,
                                              compiler::Node* pattern,
                                              compiler::Node* flags,
                                              compiler::Node* context) {
  typedef CodeStubAssembler::Label Label;
  typedef CodeStubAssembler::Variable Variable;
  typedef compiler::Node Node;

  Label call_runtime(assembler, Label::kDeferred), end(assembler);

  Variable result(assembler, MachineRepresentation::kTagged);

  // Load the boilerplate from the closure's literals array; undefined means
  // it has not been created yet.
  Node* literals_array =
      assembler->LoadObjectField(closure, JSFunction::kLiteralsOffset);
  Node* boilerplate = assembler->LoadFixedArrayElement(
      literals_array, literal_index,
      LiteralsArray::kFirstLiteralIndex * kPointerSize,
      CodeStubAssembler::SMI_PARAMETERS);
  assembler->GotoIf(assembler->IsUndefined(boilerplate), &call_runtime);

  {
    // Fast path: shallow field-by-field copy of the boilerplate, including
    // the in-object fields.
    int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
    Node* copy = assembler->Allocate(size);
    for (int offset = 0; offset < size; offset += kPointerSize) {
      Node* value = assembler->LoadObjectField(boilerplate, offset);
      assembler->StoreObjectFieldNoWriteBarrier(copy, offset, value);
    }
    result.Bind(copy);
    assembler->Goto(&end);
  }

  assembler->Bind(&call_runtime);
  {
    result.Bind(assembler->CallRuntime(Runtime::kCreateRegExpLiteral, context,
                                       closure, literal_index, pattern, flags));
    assembler->Goto(&end);
  }

  assembler->Bind(&end);
  return result.value();
}
2754
GenerateAssembly(CodeStubAssembler * assembler) const2755 void FastCloneRegExpStub::GenerateAssembly(CodeStubAssembler* assembler) const {
2756 typedef compiler::Node Node;
2757 Node* closure = assembler->Parameter(Descriptor::kClosure);
2758 Node* literal_index = assembler->Parameter(Descriptor::kLiteralIndex);
2759 Node* pattern = assembler->Parameter(Descriptor::kPattern);
2760 Node* flags = assembler->Parameter(Descriptor::kFlags);
2761 Node* context = assembler->Parameter(Descriptor::kContext);
2762
2763 assembler->Return(
2764 Generate(assembler, closure, literal_index, pattern, flags, context));
2765 }
2766
2767 namespace {
2768
// Shallow-clones a non-empty JSArray boilerplate: allocates an
// uninitialized array + elements store of |capacity|, copies the elements
// header field-by-field, then bulk-copies the boilerplate's elements.
compiler::Node* NonEmptyShallowClone(CodeStubAssembler* assembler,
                                     compiler::Node* boilerplate,
                                     compiler::Node* boilerplate_map,
                                     compiler::Node* boilerplate_elements,
                                     compiler::Node* allocation_site,
                                     compiler::Node* capacity,
                                     ElementsKind kind) {
  typedef compiler::Node Node;
  typedef CodeStubAssembler::ParameterMode ParameterMode;

  ParameterMode param_mode = CodeStubAssembler::SMI_PARAMETERS;

  Node* length = assembler->LoadJSArrayLength(boilerplate);

  // On 64-bit targets, work with untagged integers instead of Smis.
  if (assembler->Is64()) {
    capacity = assembler->SmiUntag(capacity);
    param_mode = CodeStubAssembler::INTEGER_PARAMETERS;
  }

  Node *array, *elements;
  std::tie(array, elements) =
      assembler->AllocateUninitializedJSArrayWithElements(
          kind, boilerplate_map, length, allocation_site, capacity, param_mode);

  assembler->Comment("copy elements header");
  for (int offset = 0; offset < FixedArrayBase::kHeaderSize;
       offset += kPointerSize) {
    Node* value = assembler->LoadObjectField(boilerplate_elements, offset);
    assembler->StoreObjectField(elements, offset, value);
  }

  // Untag the length as well so it matches |param_mode| for the copy below.
  if (assembler->Is64()) {
    length = assembler->SmiUntag(length);
  }

  assembler->Comment("copy boilerplate elements");
  assembler->CopyFixedArrayElements(kind, boilerplate_elements, elements,
                                    length, SKIP_WRITE_BARRIER, param_mode);
  assembler->IncrementCounter(
      assembler->isolate()->counters()->inlined_copied_elements(), 1);

  return array;
}
2812
2813 } // namespace
2814
2815 // static
Generate(CodeStubAssembler * assembler,compiler::Node * closure,compiler::Node * literal_index,compiler::Node * context,CodeStubAssembler::Label * call_runtime,AllocationSiteMode allocation_site_mode)2816 compiler::Node* FastCloneShallowArrayStub::Generate(
2817 CodeStubAssembler* assembler, compiler::Node* closure,
2818 compiler::Node* literal_index, compiler::Node* context,
2819 CodeStubAssembler::Label* call_runtime,
2820 AllocationSiteMode allocation_site_mode) {
2821 typedef CodeStubAssembler::Label Label;
2822 typedef CodeStubAssembler::Variable Variable;
2823 typedef compiler::Node Node;
2824
2825 Label zero_capacity(assembler), cow_elements(assembler),
2826 fast_elements(assembler), return_result(assembler);
2827 Variable result(assembler, MachineRepresentation::kTagged);
2828
2829 Node* literals_array =
2830 assembler->LoadObjectField(closure, JSFunction::kLiteralsOffset);
2831 Node* allocation_site = assembler->LoadFixedArrayElement(
2832 literals_array, literal_index,
2833 LiteralsArray::kFirstLiteralIndex * kPointerSize,
2834 CodeStubAssembler::SMI_PARAMETERS);
2835
2836 assembler->GotoIf(assembler->IsUndefined(allocation_site), call_runtime);
2837 allocation_site = assembler->LoadFixedArrayElement(
2838 literals_array, literal_index,
2839 LiteralsArray::kFirstLiteralIndex * kPointerSize,
2840 CodeStubAssembler::SMI_PARAMETERS);
2841
2842 Node* boilerplate = assembler->LoadObjectField(
2843 allocation_site, AllocationSite::kTransitionInfoOffset);
2844 Node* boilerplate_map = assembler->LoadMap(boilerplate);
2845 Node* boilerplate_elements = assembler->LoadElements(boilerplate);
2846 Node* capacity = assembler->LoadFixedArrayBaseLength(boilerplate_elements);
2847 allocation_site =
2848 allocation_site_mode == TRACK_ALLOCATION_SITE ? allocation_site : nullptr;
2849
2850 Node* zero = assembler->SmiConstant(Smi::kZero);
2851 assembler->GotoIf(assembler->SmiEqual(capacity, zero), &zero_capacity);
2852
2853 Node* elements_map = assembler->LoadMap(boilerplate_elements);
2854 assembler->GotoIf(assembler->IsFixedCOWArrayMap(elements_map), &cow_elements);
2855
2856 assembler->GotoIf(assembler->IsFixedArrayMap(elements_map), &fast_elements);
2857 {
2858 assembler->Comment("fast double elements path");
2859 if (FLAG_debug_code) {
2860 Label correct_elements_map(assembler), abort(assembler, Label::kDeferred);
2861 assembler->Branch(assembler->IsFixedDoubleArrayMap(elements_map),
2862 &correct_elements_map, &abort);
2863
2864 assembler->Bind(&abort);
2865 {
2866 Node* abort_id = assembler->SmiConstant(
2867 Smi::FromInt(BailoutReason::kExpectedFixedDoubleArrayMap));
2868 assembler->CallRuntime(Runtime::kAbort, context, abort_id);
2869 result.Bind(assembler->UndefinedConstant());
2870 assembler->Goto(&return_result);
2871 }
2872 assembler->Bind(&correct_elements_map);
2873 }
2874
2875 Node* array = NonEmptyShallowClone(assembler, boilerplate, boilerplate_map,
2876 boilerplate_elements, allocation_site,
2877 capacity, FAST_DOUBLE_ELEMENTS);
2878 result.Bind(array);
2879 assembler->Goto(&return_result);
2880 }
2881
2882 assembler->Bind(&fast_elements);
2883 {
2884 assembler->Comment("fast elements path");
2885 Node* array = NonEmptyShallowClone(assembler, boilerplate, boilerplate_map,
2886 boilerplate_elements, allocation_site,
2887 capacity, FAST_ELEMENTS);
2888 result.Bind(array);
2889 assembler->Goto(&return_result);
2890 }
2891
2892 Variable length(assembler, MachineRepresentation::kTagged),
2893 elements(assembler, MachineRepresentation::kTagged);
2894 Label allocate_without_elements(assembler);
2895
2896 assembler->Bind(&cow_elements);
2897 {
2898 assembler->Comment("fixed cow path");
2899 length.Bind(assembler->LoadJSArrayLength(boilerplate));
2900 elements.Bind(boilerplate_elements);
2901
2902 assembler->Goto(&allocate_without_elements);
2903 }
2904
2905 assembler->Bind(&zero_capacity);
2906 {
2907 assembler->Comment("zero capacity path");
2908 length.Bind(zero);
2909 elements.Bind(assembler->LoadRoot(Heap::kEmptyFixedArrayRootIndex));
2910
2911 assembler->Goto(&allocate_without_elements);
2912 }
2913
2914 assembler->Bind(&allocate_without_elements);
2915 {
2916 Node* array = assembler->AllocateUninitializedJSArrayWithoutElements(
2917 FAST_ELEMENTS, boilerplate_map, length.value(), allocation_site);
2918 assembler->StoreObjectField(array, JSObject::kElementsOffset,
2919 elements.value());
2920 result.Bind(array);
2921 assembler->Goto(&return_result);
2922 }
2923
2924 assembler->Bind(&return_result);
2925 return result.value();
2926 }
2927
GenerateAssembly(CodeStubAssembler * assembler) const2928 void FastCloneShallowArrayStub::GenerateAssembly(
2929 CodeStubAssembler* assembler) const {
2930 typedef compiler::Node Node;
2931 typedef CodeStubAssembler::Label Label;
2932 Node* closure = assembler->Parameter(Descriptor::kClosure);
2933 Node* literal_index = assembler->Parameter(Descriptor::kLiteralIndex);
2934 Node* constant_elements = assembler->Parameter(Descriptor::kConstantElements);
2935 Node* context = assembler->Parameter(Descriptor::kContext);
2936 Label call_runtime(assembler, Label::kDeferred);
2937 assembler->Return(Generate(assembler, closure, literal_index, context,
2938 &call_runtime, allocation_site_mode()));
2939
2940 assembler->Bind(&call_runtime);
2941 {
2942 assembler->Comment("call runtime");
2943 Node* flags = assembler->SmiConstant(
2944 Smi::FromInt(ArrayLiteral::kShallowElements |
2945 (allocation_site_mode() == TRACK_ALLOCATION_SITE
2946 ? 0
2947 : ArrayLiteral::kDisableMementos)));
2948 assembler->Return(assembler->CallRuntime(Runtime::kCreateArrayLiteral,
2949 context, closure, literal_index,
2950 constant_elements, flags));
2951 }
2952 }
2953
GenerateAheadOfTime(Isolate * isolate)2954 void CreateAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
2955 CreateAllocationSiteStub stub(isolate);
2956 stub.GetCode();
2957 }
2958
2959
GenerateAheadOfTime(Isolate * isolate)2960 void CreateWeakCellStub::GenerateAheadOfTime(Isolate* isolate) {
2961 CreateWeakCellStub stub(isolate);
2962 stub.GetCode();
2963 }
2964
2965
void StoreElementStub::Generate(MacroAssembler* masm) {
  // This stub is only instantiated for dictionary-mode elements; everything
  // else goes through StoreFastElementStub. Delegate to the generic slow
  // keyed-store handler.
  DCHECK_EQ(DICTIONARY_ELEMENTS, elements_kind());
  KeyedStoreIC::GenerateSlow(masm);
}
2970
GenerateAssembly(CodeStubAssembler * assembler) const2971 void StoreFastElementStub::GenerateAssembly(
2972 CodeStubAssembler* assembler) const {
2973 typedef CodeStubAssembler::Label Label;
2974 typedef compiler::Node Node;
2975
2976 assembler->Comment(
2977 "StoreFastElementStub: js_array=%d, elements_kind=%s, store_mode=%d",
2978 is_js_array(), ElementsKindToString(elements_kind()), store_mode());
2979
2980 Node* receiver = assembler->Parameter(Descriptor::kReceiver);
2981 Node* key = assembler->Parameter(Descriptor::kName);
2982 Node* value = assembler->Parameter(Descriptor::kValue);
2983 Node* slot = assembler->Parameter(Descriptor::kSlot);
2984 Node* vector = assembler->Parameter(Descriptor::kVector);
2985 Node* context = assembler->Parameter(Descriptor::kContext);
2986
2987 Label miss(assembler);
2988
2989 assembler->EmitElementStore(receiver, key, value, is_js_array(),
2990 elements_kind(), store_mode(), &miss);
2991 assembler->Return(value);
2992
2993 assembler->Bind(&miss);
2994 {
2995 assembler->Comment("Miss");
2996 assembler->TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value,
2997 slot, vector, receiver, key);
2998 }
2999 }
3000
3001 // static
GenerateAheadOfTime(Isolate * isolate)3002 void StoreFastElementStub::GenerateAheadOfTime(Isolate* isolate) {
3003 if (FLAG_minimal) return;
3004 StoreFastElementStub(isolate, false, FAST_HOLEY_ELEMENTS, STANDARD_STORE)
3005 .GetCode();
3006 StoreFastElementStub(isolate, false, FAST_HOLEY_ELEMENTS,
3007 STORE_AND_GROW_NO_TRANSITION).GetCode();
3008 for (int i = FIRST_FAST_ELEMENTS_KIND; i <= LAST_FAST_ELEMENTS_KIND; i++) {
3009 ElementsKind kind = static_cast<ElementsKind>(i);
3010 StoreFastElementStub(isolate, true, kind, STANDARD_STORE).GetCode();
3011 StoreFastElementStub(isolate, true, kind, STORE_AND_GROW_NO_TRANSITION)
3012 .GetCode();
3013 }
3014 }
3015
// Classifies |object|, ORs the matching hint into this IC's accumulated
// hints (persisted in the sub-minor key), and returns ToBoolean(object).
bool ToBooleanICStub::UpdateStatus(Handle<Object> object) {
  ToBooleanHints old_hints = hints();
  ToBooleanHints new_hints = old_hints;
  bool to_boolean_value = false;  // Dummy initialization.
  if (object->IsUndefined(isolate())) {
    new_hints |= ToBooleanHint::kUndefined;
    to_boolean_value = false;
  } else if (object->IsBoolean()) {
    new_hints |= ToBooleanHint::kBoolean;
    to_boolean_value = object->IsTrue(isolate());
  } else if (object->IsNull(isolate())) {
    new_hints |= ToBooleanHint::kNull;
    to_boolean_value = false;
  } else if (object->IsSmi()) {
    new_hints |= ToBooleanHint::kSmallInteger;
    to_boolean_value = Smi::cast(*object)->value() != 0;
  } else if (object->IsJSReceiver()) {
    new_hints |= ToBooleanHint::kReceiver;
    // Undetectable receivers (e.g. document.all) are falsy.
    to_boolean_value = !object->IsUndetectable();
  } else if (object->IsString()) {
    DCHECK(!object->IsUndetectable());
    new_hints |= ToBooleanHint::kString;
    to_boolean_value = String::cast(*object)->length() != 0;
  } else if (object->IsSymbol()) {
    new_hints |= ToBooleanHint::kSymbol;
    to_boolean_value = true;
  } else if (object->IsHeapNumber()) {
    DCHECK(!object->IsUndetectable());
    new_hints |= ToBooleanHint::kHeapNumber;
    // +0, -0 and NaN are the only falsy numbers.
    double value = HeapNumber::cast(*object)->value();
    to_boolean_value = value != 0 && !std::isnan(value);
  } else if (object->IsSimd128Value()) {
    new_hints |= ToBooleanHint::kSimdValue;
    to_boolean_value = true;
  } else {
    // We should never see an internal object at runtime here!
    UNREACHABLE();
    to_boolean_value = true;
  }
  TraceTransition(old_hints, new_hints);
  set_sub_minor_key(HintsBits::update(sub_minor_key(), new_hints));
  return to_boolean_value;
}
3059
PrintState(std::ostream & os) const3060 void ToBooleanICStub::PrintState(std::ostream& os) const { // NOLINT
3061 os << hints();
3062 }
3063
GenerateAheadOfTime(Isolate * isolate)3064 void StubFailureTrampolineStub::GenerateAheadOfTime(Isolate* isolate) {
3065 StubFailureTrampolineStub stub1(isolate, NOT_JS_FUNCTION_STUB_MODE);
3066 StubFailureTrampolineStub stub2(isolate, JS_FUNCTION_STUB_MODE);
3067 stub1.GetCode();
3068 stub2.GetCode();
3069 }
3070
3071
EntryHookTrampoline(intptr_t function,intptr_t stack_pointer,Isolate * isolate)3072 void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
3073 intptr_t stack_pointer,
3074 Isolate* isolate) {
3075 FunctionEntryHook entry_hook = isolate->function_entry_hook();
3076 DCHECK(entry_hook != NULL);
3077 entry_hook(function, stack_pointer);
3078 }
3079
GenerateAssembly(CodeStubAssembler * assembler) const3080 void CreateAllocationSiteStub::GenerateAssembly(
3081 CodeStubAssembler* assembler) const {
3082 assembler->Return(assembler->CreateAllocationSiteInFeedbackVector(
3083 assembler->Parameter(Descriptor::kVector),
3084 assembler->Parameter(Descriptor::kSlot)));
3085 }
3086
GenerateAssembly(CodeStubAssembler * assembler) const3087 void CreateWeakCellStub::GenerateAssembly(CodeStubAssembler* assembler) const {
3088 assembler->Return(assembler->CreateWeakCellInFeedbackVector(
3089 assembler->Parameter(Descriptor::kVector),
3090 assembler->Parameter(Descriptor::kSlot),
3091 assembler->Parameter(Descriptor::kValue)));
3092 }
3093
GenerateAssembly(CodeStubAssembler * assembler) const3094 void ArrayNoArgumentConstructorStub::GenerateAssembly(
3095 CodeStubAssembler* assembler) const {
3096 typedef compiler::Node Node;
3097 Node* native_context = assembler->LoadObjectField(
3098 assembler->Parameter(Descriptor::kFunction), JSFunction::kContextOffset);
3099 bool track_allocation_site =
3100 AllocationSite::GetMode(elements_kind()) == TRACK_ALLOCATION_SITE &&
3101 override_mode() != DISABLE_ALLOCATION_SITES;
3102 Node* allocation_site =
3103 track_allocation_site ? assembler->Parameter(Descriptor::kAllocationSite)
3104 : nullptr;
3105 Node* array_map =
3106 assembler->LoadJSArrayElementsMap(elements_kind(), native_context);
3107 Node* array = assembler->AllocateJSArray(
3108 elements_kind(), array_map,
3109 assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
3110 assembler->SmiConstant(Smi::kZero), allocation_site);
3111 assembler->Return(array);
3112 }
3113
GenerateAssembly(CodeStubAssembler * assembler) const3114 void InternalArrayNoArgumentConstructorStub::GenerateAssembly(
3115 CodeStubAssembler* assembler) const {
3116 typedef compiler::Node Node;
3117 Node* array_map =
3118 assembler->LoadObjectField(assembler->Parameter(Descriptor::kFunction),
3119 JSFunction::kPrototypeOrInitialMapOffset);
3120 Node* array = assembler->AllocateJSArray(
3121 elements_kind(), array_map,
3122 assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
3123 assembler->SmiConstant(Smi::kZero), nullptr);
3124 assembler->Return(array);
3125 }
3126
3127 namespace {
3128
3129 template <typename Descriptor>
SingleArgumentConstructorCommon(CodeStubAssembler * assembler,ElementsKind elements_kind,compiler::Node * array_map,compiler::Node * allocation_site,AllocationSiteMode mode)3130 void SingleArgumentConstructorCommon(CodeStubAssembler* assembler,
3131 ElementsKind elements_kind,
3132 compiler::Node* array_map,
3133 compiler::Node* allocation_site,
3134 AllocationSiteMode mode) {
3135 typedef compiler::Node Node;
3136 typedef CodeStubAssembler::Label Label;
3137
3138 Label ok(assembler);
3139 Label smi_size(assembler);
3140 Label small_smi_size(assembler);
3141 Label call_runtime(assembler, Label::kDeferred);
3142
3143 Node* size = assembler->Parameter(Descriptor::kArraySizeSmiParameter);
3144 assembler->Branch(assembler->TaggedIsSmi(size), &smi_size, &call_runtime);
3145
3146 assembler->Bind(&smi_size);
3147
3148 if (IsFastPackedElementsKind(elements_kind)) {
3149 Label abort(assembler, Label::kDeferred);
3150 assembler->Branch(
3151 assembler->SmiEqual(size, assembler->SmiConstant(Smi::kZero)),
3152 &small_smi_size, &abort);
3153
3154 assembler->Bind(&abort);
3155 Node* reason =
3156 assembler->SmiConstant(Smi::FromInt(kAllocatingNonEmptyPackedArray));
3157 Node* context = assembler->Parameter(Descriptor::kContext);
3158 assembler->TailCallRuntime(Runtime::kAbort, context, reason);
3159 } else {
3160 int element_size =
3161 IsFastDoubleElementsKind(elements_kind) ? kDoubleSize : kPointerSize;
3162 int max_fast_elements =
3163 (kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - JSArray::kSize -
3164 AllocationMemento::kSize) /
3165 element_size;
3166 assembler->Branch(
3167 assembler->SmiAboveOrEqual(
3168 size, assembler->SmiConstant(Smi::FromInt(max_fast_elements))),
3169 &call_runtime, &small_smi_size);
3170 }
3171
3172 assembler->Bind(&small_smi_size);
3173 {
3174 Node* array = assembler->AllocateJSArray(
3175 elements_kind, array_map, size, size,
3176 mode == DONT_TRACK_ALLOCATION_SITE ? nullptr : allocation_site,
3177 CodeStubAssembler::SMI_PARAMETERS);
3178 assembler->Return(array);
3179 }
3180
3181 assembler->Bind(&call_runtime);
3182 {
3183 Node* context = assembler->Parameter(Descriptor::kContext);
3184 Node* function = assembler->Parameter(Descriptor::kFunction);
3185 Node* array_size = assembler->Parameter(Descriptor::kArraySizeSmiParameter);
3186 Node* allocation_site = assembler->Parameter(Descriptor::kAllocationSite);
3187 assembler->TailCallRuntime(Runtime::kNewArray, context, function,
3188 array_size, function, allocation_site);
3189 }
3190 }
3191 } // namespace
3192
GenerateAssembly(CodeStubAssembler * assembler) const3193 void ArraySingleArgumentConstructorStub::GenerateAssembly(
3194 CodeStubAssembler* assembler) const {
3195 typedef compiler::Node Node;
3196 Node* function = assembler->Parameter(Descriptor::kFunction);
3197 Node* native_context =
3198 assembler->LoadObjectField(function, JSFunction::kContextOffset);
3199 Node* array_map =
3200 assembler->LoadJSArrayElementsMap(elements_kind(), native_context);
3201 AllocationSiteMode mode = override_mode() == DISABLE_ALLOCATION_SITES
3202 ? DONT_TRACK_ALLOCATION_SITE
3203 : AllocationSite::GetMode(elements_kind());
3204 Node* allocation_site = assembler->Parameter(Descriptor::kAllocationSite);
3205 SingleArgumentConstructorCommon<Descriptor>(assembler, elements_kind(),
3206 array_map, allocation_site, mode);
3207 }
3208
GenerateAssembly(CodeStubAssembler * assembler) const3209 void InternalArraySingleArgumentConstructorStub::GenerateAssembly(
3210 CodeStubAssembler* assembler) const {
3211 typedef compiler::Node Node;
3212 Node* function = assembler->Parameter(Descriptor::kFunction);
3213 Node* array_map = assembler->LoadObjectField(
3214 function, JSFunction::kPrototypeOrInitialMapOffset);
3215 SingleArgumentConstructorCommon<Descriptor>(
3216 assembler, elements_kind(), array_map, assembler->UndefinedConstant(),
3217 DONT_TRACK_ALLOCATION_SITE);
3218 }
3219
// Grows |object|'s elements backing store to accommodate |key|, returning
// the new elements array; falls back to the runtime when the inline growth
// attempt bails out.
void GrowArrayElementsStub::GenerateAssembly(
    CodeStubAssembler* assembler) const {
  typedef compiler::Node Node;
  CodeStubAssembler::Label runtime(assembler,
                                   CodeStubAssembler::Label::kDeferred);

  Node* object = assembler->Parameter(Descriptor::kObject);
  Node* key = assembler->Parameter(Descriptor::kKey);
  Node* context = assembler->Parameter(Descriptor::kContext);
  ElementsKind kind = elements_kind();

  // Try to grow the capacity inline; jumps to |runtime| on failure.
  Node* elements = assembler->LoadElements(object);
  Node* new_elements =
      assembler->TryGrowElementsCapacity(object, elements, kind, key, &runtime);
  assembler->Return(new_elements);

  assembler->Bind(&runtime);
  // TODO(danno): Make this a tail call when the stub is only used from TurboFan
  // code. This mustn't be a tail call for now, since the caller site in lithium
  // creates a safepoint. This safepoint mustn't have a different number of
  // arguments on the stack in the case that a GC happens from the slow-case
  // allocation path (zero, since all the stubs inputs are in registers) and
  // when the call happens (it would be two in the tail call case due to the
  // tail call pushing the arguments on the stack for the runtime call). By not
  // tail-calling, the runtime call case also has zero arguments on the stack
  // for the stub frame.
  assembler->Return(assembler->CallRuntime(Runtime::kGrowArrayElements, context,
                                           object, key));
}
3249
// Platform stub; carries no state beyond the isolate.
ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
    : PlatformCodeStub(isolate) {}
3252
// Platform stub; carries no state beyond the isolate.
InternalArrayConstructorStub::InternalArrayConstructorStub(Isolate* isolate)
    : PlatformCodeStub(isolate) {}
3255
3256 } // namespace internal
3257 } // namespace v8
3258