// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/interpreter/interpreter.h"

#include <fstream>
#include <memory>

#include "src/ast/prettyprinter.h"
#include "src/code-factory.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/factory.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-assembler.h"
#include "src/interpreter/interpreter-intrinsics.h"
#include "src/log.h"
#include "src/zone/zone.h"

namespace v8 {
namespace internal {
namespace interpreter {

using compiler::Node;
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
typedef InterpreterAssembler::Arg Arg;

#define __ assembler->
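// Within the Do<Bytecode> generator functions below, '__' forwards to the
// InterpreterAssembler* parameter, mirroring the macro-assembler style used
// elsewhere in V8.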

class InterpreterCompilationJob final : public CompilationJob {
 public:
  explicit InterpreterCompilationJob(CompilationInfo* info);

 protected:
  Status PrepareJobImpl() final;
  Status ExecuteJobImpl() final;
  Status FinalizeJobImpl() final;

 private:
  BytecodeGenerator* generator() { return &generator_; }

  BytecodeGenerator generator_;

  DISALLOW_COPY_AND_ASSIGN(InterpreterCompilationJob);
};

Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) {
  memset(dispatch_table_, 0, sizeof(dispatch_table_));
}

void Interpreter::Initialize() {
  if (!ShouldInitializeDispatchTable()) return;
  Zone zone(isolate_->allocator(), ZONE_NAME);
  HandleScope scope(isolate_);

  if (FLAG_trace_ignition_dispatches) {
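    // The counters table is a square matrix: the counter for a dispatch from
    // bytecode A to bytecode B lives at [A * kBytecodeCount + B] (see
    // GetDispatchCounter() below).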
    static const int kBytecodeCount = static_cast<int>(Bytecode::kLast) + 1;
    bytecode_dispatch_counters_table_.reset(
        new uintptr_t[kBytecodeCount * kBytecodeCount]);
    memset(bytecode_dispatch_counters_table_.get(), 0,
           sizeof(uintptr_t) * kBytecodeCount * kBytecodeCount);
  }

  // Generate bytecode handlers for all bytecodes and scales.
  const OperandScale kOperandScales[] = {
#define VALUE(Name, _) OperandScale::k##Name,
      OPERAND_SCALE_LIST(VALUE)
#undef VALUE
  };

  for (OperandScale operand_scale : kOperandScales) {
#define GENERATE_CODE(Name, ...)                                               \
  {                                                                            \
    if (Bytecodes::BytecodeHasHandler(Bytecode::k##Name, operand_scale)) {     \
      InterpreterAssembler assembler(isolate_, &zone, Bytecode::k##Name,       \
                                     operand_scale);                           \
      Do##Name(&assembler);                                                    \
      Handle<Code> code = assembler.GenerateCode();                            \
      size_t index = GetDispatchTableIndex(Bytecode::k##Name, operand_scale);  \
      dispatch_table_[index] = code->entry();                                  \
      TraceCodegen(code);                                                      \
      PROFILE(                                                                 \
          isolate_,                                                            \
          CodeCreateEvent(                                                     \
              CodeEventListener::BYTECODE_HANDLER_TAG,                         \
              AbstractCode::cast(*code),                                       \
              Bytecodes::ToString(Bytecode::k##Name, operand_scale).c_str())); \
    }                                                                          \
  }
    BYTECODE_LIST(GENERATE_CODE)
#undef GENERATE_CODE
  }

  // Fill unused entries with the illegal bytecode handler.
  size_t illegal_index =
      GetDispatchTableIndex(Bytecode::kIllegal, OperandScale::kSingle);
  for (size_t index = 0; index < arraysize(dispatch_table_); ++index) {
    if (dispatch_table_[index] == nullptr) {
      dispatch_table_[index] = dispatch_table_[illegal_index];
    }
  }

  // Initialization should have been successful.
  DCHECK(IsDispatchTableInitialized());
}

Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
                                      OperandScale operand_scale) {
  DCHECK(IsDispatchTableInitialized());
  DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
  size_t index = GetDispatchTableIndex(bytecode, operand_scale);
  Address code_entry = dispatch_table_[index];
  return Code::GetCodeFromTargetAddress(code_entry);
}

// static
size_t Interpreter::GetDispatchTableIndex(Bytecode bytecode,
                                          OperandScale operand_scale) {
  static const size_t kEntriesPerOperandScale = 1u << kBitsPerByte;
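  // Each operand scale gets its own block of 256 consecutive entries (one per
  // possible bytecode byte), laid out in the order single, double, quadruple.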
  size_t index = static_cast<size_t>(bytecode);
  switch (operand_scale) {
    case OperandScale::kSingle:
      return index;
    case OperandScale::kDouble:
      return index + kEntriesPerOperandScale;
    case OperandScale::kQuadruple:
      return index + 2 * kEntriesPerOperandScale;
  }
  UNREACHABLE();
  return 0;
}

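// The dispatch table stores raw code entry addresses rather than Code objects,
// so for GC visiting each non-empty entry is converted back to its Code object
// and re-encoded in case the visitor moved it.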
void Interpreter::IterateDispatchTable(ObjectVisitor* v) {
  for (int i = 0; i < kDispatchTableSize; i++) {
    Address code_entry = dispatch_table_[i];
    Object* code = code_entry == nullptr
                       ? nullptr
                       : Code::GetCodeFromTargetAddress(code_entry);
    Object* old_code = code;
    v->VisitPointer(&code);
    if (code != old_code) {
      dispatch_table_[i] = reinterpret_cast<Code*>(code)->entry();
    }
  }
}

// static
int Interpreter::InterruptBudget() {
  return FLAG_interrupt_budget * kCodeSizeMultiplier;
}

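// The job passes "Ignition" as its compiler name to the CompilationJob base
// class, which labels this job in compiler tracing and statistics.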
InterpreterCompilationJob::InterpreterCompilationJob(CompilationInfo* info)
    : CompilationJob(info->isolate(), info, "Ignition"), generator_(info) {}

InterpreterCompilationJob::Status InterpreterCompilationJob::PrepareJobImpl() {
  if (FLAG_print_bytecode || FLAG_print_ast) {
    OFStream os(stdout);
    std::unique_ptr<char[]> name = info()->GetDebugName();
    os << "[generating bytecode for function: " << name.get() << "]"
       << std::endl
       << std::flush;
  }

#ifdef DEBUG
  if (info()->parse_info() && FLAG_print_ast) {
    OFStream os(stdout);
    os << "--- AST ---" << std::endl
       << AstPrinter(info()->isolate()).PrintProgram(info()->literal())
       << std::endl
       << std::flush;
  }
#endif  // DEBUG

  return SUCCEEDED;
}

InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
  // TODO(5203): These timers aren't thread safe, move to using the CompilerJob
  // timers.
  RuntimeCallTimerScope runtimeTimer(info()->isolate(),
                                     &RuntimeCallStats::CompileIgnition);
  TimerEventScope<TimerEventCompileIgnition> timer(info()->isolate());
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileIgnition");

  generator()->GenerateBytecode(stack_limit());

  if (generator()->HasStackOverflow()) {
    return FAILED;
  }
  return SUCCEEDED;
}

InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl() {
  Handle<BytecodeArray> bytecodes = generator()->FinalizeBytecode(isolate());
  if (generator()->HasStackOverflow()) {
    return FAILED;
  }

  CodeGenerator::MakeCodePrologue(info(), "interpreter");

  if (FLAG_print_bytecode) {
    OFStream os(stdout);
    bytecodes->Print(os);
    os << std::flush;
  }

  info()->SetBytecodeArray(bytecodes);
  info()->SetCode(info()->isolate()->builtins()->InterpreterEntryTrampoline());
  return SUCCEEDED;
}

CompilationJob* Interpreter::NewCompilationJob(CompilationInfo* info) {
  return new InterpreterCompilationJob(info);
}

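// Checking the first entry is sufficient: Initialize() fills every slot,
// falling back to the Illegal bytecode handler for entries that have no
// dedicated handler.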
bool Interpreter::IsDispatchTableInitialized() {
  return dispatch_table_[0] != nullptr;
}

bool Interpreter::ShouldInitializeDispatchTable() {
  if (FLAG_trace_ignition || FLAG_trace_ignition_codegen ||
      FLAG_trace_ignition_dispatches) {
    // Regenerate the table to add bytecode tracing operations, print the
    // assembly code generated by TurboFan, or instrument handlers with
    // dispatch counters.
    return true;
  }
  return !IsDispatchTableInitialized();
}

void Interpreter::TraceCodegen(Handle<Code> code) {
#ifdef ENABLE_DISASSEMBLER
  if (FLAG_trace_ignition_codegen) {
    OFStream os(stdout);
    code->Disassemble(nullptr, os);
    os << std::flush;
  }
#endif  // ENABLE_DISASSEMBLER
}

const char* Interpreter::LookupNameOfBytecodeHandler(Code* code) {
#ifdef ENABLE_DISASSEMBLER
#define RETURN_NAME(Name, ...)                                 \
  if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == \
      code->entry()) {                                         \
    return #Name;                                              \
  }
  BYTECODE_LIST(RETURN_NAME)
#undef RETURN_NAME
#endif  // ENABLE_DISASSEMBLER
  return nullptr;
}

uintptr_t Interpreter::GetDispatchCounter(Bytecode from, Bytecode to) const {
  int from_index = Bytecodes::ToByte(from);
  int to_index = Bytecodes::ToByte(to);
  return bytecode_dispatch_counters_table_[from_index * kNumberOfBytecodes +
                                           to_index];
}

Local<v8::Object> Interpreter::GetDispatchCountersObject() {
  v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(isolate_);
  Local<v8::Context> context = isolate->GetCurrentContext();

  Local<v8::Object> counters_map = v8::Object::New(isolate);

  // Output is a JSON-encoded object of objects.
  //
  // The keys of the top-level object are source bytecodes, and the
  // corresponding values are objects. The keys of these nested objects are
  // the destinations of the dispatch, and the associated value is a counter
  // for the corresponding source-destination dispatch pair.
  //
  // Only non-zero counters are written to file, but an entry in the top-level
  // object is always present, even if the value is empty because all counters
  // for that source are zero.

  for (int from_index = 0; from_index < kNumberOfBytecodes; ++from_index) {
    Bytecode from_bytecode = Bytecodes::FromByte(from_index);
    Local<v8::Object> counters_row = v8::Object::New(isolate);

    for (int to_index = 0; to_index < kNumberOfBytecodes; ++to_index) {
      Bytecode to_bytecode = Bytecodes::FromByte(to_index);
      uintptr_t counter = GetDispatchCounter(from_bytecode, to_bytecode);

      if (counter > 0) {
        std::string to_name = Bytecodes::ToString(to_bytecode);
        Local<v8::String> to_name_object =
            v8::String::NewFromUtf8(isolate, to_name.c_str(),
                                    NewStringType::kNormal)
                .ToLocalChecked();
        Local<v8::Number> counter_object = v8::Number::New(isolate, counter);
        CHECK(counters_row
                  ->DefineOwnProperty(context, to_name_object, counter_object)
                  .IsJust());
      }
    }

    std::string from_name = Bytecodes::ToString(from_bytecode);
    Local<v8::String> from_name_object =
        v8::String::NewFromUtf8(isolate, from_name.c_str(),
                                NewStringType::kNormal)
            .ToLocalChecked();

    CHECK(
        counters_map->DefineOwnProperty(context, from_name_object, counters_row)
            .IsJust());
  }

  return counters_map;
}

// LdaZero
//
// Load literal '0' into the accumulator.
void Interpreter::DoLdaZero(InterpreterAssembler* assembler) {
  Node* zero_value = __ NumberConstant(0.0);
  __ SetAccumulator(zero_value);
  __ Dispatch();
}

// LdaSmi <imm>
//
// Load an integer literal into the accumulator as a Smi.
void Interpreter::DoLdaSmi(InterpreterAssembler* assembler) {
  Node* raw_int = __ BytecodeOperandImm(0);
  Node* smi_int = __ SmiTag(raw_int);
  __ SetAccumulator(smi_int);
  __ Dispatch();
}

// LdaConstant <idx>
//
// Load constant literal at |idx| in the constant pool into the accumulator.
void Interpreter::DoLdaConstant(InterpreterAssembler* assembler) {
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  __ SetAccumulator(constant);
  __ Dispatch();
}

// LdaUndefined
//
// Load Undefined into the accumulator.
void Interpreter::DoLdaUndefined(InterpreterAssembler* assembler) {
  Node* undefined_value =
      __ HeapConstant(isolate_->factory()->undefined_value());
  __ SetAccumulator(undefined_value);
  __ Dispatch();
}

// LdaNull
//
// Load Null into the accumulator.
void Interpreter::DoLdaNull(InterpreterAssembler* assembler) {
  Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
  __ SetAccumulator(null_value);
  __ Dispatch();
}

// LdaTheHole
//
// Load TheHole into the accumulator.
void Interpreter::DoLdaTheHole(InterpreterAssembler* assembler) {
  Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
  __ SetAccumulator(the_hole_value);
  __ Dispatch();
}

// LdaTrue
//
// Load True into the accumulator.
void Interpreter::DoLdaTrue(InterpreterAssembler* assembler) {
  Node* true_value = __ HeapConstant(isolate_->factory()->true_value());
  __ SetAccumulator(true_value);
  __ Dispatch();
}

// LdaFalse
//
// Load False into the accumulator.
void Interpreter::DoLdaFalse(InterpreterAssembler* assembler) {
  Node* false_value = __ HeapConstant(isolate_->factory()->false_value());
  __ SetAccumulator(false_value);
  __ Dispatch();
}

// Ldar <src>
//
// Load accumulator with value from register <src>.
void Interpreter::DoLdar(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* value = __ LoadRegister(reg_index);
  __ SetAccumulator(value);
  __ Dispatch();
}

// Star <dst>
//
// Store accumulator to register <dst>.
void Interpreter::DoStar(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* accumulator = __ GetAccumulator();
  __ StoreRegister(accumulator, reg_index);
  __ Dispatch();
}

// Mov <src> <dst>
//
// Stores the value of register <src> to register <dst>.
void Interpreter::DoMov(InterpreterAssembler* assembler) {
  Node* src_index = __ BytecodeOperandReg(0);
  Node* src_value = __ LoadRegister(src_index);
  Node* dst_index = __ BytecodeOperandReg(1);
  __ StoreRegister(src_value, dst_index);
  __ Dispatch();
}

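// Helper shared by the LdaGlobal family of handlers below: emits a call to the
// LoadGlobalIC for the given feedback slot and returns the result node.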
Node* Interpreter::BuildLoadGlobal(Callable ic, Node* context,
                                   Node* feedback_slot,
                                   InterpreterAssembler* assembler) {
  typedef LoadGlobalWithVectorDescriptor Descriptor;

  // Load the global via the LoadGlobalIC.
  Node* code_target = __ HeapConstant(ic.code());
  Node* smi_slot = __ SmiTag(feedback_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  return __ CallStub(ic.descriptor(), code_target, context,
                     Arg(Descriptor::kSlot, smi_slot),
                     Arg(Descriptor::kVector, type_feedback_vector));
}

// LdaGlobal <slot>
//
// Load the global with name in constant pool entry <name_index> into the
// accumulator using FeedbackVector slot <slot> outside of a typeof.
void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
  Callable ic =
      CodeFactory::LoadGlobalICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF);

  Node* context = __ GetContext();

  Node* raw_slot = __ BytecodeOperandIdx(0);
  Node* result = BuildLoadGlobal(ic, context, raw_slot, assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdaGlobalInsideTypeof <slot>
//
// Load the global with name in constant pool entry <name_index> into the
// accumulator using FeedbackVector slot <slot> inside of a typeof.
void Interpreter::DoLdaGlobalInsideTypeof(InterpreterAssembler* assembler) {
  Callable ic =
      CodeFactory::LoadGlobalICInOptimizedCode(isolate_, INSIDE_TYPEOF);

  Node* context = __ GetContext();

  Node* raw_slot = __ BytecodeOperandIdx(0);
  Node* result = BuildLoadGlobal(ic, context, raw_slot, assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}

void Interpreter::DoStaGlobal(Callable ic, InterpreterAssembler* assembler) {
  typedef StoreWithVectorDescriptor Descriptor;
  // Get the global object.
  Node* context = __ GetContext();
  Node* native_context = __ LoadNativeContext(context);
  Node* global =
      __ LoadContextElement(native_context, Context::EXTENSION_INDEX);

  // Store the global via the StoreIC.
  Node* code_target = __ HeapConstant(ic.code());
  Node* constant_index = __ BytecodeOperandIdx(0);
  Node* name = __ LoadConstantPoolEntry(constant_index);
  Node* value = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(1);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  __ CallStub(ic.descriptor(), code_target, context,
              Arg(Descriptor::kReceiver, global), Arg(Descriptor::kName, name),
              Arg(Descriptor::kValue, value), Arg(Descriptor::kSlot, smi_slot),
              Arg(Descriptor::kVector, type_feedback_vector));
  __ Dispatch();
}

// StaGlobalSloppy <name_index> <slot>
//
// Store the value in the accumulator into the global with name in constant
// pool entry <name_index> using FeedbackVector slot <slot> in sloppy mode.
void Interpreter::DoStaGlobalSloppy(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY);
  DoStaGlobal(ic, assembler);
}

// StaGlobalStrict <name_index> <slot>
//
// Store the value in the accumulator into the global with name in constant
// pool entry <name_index> using FeedbackVector slot <slot> in strict mode.
void Interpreter::DoStaGlobalStrict(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT);
  DoStaGlobal(ic, assembler);
}

// LdaContextSlot <context> <slot_index> <depth>
//
// Load the object in |slot_index| of the context at |depth| in the context
// chain starting at |context| into the accumulator.
void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* context = __ LoadRegister(reg_index);
  Node* slot_index = __ BytecodeOperandIdx(1);
  Node* depth = __ BytecodeOperandUImm(2);
  Node* slot_context = __ GetContextAtDepth(context, depth);
  Node* result = __ LoadContextElement(slot_context, slot_index);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdaCurrentContextSlot <slot_index>
//
// Load the object in |slot_index| of the current context into the accumulator.
void Interpreter::DoLdaCurrentContextSlot(InterpreterAssembler* assembler) {
  Node* slot_index = __ BytecodeOperandIdx(0);
  Node* slot_context = __ GetContext();
  Node* result = __ LoadContextElement(slot_context, slot_index);
  __ SetAccumulator(result);
  __ Dispatch();
}

// StaContextSlot <context> <slot_index> <depth>
//
// Stores the object in the accumulator into |slot_index| of the context at
// |depth| in the context chain starting at |context|.
void Interpreter::DoStaContextSlot(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* context = __ LoadRegister(reg_index);
  Node* slot_index = __ BytecodeOperandIdx(1);
  Node* depth = __ BytecodeOperandUImm(2);
  Node* slot_context = __ GetContextAtDepth(context, depth);
  __ StoreContextElement(slot_context, slot_index, value);
  __ Dispatch();
}

// StaCurrentContextSlot <slot_index>
//
// Stores the object in the accumulator into |slot_index| of the current
// context.
void Interpreter::DoStaCurrentContextSlot(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* slot_index = __ BytecodeOperandIdx(0);
  Node* slot_context = __ GetContext();
  __ StoreContextElement(slot_context, slot_index, value);
  __ Dispatch();
}

void Interpreter::DoLdaLookupSlot(Runtime::FunctionId function_id,
                                  InterpreterAssembler* assembler) {
  Node* name_index = __ BytecodeOperandIdx(0);
  Node* name = __ LoadConstantPoolEntry(name_index);
  Node* context = __ GetContext();
  Node* result = __ CallRuntime(function_id, context, name);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdaLookupSlot <name_index>
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically.
void Interpreter::DoLdaLookupSlot(InterpreterAssembler* assembler) {
  DoLdaLookupSlot(Runtime::kLoadLookupSlot, assembler);
}

// LdaLookupSlotInsideTypeof <name_index>
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically without causing a NoReferenceError.
void Interpreter::DoLdaLookupSlotInsideTypeof(InterpreterAssembler* assembler) {
  DoLdaLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
}

void Interpreter::DoLdaLookupContextSlot(Runtime::FunctionId function_id,
                                         InterpreterAssembler* assembler) {
  Node* context = __ GetContext();
  Node* name_index = __ BytecodeOperandIdx(0);
  Node* slot_index = __ BytecodeOperandIdx(1);
  Node* depth = __ BytecodeOperandUImm(2);

  Label slowpath(assembler, Label::kDeferred);

  // Check for context extensions to allow the fast path.
  __ GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath);

  // Fast path does a normal load context.
  {
    Node* slot_context = __ GetContextAtDepth(context, depth);
    Node* result = __ LoadContextElement(slot_context, slot_index);
    __ SetAccumulator(result);
    __ Dispatch();
  }

  // Slow path when we have to call out to the runtime.
  __ Bind(&slowpath);
  {
    Node* name = __ LoadConstantPoolEntry(name_index);
    Node* result = __ CallRuntime(function_id, context, name);
    __ SetAccumulator(result);
    __ Dispatch();
  }
}

// LdaLookupContextSlot <name_index> <slot_index> <depth>
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically.
void Interpreter::DoLdaLookupContextSlot(InterpreterAssembler* assembler) {
  DoLdaLookupContextSlot(Runtime::kLoadLookupSlot, assembler);
}

// LdaLookupContextSlotInsideTypeof <name_index> <slot_index> <depth>
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically without causing a NoReferenceError.
void Interpreter::DoLdaLookupContextSlotInsideTypeof(
    InterpreterAssembler* assembler) {
  DoLdaLookupContextSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
}

void Interpreter::DoLdaLookupGlobalSlot(Runtime::FunctionId function_id,
                                        InterpreterAssembler* assembler) {
  Node* context = __ GetContext();
  Node* name_index = __ BytecodeOperandIdx(0);
  Node* feedback_slot = __ BytecodeOperandIdx(1);
  Node* depth = __ BytecodeOperandUImm(2);

  Label slowpath(assembler, Label::kDeferred);

  // Check for context extensions to allow the fast path.
  __ GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath);

  // Fast path does a normal load global.
  {
    Callable ic = CodeFactory::LoadGlobalICInOptimizedCode(
        isolate_, function_id == Runtime::kLoadLookupSlotInsideTypeof
                      ? INSIDE_TYPEOF
                      : NOT_INSIDE_TYPEOF);
    Node* result = BuildLoadGlobal(ic, context, feedback_slot, assembler);
    __ SetAccumulator(result);
    __ Dispatch();
  }

  // Slow path when we have to call out to the runtime.
  __ Bind(&slowpath);
  {
    Node* name = __ LoadConstantPoolEntry(name_index);
    Node* result = __ CallRuntime(function_id, context, name);
    __ SetAccumulator(result);
    __ Dispatch();
  }
}

// LdaLookupGlobalSlot <name_index> <feedback_slot> <depth>
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically.
void Interpreter::DoLdaLookupGlobalSlot(InterpreterAssembler* assembler) {
  DoLdaLookupGlobalSlot(Runtime::kLoadLookupSlot, assembler);
}

// LdaLookupGlobalSlotInsideTypeof <name_index> <feedback_slot> <depth>
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically without causing a NoReferenceError.
void Interpreter::DoLdaLookupGlobalSlotInsideTypeof(
    InterpreterAssembler* assembler) {
  DoLdaLookupGlobalSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
}

void Interpreter::DoStaLookupSlot(LanguageMode language_mode,
                                  InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* index = __ BytecodeOperandIdx(0);
  Node* name = __ LoadConstantPoolEntry(index);
  Node* context = __ GetContext();
  Node* result = __ CallRuntime(is_strict(language_mode)
                                    ? Runtime::kStoreLookupSlot_Strict
                                    : Runtime::kStoreLookupSlot_Sloppy,
                                context, name, value);
  __ SetAccumulator(result);
  __ Dispatch();
}

// StaLookupSlotSloppy <name_index>
//
// Store the object in accumulator to the object with the name in constant
// pool entry |name_index| in sloppy mode.
void Interpreter::DoStaLookupSlotSloppy(InterpreterAssembler* assembler) {
  DoStaLookupSlot(LanguageMode::SLOPPY, assembler);
}

// StaLookupSlotStrict <name_index>
//
// Store the object in accumulator to the object with the name in constant
// pool entry |name_index| in strict mode.
void Interpreter::DoStaLookupSlotStrict(InterpreterAssembler* assembler) {
  DoStaLookupSlot(LanguageMode::STRICT, assembler);
}

// LdaNamedProperty <object> <name_index> <slot>
//
// Calls the LoadIC at FeedbackVector slot <slot> for <object> and the name at
// constant pool entry <name_index>.
void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
  typedef LoadWithVectorDescriptor Descriptor;
  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
  Node* code_target = __ HeapConstant(ic.code());
  Node* register_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(register_index);
  Node* constant_index = __ BytecodeOperandIdx(1);
  Node* name = __ LoadConstantPoolEntry(constant_index);
  Node* raw_slot = __ BytecodeOperandIdx(2);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  Node* context = __ GetContext();
  Node* result = __ CallStub(
      ic.descriptor(), code_target, context, Arg(Descriptor::kReceiver, object),
      Arg(Descriptor::kName, name), Arg(Descriptor::kSlot, smi_slot),
      Arg(Descriptor::kVector, type_feedback_vector));
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdaKeyedProperty <object> <slot>
//
// Calls the KeyedLoadIC at FeedbackVector slot <slot> for <object> and the key
// in the accumulator.
void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) {
  typedef LoadWithVectorDescriptor Descriptor;
  Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
  Node* code_target = __ HeapConstant(ic.code());
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(reg_index);
  Node* name = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(1);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  Node* context = __ GetContext();
  Node* result = __ CallStub(
      ic.descriptor(), code_target, context, Arg(Descriptor::kReceiver, object),
      Arg(Descriptor::kName, name), Arg(Descriptor::kSlot, smi_slot),
      Arg(Descriptor::kVector, type_feedback_vector));
  __ SetAccumulator(result);
  __ Dispatch();
}

void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
  typedef StoreWithVectorDescriptor Descriptor;
  Node* code_target = __ HeapConstant(ic.code());
  Node* object_reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(object_reg_index);
  Node* constant_index = __ BytecodeOperandIdx(1);
  Node* name = __ LoadConstantPoolEntry(constant_index);
  Node* value = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(2);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  Node* context = __ GetContext();
  __ CallStub(ic.descriptor(), code_target, context,
              Arg(Descriptor::kReceiver, object), Arg(Descriptor::kName, name),
              Arg(Descriptor::kValue, value), Arg(Descriptor::kSlot, smi_slot),
              Arg(Descriptor::kVector, type_feedback_vector));
  __ Dispatch();
}

// StaNamedPropertySloppy <object> <name_index> <slot>
//
// Calls the sloppy mode StoreIC at FeedbackVector slot <slot> for <object> and
// the name in constant pool entry <name_index> with the value in the
// accumulator.
void Interpreter::DoStaNamedPropertySloppy(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY);
  DoStoreIC(ic, assembler);
}

// StaNamedPropertyStrict <object> <name_index> <slot>
//
// Calls the strict mode StoreIC at FeedbackVector slot <slot> for <object> and
// the name in constant pool entry <name_index> with the value in the
// accumulator.
void Interpreter::DoStaNamedPropertyStrict(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT);
  DoStoreIC(ic, assembler);
}

void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) {
  typedef StoreWithVectorDescriptor Descriptor;
  Node* code_target = __ HeapConstant(ic.code());
  Node* object_reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(object_reg_index);
  Node* name_reg_index = __ BytecodeOperandReg(1);
  Node* name = __ LoadRegister(name_reg_index);
  Node* value = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(2);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  Node* context = __ GetContext();
  __ CallStub(ic.descriptor(), code_target, context,
              Arg(Descriptor::kReceiver, object), Arg(Descriptor::kName, name),
              Arg(Descriptor::kValue, value), Arg(Descriptor::kSlot, smi_slot),
              Arg(Descriptor::kVector, type_feedback_vector));
  __ Dispatch();
}

// StaKeyedPropertySloppy <object> <key> <slot>
//
// Calls the sloppy mode KeyedStoreIC at FeedbackVector slot <slot> for
// <object> and the key <key> with the value in the accumulator.
void Interpreter::DoStaKeyedPropertySloppy(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY);
  DoKeyedStoreIC(ic, assembler);
}

// StaKeyedPropertyStrict <object> <key> <slot>
//
// Calls the strict mode KeyedStoreIC at FeedbackVector slot <slot> for
// <object> and the key <key> with the value in the accumulator.
void Interpreter::DoStaKeyedPropertyStrict(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT);
  DoKeyedStoreIC(ic, assembler);
}

// LdaModuleVariable <cell_index> <depth>
//
// Load the contents of a module variable into the accumulator.  The variable
// is identified by <cell_index>.  <depth> is the depth of the current context
// relative to the module context.
void Interpreter::DoLdaModuleVariable(InterpreterAssembler* assembler) {
  Node* cell_index = __ BytecodeOperandImm(0);
  Node* depth = __ BytecodeOperandUImm(1);

  Node* module_context = __ GetContextAtDepth(__ GetContext(), depth);
  Node* module =
      __ LoadContextElement(module_context, Context::EXTENSION_INDEX);

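  // Positive cell indices denote regular exports (array index cell_index - 1),
  // negative ones regular imports (array index -cell_index - 1).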
  Label if_export(assembler), if_import(assembler), end(assembler);
  __ Branch(__ IntPtrGreaterThan(cell_index, __ IntPtrConstant(0)), &if_export,
            &if_import);

  __ Bind(&if_export);
  {
    Node* regular_exports =
        __ LoadObjectField(module, Module::kRegularExportsOffset);
    // The actual array index is (cell_index - 1).
    Node* export_index = __ IntPtrSub(cell_index, __ IntPtrConstant(1));
    Node* cell = __ LoadFixedArrayElement(regular_exports, export_index);
    __ SetAccumulator(__ LoadObjectField(cell, Cell::kValueOffset));
    __ Goto(&end);
  }

  __ Bind(&if_import);
  {
    Node* regular_imports =
        __ LoadObjectField(module, Module::kRegularImportsOffset);
    // The actual array index is (-cell_index - 1).
    Node* import_index = __ IntPtrSub(__ IntPtrConstant(-1), cell_index);
    Node* cell = __ LoadFixedArrayElement(regular_imports, import_index);
    __ SetAccumulator(__ LoadObjectField(cell, Cell::kValueOffset));
    __ Goto(&end);
  }

  __ Bind(&end);
  __ Dispatch();
}

// StaModuleVariable <cell_index> <depth>
//
// Store accumulator to the module variable identified by <cell_index>.
// <depth> is the depth of the current context relative to the module context.
void Interpreter::DoStaModuleVariable(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* cell_index = __ BytecodeOperandImm(0);
  Node* depth = __ BytecodeOperandUImm(1);

  Node* module_context = __ GetContextAtDepth(__ GetContext(), depth);
  Node* module =
      __ LoadContextElement(module_context, Context::EXTENSION_INDEX);

  Label if_export(assembler), if_import(assembler), end(assembler);
  __ Branch(__ IntPtrGreaterThan(cell_index, __ IntPtrConstant(0)), &if_export,
            &if_import);

  __ Bind(&if_export);
  {
    Node* regular_exports =
        __ LoadObjectField(module, Module::kRegularExportsOffset);
    // The actual array index is (cell_index - 1).
    Node* export_index = __ IntPtrSub(cell_index, __ IntPtrConstant(1));
    Node* cell = __ LoadFixedArrayElement(regular_exports, export_index);
    __ StoreObjectField(cell, Cell::kValueOffset, value);
    __ Goto(&end);
  }

  __ Bind(&if_import);
  {
    // Not supported (probably never).
    __ Abort(kUnsupportedModuleOperation);
    __ Goto(&end);
  }

  __ Bind(&end);
  __ Dispatch();
}

// PushContext <context>
//
// Saves the current context in <context>, and pushes the accumulator as the
// new current context.
void Interpreter::DoPushContext(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* new_context = __ GetAccumulator();
  Node* old_context = __ GetContext();
  __ StoreRegister(old_context, reg_index);
  __ SetContext(new_context);
  __ Dispatch();
}

// PopContext <context>
//
// Pops the current context and sets <context> as the new context.
void Interpreter::DoPopContext(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* context = __ LoadRegister(reg_index);
  __ SetContext(context);
  __ Dispatch();
}

// TODO(mythria): Remove this function once all CompareOps record type feedback.
void Interpreter::DoCompareOp(Token::Value compare_op,
                              InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* lhs = __ LoadRegister(reg_index);
  Node* rhs = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result;
  switch (compare_op) {
    case Token::IN:
      result = assembler->HasProperty(rhs, lhs, context);
      break;
    case Token::INSTANCEOF:
      result = assembler->InstanceOf(lhs, rhs, context);
      break;
    default:
      UNREACHABLE();
  }
  __ SetAccumulator(result);
  __ Dispatch();
}

template <class Generator>
void Interpreter::DoBinaryOpWithFeedback(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* lhs = __ LoadRegister(reg_index);
  Node* rhs = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* slot_index = __ BytecodeOperandIdx(1);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  Node* result = Generator::Generate(assembler, lhs, rhs, slot_index,
                                     type_feedback_vector, context);
  __ SetAccumulator(result);
  __ Dispatch();
}

void Interpreter::DoCompareOpWithFeedback(Token::Value compare_op,
                                          InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* lhs = __ LoadRegister(reg_index);
  Node* rhs = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* slot_index = __ BytecodeOperandIdx(1);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();

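  // Collect CompareOperationFeedback (kSignedSmall, kNumber or kAny) for both
  // operands before performing the comparison below, unless the slot index is
  // 0, in which case feedback collection is skipped entirely.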
  // TODO(interpreter): the only reason this check is here is because we
  // sometimes emit comparisons that shouldn't collect feedback (e.g.
  // try-finally blocks and generators), and we could get rid of this by
  // introducing Smi equality tests.
  Label skip_feedback_update(assembler);
  __ GotoIf(__ WordEqual(slot_index, __ IntPtrConstant(0)),
            &skip_feedback_update);

  Variable var_type_feedback(assembler, MachineRepresentation::kWord32);
  Label lhs_is_smi(assembler), lhs_is_not_smi(assembler),
      gather_rhs_type(assembler), do_compare(assembler);
  __ Branch(__ TaggedIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);

  __ Bind(&lhs_is_smi);
  var_type_feedback.Bind(
      __ Int32Constant(CompareOperationFeedback::kSignedSmall));
  __ Goto(&gather_rhs_type);

  __ Bind(&lhs_is_not_smi);
  {
    Label lhs_is_number(assembler), lhs_is_not_number(assembler);
    Node* lhs_map = __ LoadMap(lhs);
    __ Branch(__ WordEqual(lhs_map, __ HeapNumberMapConstant()), &lhs_is_number,
              &lhs_is_not_number);

    __ Bind(&lhs_is_number);
    var_type_feedback.Bind(__ Int32Constant(CompareOperationFeedback::kNumber));
    __ Goto(&gather_rhs_type);

    __ Bind(&lhs_is_not_number);
    var_type_feedback.Bind(__ Int32Constant(CompareOperationFeedback::kAny));
    __ Goto(&do_compare);
  }

  __ Bind(&gather_rhs_type);
  {
    Label rhs_is_smi(assembler);
    __ GotoIf(__ TaggedIsSmi(rhs), &rhs_is_smi);

    Node* rhs_map = __ LoadMap(rhs);
    Node* rhs_type =
        __ Select(__ WordEqual(rhs_map, __ HeapNumberMapConstant()),
                  __ Int32Constant(CompareOperationFeedback::kNumber),
                  __ Int32Constant(CompareOperationFeedback::kAny));
    var_type_feedback.Bind(__ Word32Or(var_type_feedback.value(), rhs_type));
    __ Goto(&do_compare);

    __ Bind(&rhs_is_smi);
    var_type_feedback.Bind(
        __ Word32Or(var_type_feedback.value(),
                    __ Int32Constant(CompareOperationFeedback::kSignedSmall)));
    __ Goto(&do_compare);
  }

  __ Bind(&do_compare);
  __ UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
                    slot_index);
  __ Goto(&skip_feedback_update);

  __ Bind(&skip_feedback_update);
  Node* result;
  switch (compare_op) {
    case Token::EQ:
      result = assembler->Equal(CodeStubAssembler::kDontNegateResult, lhs, rhs,
                                context);
      break;
    case Token::NE:
      result =
          assembler->Equal(CodeStubAssembler::kNegateResult, lhs, rhs, context);
      break;
    case Token::EQ_STRICT:
      result = assembler->StrictEqual(CodeStubAssembler::kDontNegateResult, lhs,
                                      rhs, context);
      break;
    case Token::LT:
      result = assembler->RelationalComparison(CodeStubAssembler::kLessThan,
                                               lhs, rhs, context);
      break;
    case Token::GT:
      result = assembler->RelationalComparison(CodeStubAssembler::kGreaterThan,
                                               lhs, rhs, context);
      break;
    case Token::LTE:
      result = assembler->RelationalComparison(
          CodeStubAssembler::kLessThanOrEqual, lhs, rhs, context);
      break;
    case Token::GTE:
      result = assembler->RelationalComparison(
          CodeStubAssembler::kGreaterThanOrEqual, lhs, rhs, context);
      break;
    default:
      UNREACHABLE();
  }
  __ SetAccumulator(result);
  __ Dispatch();
}

// Add <src>
//
// Add register <src> to accumulator.
void Interpreter::DoAdd(InterpreterAssembler* assembler) {
  DoBinaryOpWithFeedback<AddWithFeedbackStub>(assembler);
}

// Sub <src>
//
// Subtract register <src> from accumulator.
void Interpreter::DoSub(InterpreterAssembler* assembler) {
  DoBinaryOpWithFeedback<SubtractWithFeedbackStub>(assembler);
}

// Mul <src>
//
// Multiply accumulator by register <src>.
void Interpreter::DoMul(InterpreterAssembler* assembler) {
  DoBinaryOpWithFeedback<MultiplyWithFeedbackStub>(assembler);
}

// Div <src>
//
// Divide register <src> by accumulator.
void Interpreter::DoDiv(InterpreterAssembler* assembler) {
  DoBinaryOpWithFeedback<DivideWithFeedbackStub>(assembler);
}

// Mod <src>
//
// Modulo register <src> by accumulator.
void Interpreter::DoMod(InterpreterAssembler* assembler) {
  DoBinaryOpWithFeedback<ModulusWithFeedbackStub>(assembler);
}

void Interpreter::DoBitwiseBinaryOp(Token::Value bitwise_op,
                                    InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* lhs = __ LoadRegister(reg_index);
  Node* rhs = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* slot_index = __ BytecodeOperandIdx(1);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();

  Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32),
      var_rhs_type_feedback(assembler, MachineRepresentation::kWord32);
  Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
      context, lhs, &var_lhs_type_feedback);
  Node* rhs_value = __ TruncateTaggedToWord32WithFeedback(
      context, rhs, &var_rhs_type_feedback);
  Node* result = nullptr;

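  // Shift counts below are masked with 0x1f, matching JavaScript's semantics
  // of using only the five least significant bits of the shift count.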
1137   switch (bitwise_op) {
1138     case Token::BIT_OR: {
1139       Node* value = __ Word32Or(lhs_value, rhs_value);
1140       result = __ ChangeInt32ToTagged(value);
1141     } break;
1142     case Token::BIT_AND: {
1143       Node* value = __ Word32And(lhs_value, rhs_value);
1144       result = __ ChangeInt32ToTagged(value);
1145     } break;
1146     case Token::BIT_XOR: {
1147       Node* value = __ Word32Xor(lhs_value, rhs_value);
1148       result = __ ChangeInt32ToTagged(value);
1149     } break;
1150     case Token::SHL: {
1151       Node* value = __ Word32Shl(
1152           lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f)));
1153       result = __ ChangeInt32ToTagged(value);
1154     } break;
1155     case Token::SHR: {
1156       Node* value = __ Word32Shr(
1157           lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f)));
1158       result = __ ChangeUint32ToTagged(value);
1159     } break;
1160     case Token::SAR: {
1161       Node* value = __ Word32Sar(
1162           lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f)));
1163       result = __ ChangeInt32ToTagged(value);
1164     } break;
1165     default:
1166       UNREACHABLE();
1167   }
1168 
1169   Node* result_type =
1170       __ Select(__ TaggedIsSmi(result),
1171                 __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
1172                 __ Int32Constant(BinaryOperationFeedback::kNumber));
1173 
1174   if (FLAG_debug_code) {
1175     Label ok(assembler);
1176     __ GotoIf(__ TaggedIsSmi(result), &ok);
1177     Node* result_map = __ LoadMap(result);
1178     __ AbortIfWordNotEqual(result_map, __ HeapNumberMapConstant(),
1179                            kExpectedHeapNumber);
1180     __ Goto(&ok);
1181     __ Bind(&ok);
1182   }
1183 
1184   Node* input_feedback =
1185       __ Word32Or(var_lhs_type_feedback.value(), var_rhs_type_feedback.value());
1186   __ UpdateFeedback(__ Word32Or(result_type, input_feedback),
1187                     type_feedback_vector, slot_index);
1188   __ SetAccumulator(result);
1189   __ Dispatch();
1190 }
1191 
1192 // BitwiseOr <src>
1193 //
1194 // BitwiseOr register <src> to accumulator.
DoBitwiseOr(InterpreterAssembler * assembler)1195 void Interpreter::DoBitwiseOr(InterpreterAssembler* assembler) {
1196   DoBitwiseBinaryOp(Token::BIT_OR, assembler);
1197 }
1198 
1199 // BitwiseXor <src>
1200 //
1201 // BitwiseXor register <src> to accumulator.
DoBitwiseXor(InterpreterAssembler * assembler)1202 void Interpreter::DoBitwiseXor(InterpreterAssembler* assembler) {
1203   DoBitwiseBinaryOp(Token::BIT_XOR, assembler);
1204 }
1205 
1206 // BitwiseAnd <src>
1207 //
1208 // BitwiseAnd register <src> to accumulator.
DoBitwiseAnd(InterpreterAssembler * assembler)1209 void Interpreter::DoBitwiseAnd(InterpreterAssembler* assembler) {
1210   DoBitwiseBinaryOp(Token::BIT_AND, assembler);
1211 }
1212 
1213 // ShiftLeft <src>
1214 //
1215 // Left shifts register <src> by the count specified in the accumulator.
1216 // Register <src> is converted to an int32 and the accumulator to uint32
1217 // before the operation. 5 lsb bits from the accumulator are used as count
1218 // i.e. <src> << (accumulator & 0x1F).
DoShiftLeft(InterpreterAssembler * assembler)1219 void Interpreter::DoShiftLeft(InterpreterAssembler* assembler) {
1220   DoBitwiseBinaryOp(Token::SHL, assembler);
1221 }
1222 
1223 // ShiftRight <src>
1224 //
1225 // Right shifts register <src> by the count specified in the accumulator.
1226 // Result is sign extended. Register <src> is converted to an int32 and the
1227 // accumulator to uint32 before the operation. 5 lsb bits from the accumulator
1228 // are used as count i.e. <src> >> (accumulator & 0x1F).
DoShiftRight(InterpreterAssembler * assembler)1229 void Interpreter::DoShiftRight(InterpreterAssembler* assembler) {
1230   DoBitwiseBinaryOp(Token::SAR, assembler);
1231 }
1232 
1233 // ShiftRightLogical <src>
1234 //
1235 // Right Shifts register <src> by the count specified in the accumulator.
1236 // Result is zero-filled. The accumulator and register <src> are converted to
1237 // uint32 before the operation 5 lsb bits from the accumulator are used as
1238 // count i.e. <src> << (accumulator & 0x1F).
DoShiftRightLogical(InterpreterAssembler * assembler)1239 void Interpreter::DoShiftRightLogical(InterpreterAssembler* assembler) {
1240   DoBitwiseBinaryOp(Token::SHR, assembler);
1241 }
1242 
1243 // AddSmi <imm> <reg>
1244 //
1245 // Adds an immediate value <imm> to register <reg>. For this
1246 // operation <reg> is the lhs operand and <imm> is the <rhs> operand.
DoAddSmi(InterpreterAssembler * assembler)1247 void Interpreter::DoAddSmi(InterpreterAssembler* assembler) {
1248   Variable var_result(assembler, MachineRepresentation::kTagged);
1249   Label fastpath(assembler), slowpath(assembler, Label::kDeferred),
1250       end(assembler);
1251 
1252   Node* reg_index = __ BytecodeOperandReg(1);
1253   Node* left = __ LoadRegister(reg_index);
1254   Node* raw_int = __ BytecodeOperandImm(0);
1255   Node* right = __ SmiTag(raw_int);
1256   Node* slot_index = __ BytecodeOperandIdx(2);
1257   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
1258 
1259   // {right} is known to be a Smi.
1260   // Check if the {left} is a Smi take the fast path.
1261   __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath);
1262   __ Bind(&fastpath);
1263   {
1264     // Try fast Smi addition first.
1265     Node* pair = __ IntPtrAddWithOverflow(__ BitcastTaggedToWord(left),
1266                                           __ BitcastTaggedToWord(right));
1267     Node* overflow = __ Projection(1, pair);
1268 
1269     // Check if the Smi additon overflowed.
1270     Label if_notoverflow(assembler);
1271     __ Branch(overflow, &slowpath, &if_notoverflow);
1272     __ Bind(&if_notoverflow);
1273     {
1274       __ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
1275                         type_feedback_vector, slot_index);
1276       var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair)));
1277       __ Goto(&end);
1278     }
1279   }
1280   __ Bind(&slowpath);
1281   {
1282     Node* context = __ GetContext();
1283     AddWithFeedbackStub stub(__ isolate());
1284     Callable callable =
1285         Callable(stub.GetCode(), AddWithFeedbackStub::Descriptor(__ isolate()));
1286     Node* args[] = {left, right, slot_index, type_feedback_vector, context};
1287     var_result.Bind(__ CallStubN(callable, args, 1));
1288     __ Goto(&end);
1289   }
1290   __ Bind(&end);
1291   {
1292     __ SetAccumulator(var_result.value());
1293     __ Dispatch();
1294   }
1295 }
1296 
1297 // SubSmi <imm> <reg>
1298 //
1299 // Subtracts an immediate value <imm> to register <reg>. For this
1300 // operation <reg> is the lhs operand and <imm> is the rhs operand.
DoSubSmi(InterpreterAssembler * assembler)1301 void Interpreter::DoSubSmi(InterpreterAssembler* assembler) {
1302   Variable var_result(assembler, MachineRepresentation::kTagged);
1303   Label fastpath(assembler), slowpath(assembler, Label::kDeferred),
1304       end(assembler);
1305 
1306   Node* reg_index = __ BytecodeOperandReg(1);
1307   Node* left = __ LoadRegister(reg_index);
1308   Node* raw_int = __ BytecodeOperandImm(0);
1309   Node* right = __ SmiTag(raw_int);
1310   Node* slot_index = __ BytecodeOperandIdx(2);
1311   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
1312 
1313   // {right} is known to be a Smi.
1314   // Check if the {left} is a Smi take the fast path.
1315   __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath);
1316   __ Bind(&fastpath);
1317   {
1318     // Try fast Smi subtraction first.
1319     Node* pair = __ IntPtrSubWithOverflow(__ BitcastTaggedToWord(left),
1320                                           __ BitcastTaggedToWord(right));
1321     Node* overflow = __ Projection(1, pair);
1322 
1323     // Check if the Smi subtraction overflowed.
1324     Label if_notoverflow(assembler);
1325     __ Branch(overflow, &slowpath, &if_notoverflow);
1326     __ Bind(&if_notoverflow);
1327     {
1328       __ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
1329                         type_feedback_vector, slot_index);
1330       var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair)));
1331       __ Goto(&end);
1332     }
1333   }
1334   __ Bind(&slowpath);
1335   {
1336     Node* context = __ GetContext();
1337     SubtractWithFeedbackStub stub(__ isolate());
1338     Callable callable = Callable(
1339         stub.GetCode(), SubtractWithFeedbackStub::Descriptor(__ isolate()));
1340     Node* args[] = {left, right, slot_index, type_feedback_vector, context};
1341     var_result.Bind(__ CallStubN(callable, args, 1));
1342     __ Goto(&end);
1343   }
1344   __ Bind(&end);
1345   {
1346     __ SetAccumulator(var_result.value());
1347     __ Dispatch();
1348   }
1349 }
1350 
1351 // BitwiseOr <imm> <reg>
1352 //
1353 // BitwiseOr <reg> with <imm>. For this operation <reg> is the lhs
1354 // operand and <imm> is the rhs operand.
1355 void Interpreter::DoBitwiseOrSmi(InterpreterAssembler* assembler) {
1356   Node* reg_index = __ BytecodeOperandReg(1);
1357   Node* left = __ LoadRegister(reg_index);
1358   Node* raw_int = __ BytecodeOperandImm(0);
1359   Node* right = __ SmiTag(raw_int);
1360   Node* context = __ GetContext();
1361   Node* slot_index = __ BytecodeOperandIdx(2);
1362   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
1363   Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
1364   Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
1365       context, left, &var_lhs_type_feedback);
1366   Node* rhs_value = __ SmiToWord32(right);
1367   Node* value = __ Word32Or(lhs_value, rhs_value);
1368   Node* result = __ ChangeInt32ToTagged(value);
1369   Node* result_type =
1370       __ Select(__ TaggedIsSmi(result),
1371                 __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
1372                 __ Int32Constant(BinaryOperationFeedback::kNumber));
1373   __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
1374                     type_feedback_vector, slot_index);
1375   __ SetAccumulator(result);
1376   __ Dispatch();
1377 }
1378 
1379 // BitwiseAnd <imm> <reg>
1380 //
1381 // BitwiseAnd <reg> with <imm>. For this operation <reg> is the lhs
1382 // operand and <imm> is the rhs operand.
1383 void Interpreter::DoBitwiseAndSmi(InterpreterAssembler* assembler) {
1384   Node* reg_index = __ BytecodeOperandReg(1);
1385   Node* left = __ LoadRegister(reg_index);
1386   Node* raw_int = __ BytecodeOperandImm(0);
1387   Node* right = __ SmiTag(raw_int);
1388   Node* context = __ GetContext();
1389   Node* slot_index = __ BytecodeOperandIdx(2);
1390   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
1391   Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
1392   Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
1393       context, left, &var_lhs_type_feedback);
1394   Node* rhs_value = __ SmiToWord32(right);
1395   Node* value = __ Word32And(lhs_value, rhs_value);
1396   Node* result = __ ChangeInt32ToTagged(value);
1397   Node* result_type =
1398       __ Select(__ TaggedIsSmi(result),
1399                 __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
1400                 __ Int32Constant(BinaryOperationFeedback::kNumber));
1401   __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
1402                     type_feedback_vector, slot_index);
1403   __ SetAccumulator(result);
1404   __ Dispatch();
1405 }
1406 
1407 // ShiftLeftSmi <imm> <reg>
1408 //
1409 // Left shifts register <reg> by the count specified in <imm>.
1410 // Register <reg> is converted to an int32 before the operation. The five
1411 // least significant bits of <imm> are used as the count, i.e. <reg> << (<imm> & 0x1F).
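// Because only the low five bits of the count are used, a shift by 33
// behaves like a shift by 1 (33 & 0x1F == 1), matching the semantics of the
// JavaScript << operator.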
1412 void Interpreter::DoShiftLeftSmi(InterpreterAssembler* assembler) {
1413   Node* reg_index = __ BytecodeOperandReg(1);
1414   Node* left = __ LoadRegister(reg_index);
1415   Node* raw_int = __ BytecodeOperandImm(0);
1416   Node* right = __ SmiTag(raw_int);
1417   Node* context = __ GetContext();
1418   Node* slot_index = __ BytecodeOperandIdx(2);
1419   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
1420   Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
1421   Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
1422       context, left, &var_lhs_type_feedback);
1423   Node* rhs_value = __ SmiToWord32(right);
1424   Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f));
1425   Node* value = __ Word32Shl(lhs_value, shift_count);
1426   Node* result = __ ChangeInt32ToTagged(value);
1427   Node* result_type =
1428       __ Select(__ TaggedIsSmi(result),
1429                 __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
1430                 __ Int32Constant(BinaryOperationFeedback::kNumber));
1431   __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
1432                     type_feedback_vector, slot_index);
1433   __ SetAccumulator(result);
1434   __ Dispatch();
1435 }
1436 
1437 // ShiftRightSmi <imm> <reg>
1438 //
1439 // Right shifts register <reg> by the count specified in <imm>.
1440 // Register <reg> is converted to an int32 before the operation. The five
1441 // least significant bits of <imm> are used as the count, i.e. <reg> >> (<imm> & 0x1F).
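// Word32Sar is an arithmetic (sign-preserving) shift, so e.g. -8 >> 1
// evaluates to -4; the zero-filling variant is handled separately by
// ShiftRightLogical.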
1442 void Interpreter::DoShiftRightSmi(InterpreterAssembler* assembler) {
1443   Node* reg_index = __ BytecodeOperandReg(1);
1444   Node* left = __ LoadRegister(reg_index);
1445   Node* raw_int = __ BytecodeOperandImm(0);
1446   Node* right = __ SmiTag(raw_int);
1447   Node* context = __ GetContext();
1448   Node* slot_index = __ BytecodeOperandIdx(2);
1449   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
1450   Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
1451   Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
1452       context, left, &var_lhs_type_feedback);
1453   Node* rhs_value = __ SmiToWord32(right);
1454   Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f));
1455   Node* value = __ Word32Sar(lhs_value, shift_count);
1456   Node* result = __ ChangeInt32ToTagged(value);
1457   Node* result_type =
1458       __ Select(__ TaggedIsSmi(result),
1459                 __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
1460                 __ Int32Constant(BinaryOperationFeedback::kNumber));
1461   __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
1462                     type_feedback_vector, slot_index);
1463   __ SetAccumulator(result);
1464   __ Dispatch();
1465 }
1466 
1467 Node* Interpreter::BuildUnaryOp(Callable callable,
1468                                 InterpreterAssembler* assembler) {
1469   Node* target = __ HeapConstant(callable.code());
1470   Node* accumulator = __ GetAccumulator();
1471   Node* context = __ GetContext();
1472   return __ CallStub(callable.descriptor(), target, context, accumulator);
1473 }
1474 
1475 template <class Generator>
1476 void Interpreter::DoUnaryOpWithFeedback(InterpreterAssembler* assembler) {
1477   Node* value = __ GetAccumulator();
1478   Node* context = __ GetContext();
1479   Node* slot_index = __ BytecodeOperandIdx(0);
1480   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
1481   Node* result = Generator::Generate(assembler, value, context,
1482                                      type_feedback_vector, slot_index);
1483   __ SetAccumulator(result);
1484   __ Dispatch();
1485 }
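// The Generator parameter is expected to be a stub such as IncStub or DecStub
// (see DoInc and DoDec below) exposing a static
// Generate(assembler, value, context, feedback_vector, slot) helper; the
// handler above simply forwards to it and dispatches.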
1486 
1487 // ToName
1488 //
1489 // Convert the object referenced by the accumulator to a name.
1490 void Interpreter::DoToName(InterpreterAssembler* assembler) {
1491   Node* object = __ GetAccumulator();
1492   Node* context = __ GetContext();
1493   Node* result = __ ToName(context, object);
1494   __ StoreRegister(result, __ BytecodeOperandReg(0));
1495   __ Dispatch();
1496 }
1497 
1498 // ToNumber
1499 //
1500 // Convert the object referenced by the accumulator to a number.
1501 void Interpreter::DoToNumber(InterpreterAssembler* assembler) {
1502   Node* object = __ GetAccumulator();
1503   Node* context = __ GetContext();
1504   Node* result = __ ToNumber(context, object);
1505   __ StoreRegister(result, __ BytecodeOperandReg(0));
1506   __ Dispatch();
1507 }
1508 
1509 // ToObject
1510 //
1511 // Convert the object referenced by the accumulator to a JSReceiver.
1512 void Interpreter::DoToObject(InterpreterAssembler* assembler) {
1513   Node* result = BuildUnaryOp(CodeFactory::ToObject(isolate_), assembler);
1514   __ StoreRegister(result, __ BytecodeOperandReg(0));
1515   __ Dispatch();
1516 }
1517 
1518 // Inc
1519 //
1520 // Increments value in the accumulator by one.
1521 void Interpreter::DoInc(InterpreterAssembler* assembler) {
1522   DoUnaryOpWithFeedback<IncStub>(assembler);
1523 }
1524 
1525 // Dec
1526 //
1527 // Decrements value in the accumulator by one.
1528 void Interpreter::DoDec(InterpreterAssembler* assembler) {
1529   DoUnaryOpWithFeedback<DecStub>(assembler);
1530 }
1531 
1532 // ToBooleanLogicalNot
1533 //
1534 // Perform logical-not on the accumulator, first casting the
1535 // accumulator to a boolean value if required, then loading the
1536 // inverted boolean into the accumulator.
1537 void Interpreter::DoToBooleanLogicalNot(InterpreterAssembler* assembler) {
1538   Node* value = __ GetAccumulator();
1539   Variable result(assembler, MachineRepresentation::kTagged);
1540   Label if_true(assembler), if_false(assembler), end(assembler);
1541   Node* true_value = __ BooleanConstant(true);
1542   Node* false_value = __ BooleanConstant(false);
1543   __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
1544   __ Bind(&if_true);
1545   {
1546     result.Bind(false_value);
1547     __ Goto(&end);
1548   }
1549   __ Bind(&if_false);
1550   {
1551     result.Bind(true_value);
1552     __ Goto(&end);
1553   }
1554   __ Bind(&end);
1555   __ SetAccumulator(result.value());
1556   __ Dispatch();
1557 }
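// ToBoolean semantics apply here, so for example !"" yields true and !{}
// yields false; the plain LogicalNot below instead requires the accumulator
// to already hold a boolean and merely inverts it.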
1558 
1559 // LogicalNot
1560 //
1561 // Perform logical-not on the accumulator, which must already be a boolean
1562 // value.
1563 void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
1564   Node* value = __ GetAccumulator();
1565   Variable result(assembler, MachineRepresentation::kTagged);
1566   Label if_true(assembler), if_false(assembler), end(assembler);
1567   Node* true_value = __ BooleanConstant(true);
1568   Node* false_value = __ BooleanConstant(false);
1569   __ Branch(__ WordEqual(value, true_value), &if_true, &if_false);
1570   __ Bind(&if_true);
1571   {
1572     result.Bind(false_value);
1573     __ Goto(&end);
1574   }
1575   __ Bind(&if_false);
1576   {
1577     if (FLAG_debug_code) {
1578       __ AbortIfWordNotEqual(value, false_value,
1579                              BailoutReason::kExpectedBooleanValue);
1580     }
1581     result.Bind(true_value);
1582     __ Goto(&end);
1583   }
1584   __ Bind(&end);
1585   __ SetAccumulator(result.value());
1586   __ Dispatch();
1587 }
1588 
1589 // TypeOf
1590 //
1591 // Load the accumulator with the string representing the type of the
1592 // object in the accumulator.
1593 void Interpreter::DoTypeOf(InterpreterAssembler* assembler) {
1594   Node* value = __ GetAccumulator();
1595   Node* context = __ GetContext();
1596   Node* result = assembler->Typeof(value, context);
1597   __ SetAccumulator(result);
1598   __ Dispatch();
1599 }
1600 
1601 void Interpreter::DoDelete(Runtime::FunctionId function_id,
1602                            InterpreterAssembler* assembler) {
1603   Node* reg_index = __ BytecodeOperandReg(0);
1604   Node* object = __ LoadRegister(reg_index);
1605   Node* key = __ GetAccumulator();
1606   Node* context = __ GetContext();
1607   Node* result = __ CallRuntime(function_id, context, object, key);
1608   __ SetAccumulator(result);
1609   __ Dispatch();
1610 }
1611 
1612 // DeletePropertyStrict
1613 //
1614 // Delete the property specified in the accumulator from the object
1615 // referenced by the register operand following strict mode semantics.
1616 void Interpreter::DoDeletePropertyStrict(InterpreterAssembler* assembler) {
1617   DoDelete(Runtime::kDeleteProperty_Strict, assembler);
1618 }
1619 
1620 // DeletePropertySloppy
1621 //
1622 // Delete the property specified in the accumulator from the object
1623 // referenced by the register operand following sloppy mode semantics.
1624 void Interpreter::DoDeletePropertySloppy(InterpreterAssembler* assembler) {
1625   DoDelete(Runtime::kDeleteProperty_Sloppy, assembler);
1626 }
1627 
1628 void Interpreter::DoJSCall(InterpreterAssembler* assembler,
1629                            TailCallMode tail_call_mode) {
1630   Node* function_reg = __ BytecodeOperandReg(0);
1631   Node* function = __ LoadRegister(function_reg);
1632   Node* receiver_reg = __ BytecodeOperandReg(1);
1633   Node* receiver_arg = __ RegisterLocation(receiver_reg);
1634   Node* receiver_args_count = __ BytecodeOperandCount(2);
1635   Node* receiver_count = __ Int32Constant(1);
1636   Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
1637   Node* slot_id = __ BytecodeOperandIdx(3);
1638   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
1639   Node* context = __ GetContext();
1640   Node* result =
1641       __ CallJSWithFeedback(function, context, receiver_arg, args_count,
1642                             slot_id, type_feedback_vector, tail_call_mode);
1643   __ SetAccumulator(result);
1644   __ Dispatch();
1645 }
1646 
1647 // Call <callable> <receiver> <arg_count> <feedback_slot_id>
1648 //
1649 // Call a JSFunction or Callable in |callable| with the |receiver| and
1650 // |arg_count| arguments in subsequent registers. Collect type feedback
1651 // into |feedback_slot_id|.
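// As a rough illustration (register numbers are hypothetical), a call like
// f(a, b) may be emitted as
//   Call r0, r1, #3, [4]
// where r0 holds the callee, r1-r3 hold the receiver and the two arguments,
// and the count includes the receiver (hence the subtraction of one in
// DoJSCall above).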
1652 void Interpreter::DoCall(InterpreterAssembler* assembler) {
1653   DoJSCall(assembler, TailCallMode::kDisallow);
1654 }
1655 
1656 // CallProperty <callable> <receiver> <arg_count> <feedback_slot_id>
1657 //
1658 // Call a JSFunction or Callable in |callable| with the |receiver| and
1659 // |arg_count| arguments in subsequent registers. Collect type feedback into
1660 // |feedback_slot_id|. The callable is known to be a property of the receiver.
1661 void Interpreter::DoCallProperty(InterpreterAssembler* assembler) {
1662   // TODO(leszeks): Look into making the interpreter use the fact that the
1663   // receiver is non-null.
1664   DoJSCall(assembler, TailCallMode::kDisallow);
1665 }
1666 
1667 // TailCall <callable> <receiver> <arg_count> <feedback_slot_id>
1668 //
1669 // Tail call a JSFunction or Callable in |callable| with the |receiver| and
1670 // |arg_count| arguments in subsequent registers. Collect type feedback
1671 // into |feedback_slot_id|.
1672 void Interpreter::DoTailCall(InterpreterAssembler* assembler) {
1673   DoJSCall(assembler, TailCallMode::kAllow);
1674 }
1675 
1676 // CallRuntime <function_id> <first_arg> <arg_count>
1677 //
1678 // Call the runtime function |function_id| with the first argument in
1679 // register |first_arg| and |arg_count| arguments in subsequent
1680 // registers.
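// For example, with --allow-natives-syntax a source-level call written as
// %SomeRuntimeFunction(arg) compiles to a CallRuntime with the matching
// Runtime::k id; the bytecode generator also emits CallRuntime internally
// for various desugared constructs. (%SomeRuntimeFunction is a placeholder,
// not a specific runtime function.)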
1681 void Interpreter::DoCallRuntime(InterpreterAssembler* assembler) {
1682   Node* function_id = __ BytecodeOperandRuntimeId(0);
1683   Node* first_arg_reg = __ BytecodeOperandReg(1);
1684   Node* first_arg = __ RegisterLocation(first_arg_reg);
1685   Node* args_count = __ BytecodeOperandCount(2);
1686   Node* context = __ GetContext();
1687   Node* result = __ CallRuntimeN(function_id, context, first_arg, args_count);
1688   __ SetAccumulator(result);
1689   __ Dispatch();
1690 }
1691 
1692 // InvokeIntrinsic <function_id> <first_arg> <arg_count>
1693 //
1694 // Implements the semantic equivalent of calling the runtime function
1695 // |function_id| with the first argument in |first_arg| and |arg_count|
1696 // arguments in subsequent registers.
1697 void Interpreter::DoInvokeIntrinsic(InterpreterAssembler* assembler) {
1698   Node* function_id = __ BytecodeOperandIntrinsicId(0);
1699   Node* first_arg_reg = __ BytecodeOperandReg(1);
1700   Node* arg_count = __ BytecodeOperandCount(2);
1701   Node* context = __ GetContext();
1702   IntrinsicsHelper helper(assembler);
1703   Node* result =
1704       helper.InvokeIntrinsic(function_id, context, first_arg_reg, arg_count);
1705   __ SetAccumulator(result);
1706   __ Dispatch();
1707 }
1708 
1709 // CallRuntimeForPair <function_id> <first_arg> <arg_count> <first_return>
1710 //
1711 // Call the runtime function |function_id| which returns a pair, with the
1712 // first argument in register |first_arg| and |arg_count| arguments in
1713 // subsequent registers. Returns the result in <first_return> and
1714 // <first_return + 1>
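// This is used for runtime functions whose C++ implementation returns two
// values (e.g. a call target plus a receiver); the two Projection nodes
// below unpack the pair into consecutive registers.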
1715 void Interpreter::DoCallRuntimeForPair(InterpreterAssembler* assembler) {
1716   // Call the runtime function.
1717   Node* function_id = __ BytecodeOperandRuntimeId(0);
1718   Node* first_arg_reg = __ BytecodeOperandReg(1);
1719   Node* first_arg = __ RegisterLocation(first_arg_reg);
1720   Node* args_count = __ BytecodeOperandCount(2);
1721   Node* context = __ GetContext();
1722   Node* result_pair =
1723       __ CallRuntimeN(function_id, context, first_arg, args_count, 2);
1724 
1725   // Store the results in <first_return> and <first_return + 1>
1726   Node* first_return_reg = __ BytecodeOperandReg(3);
1727   Node* second_return_reg = __ NextRegister(first_return_reg);
1728   Node* result0 = __ Projection(0, result_pair);
1729   Node* result1 = __ Projection(1, result_pair);
1730   __ StoreRegister(result0, first_return_reg);
1731   __ StoreRegister(result1, second_return_reg);
1732   __ Dispatch();
1733 }
1734 
1735 // CallJSRuntime <context_index> <receiver> <arg_count>
1736 //
1737 // Call the JS runtime function that has the |context_index| with the receiver
1738 // in register |receiver| and |arg_count| arguments in subsequent registers.
1739 void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) {
1740   Node* context_index = __ BytecodeOperandIdx(0);
1741   Node* receiver_reg = __ BytecodeOperandReg(1);
1742   Node* first_arg = __ RegisterLocation(receiver_reg);
1743   Node* receiver_args_count = __ BytecodeOperandCount(2);
1744   Node* receiver_count = __ Int32Constant(1);
1745   Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
1746 
1747   // Get the function to call from the native context.
1748   Node* context = __ GetContext();
1749   Node* native_context = __ LoadNativeContext(context);
1750   Node* function = __ LoadContextElement(native_context, context_index);
1751 
1752   // Call the function.
1753   Node* result = __ CallJS(function, context, first_arg, args_count,
1754                            TailCallMode::kDisallow);
1755   __ SetAccumulator(result);
1756   __ Dispatch();
1757 }
1758 
1759 // New <constructor> <first_arg> <arg_count>
1760 //
1761 // Call operator new with |constructor| and the first argument in
1762 // register |first_arg| and |arg_count| arguments in subsequent
1763 // registers. The new.target is in the accumulator.
1764 //
1765 void Interpreter::DoNew(InterpreterAssembler* assembler) {
1766   Callable ic = CodeFactory::InterpreterPushArgsAndConstruct(isolate_);
1767   Node* new_target = __ GetAccumulator();
1768   Node* constructor_reg = __ BytecodeOperandReg(0);
1769   Node* constructor = __ LoadRegister(constructor_reg);
1770   Node* first_arg_reg = __ BytecodeOperandReg(1);
1771   Node* first_arg = __ RegisterLocation(first_arg_reg);
1772   Node* args_count = __ BytecodeOperandCount(2);
1773   Node* slot_id = __ BytecodeOperandIdx(3);
1774   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
1775   Node* context = __ GetContext();
1776   Node* result = __ CallConstruct(constructor, context, new_target, first_arg,
1777                                   args_count, slot_id, type_feedback_vector);
1778   __ SetAccumulator(result);
1779   __ Dispatch();
1780 }
1781 
1782 // TestEqual <src>
1783 //
1784 // Test if the value in the <src> register equals the accumulator.
1785 void Interpreter::DoTestEqual(InterpreterAssembler* assembler) {
1786   DoCompareOpWithFeedback(Token::Value::EQ, assembler);
1787 }
1788 
1789 // TestNotEqual <src>
1790 //
1791 // Test if the value in the <src> register is not equal to the accumulator.
1792 void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) {
1793   DoCompareOpWithFeedback(Token::Value::NE, assembler);
1794 }
1795 
1796 // TestEqualStrict <src>
1797 //
1798 // Test if the value in the <src> register is strictly equal to the accumulator.
1799 void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) {
1800   DoCompareOpWithFeedback(Token::Value::EQ_STRICT, assembler);
1801 }
1802 
1803 // TestLessThan <src>
1804 //
1805 // Test if the value in the <src> register is less than the accumulator.
1806 void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) {
1807   DoCompareOpWithFeedback(Token::Value::LT, assembler);
1808 }
1809 
1810 // TestGreaterThan <src>
1811 //
1812 // Test if the value in the <src> register is greater than the accumulator.
1813 void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
1814   DoCompareOpWithFeedback(Token::Value::GT, assembler);
1815 }
1816 
1817 // TestLessThanOrEqual <src>
1818 //
1819 // Test if the value in the <src> register is less than or equal to the
1820 // accumulator.
1821 void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
1822   DoCompareOpWithFeedback(Token::Value::LTE, assembler);
1823 }
1824 
1825 // TestGreaterThanOrEqual <src>
1826 //
1827 // Test if the value in the <src> register is greater than or equal to the
1828 // accumulator.
1829 void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
1830   DoCompareOpWithFeedback(Token::Value::GTE, assembler);
1831 }
1832 
1833 // TestIn <src>
1834 //
1835 // Test if the object referenced by the register operand is a property of the
1836 // object referenced by the accumulator.
1837 void Interpreter::DoTestIn(InterpreterAssembler* assembler) {
1838   DoCompareOp(Token::IN, assembler);
1839 }
1840 
1841 // TestInstanceOf <src>
1842 //
1843 // Test if the object referenced by the <src> register is an instance of the type
1844 // referenced by the accumulator.
1845 void Interpreter::DoTestInstanceOf(InterpreterAssembler* assembler) {
1846   DoCompareOp(Token::INSTANCEOF, assembler);
1847 }
1848 
1849 // Jump <imm>
1850 //
1851 // Jump by number of bytes represented by the immediate operand |imm|.
1852 void Interpreter::DoJump(InterpreterAssembler* assembler) {
1853   Node* relative_jump = __ BytecodeOperandImm(0);
1854   __ Jump(relative_jump);
1855 }
1856 
1857 // JumpConstant <idx>
1858 //
1859 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool.
1860 void Interpreter::DoJumpConstant(InterpreterAssembler* assembler) {
1861   Node* index = __ BytecodeOperandIdx(0);
1862   Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
1863   __ Jump(relative_jump);
1864 }
1865 
1866 // JumpIfTrue <imm>
1867 //
1868 // Jump by number of bytes represented by an immediate operand if the
1869 // accumulator contains true.
1870 void Interpreter::DoJumpIfTrue(InterpreterAssembler* assembler) {
1871   Node* accumulator = __ GetAccumulator();
1872   Node* relative_jump = __ BytecodeOperandImm(0);
1873   Node* true_value = __ BooleanConstant(true);
1874   __ JumpIfWordEqual(accumulator, true_value, relative_jump);
1875 }
1876 
1877 // JumpIfTrueConstant <idx>
1878 //
1879 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
1880 // if the accumulator contains true.
1881 void Interpreter::DoJumpIfTrueConstant(InterpreterAssembler* assembler) {
1882   Node* accumulator = __ GetAccumulator();
1883   Node* index = __ BytecodeOperandIdx(0);
1884   Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
1885   Node* true_value = __ BooleanConstant(true);
1886   __ JumpIfWordEqual(accumulator, true_value, relative_jump);
1887 }
1888 
1889 // JumpIfFalse <imm>
1890 //
1891 // Jump by number of bytes represented by an immediate operand if the
1892 // accumulator contains false.
1893 void Interpreter::DoJumpIfFalse(InterpreterAssembler* assembler) {
1894   Node* accumulator = __ GetAccumulator();
1895   Node* relative_jump = __ BytecodeOperandImm(0);
1896   Node* false_value = __ BooleanConstant(false);
1897   __ JumpIfWordEqual(accumulator, false_value, relative_jump);
1898 }
1899 
1900 // JumpIfFalseConstant <idx>
1901 //
1902 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
1903 // if the accumulator contains false.
1904 void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) {
1905   Node* accumulator = __ GetAccumulator();
1906   Node* index = __ BytecodeOperandIdx(0);
1907   Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
1908   Node* false_value = __ BooleanConstant(false);
1909   __ JumpIfWordEqual(accumulator, false_value, relative_jump);
1910 }
1911 
1912 // JumpIfToBooleanTrue <imm>
1913 //
1914 // Jump by number of bytes represented by an immediate operand if the object
1915 // referenced by the accumulator is true when the object is cast to boolean.
1916 void Interpreter::DoJumpIfToBooleanTrue(InterpreterAssembler* assembler) {
1917   Node* value = __ GetAccumulator();
1918   Node* relative_jump = __ BytecodeOperandImm(0);
1919   Label if_true(assembler), if_false(assembler);
1920   __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
1921   __ Bind(&if_true);
1922   __ Jump(relative_jump);
1923   __ Bind(&if_false);
1924   __ Dispatch();
1925 }
1926 
1927 // JumpIfToBooleanTrueConstant <idx>
1928 //
1929 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
1930 // if the object referenced by the accumulator is true when the object is cast
1931 // to boolean.
1932 void Interpreter::DoJumpIfToBooleanTrueConstant(
1933     InterpreterAssembler* assembler) {
1934   Node* value = __ GetAccumulator();
1935   Node* index = __ BytecodeOperandIdx(0);
1936   Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
1937   Label if_true(assembler), if_false(assembler);
1938   __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
1939   __ Bind(&if_true);
1940   __ Jump(relative_jump);
1941   __ Bind(&if_false);
1942   __ Dispatch();
1943 }
1944 
1945 // JumpIfToBooleanFalse <imm>
1946 //
1947 // Jump by number of bytes represented by an immediate operand if the object
1948 // referenced by the accumulator is false when the object is cast to boolean.
1949 void Interpreter::DoJumpIfToBooleanFalse(InterpreterAssembler* assembler) {
1950   Node* value = __ GetAccumulator();
1951   Node* relative_jump = __ BytecodeOperandImm(0);
1952   Label if_true(assembler), if_false(assembler);
1953   __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
1954   __ Bind(&if_true);
1955   __ Dispatch();
1956   __ Bind(&if_false);
1957   __ Jump(relative_jump);
1958 }
1959 
1960 // JumpIfToBooleanFalseConstant <idx>
1961 //
1962 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
1963 // if the object referenced by the accumulator is false when the object is cast
1964 // to boolean.
1965 void Interpreter::DoJumpIfToBooleanFalseConstant(
1966     InterpreterAssembler* assembler) {
1967   Node* value = __ GetAccumulator();
1968   Node* index = __ BytecodeOperandIdx(0);
1969   Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
1970   Label if_true(assembler), if_false(assembler);
1971   __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
1972   __ Bind(&if_true);
1973   __ Dispatch();
1974   __ Bind(&if_false);
1975   __ Jump(relative_jump);
1976 }
1977 
1978 // JumpIfNull <imm>
1979 //
1980 // Jump by number of bytes represented by an immediate operand if the object
1981 // referenced by the accumulator is the null constant.
1982 void Interpreter::DoJumpIfNull(InterpreterAssembler* assembler) {
1983   Node* accumulator = __ GetAccumulator();
1984   Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
1985   Node* relative_jump = __ BytecodeOperandImm(0);
1986   __ JumpIfWordEqual(accumulator, null_value, relative_jump);
1987 }
1988 
1989 // JumpIfNullConstant <idx>
1990 //
1991 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
1992 // if the object referenced by the accumulator is the null constant.
1993 void Interpreter::DoJumpIfNullConstant(InterpreterAssembler* assembler) {
1994   Node* accumulator = __ GetAccumulator();
1995   Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
1996   Node* index = __ BytecodeOperandIdx(0);
1997   Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
1998   __ JumpIfWordEqual(accumulator, null_value, relative_jump);
1999 }
2000 
2001 // JumpIfUndefined <imm>
2002 //
2003 // Jump by number of bytes represented by an immediate operand if the object
2004 // referenced by the accumulator is the undefined constant.
2005 void Interpreter::DoJumpIfUndefined(InterpreterAssembler* assembler) {
2006   Node* accumulator = __ GetAccumulator();
2007   Node* undefined_value =
2008       __ HeapConstant(isolate_->factory()->undefined_value());
2009   Node* relative_jump = __ BytecodeOperandImm(0);
2010   __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
2011 }
2012 
2013 // JumpIfUndefinedConstant <idx>
2014 //
2015 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
2016 // if the object referenced by the accumulator is the undefined constant.
2017 void Interpreter::DoJumpIfUndefinedConstant(InterpreterAssembler* assembler) {
2018   Node* accumulator = __ GetAccumulator();
2019   Node* undefined_value =
2020       __ HeapConstant(isolate_->factory()->undefined_value());
2021   Node* index = __ BytecodeOperandIdx(0);
2022   Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
2023   __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
2024 }
2025 
2026 // JumpIfNotHole <imm>
2027 //
2028 // Jump by number of bytes represented by an immediate operand if the object
2029 // referenced by the accumulator is not the hole.
2030 void Interpreter::DoJumpIfNotHole(InterpreterAssembler* assembler) {
2031   Node* accumulator = __ GetAccumulator();
2032   Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
2033   Node* relative_jump = __ BytecodeOperandImm(0);
2034   __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
2035 }
2036 
2037 // JumpIfNotHoleConstant <idx>
2038 //
2039 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
2040 // if the object referenced by the accumulator is not the hole constant.
2041 void Interpreter::DoJumpIfNotHoleConstant(InterpreterAssembler* assembler) {
2042   Node* accumulator = __ GetAccumulator();
2043   Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
2044   Node* index = __ BytecodeOperandIdx(0);
2045   Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
2046   __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
2047 }
2048 
2049 // JumpLoop <imm> <loop_depth>
2050 //
2051 // Jump by number of bytes represented by the immediate operand |imm|. Also
2052 // performs a loop nesting check and potentially triggers OSR in case the
2053 // current OSR level matches (or exceeds) the specified |loop_depth|.
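// The OSR nesting level is stored in the BytecodeArray header (see
// LoadOSRNestingLevel below). The runtime profiler raises it when it wants
// on-stack replacement; once it exceeds this loop's |loop_depth|, the
// deferred path calls the InterpreterOnStackReplacement builtin before
// taking the back edge.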
2054 void Interpreter::DoJumpLoop(InterpreterAssembler* assembler) {
2055   Node* relative_jump = __ BytecodeOperandImm(0);
2056   Node* loop_depth = __ BytecodeOperandImm(1);
2057   Node* osr_level = __ LoadOSRNestingLevel();
2058 
2059   // Check if OSR points at the given {loop_depth} are armed by comparing it to
2060   // the current {osr_level} loaded from the header of the BytecodeArray.
2061   Label ok(assembler), osr_armed(assembler, Label::kDeferred);
2062   Node* condition = __ Int32GreaterThanOrEqual(loop_depth, osr_level);
2063   __ Branch(condition, &ok, &osr_armed);
2064 
2065   __ Bind(&ok);
2066   __ Jump(relative_jump);
2067 
2068   __ Bind(&osr_armed);
2069   {
2070     Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate_);
2071     Node* target = __ HeapConstant(callable.code());
2072     Node* context = __ GetContext();
2073     __ CallStub(callable.descriptor(), target, context);
2074     __ Jump(relative_jump);
2075   }
2076 }
2077 
2078 // CreateRegExpLiteral <pattern_idx> <literal_idx> <flags>
2079 //
2080 // Creates a regular expression literal for literal index <literal_idx> with
2081 // <flags> and the pattern in <pattern_idx>.
2082 void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) {
2083   Node* index = __ BytecodeOperandIdx(0);
2084   Node* pattern = __ LoadConstantPoolEntry(index);
2085   Node* literal_index_raw = __ BytecodeOperandIdx(1);
2086   Node* literal_index = __ SmiTag(literal_index_raw);
2087   Node* flags_raw = __ BytecodeOperandFlag(2);
2088   Node* flags = __ SmiTag(flags_raw);
2089   Node* closure = __ LoadRegister(Register::function_closure());
2090   Node* context = __ GetContext();
2091   Node* result = FastCloneRegExpStub::Generate(
2092       assembler, closure, literal_index, pattern, flags, context);
2093   __ SetAccumulator(result);
2094   __ Dispatch();
2095 }
2096 
2097 // CreateArrayLiteral <element_idx> <literal_idx> <flags>
2098 //
2099 // Creates an array literal for literal index <literal_idx> with
2100 // CreateArrayLiteral flags <flags> and constant elements in <element_idx>.
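// Whether the fast path applies is decided by the bytecode generator via the
// FastShallowClone flag bit; a simple literal such as [1, 2, 3] can then be
// materialized by copying a previously allocated boilerplate array, while
// other literals take the runtime call below.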
2101 void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
2102   Node* literal_index_raw = __ BytecodeOperandIdx(1);
2103   Node* literal_index = __ SmiTag(literal_index_raw);
2104   Node* closure = __ LoadRegister(Register::function_closure());
2105   Node* context = __ GetContext();
2106   Node* bytecode_flags = __ BytecodeOperandFlag(2);
2107 
2108   Label fast_shallow_clone(assembler),
2109       call_runtime(assembler, Label::kDeferred);
2110   Node* use_fast_shallow_clone = __ Word32And(
2111       bytecode_flags,
2112       __ Int32Constant(CreateArrayLiteralFlags::FastShallowCloneBit::kMask));
2113   __ Branch(use_fast_shallow_clone, &fast_shallow_clone, &call_runtime);
2114 
2115   __ Bind(&fast_shallow_clone);
2116   {
2117     DCHECK(FLAG_allocation_site_pretenuring);
2118     Node* result = FastCloneShallowArrayStub::Generate(
2119         assembler, closure, literal_index, context, &call_runtime,
2120         TRACK_ALLOCATION_SITE);
2121     __ SetAccumulator(result);
2122     __ Dispatch();
2123   }
2124 
2125   __ Bind(&call_runtime);
2126   {
2127     STATIC_ASSERT(CreateArrayLiteralFlags::FlagsBits::kShift == 0);
2128     Node* flags_raw = __ Word32And(
2129         bytecode_flags,
2130         __ Int32Constant(CreateArrayLiteralFlags::FlagsBits::kMask));
2131     Node* flags = __ SmiTag(flags_raw);
2132     Node* index = __ BytecodeOperandIdx(0);
2133     Node* constant_elements = __ LoadConstantPoolEntry(index);
2134     Node* result =
2135         __ CallRuntime(Runtime::kCreateArrayLiteral, context, closure,
2136                        literal_index, constant_elements, flags);
2137     __ SetAccumulator(result);
2138     __ Dispatch();
2139   }
2140 }
2141 
2142 // CreateObjectLiteral <element_idx> <literal_idx> <flags>
2143 //
2144 // Creates an object literal for literal index <literal_idx> with
2145 // CreateObjectLiteralFlags <flags> and constant elements in <element_idx>.
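// For a small literal such as {a: 1, b: 2} the generator encodes a non-zero
// fast-clone properties count in the flags operand, selecting the
// FastCloneShallowObjectStub path below; a zero count forces the runtime
// call.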
2146 void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
2147   Node* literal_index_raw = __ BytecodeOperandIdx(1);
2148   Node* literal_index = __ SmiTag(literal_index_raw);
2149   Node* bytecode_flags = __ BytecodeOperandFlag(2);
2150   Node* closure = __ LoadRegister(Register::function_closure());
2151 
2152   // Check if we can do a fast clone or have to call the runtime.
2153   Label if_fast_clone(assembler),
2154       if_not_fast_clone(assembler, Label::kDeferred);
2155   Node* fast_clone_properties_count =
2156       __ DecodeWord32<CreateObjectLiteralFlags::FastClonePropertiesCountBits>(
2157           bytecode_flags);
2158   __ Branch(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone);
2159 
2160   __ Bind(&if_fast_clone);
2161   {
2162     // If we can do a fast clone do the fast-path in FastCloneShallowObjectStub.
2163     Node* result = FastCloneShallowObjectStub::GenerateFastPath(
2164         assembler, &if_not_fast_clone, closure, literal_index,
2165         fast_clone_properties_count);
2166     __ StoreRegister(result, __ BytecodeOperandReg(3));
2167     __ Dispatch();
2168   }
2169 
2170   __ Bind(&if_not_fast_clone);
2171   {
2172     // If we can't do a fast clone, call into the runtime.
2173     Node* index = __ BytecodeOperandIdx(0);
2174     Node* constant_elements = __ LoadConstantPoolEntry(index);
2175     Node* context = __ GetContext();
2176 
2177     STATIC_ASSERT(CreateObjectLiteralFlags::FlagsBits::kShift == 0);
2178     Node* flags_raw = __ Word32And(
2179         bytecode_flags,
2180         __ Int32Constant(CreateObjectLiteralFlags::FlagsBits::kMask));
2181     Node* flags = __ SmiTag(flags_raw);
2182 
2183     Node* result =
2184         __ CallRuntime(Runtime::kCreateObjectLiteral, context, closure,
2185                        literal_index, constant_elements, flags);
2186     __ StoreRegister(result, __ BytecodeOperandReg(3));
2187     // TODO(klaasb) build a single dispatch once the call is inlined
2188     __ Dispatch();
2189   }
2190 }
2191 
2192 // CreateClosure <index> <tenured>
2193 //
2194 // Creates a new closure for SharedFunctionInfo at position |index| in the
2195 // constant pool and with the PretenureFlag <tenured>.
2196 void Interpreter::DoCreateClosure(InterpreterAssembler* assembler) {
2197   Node* index = __ BytecodeOperandIdx(0);
2198   Node* shared = __ LoadConstantPoolEntry(index);
2199   Node* flags = __ BytecodeOperandFlag(1);
2200   Node* context = __ GetContext();
2201 
2202   Label call_runtime(assembler, Label::kDeferred);
2203   Node* fast_new_closure = __ Word32And(
2204       flags, __ Int32Constant(CreateClosureFlags::FastNewClosureBit::kMask));
2205   __ GotoUnless(fast_new_closure, &call_runtime);
2206   __ SetAccumulator(FastNewClosureStub::Generate(assembler, shared, context));
2207   __ Dispatch();
2208 
2209   __ Bind(&call_runtime);
2210   {
2211     STATIC_ASSERT(CreateClosureFlags::PretenuredBit::kShift == 0);
2212     Node* tenured_raw = __ Word32And(
2213         flags, __ Int32Constant(CreateClosureFlags::PretenuredBit::kMask));
2214     Node* tenured = __ SmiTag(tenured_raw);
2215     Node* result = __ CallRuntime(Runtime::kInterpreterNewClosure, context,
2216                                   shared, tenured);
2217     __ SetAccumulator(result);
2218     __ Dispatch();
2219   }
2220 }
2221 
2222 // CreateBlockContext <index>
2223 //
2224 // Creates a new block context with the scope info constant at |index| and the
2225 // closure in the accumulator.
2226 void Interpreter::DoCreateBlockContext(InterpreterAssembler* assembler) {
2227   Node* index = __ BytecodeOperandIdx(0);
2228   Node* scope_info = __ LoadConstantPoolEntry(index);
2229   Node* closure = __ GetAccumulator();
2230   Node* context = __ GetContext();
2231   __ SetAccumulator(
2232       __ CallRuntime(Runtime::kPushBlockContext, context, scope_info, closure));
2233   __ Dispatch();
2234 }
2235 
2236 // CreateCatchContext <exception> <name_idx> <scope_info_idx>
2237 //
2238 // Creates a new context for a catch block with the |exception| in a register,
2239 // the variable name at |name_idx|, the ScopeInfo at |scope_info_idx|, and the
2240 // closure in the accumulator.
2241 void Interpreter::DoCreateCatchContext(InterpreterAssembler* assembler) {
2242   Node* exception_reg = __ BytecodeOperandReg(0);
2243   Node* exception = __ LoadRegister(exception_reg);
2244   Node* name_idx = __ BytecodeOperandIdx(1);
2245   Node* name = __ LoadConstantPoolEntry(name_idx);
2246   Node* scope_info_idx = __ BytecodeOperandIdx(2);
2247   Node* scope_info = __ LoadConstantPoolEntry(scope_info_idx);
2248   Node* closure = __ GetAccumulator();
2249   Node* context = __ GetContext();
2250   __ SetAccumulator(__ CallRuntime(Runtime::kPushCatchContext, context, name,
2251                                    exception, scope_info, closure));
2252   __ Dispatch();
2253 }
2254 
2255 // CreateFunctionContext <slots>
2256 //
2257 // Creates a new context with number of |slots| for the function closure.
2258 void Interpreter::DoCreateFunctionContext(InterpreterAssembler* assembler) {
2259   Node* closure = __ LoadRegister(Register::function_closure());
2260   Node* slots = __ BytecodeOperandUImm(0);
2261   Node* context = __ GetContext();
2262   __ SetAccumulator(
2263       FastNewFunctionContextStub::Generate(assembler, closure, slots, context));
2264   __ Dispatch();
2265 }
2266 
2267 // CreateWithContext <register> <scope_info_idx>
2268 //
2269 // Creates a new context with the ScopeInfo at |scope_info_idx| for a
2270 // with-statement with the object in |register| and the closure in the
2271 // accumulator.
2272 void Interpreter::DoCreateWithContext(InterpreterAssembler* assembler) {
2273   Node* reg_index = __ BytecodeOperandReg(0);
2274   Node* object = __ LoadRegister(reg_index);
2275   Node* scope_info_idx = __ BytecodeOperandIdx(1);
2276   Node* scope_info = __ LoadConstantPoolEntry(scope_info_idx);
2277   Node* closure = __ GetAccumulator();
2278   Node* context = __ GetContext();
2279   __ SetAccumulator(__ CallRuntime(Runtime::kPushWithContext, context, object,
2280                                    scope_info, closure));
2281   __ Dispatch();
2282 }
2283 
2284 // CreateMappedArguments
2285 //
2286 // Creates a new mapped arguments object.
2287 void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
2288   Node* closure = __ LoadRegister(Register::function_closure());
2289   Node* context = __ GetContext();
2290 
2291   Label if_duplicate_parameters(assembler, Label::kDeferred);
2292   Label if_not_duplicate_parameters(assembler);
2293 
2294   // Check if function has duplicate parameters.
2295   // TODO(rmcilroy): Remove this check when FastNewSloppyArgumentsStub supports
2296   // duplicate parameters.
2297   Node* shared_info =
2298       __ LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
2299   Node* compiler_hints = __ LoadObjectField(
2300       shared_info, SharedFunctionInfo::kHasDuplicateParametersByteOffset,
2301       MachineType::Uint8());
2302   Node* duplicate_parameters_bit = __ Int32Constant(
2303       1 << SharedFunctionInfo::kHasDuplicateParametersBitWithinByte);
2304   Node* compare = __ Word32And(compiler_hints, duplicate_parameters_bit);
2305   __ Branch(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);
2306 
2307   __ Bind(&if_not_duplicate_parameters);
2308   {
2309     // TODO(rmcilroy): Inline FastNewSloppyArguments when it is a TurboFan stub.
2310     Callable callable = CodeFactory::FastNewSloppyArguments(isolate_, true);
2311     Node* target = __ HeapConstant(callable.code());
2312     Node* result = __ CallStub(callable.descriptor(), target, context, closure);
2313     __ SetAccumulator(result);
2314     __ Dispatch();
2315   }
2316 
2317   __ Bind(&if_duplicate_parameters);
2318   {
2319     Node* result =
2320         __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure);
2321     __ SetAccumulator(result);
2322     __ Dispatch();
2323   }
2324 }
2325 
2326 // CreateUnmappedArguments
2327 //
2328 // Creates a new unmapped arguments object.
2329 void Interpreter::DoCreateUnmappedArguments(InterpreterAssembler* assembler) {
2330   // TODO(rmcilroy): Inline FastNewStrictArguments when it is a TurboFan stub.
2331   Callable callable = CodeFactory::FastNewStrictArguments(isolate_, true);
2332   Node* target = __ HeapConstant(callable.code());
2333   Node* context = __ GetContext();
2334   Node* closure = __ LoadRegister(Register::function_closure());
2335   Node* result = __ CallStub(callable.descriptor(), target, context, closure);
2336   __ SetAccumulator(result);
2337   __ Dispatch();
2338 }
2339 
2340 // CreateRestParameter
2341 //
2342 // Creates a new rest parameter array.
2343 void Interpreter::DoCreateRestParameter(InterpreterAssembler* assembler) {
2344   // TODO(rmcilroy): Inline FastNewRestArguments when it is a TurboFan stub.
2345   Callable callable = CodeFactory::FastNewRestParameter(isolate_, true);
2346   Node* target = __ HeapConstant(callable.code());
2347   Node* closure = __ LoadRegister(Register::function_closure());
2348   Node* context = __ GetContext();
2349   Node* result = __ CallStub(callable.descriptor(), target, context, closure);
2350   __ SetAccumulator(result);
2351   __ Dispatch();
2352 }
2353 
2354 // StackCheck
2355 //
2356 // Performs a stack guard check.
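// The check compares the stack pointer against the isolate's stack limit;
// pending interrupts (e.g. GC or termination requests) are signalled by
// lowering that limit, which sends execution to the deferred
// Runtime::kStackGuard call below.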
2357 void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
2358   Label ok(assembler), stack_check_interrupt(assembler, Label::kDeferred);
2359 
2360   Node* interrupt = __ StackCheckTriggeredInterrupt();
2361   __ Branch(interrupt, &stack_check_interrupt, &ok);
2362 
2363   __ Bind(&ok);
2364   __ Dispatch();
2365 
2366   __ Bind(&stack_check_interrupt);
2367   {
2368     Node* context = __ GetContext();
2369     __ CallRuntime(Runtime::kStackGuard, context);
2370     __ Dispatch();
2371   }
2372 }
2373 
2374 // Throw
2375 //
2376 // Throws the exception in the accumulator.
2377 void Interpreter::DoThrow(InterpreterAssembler* assembler) {
2378   Node* exception = __ GetAccumulator();
2379   Node* context = __ GetContext();
2380   __ CallRuntime(Runtime::kThrow, context, exception);
2381   // We shouldn't ever return from a throw.
2382   __ Abort(kUnexpectedReturnFromThrow);
2383 }
2384 
2385 // ReThrow
2386 //
2387 // Re-throws the exception in the accumulator.
2388 void Interpreter::DoReThrow(InterpreterAssembler* assembler) {
2389   Node* exception = __ GetAccumulator();
2390   Node* context = __ GetContext();
2391   __ CallRuntime(Runtime::kReThrow, context, exception);
2392   // We shouldn't ever return from a throw.
2393   __ Abort(kUnexpectedReturnFromThrow);
2394 }
2395 
2396 // Return
2397 //
2398 // Return the value in the accumulator.
2399 void Interpreter::DoReturn(InterpreterAssembler* assembler) {
2400   __ UpdateInterruptBudgetOnReturn();
2401   Node* accumulator = __ GetAccumulator();
2402   __ Return(accumulator);
2403 }
2404 
2405 // Debugger
2406 //
2407 // Call runtime to handle debugger statement.
2408 void Interpreter::DoDebugger(InterpreterAssembler* assembler) {
2409   Node* context = __ GetContext();
2410   __ CallRuntime(Runtime::kHandleDebuggerStatement, context);
2411   __ Dispatch();
2412 }
2413 
2414 // DebugBreak
2415 //
2416 // Call runtime to handle a debug break.
2417 #define DEBUG_BREAK(Name, ...)                                                \
2418   void Interpreter::Do##Name(InterpreterAssembler* assembler) {               \
2419     Node* context = __ GetContext();                                          \
2420     Node* accumulator = __ GetAccumulator();                                  \
2421     Node* original_handler =                                                  \
2422         __ CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \
2423     __ DispatchToBytecodeHandler(original_handler);                           \
2424   }
2425 DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
2426 #undef DEBUG_BREAK
2427 
2428 void Interpreter::BuildForInPrepareResult(Node* output_register,
2429                                           Node* cache_type, Node* cache_array,
2430                                           Node* cache_length,
2431                                           InterpreterAssembler* assembler) {
2432   __ StoreRegister(cache_type, output_register);
2433   output_register = __ NextRegister(output_register);
2434   __ StoreRegister(cache_array, output_register);
2435   output_register = __ NextRegister(output_register);
2436   __ StoreRegister(cache_length, output_register);
2437 }
2438 
2439 // ForInPrepare <receiver> <cache_info_triple>
2440 //
2441 // Returns state for for..in loop execution based on the object in the register
2442 // |receiver|. The object must not be null or undefined and must have been
2443 // converted to a receiver already.
2444 // The result is output in registers |cache_info_triple| to
2445 // |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
2446 // and cache_length respectively.
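// Together with ForInContinue, ForInNext and ForInStep below, this
// implements `for (var key in obj)`: ForInPrepare produces the cache triple
// once, and each iteration then tests the index against cache_length, loads
// the next key, and increments the index register.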
2447 void Interpreter::DoForInPrepare(InterpreterAssembler* assembler) {
2448   Node* object_reg = __ BytecodeOperandReg(0);
2449   Node* receiver = __ LoadRegister(object_reg);
2450   Node* context = __ GetContext();
2451   Node* const zero_smi = __ SmiConstant(Smi::kZero);
2452 
2453   Label nothing_to_iterate(assembler, Label::kDeferred),
2454       use_enum_cache(assembler), use_runtime(assembler, Label::kDeferred);
2455 
2456   if (FLAG_debug_code) {
2457     Label already_receiver(assembler), abort(assembler);
2458     Node* instance_type = __ LoadInstanceType(receiver);
2459     __ Branch(__ IsJSReceiverInstanceType(instance_type), &already_receiver,
2460               &abort);
2461     __ Bind(&abort);
2462     {
2463       __ Abort(kExpectedJSReceiver);
2464       // TODO(klaasb) remove this unreachable Goto once Abort ends the block
2465       __ Goto(&already_receiver);
2466     }
2467     __ Bind(&already_receiver);
2468   }
2469 
2470   __ CheckEnumCache(receiver, &use_enum_cache, &use_runtime);
2471 
2472   __ Bind(&use_enum_cache);
2473   {
2474     // The enum cache is valid.  Load the map of the object being
2475     // iterated over and use the cache for the iteration.
2476     Node* cache_type = __ LoadMap(receiver);
2477     Node* cache_length = __ EnumLength(cache_type);
2478     __ GotoIf(assembler->WordEqual(cache_length, zero_smi),
2479               &nothing_to_iterate);
2480     Node* descriptors = __ LoadMapDescriptors(cache_type);
2481     Node* cache_offset =
2482         __ LoadObjectField(descriptors, DescriptorArray::kEnumCacheOffset);
2483     Node* cache_array = __ LoadObjectField(
2484         cache_offset, DescriptorArray::kEnumCacheBridgeCacheOffset);
2485     Node* output_register = __ BytecodeOperandReg(1);
2486     BuildForInPrepareResult(output_register, cache_type, cache_array,
2487                             cache_length, assembler);
2488     __ Dispatch();
2489   }
2490 
2491   __ Bind(&use_runtime);
2492   {
2493     Node* result_triple =
2494         __ CallRuntime(Runtime::kForInPrepare, context, receiver);
2495     Node* cache_type = __ Projection(0, result_triple);
2496     Node* cache_array = __ Projection(1, result_triple);
2497     Node* cache_length = __ Projection(2, result_triple);
2498     Node* output_register = __ BytecodeOperandReg(1);
2499     BuildForInPrepareResult(output_register, cache_type, cache_array,
2500                             cache_length, assembler);
2501     __ Dispatch();
2502   }
2503 
2504   __ Bind(&nothing_to_iterate);
2505   {
2506     // Receiver is null or undefined or descriptors are zero length.
2507     Node* output_register = __ BytecodeOperandReg(1);
2508     BuildForInPrepareResult(output_register, zero_smi, zero_smi, zero_smi,
2509                             assembler);
2510     __ Dispatch();
2511   }
2512 }
2513 
2514 // ForInNext <receiver> <index> <cache_info_pair>
2515 //
2516 // Returns the next enumerable property in the accumulator.
2517 void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
2518   Node* receiver_reg = __ BytecodeOperandReg(0);
2519   Node* receiver = __ LoadRegister(receiver_reg);
2520   Node* index_reg = __ BytecodeOperandReg(1);
2521   Node* index = __ LoadRegister(index_reg);
2522   Node* cache_type_reg = __ BytecodeOperandReg(2);
2523   Node* cache_type = __ LoadRegister(cache_type_reg);
2524   Node* cache_array_reg = __ NextRegister(cache_type_reg);
2525   Node* cache_array = __ LoadRegister(cache_array_reg);
2526 
2527   // Load the next key from the enumeration array.
2528   Node* key = __ LoadFixedArrayElement(cache_array, index, 0,
2529                                        CodeStubAssembler::SMI_PARAMETERS);
2530 
2531   // Check if we can use the for-in fast path potentially using the enum cache.
2532   Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
2533   Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
2534   __ Branch(__ WordEqual(receiver_map, cache_type), &if_fast, &if_slow);
2535   __ Bind(&if_fast);
2536   {
2537     // Enum cache in use for {receiver}, the {key} is definitely valid.
2538     __ SetAccumulator(key);
2539     __ Dispatch();
2540   }
2541   __ Bind(&if_slow);
2542   {
2543     // Record the fact that we hit the for-in slow path.
2544     Node* vector_index = __ BytecodeOperandIdx(3);
2545     Node* type_feedback_vector = __ LoadTypeFeedbackVector();
2546     Node* megamorphic_sentinel =
2547         __ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate_));
2548     __ StoreFixedArrayElement(type_feedback_vector, vector_index,
2549                               megamorphic_sentinel, SKIP_WRITE_BARRIER);
2550 
2551     // Need to filter the {key} for the {receiver}.
2552     Node* context = __ GetContext();
2553     Callable callable = CodeFactory::ForInFilter(assembler->isolate());
2554     Node* result = __ CallStub(callable, context, key, receiver);
2555     __ SetAccumulator(result);
2556     __ Dispatch();
2557   }
2558 }
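
// Note: the ForInFilter stub used on the slow path above either returns the
// {key} unchanged (when it is still a valid enumerable property of the
// {receiver}) or undefined, which the generated loop is expected to skip.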

// ForInContinue <index> <cache_length>
//
// Returns false if the end of the enumerable properties has been reached.
void Interpreter::DoForInContinue(InterpreterAssembler* assembler) {
  Node* index_reg = __ BytecodeOperandReg(0);
  Node* index = __ LoadRegister(index_reg);
  Node* cache_length_reg = __ BytecodeOperandReg(1);
  Node* cache_length = __ LoadRegister(cache_length_reg);

  // Check if {index} is at {cache_length} already.
  Label if_true(assembler), if_false(assembler), end(assembler);
  __ Branch(__ WordEqual(index, cache_length), &if_true, &if_false);
  __ Bind(&if_true);
  {
    __ SetAccumulator(__ BooleanConstant(false));
    __ Goto(&end);
  }
  __ Bind(&if_false);
  {
    __ SetAccumulator(__ BooleanConstant(true));
    __ Goto(&end);
  }
  __ Bind(&end);
  __ Dispatch();
}

// ForInStep <index>
//
// Increments the loop counter in register |index| and stores the result
// in the accumulator.
void Interpreter::DoForInStep(InterpreterAssembler* assembler) {
  Node* index_reg = __ BytecodeOperandReg(0);
  Node* index = __ LoadRegister(index_reg);
  Node* one = __ SmiConstant(Smi::FromInt(1));
  Node* result = __ SmiAdd(index, one);
  __ SetAccumulator(result);
  __ Dispatch();
}

// Wide
//
// Prefix bytecode indicating next bytecode has wide (16-bit) operands.
void Interpreter::DoWide(InterpreterAssembler* assembler) {
  __ DispatchWide(OperandScale::kDouble);
}

// ExtraWide
//
// Prefix bytecode indicating next bytecode has extra-wide (32-bit) operands.
void Interpreter::DoExtraWide(InterpreterAssembler* assembler) {
  __ DispatchWide(OperandScale::kQuadruple);
}
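
// The bytecode generator emits these prefix bytecodes when an operand does
// not fit in its default width: for example (illustrative), a register index
// or constant pool entry beyond the 8-bit range makes the following bytecode
// use the Wide prefix with 16-bit operands, and one beyond the 16-bit range
// makes it use the ExtraWide prefix with 32-bit operands.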

// Illegal
//
// An invalid bytecode aborting execution if dispatched.
void Interpreter::DoIllegal(InterpreterAssembler* assembler) {
  __ Abort(kInvalidBytecode);
}

// Nop
//
// No operation.
void Interpreter::DoNop(InterpreterAssembler* assembler) { __ Dispatch(); }

// SuspendGenerator <generator>
//
// Exports the register file and stores it into the generator.  Also stores
// the current context, the state given in the accumulator, and the current
// bytecode offset (for debugging purposes) into the generator.
void Interpreter::DoSuspendGenerator(InterpreterAssembler* assembler) {
  Node* generator_reg = __ BytecodeOperandReg(0);
  Node* generator = __ LoadRegister(generator_reg);

  // If the debugger is stepping, notify the runtime about the suspension
  // before the generator state is saved (see the deferred block below).
  Label if_stepping(assembler, Label::kDeferred), ok(assembler);
  Node* step_action_address = __ ExternalConstant(
      ExternalReference::debug_last_step_action_address(isolate_));
  Node* step_action = __ Load(MachineType::Int8(), step_action_address);
  STATIC_ASSERT(StepIn > StepNext);
  STATIC_ASSERT(StepFrame > StepNext);
  STATIC_ASSERT(LastStepAction == StepFrame);
  Node* step_next = __ Int32Constant(StepNext);
  __ Branch(__ Int32LessThanOrEqual(step_next, step_action), &if_stepping, &ok);
  __ Bind(&ok);

  Node* array =
      __ LoadObjectField(generator, JSGeneratorObject::kOperandStackOffset);
  Node* context = __ GetContext();
  Node* state = __ GetAccumulator();

  __ ExportRegisterFile(array);
  __ StoreObjectField(generator, JSGeneratorObject::kContextOffset, context);
  __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset, state);

  Node* offset = __ SmiTag(__ BytecodeOffset());
  __ StoreObjectField(generator, JSGeneratorObject::kInputOrDebugPosOffset,
                      offset);

  __ Dispatch();

  __ Bind(&if_stepping);
  {
    Node* context = __ GetContext();
    __ CallRuntime(Runtime::kDebugRecordAsyncFunction, context, generator);
    __ Goto(&ok);
  }
}

// ResumeGenerator <generator>
//
// Imports the register file stored in the generator. Also loads the
// generator's state and stores it in the accumulator, before overwriting it
// with kGeneratorExecuting.
void Interpreter::DoResumeGenerator(InterpreterAssembler* assembler) {
  Node* generator_reg = __ BytecodeOperandReg(0);
  Node* generator = __ LoadRegister(generator_reg);

  // Restore the register file that was exported when the generator suspended.
  __ ImportRegisterFile(
      __ LoadObjectField(generator, JSGeneratorObject::kOperandStackOffset));

  // Load the suspended state into the accumulator and mark the generator as
  // executing again.
  Node* old_state =
      __ LoadObjectField(generator, JSGeneratorObject::kContinuationOffset);
  Node* new_state = __ Int32Constant(JSGeneratorObject::kGeneratorExecuting);
  __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset,
      __ SmiTag(new_state));
  __ SetAccumulator(old_state);

  __ Dispatch();
}
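
// SuspendGenerator and ResumeGenerator together implement the suspension and
// resumption of generator (and desugared async) functions. As a rough,
// illustrative sketch (register numbers and the state value are made up), a
// yield compiles to something along the lines of:
//
//   Star             r1       ;; stash the value to yield (from accumulator)
//   LdaSmi           [0]      ;; suspend state identifying this yield point
//   SuspendGenerator r0       ;; r0 holds the generator object
//   Ldar             r1
//   Return
//
// On resumption the function is re-entered from the top; ResumeGenerator
// restores the exported registers and leaves the saved state in the
// accumulator, from which the generated code can branch back to the
// suspension point.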

}  // namespace interpreter
}  // namespace internal
}  // namespace v8