1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/compiler/pipeline.h"
6 
7 #include <fstream>  // NOLINT(readability/streams)
8 #include <memory>
9 #include <sstream>
10 
11 #include "src/base/adapters.h"
12 #include "src/base/platform/elapsed-timer.h"
13 #include "src/compilation-info.h"
14 #include "src/compiler.h"
15 #include "src/compiler/ast-graph-builder.h"
16 #include "src/compiler/ast-loop-assignment-analyzer.h"
17 #include "src/compiler/basic-block-instrumentor.h"
18 #include "src/compiler/branch-elimination.h"
19 #include "src/compiler/bytecode-graph-builder.h"
20 #include "src/compiler/checkpoint-elimination.h"
21 #include "src/compiler/code-generator.h"
22 #include "src/compiler/common-operator-reducer.h"
23 #include "src/compiler/control-flow-optimizer.h"
24 #include "src/compiler/dead-code-elimination.h"
25 #include "src/compiler/effect-control-linearizer.h"
26 #include "src/compiler/escape-analysis-reducer.h"
27 #include "src/compiler/escape-analysis.h"
28 #include "src/compiler/frame-elider.h"
29 #include "src/compiler/graph-replay.h"
30 #include "src/compiler/graph-trimmer.h"
31 #include "src/compiler/graph-visualizer.h"
32 #include "src/compiler/instruction-selector.h"
33 #include "src/compiler/instruction.h"
34 #include "src/compiler/js-builtin-reducer.h"
35 #include "src/compiler/js-call-reducer.h"
36 #include "src/compiler/js-context-specialization.h"
37 #include "src/compiler/js-create-lowering.h"
38 #include "src/compiler/js-frame-specialization.h"
39 #include "src/compiler/js-generic-lowering.h"
40 #include "src/compiler/js-global-object-specialization.h"
41 #include "src/compiler/js-inlining-heuristic.h"
42 #include "src/compiler/js-intrinsic-lowering.h"
43 #include "src/compiler/js-native-context-specialization.h"
44 #include "src/compiler/js-typed-lowering.h"
45 #include "src/compiler/jump-threading.h"
46 #include "src/compiler/live-range-separator.h"
47 #include "src/compiler/load-elimination.h"
48 #include "src/compiler/loop-analysis.h"
49 #include "src/compiler/loop-peeling.h"
50 #include "src/compiler/loop-variable-optimizer.h"
51 #include "src/compiler/machine-graph-verifier.h"
52 #include "src/compiler/machine-operator-reducer.h"
53 #include "src/compiler/memory-optimizer.h"
54 #include "src/compiler/move-optimizer.h"
55 #include "src/compiler/osr.h"
56 #include "src/compiler/pipeline-statistics.h"
57 #include "src/compiler/redundancy-elimination.h"
58 #include "src/compiler/register-allocator-verifier.h"
59 #include "src/compiler/register-allocator.h"
60 #include "src/compiler/schedule.h"
61 #include "src/compiler/scheduler.h"
62 #include "src/compiler/select-lowering.h"
63 #include "src/compiler/simplified-lowering.h"
64 #include "src/compiler/simplified-operator-reducer.h"
65 #include "src/compiler/simplified-operator.h"
66 #include "src/compiler/store-store-elimination.h"
67 #include "src/compiler/tail-call-optimization.h"
68 #include "src/compiler/type-hint-analyzer.h"
69 #include "src/compiler/typed-optimization.h"
70 #include "src/compiler/typer.h"
71 #include "src/compiler/value-numbering-reducer.h"
72 #include "src/compiler/verifier.h"
73 #include "src/compiler/zone-stats.h"
74 #include "src/isolate-inl.h"
75 #include "src/ostreams.h"
76 #include "src/parsing/parse-info.h"
77 #include "src/register-configuration.h"
78 #include "src/type-info.h"
79 #include "src/utils.h"
80 
81 namespace v8 {
82 namespace internal {
83 namespace compiler {
84 
85 class PipelineData {
86  public:
87   // For main entry point.
PipelineData(ZoneStats * zone_stats,CompilationInfo * info,PipelineStatistics * pipeline_statistics)88   PipelineData(ZoneStats* zone_stats, CompilationInfo* info,
89                PipelineStatistics* pipeline_statistics)
90       : isolate_(info->isolate()),
91         info_(info),
92         debug_name_(info_->GetDebugName()),
93         outer_zone_(info_->zone()),
94         zone_stats_(zone_stats),
95         pipeline_statistics_(pipeline_statistics),
96         graph_zone_scope_(zone_stats_, ZONE_NAME),
97         graph_zone_(graph_zone_scope_.zone()),
98         instruction_zone_scope_(zone_stats_, ZONE_NAME),
99         instruction_zone_(instruction_zone_scope_.zone()),
100         register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
101         register_allocation_zone_(register_allocation_zone_scope_.zone()) {
102     PhaseScope scope(pipeline_statistics, "init pipeline data");
103     graph_ = new (graph_zone_) Graph(graph_zone_);
104     source_positions_ = new (graph_zone_) SourcePositionTable(graph_);
105     simplified_ = new (graph_zone_) SimplifiedOperatorBuilder(graph_zone_);
106     machine_ = new (graph_zone_) MachineOperatorBuilder(
107         graph_zone_, MachineType::PointerRepresentation(),
108         InstructionSelector::SupportedMachineOperatorFlags(),
109         InstructionSelector::AlignmentRequirements());
110     common_ = new (graph_zone_) CommonOperatorBuilder(graph_zone_);
111     javascript_ = new (graph_zone_) JSOperatorBuilder(graph_zone_);
112     jsgraph_ = new (graph_zone_)
113         JSGraph(isolate_, graph_, common_, javascript_, simplified_, machine_);
114   }
115 
116   // For WASM compile entry point.
PipelineData(ZoneStats * zone_stats,CompilationInfo * info,Graph * graph,SourcePositionTable * source_positions)117   PipelineData(ZoneStats* zone_stats, CompilationInfo* info, Graph* graph,
118                SourcePositionTable* source_positions)
119       : isolate_(info->isolate()),
120         info_(info),
121         debug_name_(info_->GetDebugName()),
122         zone_stats_(zone_stats),
123         graph_zone_scope_(zone_stats_, ZONE_NAME),
124         graph_(graph),
125         source_positions_(source_positions),
126         instruction_zone_scope_(zone_stats_, ZONE_NAME),
127         instruction_zone_(instruction_zone_scope_.zone()),
128         register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
129         register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
130 
131   // For machine graph testing entry point.
PipelineData(ZoneStats * zone_stats,CompilationInfo * info,Graph * graph,Schedule * schedule)132   PipelineData(ZoneStats* zone_stats, CompilationInfo* info, Graph* graph,
133                Schedule* schedule)
134       : isolate_(info->isolate()),
135         info_(info),
136         debug_name_(info_->GetDebugName()),
137         zone_stats_(zone_stats),
138         graph_zone_scope_(zone_stats_, ZONE_NAME),
139         graph_(graph),
140         source_positions_(new (info->zone()) SourcePositionTable(graph_)),
141         schedule_(schedule),
142         instruction_zone_scope_(zone_stats_, ZONE_NAME),
143         instruction_zone_(instruction_zone_scope_.zone()),
144         register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
145         register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
146 
147   // For register allocation testing entry point.
PipelineData(ZoneStats * zone_stats,CompilationInfo * info,InstructionSequence * sequence)148   PipelineData(ZoneStats* zone_stats, CompilationInfo* info,
149                InstructionSequence* sequence)
150       : isolate_(info->isolate()),
151         info_(info),
152         debug_name_(info_->GetDebugName()),
153         zone_stats_(zone_stats),
154         graph_zone_scope_(zone_stats_, ZONE_NAME),
155         instruction_zone_scope_(zone_stats_, ZONE_NAME),
156         instruction_zone_(sequence->zone()),
157         sequence_(sequence),
158         register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
159         register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
160 
~PipelineData()161   ~PipelineData() {
162     DeleteRegisterAllocationZone();
163     DeleteInstructionZone();
164     DeleteGraphZone();
165   }
166 
isolate() const167   Isolate* isolate() const { return isolate_; }
info() const168   CompilationInfo* info() const { return info_; }
zone_stats() const169   ZoneStats* zone_stats() const { return zone_stats_; }
pipeline_statistics()170   PipelineStatistics* pipeline_statistics() { return pipeline_statistics_; }
compilation_failed() const171   bool compilation_failed() const { return compilation_failed_; }
set_compilation_failed()172   void set_compilation_failed() { compilation_failed_ = true; }
code()173   Handle<Code> code() { return code_; }
set_code(Handle<Code> code)174   void set_code(Handle<Code> code) {
175     DCHECK(code_.is_null());
176     code_ = code;
177   }
178 
179   // RawMachineAssembler generally produces graphs which cannot be verified.
MayHaveUnverifiableGraph() const180   bool MayHaveUnverifiableGraph() const { return outer_zone_ == nullptr; }
181 
graph_zone() const182   Zone* graph_zone() const { return graph_zone_; }
graph() const183   Graph* graph() const { return graph_; }
source_positions() const184   SourcePositionTable* source_positions() const { return source_positions_; }
machine() const185   MachineOperatorBuilder* machine() const { return machine_; }
common() const186   CommonOperatorBuilder* common() const { return common_; }
javascript() const187   JSOperatorBuilder* javascript() const { return javascript_; }
jsgraph() const188   JSGraph* jsgraph() const { return jsgraph_; }
native_context() const189   Handle<Context> native_context() const {
190     return handle(info()->native_context(), isolate());
191   }
global_object() const192   Handle<JSGlobalObject> global_object() const {
193     return handle(info()->global_object(), isolate());
194   }
195 
loop_assignment() const196   LoopAssignmentAnalysis* loop_assignment() const { return loop_assignment_; }
set_loop_assignment(LoopAssignmentAnalysis * loop_assignment)197   void set_loop_assignment(LoopAssignmentAnalysis* loop_assignment) {
198     DCHECK(!loop_assignment_);
199     loop_assignment_ = loop_assignment;
200   }
201 
type_hint_analysis() const202   TypeHintAnalysis* type_hint_analysis() const { return type_hint_analysis_; }
set_type_hint_analysis(TypeHintAnalysis * type_hint_analysis)203   void set_type_hint_analysis(TypeHintAnalysis* type_hint_analysis) {
204     DCHECK_NULL(type_hint_analysis_);
205     type_hint_analysis_ = type_hint_analysis;
206   }
207 
schedule() const208   Schedule* schedule() const { return schedule_; }
set_schedule(Schedule * schedule)209   void set_schedule(Schedule* schedule) {
210     DCHECK(!schedule_);
211     schedule_ = schedule;
212   }
reset_schedule()213   void reset_schedule() { schedule_ = nullptr; }
214 
instruction_zone() const215   Zone* instruction_zone() const { return instruction_zone_; }
sequence() const216   InstructionSequence* sequence() const { return sequence_; }
frame() const217   Frame* frame() const { return frame_; }
218 
register_allocation_zone() const219   Zone* register_allocation_zone() const { return register_allocation_zone_; }
register_allocation_data() const220   RegisterAllocationData* register_allocation_data() const {
221     return register_allocation_data_;
222   }
223 
profiler_data() const224   BasicBlockProfiler::Data* profiler_data() const { return profiler_data_; }
set_profiler_data(BasicBlockProfiler::Data * profiler_data)225   void set_profiler_data(BasicBlockProfiler::Data* profiler_data) {
226     profiler_data_ = profiler_data;
227   }
228 
source_position_output() const229   std::string const& source_position_output() const {
230     return source_position_output_;
231   }
set_source_position_output(std::string const & source_position_output)232   void set_source_position_output(std::string const& source_position_output) {
233     source_position_output_ = source_position_output;
234   }
235 
DeleteGraphZone()236   void DeleteGraphZone() {
237     if (graph_zone_ == nullptr) return;
238     graph_zone_scope_.Destroy();
239     graph_zone_ = nullptr;
240     graph_ = nullptr;
241     source_positions_ = nullptr;
242     loop_assignment_ = nullptr;
243     type_hint_analysis_ = nullptr;
244     simplified_ = nullptr;
245     machine_ = nullptr;
246     common_ = nullptr;
247     javascript_ = nullptr;
248     jsgraph_ = nullptr;
249     schedule_ = nullptr;
250   }
251 
DeleteInstructionZone()252   void DeleteInstructionZone() {
253     if (instruction_zone_ == nullptr) return;
254     instruction_zone_scope_.Destroy();
255     instruction_zone_ = nullptr;
256     sequence_ = nullptr;
257     frame_ = nullptr;
258   }
259 
DeleteRegisterAllocationZone()260   void DeleteRegisterAllocationZone() {
261     if (register_allocation_zone_ == nullptr) return;
262     register_allocation_zone_scope_.Destroy();
263     register_allocation_zone_ = nullptr;
264     register_allocation_data_ = nullptr;
265   }
266 
InitializeInstructionSequence(const CallDescriptor * descriptor)267   void InitializeInstructionSequence(const CallDescriptor* descriptor) {
268     DCHECK(sequence_ == nullptr);
269     InstructionBlocks* instruction_blocks =
270         InstructionSequence::InstructionBlocksFor(instruction_zone(),
271                                                   schedule());
272     sequence_ = new (instruction_zone()) InstructionSequence(
273         info()->isolate(), instruction_zone(), instruction_blocks);
274     if (descriptor && descriptor->RequiresFrameAsIncoming()) {
275       sequence_->instruction_blocks()[0]->mark_needs_frame();
276     } else {
277       DCHECK_EQ(0u, descriptor->CalleeSavedFPRegisters());
278       DCHECK_EQ(0u, descriptor->CalleeSavedRegisters());
279     }
280   }
281 
InitializeFrameData(CallDescriptor * descriptor)282   void InitializeFrameData(CallDescriptor* descriptor) {
283     DCHECK(frame_ == nullptr);
284     int fixed_frame_size = 0;
285     if (descriptor != nullptr) {
286       fixed_frame_size = descriptor->CalculateFixedFrameSize();
287     }
288     frame_ = new (instruction_zone()) Frame(fixed_frame_size);
289   }
290 
InitializeRegisterAllocationData(const RegisterConfiguration * config,CallDescriptor * descriptor)291   void InitializeRegisterAllocationData(const RegisterConfiguration* config,
292                                         CallDescriptor* descriptor) {
293     DCHECK(register_allocation_data_ == nullptr);
294     register_allocation_data_ = new (register_allocation_zone())
295         RegisterAllocationData(config, register_allocation_zone(), frame(),
296                                sequence(), debug_name_.get());
297   }
298 
BeginPhaseKind(const char * phase_kind_name)299   void BeginPhaseKind(const char* phase_kind_name) {
300     if (pipeline_statistics() != nullptr) {
301       pipeline_statistics()->BeginPhaseKind(phase_kind_name);
302     }
303   }
304 
EndPhaseKind()305   void EndPhaseKind() {
306     if (pipeline_statistics() != nullptr) {
307       pipeline_statistics()->EndPhaseKind();
308     }
309   }
310 
311  private:
312   Isolate* const isolate_;
313   CompilationInfo* const info_;
314   std::unique_ptr<char[]> debug_name_;
315   Zone* outer_zone_ = nullptr;
316   ZoneStats* const zone_stats_;
317   PipelineStatistics* pipeline_statistics_ = nullptr;
318   bool compilation_failed_ = false;
319   Handle<Code> code_ = Handle<Code>::null();
320 
321   // All objects in the following group of fields are allocated in graph_zone_.
322   // They are all set to nullptr when the graph_zone_ is destroyed.
323   ZoneStats::Scope graph_zone_scope_;
324   Zone* graph_zone_ = nullptr;
325   Graph* graph_ = nullptr;
326   SourcePositionTable* source_positions_ = nullptr;
327   LoopAssignmentAnalysis* loop_assignment_ = nullptr;
328   TypeHintAnalysis* type_hint_analysis_ = nullptr;
329   SimplifiedOperatorBuilder* simplified_ = nullptr;
330   MachineOperatorBuilder* machine_ = nullptr;
331   CommonOperatorBuilder* common_ = nullptr;
332   JSOperatorBuilder* javascript_ = nullptr;
333   JSGraph* jsgraph_ = nullptr;
334   Schedule* schedule_ = nullptr;
335 
336   // All objects in the following group of fields are allocated in
337   // instruction_zone_.  They are all set to nullptr when the instruction_zone_
338   // is
339   // destroyed.
340   ZoneStats::Scope instruction_zone_scope_;
341   Zone* instruction_zone_;
342   InstructionSequence* sequence_ = nullptr;
343   Frame* frame_ = nullptr;
344 
345   // All objects in the following group of fields are allocated in
346   // register_allocation_zone_.  They are all set to nullptr when the zone is
347   // destroyed.
348   ZoneStats::Scope register_allocation_zone_scope_;
349   Zone* register_allocation_zone_;
350   RegisterAllocationData* register_allocation_data_ = nullptr;
351 
352   // Basic block profiling support.
353   BasicBlockProfiler::Data* profiler_data_ = nullptr;
354 
355   // Source position output for --trace-turbo.
356   std::string source_position_output_;
357 
358   DISALLOW_COPY_AND_ASSIGN(PipelineData);
359 };
360 
// Thin driver that executes the individual compilation phases against a
// shared PipelineData instance.
class PipelineImpl final {
 public:
  explicit PipelineImpl(PipelineData* data) : data_(data) {}

  // Helpers for executing pipeline phases. Each overload instantiates Phase,
  // opens a PipelineRunScope (phase statistics plus a temporary zone) and
  // forwards the extra arguments to Phase::Run.
  template <typename Phase>
  void Run();
  template <typename Phase, typename Arg0>
  void Run(Arg0 arg_0);
  template <typename Phase, typename Arg0, typename Arg1>
  void Run(Arg0 arg_0, Arg1 arg_1);

  // Run the graph creation and initial optimization passes.
  bool CreateGraph();

  // Run the concurrent optimization passes.
  bool OptimizeGraph(Linkage* linkage);

  // Perform the actual code generation and return handle to a code object.
  Handle<Code> GenerateCode(Linkage* linkage);

  bool ScheduleAndSelectInstructions(Linkage* linkage, bool trim_graph);
  void RunPrintAndVerify(const char* phase, bool untyped = false);
  Handle<Code> ScheduleAndGenerateCode(CallDescriptor* call_descriptor);
  void AllocateRegisters(const RegisterConfiguration* config,
                         CallDescriptor* descriptor, bool run_verifier);

  CompilationInfo* info() const;
  Isolate* isolate() const;

  // Not owned; the PipelineData outlives this object.
  PipelineData* const data_;
};
393 
394 namespace {
395 
// Output stream appending to the per-isolate turbo.cfg file used by the
// C1Visualizer-style tracing.
struct TurboCfgFile : public std::ofstream {
  explicit TurboCfgFile(Isolate* isolate)
      : std::ofstream(isolate->GetTurboCfgFileName().c_str(),
                      std::ios_base::app) {}
};
401 
// Output stream for the --trace-turbo JSON visualizer log. The caller picks
// the open mode: trunc to start a fresh log, app to append a phase record.
struct TurboJsonFile : public std::ofstream {
  TurboJsonFile(CompilationInfo* info, std::ios_base::openmode mode)
      : std::ofstream(GetVisualizerLogFileName(info, nullptr, "json").get(),
                      mode) {}
};
407 
TraceSchedule(CompilationInfo * info,Schedule * schedule)408 void TraceSchedule(CompilationInfo* info, Schedule* schedule) {
409   if (FLAG_trace_turbo) {
410     AllowHandleDereference allow_deref;
411     TurboJsonFile json_of(info, std::ios_base::app);
412     json_of << "{\"name\":\"Schedule\",\"type\":\"schedule\",\"data\":\"";
413     std::stringstream schedule_stream;
414     schedule_stream << *schedule;
415     std::string schedule_string(schedule_stream.str());
416     for (const auto& c : schedule_string) {
417       json_of << AsEscapedUC16ForJSON(c);
418     }
419     json_of << "\"},\n";
420   }
421   if (FLAG_trace_turbo_graph || FLAG_trace_turbo_scheduler) {
422     AllowHandleDereference allow_deref;
423     CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
424     OFStream os(tracing_scope.file());
425     os << "-- Schedule --------------------------------------\n" << *schedule;
426   }
427 }
428 
429 
430 class SourcePositionWrapper final : public Reducer {
431  public:
SourcePositionWrapper(Reducer * reducer,SourcePositionTable * table)432   SourcePositionWrapper(Reducer* reducer, SourcePositionTable* table)
433       : reducer_(reducer), table_(table) {}
~SourcePositionWrapper()434   ~SourcePositionWrapper() final {}
435 
Reduce(Node * node)436   Reduction Reduce(Node* node) final {
437     SourcePosition const pos = table_->GetSourcePosition(node);
438     SourcePositionTable::Scope position(table_, pos);
439     return reducer_->Reduce(node);
440   }
441 
Finalize()442   void Finalize() final { reducer_->Finalize(); }
443 
444  private:
445   Reducer* const reducer_;
446   SourcePositionTable* const table_;
447 
448   DISALLOW_COPY_AND_ASSIGN(SourcePositionWrapper);
449 };
450 
451 
// Convenience GraphReducer that is wired to a JSGraph's graph and its
// canonical Dead node.
class JSGraphReducer final : public GraphReducer {
 public:
  JSGraphReducer(JSGraph* jsgraph, Zone* zone)
      : GraphReducer(zone, jsgraph->graph(), jsgraph->Dead()) {}
  ~JSGraphReducer() final {}
};
458 
459 
AddReducer(PipelineData * data,GraphReducer * graph_reducer,Reducer * reducer)460 void AddReducer(PipelineData* data, GraphReducer* graph_reducer,
461                 Reducer* reducer) {
462   if (data->info()->is_source_positions_enabled()) {
463     void* const buffer = data->graph_zone()->New(sizeof(SourcePositionWrapper));
464     SourcePositionWrapper* const wrapper =
465         new (buffer) SourcePositionWrapper(reducer, data->source_positions());
466     graph_reducer->AddReducer(wrapper);
467   } else {
468     graph_reducer->AddReducer(reducer);
469   }
470 }
471 
472 
// RAII scope for a single pipeline phase: records phase statistics (when a
// phase name is given) and provides a temporary zone that is destroyed when
// the phase completes.
class PipelineRunScope {
 public:
  PipelineRunScope(PipelineData* data, const char* phase_name)
      : phase_scope_(
            // A null phase name disables statistics for this run.
            phase_name == nullptr ? nullptr : data->pipeline_statistics(),
            phase_name),
        zone_scope_(data->zone_stats(), ZONE_NAME) {}

  // Temporary zone for allocations that must not outlive the phase.
  Zone* zone() { return zone_scope_.zone(); }

 private:
  PhaseScope phase_scope_;
  ZoneStats::Scope zone_scope_;
};
487 
CreatePipelineStatistics(CompilationInfo * info,ZoneStats * zone_stats)488 PipelineStatistics* CreatePipelineStatistics(CompilationInfo* info,
489                                              ZoneStats* zone_stats) {
490   PipelineStatistics* pipeline_statistics = nullptr;
491 
492   if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
493     pipeline_statistics = new PipelineStatistics(info, zone_stats);
494     pipeline_statistics->BeginPhaseKind("initializing");
495   }
496 
497   if (FLAG_trace_turbo) {
498     TurboJsonFile json_of(info, std::ios_base::trunc);
499     Handle<Script> script = info->script();
500     std::unique_ptr<char[]> function_name = info->GetDebugName();
501     int pos = info->shared_info()->start_position();
502     json_of << "{\"function\":\"" << function_name.get()
503             << "\", \"sourcePosition\":" << pos << ", \"source\":\"";
504     Isolate* isolate = info->isolate();
505     if (!script->IsUndefined(isolate) &&
506         !script->source()->IsUndefined(isolate)) {
507       DisallowHeapAllocation no_allocation;
508       int start = info->shared_info()->start_position();
509       int len = info->shared_info()->end_position() - start;
510       String::SubStringRange source(String::cast(script->source()), start, len);
511       for (const auto& c : source) {
512         json_of << AsEscapedUC16ForJSON(c);
513       }
514     }
515     json_of << "\",\n\"phases\":[";
516   }
517 
518   return pipeline_statistics;
519 }
520 
521 }  // namespace
522 
// CompilationJob driving a full TurboFan optimizing compile of a JSFunction:
// Prepare builds the graph, Execute runs the (possibly concurrent)
// optimizations, Finalize generates and installs the code.
class PipelineCompilationJob final : public CompilationJob {
 public:
  PipelineCompilationJob(Isolate* isolate, Handle<JSFunction> function)
      // Note that the CompilationInfo is not initialized at the time we pass it
      // to the CompilationJob constructor, but it is not dereferenced there.
      : CompilationJob(isolate, &info_, "TurboFan"),
        zone_(isolate->allocator(), ZONE_NAME),
        zone_stats_(isolate->allocator()),
        parse_info_(&zone_, handle(function->shared())),
        info_(&parse_info_, function),
        pipeline_statistics_(CreatePipelineStatistics(info(), &zone_stats_)),
        data_(&zone_stats_, info(), pipeline_statistics_.get()),
        pipeline_(&data_),
        linkage_(nullptr) {}

 protected:
  Status PrepareJobImpl() final;
  Status ExecuteJobImpl() final;
  Status FinalizeJobImpl() final;

 private:
  // NOTE: member declaration order is load-bearing — initialization follows
  // this order and later members depend on earlier ones.
  Zone zone_;
  ZoneStats zone_stats_;
  ParseInfo parse_info_;
  CompilationInfo info_;
  std::unique_ptr<PipelineStatistics> pipeline_statistics_;
  PipelineData data_;
  PipelineImpl pipeline_;
  Linkage* linkage_;  // Allocated in zone_ during PrepareJobImpl.

  DISALLOW_COPY_AND_ASSIGN(PipelineCompilationJob);
};
555 
PrepareJobImpl()556 PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
557   if (info()->shared_info()->asm_function()) {
558     if (info()->osr_frame()) info()->MarkAsFrameSpecializing();
559     info()->MarkAsFunctionContextSpecializing();
560   } else {
561     if (!FLAG_always_opt) {
562       info()->MarkAsBailoutOnUninitialized();
563     }
564     if (FLAG_turbo_inlining) {
565       info()->MarkAsInliningEnabled();
566     }
567   }
568   if (!info()->shared_info()->asm_function() || FLAG_turbo_asm_deoptimization) {
569     info()->MarkAsDeoptimizationEnabled();
570     if (FLAG_inline_accessors) {
571       info()->MarkAsAccessorInliningEnabled();
572     }
573   }
574   if (!info()->is_optimizing_from_bytecode()) {
575     if (info()->is_deoptimization_enabled() && FLAG_turbo_type_feedback) {
576       info()->MarkAsTypeFeedbackEnabled();
577     }
578     if (!Compiler::EnsureDeoptimizationSupport(info())) return FAILED;
579   }
580 
581   linkage_ = new (&zone_) Linkage(Linkage::ComputeIncoming(&zone_, info()));
582 
583   if (!pipeline_.CreateGraph()) {
584     if (isolate()->has_pending_exception()) return FAILED;  // Stack overflowed.
585     return AbortOptimization(kGraphBuildingFailed);
586   }
587 
588   return SUCCEEDED;
589 }
590 
ExecuteJobImpl()591 PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl() {
592   if (!pipeline_.OptimizeGraph(linkage_)) return FAILED;
593   return SUCCEEDED;
594 }
595 
FinalizeJobImpl()596 PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl() {
597   Handle<Code> code = pipeline_.GenerateCode(linkage_);
598   if (code.is_null()) {
599     if (info()->bailout_reason() == kNoReason) {
600       return AbortOptimization(kCodeGenerationFailed);
601     }
602     return FAILED;
603   }
604   info()->dependencies()->Commit(code);
605   info()->SetCode(code);
606   if (info()->is_deoptimization_enabled()) {
607     info()->context()->native_context()->AddOptimizedCode(*code);
608     RegisterWeakObjectsInOptimizedCode(code);
609   }
610   return SUCCEEDED;
611 }
612 
// CompilationJob for WebAssembly: the graph is built by the caller, so the
// job starts in kReadyToExecute and PrepareJobImpl is never run.
class PipelineWasmCompilationJob final : public CompilationJob {
 public:
  explicit PipelineWasmCompilationJob(CompilationInfo* info, Graph* graph,
                                      CallDescriptor* descriptor,
                                      SourcePositionTable* source_positions)
      : CompilationJob(info->isolate(), info, "TurboFan",
                       State::kReadyToExecute),
        zone_stats_(info->isolate()->allocator()),
        data_(&zone_stats_, info, graph, source_positions),
        pipeline_(&data_),
        linkage_(descriptor) {}

 protected:
  Status PrepareJobImpl() final;
  Status ExecuteJobImpl() final;
  Status FinalizeJobImpl() final;

 private:
  ZoneStats zone_stats_;
  PipelineData data_;
  PipelineImpl pipeline_;
  Linkage linkage_;
};
636 
PipelineWasmCompilationJob::Status
PipelineWasmCompilationJob::PrepareJobImpl() {
  UNREACHABLE();  // Prepare should always be skipped for WasmCompilationJob.
  return SUCCEEDED;
}
642 
// Runs scheduling and instruction selection on the caller-provided machine
// graph. No JS-level optimization phases are involved.
PipelineWasmCompilationJob::Status
PipelineWasmCompilationJob::ExecuteJobImpl() {
  if (FLAG_trace_turbo) {
    // Open a fresh visualizer log; WASM has no JS source to embed.
    TurboJsonFile json_of(info(), std::ios_base::trunc);
    json_of << "{\"function\":\"" << info()->GetDebugName().get()
            << "\", \"source\":\"\",\n\"phases\":[";
  }

  // 'true' = graph may be untyped (machine graphs cannot be fully verified).
  pipeline_.RunPrintAndVerify("Machine", true);

  if (!pipeline_.ScheduleAndSelectInstructions(&linkage_, true)) return FAILED;
  return SUCCEEDED;
}
656 
PipelineWasmCompilationJob::Status
PipelineWasmCompilationJob::FinalizeJobImpl() {
  // NOTE(review): the GenerateCode result is discarded and a null code handle
  // is not treated as failure here — presumably callers read the code from
  // the CompilationInfo; confirm against the WASM compile entry point.
  pipeline_.GenerateCode(&linkage_);
  return SUCCEEDED;
}
662 
// Phase-runner overloads: each opens a PipelineRunScope (statistics entry
// named by Phase::phase_name() plus a temporary zone) and invokes
// Phase::Run with the pipeline data, the scope's zone, and any extra args.
template <typename Phase>
void PipelineImpl::Run() {
  PipelineRunScope scope(this->data_, Phase::phase_name());
  Phase phase;
  phase.Run(this->data_, scope.zone());
}

template <typename Phase, typename Arg0>
void PipelineImpl::Run(Arg0 arg_0) {
  PipelineRunScope scope(this->data_, Phase::phase_name());
  Phase phase;
  phase.Run(this->data_, scope.zone(), arg_0);
}

template <typename Phase, typename Arg0, typename Arg1>
void PipelineImpl::Run(Arg0 arg_0, Arg1 arg_1) {
  PipelineRunScope scope(this->data_, Phase::phase_name());
  Phase phase;
  phase.Run(this->data_, scope.zone(), arg_0, arg_1);
}
683 
684 struct LoopAssignmentAnalysisPhase {
phase_namev8::internal::compiler::LoopAssignmentAnalysisPhase685   static const char* phase_name() { return "loop assignment analysis"; }
686 
Runv8::internal::compiler::LoopAssignmentAnalysisPhase687   void Run(PipelineData* data, Zone* temp_zone) {
688     if (!data->info()->is_optimizing_from_bytecode()) {
689       AstLoopAssignmentAnalyzer analyzer(data->graph_zone(), data->info());
690       LoopAssignmentAnalysis* loop_assignment = analyzer.Analyze();
691       data->set_loop_assignment(loop_assignment);
692     }
693   }
694 };
695 
696 
697 struct TypeHintAnalysisPhase {
phase_namev8::internal::compiler::TypeHintAnalysisPhase698   static const char* phase_name() { return "type hint analysis"; }
699 
Runv8::internal::compiler::TypeHintAnalysisPhase700   void Run(PipelineData* data, Zone* temp_zone) {
701     if (data->info()->is_type_feedback_enabled()) {
702       TypeHintAnalyzer analyzer(data->graph_zone());
703       Handle<Code> code(data->info()->shared_info()->code(), data->isolate());
704       TypeHintAnalysis* type_hint_analysis = analyzer.Analyze(code);
705       data->set_type_hint_analysis(type_hint_analysis);
706     }
707   }
708 };
709 
710 
711 struct GraphBuilderPhase {
phase_namev8::internal::compiler::GraphBuilderPhase712   static const char* phase_name() { return "graph builder"; }
713 
Runv8::internal::compiler::GraphBuilderPhase714   void Run(PipelineData* data, Zone* temp_zone) {
715     bool succeeded = false;
716 
717     if (data->info()->is_optimizing_from_bytecode()) {
718       BytecodeGraphBuilder graph_builder(temp_zone, data->info(),
719                                          data->jsgraph(), 1.0f,
720                                          data->source_positions());
721       succeeded = graph_builder.CreateGraph();
722     } else {
723       AstGraphBuilderWithPositions graph_builder(
724           temp_zone, data->info(), data->jsgraph(), 1.0f,
725           data->loop_assignment(), data->type_hint_analysis(),
726           data->source_positions());
727       succeeded = graph_builder.CreateGraph();
728     }
729 
730     if (!succeeded) {
731       data->set_compilation_failed();
732     }
733   }
734 };
735 
736 
struct InliningPhase {
  static const char* phase_name() { return "inlining"; }

  // Runs the inlining pipeline step: constructs all reducers that feed the
  // inliner (call reduction, context/frame/global-object/native-context
  // specialization, intrinsic lowering) plus the inlining heuristic itself,
  // registers them on a graph reducer, and reduces the graph.
  void Run(PipelineData* data, Zone* temp_zone) {
    JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common());
    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                         data->common(), data->machine());
    // Translate the relevant compilation-info flags into JSCallReducer flags.
    JSCallReducer::Flags call_reducer_flags = JSCallReducer::kNoFlags;
    if (data->info()->is_bailout_on_uninitialized()) {
      call_reducer_flags |= JSCallReducer::kBailoutOnUninitialized;
    }
    if (data->info()->is_deoptimization_enabled()) {
      call_reducer_flags |= JSCallReducer::kDeoptimizationEnabled;
    }
    JSCallReducer call_reducer(&graph_reducer, data->jsgraph(),
                               call_reducer_flags, data->native_context());
    // Only supply a concrete context handle when specializing to the
    // function context; otherwise pass an empty MaybeHandle.
    JSContextSpecialization context_specialization(
        &graph_reducer, data->jsgraph(),
        data->info()->is_function_context_specializing()
            ? handle(data->info()->context())
            : MaybeHandle<Context>());
    JSFrameSpecialization frame_specialization(
        &graph_reducer, data->info()->osr_frame(), data->jsgraph());
    JSGlobalObjectSpecialization global_object_specialization(
        &graph_reducer, data->jsgraph(), data->global_object(),
        data->info()->dependencies());
    // Translate the relevant compilation-info flags into native-context
    // specialization flags.
    JSNativeContextSpecialization::Flags flags =
        JSNativeContextSpecialization::kNoFlags;
    if (data->info()->is_accessor_inlining_enabled()) {
      flags |= JSNativeContextSpecialization::kAccessorInliningEnabled;
    }
    if (data->info()->is_bailout_on_uninitialized()) {
      flags |= JSNativeContextSpecialization::kBailoutOnUninitialized;
    }
    if (data->info()->is_deoptimization_enabled()) {
      flags |= JSNativeContextSpecialization::kDeoptimizationEnabled;
    }
    JSNativeContextSpecialization native_context_specialization(
        &graph_reducer, data->jsgraph(), flags, data->native_context(),
        data->info()->dependencies(), temp_zone);
    JSInliningHeuristic inlining(
        &graph_reducer, data->info()->is_inlining_enabled()
                            ? JSInliningHeuristic::kGeneralInlining
                            : JSInliningHeuristic::kRestrictedInlining,
        temp_zone, data->info(), data->jsgraph(), data->source_positions());
    JSIntrinsicLowering intrinsic_lowering(
        &graph_reducer, data->jsgraph(),
        data->info()->is_deoptimization_enabled()
            ? JSIntrinsicLowering::kDeoptimizationEnabled
            : JSIntrinsicLowering::kDeoptimizationDisabled);
    // Register the reducers. Frame specialization and global-object
    // specialization are only added when the corresponding compilation
    // flag is set.
    AddReducer(data, &graph_reducer, &dead_code_elimination);
    AddReducer(data, &graph_reducer, &common_reducer);
    if (data->info()->is_frame_specializing()) {
      AddReducer(data, &graph_reducer, &frame_specialization);
    }
    if (data->info()->is_deoptimization_enabled()) {
      AddReducer(data, &graph_reducer, &global_object_specialization);
    }
    AddReducer(data, &graph_reducer, &native_context_specialization);
    AddReducer(data, &graph_reducer, &context_specialization);
    AddReducer(data, &graph_reducer, &intrinsic_lowering);
    AddReducer(data, &graph_reducer, &call_reducer);
    AddReducer(data, &graph_reducer, &inlining);
    graph_reducer.ReduceGraph();
  }
};
805 
806 
807 struct TyperPhase {
phase_namev8::internal::compiler::TyperPhase808   static const char* phase_name() { return "typer"; }
809 
Runv8::internal::compiler::TyperPhase810   void Run(PipelineData* data, Zone* temp_zone, Typer* typer) {
811     NodeVector roots(temp_zone);
812     data->jsgraph()->GetCachedNodes(&roots);
813     LoopVariableOptimizer induction_vars(data->jsgraph()->graph(),
814                                          data->common(), temp_zone);
815     if (FLAG_turbo_loop_variable) induction_vars.Run();
816     typer->Run(roots, &induction_vars);
817   }
818 };
819 
820 struct OsrTyperPhase {
phase_namev8::internal::compiler::OsrTyperPhase821   static const char* phase_name() { return "osr typer"; }
822 
Runv8::internal::compiler::OsrTyperPhase823   void Run(PipelineData* data, Zone* temp_zone) {
824     NodeVector roots(temp_zone);
825     data->jsgraph()->GetCachedNodes(&roots);
826     // Dummy induction variable optimizer: at the moment, we do not try
827     // to compute loop variable bounds on OSR.
828     LoopVariableOptimizer induction_vars(data->jsgraph()->graph(),
829                                          data->common(), temp_zone);
830     Typer typer(data->isolate(), Typer::kNoFlags, data->graph());
831     typer.Run(roots, &induction_vars);
832   }
833 };
834 
835 struct UntyperPhase {
phase_namev8::internal::compiler::UntyperPhase836   static const char* phase_name() { return "untyper"; }
837 
Runv8::internal::compiler::UntyperPhase838   void Run(PipelineData* data, Zone* temp_zone) {
839     class RemoveTypeReducer final : public Reducer {
840      public:
841       Reduction Reduce(Node* node) final {
842         if (NodeProperties::IsTyped(node)) {
843           NodeProperties::RemoveType(node);
844           return Changed(node);
845         }
846         return NoChange();
847       }
848     };
849 
850     NodeVector roots(temp_zone);
851     data->jsgraph()->GetCachedNodes(&roots);
852     for (Node* node : roots) {
853       NodeProperties::RemoveType(node);
854     }
855 
856     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
857     RemoveTypeReducer remove_type_reducer;
858     AddReducer(data, &graph_reducer, &remove_type_reducer);
859     graph_reducer.ReduceGraph();
860   }
861 };
862 
863 struct OsrDeconstructionPhase {
phase_namev8::internal::compiler::OsrDeconstructionPhase864   static const char* phase_name() { return "OSR deconstruction"; }
865 
Runv8::internal::compiler::OsrDeconstructionPhase866   void Run(PipelineData* data, Zone* temp_zone) {
867     GraphTrimmer trimmer(temp_zone, data->graph());
868     NodeVector roots(temp_zone);
869     data->jsgraph()->GetCachedNodes(&roots);
870     trimmer.TrimGraph(roots.begin(), roots.end());
871 
872     OsrHelper osr_helper(data->info());
873     osr_helper.Deconstruct(data->jsgraph(), data->common(), temp_zone);
874   }
875 };
876 
877 
struct TypedLoweringPhase {
  static const char* phase_name() { return "typed lowering"; }

  // Builds the typed-lowering reducer set (builtin reduction, create
  // lowering, typed lowering/optimization, simplified/common reduction,
  // checkpoint elimination) and reduces the graph with them.
  void Run(PipelineData* data, Zone* temp_zone) {
    JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common());
    JSBuiltinReducer builtin_reducer(
        &graph_reducer, data->jsgraph(),
        data->info()->is_deoptimization_enabled()
            ? JSBuiltinReducer::kDeoptimizationEnabled
            : JSBuiltinReducer::kNoFlags,
        data->info()->dependencies(), data->native_context());
    // The literals array of the closure being compiled, used by create
    // lowering below.
    Handle<LiteralsArray> literals_array(data->info()->closure()->literals());
    JSCreateLowering create_lowering(
        &graph_reducer, data->info()->dependencies(), data->jsgraph(),
        literals_array, data->native_context(), temp_zone);
    JSTypedLowering::Flags typed_lowering_flags = JSTypedLowering::kNoFlags;
    if (data->info()->is_deoptimization_enabled()) {
      typed_lowering_flags |= JSTypedLowering::kDeoptimizationEnabled;
    }
    JSTypedLowering typed_lowering(&graph_reducer, data->info()->dependencies(),
                                   typed_lowering_flags, data->jsgraph(),
                                   temp_zone);
    TypedOptimization typed_optimization(
        &graph_reducer, data->info()->dependencies(),
        data->info()->is_deoptimization_enabled()
            ? TypedOptimization::kDeoptimizationEnabled
            : TypedOptimization::kNoFlags,
        data->jsgraph());
    SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph());
    CheckpointElimination checkpoint_elimination(&graph_reducer);
    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                         data->common(), data->machine());
    // Register the reducers. Create lowering is only applied when
    // deoptimization is enabled.
    AddReducer(data, &graph_reducer, &dead_code_elimination);
    AddReducer(data, &graph_reducer, &builtin_reducer);
    if (data->info()->is_deoptimization_enabled()) {
      AddReducer(data, &graph_reducer, &create_lowering);
    }
    AddReducer(data, &graph_reducer, &typed_optimization);
    AddReducer(data, &graph_reducer, &typed_lowering);
    AddReducer(data, &graph_reducer, &simple_reducer);
    AddReducer(data, &graph_reducer, &checkpoint_elimination);
    AddReducer(data, &graph_reducer, &common_reducer);
    graph_reducer.ReduceGraph();
  }
};
925 
926 
927 struct EscapeAnalysisPhase {
phase_namev8::internal::compiler::EscapeAnalysisPhase928   static const char* phase_name() { return "escape analysis"; }
929 
Runv8::internal::compiler::EscapeAnalysisPhase930   void Run(PipelineData* data, Zone* temp_zone) {
931     EscapeAnalysis escape_analysis(data->graph(), data->jsgraph()->common(),
932                                    temp_zone);
933     escape_analysis.Run();
934     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
935     EscapeAnalysisReducer escape_reducer(&graph_reducer, data->jsgraph(),
936                                          &escape_analysis, temp_zone);
937     AddReducer(data, &graph_reducer, &escape_reducer);
938     graph_reducer.ReduceGraph();
939     if (escape_reducer.compilation_failed()) {
940       data->set_compilation_failed();
941       return;
942     }
943     escape_reducer.VerifyReplacement();
944   }
945 };
946 
947 struct RepresentationSelectionPhase {
phase_namev8::internal::compiler::RepresentationSelectionPhase948   static const char* phase_name() { return "representation selection"; }
949 
Runv8::internal::compiler::RepresentationSelectionPhase950   void Run(PipelineData* data, Zone* temp_zone) {
951     SimplifiedLowering lowering(data->jsgraph(), temp_zone,
952                                 data->source_positions());
953     lowering.LowerAllNodes();
954   }
955 };
956 
957 struct LoopPeelingPhase {
phase_namev8::internal::compiler::LoopPeelingPhase958   static const char* phase_name() { return "loop peeling"; }
959 
Runv8::internal::compiler::LoopPeelingPhase960   void Run(PipelineData* data, Zone* temp_zone) {
961     GraphTrimmer trimmer(temp_zone, data->graph());
962     NodeVector roots(temp_zone);
963     data->jsgraph()->GetCachedNodes(&roots);
964     trimmer.TrimGraph(roots.begin(), roots.end());
965 
966     LoopTree* loop_tree =
967         LoopFinder::BuildLoopTree(data->jsgraph()->graph(), temp_zone);
968     LoopPeeler::PeelInnerLoopsOfTree(data->graph(), data->common(), loop_tree,
969                                      temp_zone);
970   }
971 };
972 
struct LoopExitEliminationPhase {
  static const char* phase_name() { return "loop exit elimination"; }

  // Delegates to LoopPeeler::EliminateLoopExits; unlike LoopPeelingPhase,
  // no trimming or peeling is performed.
  void Run(PipelineData* data, Zone* temp_zone) {
    LoopPeeler::EliminateLoopExits(data->graph(), temp_zone);
  }
};
980 
981 struct GenericLoweringPhase {
phase_namev8::internal::compiler::GenericLoweringPhase982   static const char* phase_name() { return "generic lowering"; }
983 
Runv8::internal::compiler::GenericLoweringPhase984   void Run(PipelineData* data, Zone* temp_zone) {
985     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
986     JSGenericLowering generic_lowering(data->jsgraph());
987     AddReducer(data, &graph_reducer, &generic_lowering);
988     graph_reducer.ReduceGraph();
989   }
990 };
991 
struct EarlyOptimizationPhase {
  static const char* phase_name() { return "early optimization"; }

  // Early machine-level cleanup: dead-code elimination, simplified-operator
  // and machine-operator reduction, redundancy elimination, value numbering
  // and common-operator reduction, all run together to a fixpoint.
  void Run(PipelineData* data, Zone* temp_zone) {
    JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common());
    SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph());
    RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
    // Value numbering allocates in the graph zone, not the temp zone.
    ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
    MachineOperatorReducer machine_reducer(data->jsgraph());
    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                         data->common(), data->machine());
    AddReducer(data, &graph_reducer, &dead_code_elimination);
    AddReducer(data, &graph_reducer, &simple_reducer);
    AddReducer(data, &graph_reducer, &redundancy_elimination);
    AddReducer(data, &graph_reducer, &value_numbering);
    AddReducer(data, &graph_reducer, &machine_reducer);
    AddReducer(data, &graph_reducer, &common_reducer);
    graph_reducer.ReduceGraph();
  }
};
1014 
1015 struct ControlFlowOptimizationPhase {
phase_namev8::internal::compiler::ControlFlowOptimizationPhase1016   static const char* phase_name() { return "control flow optimization"; }
1017 
Runv8::internal::compiler::ControlFlowOptimizationPhase1018   void Run(PipelineData* data, Zone* temp_zone) {
1019     ControlFlowOptimizer optimizer(data->graph(), data->common(),
1020                                    data->machine(), temp_zone);
1021     optimizer.Optimize();
1022   }
1023 };
1024 
struct EffectControlLinearizationPhase {
  static const char* phase_name() { return "effect linearization"; }

  // Trims and schedules the graph, then linearizes effect and control
  // chains along that schedule.
  void Run(PipelineData* data, Zone* temp_zone) {
    // The scheduler requires the graphs to be trimmed, so trim now.
    // TODO(jarin) Remove the trimming once the scheduler can handle untrimmed
    // graphs.
    GraphTrimmer trimmer(temp_zone, data->graph());
    NodeVector roots(temp_zone);
    data->jsgraph()->GetCachedNodes(&roots);
    trimmer.TrimGraph(roots.begin(), roots.end());

    // Schedule the graph without node splitting so that we can
    // fix the effect and control flow for nodes with low-level side
    // effects (such as changing representation to tagged or
    // 'floating' allocation regions.)
    Schedule* schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(),
                                                    Scheduler::kNoFlags);
    if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
    TraceSchedule(data->info(), schedule);

    // Post-pass for wiring the control/effects
    // - connect allocating representation changes into the control&effect
    //   chains and lower them,
    // - get rid of the region markers,
    // - introduce effect phis and rewire effects to get SSA again.
    EffectControlLinearizer linearizer(data->jsgraph(), schedule, temp_zone,
                                       data->source_positions());
    linearizer.Run();
  }
};
1056 
1057 // The store-store elimination greatly benefits from doing a common operator
1058 // reducer and dead code elimination just before it, to eliminate conditional
1059 // deopts with a constant condition.
1060 
1061 struct DeadCodeEliminationPhase {
phase_namev8::internal::compiler::DeadCodeEliminationPhase1062   static const char* phase_name() { return "dead code elimination"; }
1063 
Runv8::internal::compiler::DeadCodeEliminationPhase1064   void Run(PipelineData* data, Zone* temp_zone) {
1065     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
1066     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1067                                               data->common());
1068     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
1069                                          data->common(), data->machine());
1070     AddReducer(data, &graph_reducer, &dead_code_elimination);
1071     AddReducer(data, &graph_reducer, &common_reducer);
1072     graph_reducer.ReduceGraph();
1073   }
1074 };
1075 
1076 struct StoreStoreEliminationPhase {
phase_namev8::internal::compiler::StoreStoreEliminationPhase1077   static const char* phase_name() { return "store-store elimination"; }
1078 
Runv8::internal::compiler::StoreStoreEliminationPhase1079   void Run(PipelineData* data, Zone* temp_zone) {
1080     GraphTrimmer trimmer(temp_zone, data->graph());
1081     NodeVector roots(temp_zone);
1082     data->jsgraph()->GetCachedNodes(&roots);
1083     trimmer.TrimGraph(roots.begin(), roots.end());
1084 
1085     StoreStoreElimination::Run(data->jsgraph(), temp_zone);
1086   }
1087 };
1088 
struct LoadEliminationPhase {
  static const char* phase_name() { return "load elimination"; }

  // Runs load elimination together with branch/dead-code/redundancy
  // elimination, value numbering and common-operator reduction, all as a
  // single fixpoint reduction.
  void Run(PipelineData* data, Zone* temp_zone) {
    JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
    BranchElimination branch_condition_elimination(&graph_reducer,
                                                   data->jsgraph(), temp_zone);
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common());
    RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
    LoadElimination load_elimination(&graph_reducer, data->jsgraph(),
                                     temp_zone);
    // Value numbering allocates in the graph zone, not the temp zone.
    ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                         data->common(), data->machine());
    AddReducer(data, &graph_reducer, &branch_condition_elimination);
    AddReducer(data, &graph_reducer, &dead_code_elimination);
    AddReducer(data, &graph_reducer, &redundancy_elimination);
    AddReducer(data, &graph_reducer, &load_elimination);
    AddReducer(data, &graph_reducer, &value_numbering);
    AddReducer(data, &graph_reducer, &common_reducer);
    graph_reducer.ReduceGraph();
  }
};
1113 
1114 struct MemoryOptimizationPhase {
phase_namev8::internal::compiler::MemoryOptimizationPhase1115   static const char* phase_name() { return "memory optimization"; }
1116 
Runv8::internal::compiler::MemoryOptimizationPhase1117   void Run(PipelineData* data, Zone* temp_zone) {
1118     // The memory optimizer requires the graphs to be trimmed, so trim now.
1119     GraphTrimmer trimmer(temp_zone, data->graph());
1120     NodeVector roots(temp_zone);
1121     data->jsgraph()->GetCachedNodes(&roots);
1122     trimmer.TrimGraph(roots.begin(), roots.end());
1123 
1124     // Optimize allocations and load/store operations.
1125     MemoryOptimizer optimizer(data->jsgraph(), temp_zone);
1126     optimizer.Optimize();
1127   }
1128 };
1129 
struct LateOptimizationPhase {
  static const char* phase_name() { return "late optimization"; }

  // Final machine-level optimization round: branch/dead-code elimination,
  // value numbering, machine/common-operator reduction, select lowering and
  // tail-call optimization, run together to a fixpoint.
  void Run(PipelineData* data, Zone* temp_zone) {
    JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
    BranchElimination branch_condition_elimination(&graph_reducer,
                                                   data->jsgraph(), temp_zone);
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common());
    // Value numbering allocates in the graph zone, not the temp zone.
    ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
    MachineOperatorReducer machine_reducer(data->jsgraph());
    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                         data->common(), data->machine());
    SelectLowering select_lowering(data->jsgraph()->graph(),
                                   data->jsgraph()->common());
    TailCallOptimization tco(data->common(), data->graph());
    AddReducer(data, &graph_reducer, &branch_condition_elimination);
    AddReducer(data, &graph_reducer, &dead_code_elimination);
    AddReducer(data, &graph_reducer, &value_numbering);
    AddReducer(data, &graph_reducer, &machine_reducer);
    AddReducer(data, &graph_reducer, &common_reducer);
    AddReducer(data, &graph_reducer, &select_lowering);
    AddReducer(data, &graph_reducer, &tco);
    graph_reducer.ReduceGraph();
  }
};
1156 
1157 struct EarlyGraphTrimmingPhase {
phase_namev8::internal::compiler::EarlyGraphTrimmingPhase1158   static const char* phase_name() { return "early graph trimming"; }
Runv8::internal::compiler::EarlyGraphTrimmingPhase1159   void Run(PipelineData* data, Zone* temp_zone) {
1160     GraphTrimmer trimmer(temp_zone, data->graph());
1161     NodeVector roots(temp_zone);
1162     data->jsgraph()->GetCachedNodes(&roots);
1163     trimmer.TrimGraph(roots.begin(), roots.end());
1164   }
1165 };
1166 
1167 
1168 struct LateGraphTrimmingPhase {
phase_namev8::internal::compiler::LateGraphTrimmingPhase1169   static const char* phase_name() { return "late graph trimming"; }
Runv8::internal::compiler::LateGraphTrimmingPhase1170   void Run(PipelineData* data, Zone* temp_zone) {
1171     GraphTrimmer trimmer(temp_zone, data->graph());
1172     NodeVector roots(temp_zone);
1173     if (data->jsgraph()) {
1174       data->jsgraph()->GetCachedNodes(&roots);
1175     }
1176     trimmer.TrimGraph(roots.begin(), roots.end());
1177   }
1178 };
1179 
1180 
1181 struct StressLoopPeelingPhase {
phase_namev8::internal::compiler::StressLoopPeelingPhase1182   static const char* phase_name() { return "stress loop peeling"; }
1183 
Runv8::internal::compiler::StressLoopPeelingPhase1184   void Run(PipelineData* data, Zone* temp_zone) {
1185     // Peel the first outer loop for testing.
1186     // TODO(titzer): peel all loops? the N'th loop? Innermost loops?
1187     LoopTree* loop_tree = LoopFinder::BuildLoopTree(data->graph(), temp_zone);
1188     if (loop_tree != nullptr && loop_tree->outer_loops().size() > 0) {
1189       LoopPeeler::Peel(data->graph(), data->common(), loop_tree,
1190                        loop_tree->outer_loops()[0], temp_zone);
1191     }
1192   }
1193 };
1194 
1195 
1196 struct ComputeSchedulePhase {
phase_namev8::internal::compiler::ComputeSchedulePhase1197   static const char* phase_name() { return "scheduling"; }
1198 
Runv8::internal::compiler::ComputeSchedulePhase1199   void Run(PipelineData* data, Zone* temp_zone) {
1200     Schedule* schedule = Scheduler::ComputeSchedule(
1201         temp_zone, data->graph(), data->info()->is_splitting_enabled()
1202                                       ? Scheduler::kSplitNodes
1203                                       : Scheduler::kNoFlags);
1204     if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
1205     data->set_schedule(schedule);
1206   }
1207 };
1208 
1209 
1210 struct InstructionSelectionPhase {
phase_namev8::internal::compiler::InstructionSelectionPhase1211   static const char* phase_name() { return "select instructions"; }
1212 
Runv8::internal::compiler::InstructionSelectionPhase1213   void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
1214     InstructionSelector selector(
1215         temp_zone, data->graph()->NodeCount(), linkage, data->sequence(),
1216         data->schedule(), data->source_positions(), data->frame(),
1217         data->info()->is_source_positions_enabled()
1218             ? InstructionSelector::kAllSourcePositions
1219             : InstructionSelector::kCallSourcePositions,
1220         InstructionSelector::SupportedFeatures(),
1221         FLAG_turbo_instruction_scheduling
1222             ? InstructionSelector::kEnableScheduling
1223             : InstructionSelector::kDisableScheduling,
1224         data->info()->will_serialize()
1225             ? InstructionSelector::kEnableSerialization
1226             : InstructionSelector::kDisableSerialization);
1227     if (!selector.SelectInstructions()) {
1228       data->set_compilation_failed();
1229     }
1230   }
1231 };
1232 
1233 
1234 struct MeetRegisterConstraintsPhase {
phase_namev8::internal::compiler::MeetRegisterConstraintsPhase1235   static const char* phase_name() { return "meet register constraints"; }
1236 
Runv8::internal::compiler::MeetRegisterConstraintsPhase1237   void Run(PipelineData* data, Zone* temp_zone) {
1238     ConstraintBuilder builder(data->register_allocation_data());
1239     builder.MeetRegisterConstraints();
1240   }
1241 };
1242 
1243 
1244 struct ResolvePhisPhase {
phase_namev8::internal::compiler::ResolvePhisPhase1245   static const char* phase_name() { return "resolve phis"; }
1246 
Runv8::internal::compiler::ResolvePhisPhase1247   void Run(PipelineData* data, Zone* temp_zone) {
1248     ConstraintBuilder builder(data->register_allocation_data());
1249     builder.ResolvePhis();
1250   }
1251 };
1252 
1253 
1254 struct BuildLiveRangesPhase {
phase_namev8::internal::compiler::BuildLiveRangesPhase1255   static const char* phase_name() { return "build live ranges"; }
1256 
Runv8::internal::compiler::BuildLiveRangesPhase1257   void Run(PipelineData* data, Zone* temp_zone) {
1258     LiveRangeBuilder builder(data->register_allocation_data(), temp_zone);
1259     builder.BuildLiveRanges();
1260   }
1261 };
1262 
1263 
1264 struct SplinterLiveRangesPhase {
phase_namev8::internal::compiler::SplinterLiveRangesPhase1265   static const char* phase_name() { return "splinter live ranges"; }
1266 
Runv8::internal::compiler::SplinterLiveRangesPhase1267   void Run(PipelineData* data, Zone* temp_zone) {
1268     LiveRangeSeparator live_range_splinterer(data->register_allocation_data(),
1269                                              temp_zone);
1270     live_range_splinterer.Splinter();
1271   }
1272 };
1273 
1274 
1275 template <typename RegAllocator>
1276 struct AllocateGeneralRegistersPhase {
phase_namev8::internal::compiler::AllocateGeneralRegistersPhase1277   static const char* phase_name() { return "allocate general registers"; }
1278 
Runv8::internal::compiler::AllocateGeneralRegistersPhase1279   void Run(PipelineData* data, Zone* temp_zone) {
1280     RegAllocator allocator(data->register_allocation_data(), GENERAL_REGISTERS,
1281                            temp_zone);
1282     allocator.AllocateRegisters();
1283   }
1284 };
1285 
1286 template <typename RegAllocator>
1287 struct AllocateFPRegistersPhase {
phase_namev8::internal::compiler::AllocateFPRegistersPhase1288   static const char* phase_name() {
1289     return "allocate floating point registers";
1290   }
1291 
Runv8::internal::compiler::AllocateFPRegistersPhase1292   void Run(PipelineData* data, Zone* temp_zone) {
1293     RegAllocator allocator(data->register_allocation_data(), FP_REGISTERS,
1294                            temp_zone);
1295     allocator.AllocateRegisters();
1296   }
1297 };
1298 
1299 
1300 struct MergeSplintersPhase {
phase_namev8::internal::compiler::MergeSplintersPhase1301   static const char* phase_name() { return "merge splintered ranges"; }
Runv8::internal::compiler::MergeSplintersPhase1302   void Run(PipelineData* pipeline_data, Zone* temp_zone) {
1303     RegisterAllocationData* data = pipeline_data->register_allocation_data();
1304     LiveRangeMerger live_range_merger(data, temp_zone);
1305     live_range_merger.Merge();
1306   }
1307 };
1308 
1309 
1310 struct LocateSpillSlotsPhase {
phase_namev8::internal::compiler::LocateSpillSlotsPhase1311   static const char* phase_name() { return "locate spill slots"; }
1312 
Runv8::internal::compiler::LocateSpillSlotsPhase1313   void Run(PipelineData* data, Zone* temp_zone) {
1314     SpillSlotLocator locator(data->register_allocation_data());
1315     locator.LocateSpillSlots();
1316   }
1317 };
1318 
1319 
1320 struct AssignSpillSlotsPhase {
phase_namev8::internal::compiler::AssignSpillSlotsPhase1321   static const char* phase_name() { return "assign spill slots"; }
1322 
Runv8::internal::compiler::AssignSpillSlotsPhase1323   void Run(PipelineData* data, Zone* temp_zone) {
1324     OperandAssigner assigner(data->register_allocation_data());
1325     assigner.AssignSpillSlots();
1326   }
1327 };
1328 
1329 
1330 struct CommitAssignmentPhase {
phase_namev8::internal::compiler::CommitAssignmentPhase1331   static const char* phase_name() { return "commit assignment"; }
1332 
Runv8::internal::compiler::CommitAssignmentPhase1333   void Run(PipelineData* data, Zone* temp_zone) {
1334     OperandAssigner assigner(data->register_allocation_data());
1335     assigner.CommitAssignment();
1336   }
1337 };
1338 
1339 
1340 struct PopulateReferenceMapsPhase {
phase_namev8::internal::compiler::PopulateReferenceMapsPhase1341   static const char* phase_name() { return "populate pointer maps"; }
1342 
Runv8::internal::compiler::PopulateReferenceMapsPhase1343   void Run(PipelineData* data, Zone* temp_zone) {
1344     ReferenceMapPopulator populator(data->register_allocation_data());
1345     populator.PopulateReferenceMaps();
1346   }
1347 };
1348 
1349 
1350 struct ConnectRangesPhase {
phase_namev8::internal::compiler::ConnectRangesPhase1351   static const char* phase_name() { return "connect ranges"; }
1352 
Runv8::internal::compiler::ConnectRangesPhase1353   void Run(PipelineData* data, Zone* temp_zone) {
1354     LiveRangeConnector connector(data->register_allocation_data());
1355     connector.ConnectRanges(temp_zone);
1356   }
1357 };
1358 
1359 
1360 struct ResolveControlFlowPhase {
phase_namev8::internal::compiler::ResolveControlFlowPhase1361   static const char* phase_name() { return "resolve control flow"; }
1362 
Runv8::internal::compiler::ResolveControlFlowPhase1363   void Run(PipelineData* data, Zone* temp_zone) {
1364     LiveRangeConnector connector(data->register_allocation_data());
1365     connector.ResolveControlFlow(temp_zone);
1366   }
1367 };
1368 
1369 
1370 struct OptimizeMovesPhase {
phase_namev8::internal::compiler::OptimizeMovesPhase1371   static const char* phase_name() { return "optimize moves"; }
1372 
Runv8::internal::compiler::OptimizeMovesPhase1373   void Run(PipelineData* data, Zone* temp_zone) {
1374     MoveOptimizer move_optimizer(temp_zone, data->sequence());
1375     move_optimizer.Run();
1376   }
1377 };
1378 
1379 
1380 struct FrameElisionPhase {
phase_namev8::internal::compiler::FrameElisionPhase1381   static const char* phase_name() { return "frame elision"; }
1382 
Runv8::internal::compiler::FrameElisionPhase1383   void Run(PipelineData* data, Zone* temp_zone) {
1384     FrameElider(data->sequence()).Run();
1385   }
1386 };
1387 
1388 
1389 struct JumpThreadingPhase {
phase_namev8::internal::compiler::JumpThreadingPhase1390   static const char* phase_name() { return "jump threading"; }
1391 
Runv8::internal::compiler::JumpThreadingPhase1392   void Run(PipelineData* data, Zone* temp_zone, bool frame_at_start) {
1393     ZoneVector<RpoNumber> result(temp_zone);
1394     if (JumpThreading::ComputeForwarding(temp_zone, result, data->sequence(),
1395                                          frame_at_start)) {
1396       JumpThreading::ApplyForwarding(result, data->sequence());
1397     }
1398   }
1399 };
1400 
1401 
1402 struct GenerateCodePhase {
phase_namev8::internal::compiler::GenerateCodePhase1403   static const char* phase_name() { return "generate code"; }
1404 
Runv8::internal::compiler::GenerateCodePhase1405   void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
1406     CodeGenerator generator(data->frame(), linkage, data->sequence(),
1407                             data->info());
1408     data->set_code(generator.GenerateCode());
1409   }
1410 };
1411 
1412 
1413 struct PrintGraphPhase {
phase_namev8::internal::compiler::PrintGraphPhase1414   static const char* phase_name() { return nullptr; }
1415 
Runv8::internal::compiler::PrintGraphPhase1416   void Run(PipelineData* data, Zone* temp_zone, const char* phase) {
1417     CompilationInfo* info = data->info();
1418     Graph* graph = data->graph();
1419 
1420     {  // Print JSON.
1421       AllowHandleDereference allow_deref;
1422       TurboJsonFile json_of(info, std::ios_base::app);
1423       json_of << "{\"name\":\"" << phase << "\",\"type\":\"graph\",\"data\":"
1424               << AsJSON(*graph, data->source_positions()) << "},\n";
1425     }
1426 
1427     if (FLAG_trace_turbo_graph) {  // Simple textual RPO.
1428       AllowHandleDereference allow_deref;
1429       CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
1430       OFStream os(tracing_scope.file());
1431       os << "-- Graph after " << phase << " -- " << std::endl;
1432       os << AsRPO(*graph);
1433     }
1434   }
1435 };
1436 
1437 
1438 struct VerifyGraphPhase {
phase_namev8::internal::compiler::VerifyGraphPhase1439   static const char* phase_name() { return nullptr; }
1440 
Runv8::internal::compiler::VerifyGraphPhase1441   void Run(PipelineData* data, Zone* temp_zone, const bool untyped,
1442            bool values_only = false) {
1443     Verifier::Run(data->graph(), !untyped ? Verifier::TYPED : Verifier::UNTYPED,
1444                   values_only ? Verifier::kValuesOnly : Verifier::kAll);
1445   }
1446 };
1447 
RunPrintAndVerify(const char * phase,bool untyped)1448 void PipelineImpl::RunPrintAndVerify(const char* phase, bool untyped) {
1449   if (FLAG_trace_turbo) {
1450     Run<PrintGraphPhase>(phase);
1451   }
1452   if (FLAG_turbo_verify) {
1453     Run<VerifyGraphPhase>(untyped);
1454   }
1455 }
1456 
CreateGraph()1457 bool PipelineImpl::CreateGraph() {
1458   PipelineData* data = this->data_;
1459 
1460   data->BeginPhaseKind("graph creation");
1461 
1462   if (FLAG_trace_turbo) {
1463     CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
1464     OFStream os(tracing_scope.file());
1465     os << "---------------------------------------------------\n"
1466        << "Begin compiling method " << info()->GetDebugName().get()
1467        << " using Turbofan" << std::endl;
1468     TurboCfgFile tcf(isolate());
1469     tcf << AsC1VCompilation(info());
1470   }
1471 
1472   data->source_positions()->AddDecorator();
1473 
1474   if (FLAG_loop_assignment_analysis) {
1475     Run<LoopAssignmentAnalysisPhase>();
1476   }
1477 
1478   Run<TypeHintAnalysisPhase>();
1479 
1480   Run<GraphBuilderPhase>();
1481   if (data->compilation_failed()) {
1482     data->EndPhaseKind();
1483     return false;
1484   }
1485   RunPrintAndVerify("Initial untyped", true);
1486 
1487   // Perform OSR deconstruction.
1488   if (info()->is_osr()) {
1489     Run<OsrTyperPhase>();
1490 
1491     Run<OsrDeconstructionPhase>();
1492 
1493     Run<UntyperPhase>();
1494     RunPrintAndVerify("OSR deconstruction", true);
1495   }
1496 
1497   // Perform function context specialization and inlining (if enabled).
1498   Run<InliningPhase>();
1499   RunPrintAndVerify("Inlined", true);
1500 
1501   // Remove dead->live edges from the graph.
1502   Run<EarlyGraphTrimmingPhase>();
1503   RunPrintAndVerify("Early trimmed", true);
1504 
1505   if (FLAG_print_turbo_replay) {
1506     // Print a replay of the initial graph.
1507     GraphReplayPrinter::PrintReplay(data->graph());
1508   }
1509 
1510   // Run the type-sensitive lowerings and optimizations on the graph.
1511   {
1512     // Determine the Typer operation flags.
1513     Typer::Flags flags = Typer::kNoFlags;
1514     if (is_sloppy(info()->shared_info()->language_mode()) &&
1515         !info()->shared_info()->IsBuiltin()) {
1516       // Sloppy mode functions always have an Object for this.
1517       flags |= Typer::kThisIsReceiver;
1518     }
1519     if (IsClassConstructor(info()->shared_info()->kind())) {
1520       // Class constructors cannot be [[Call]]ed.
1521       flags |= Typer::kNewTargetIsReceiver;
1522     }
1523 
1524     // Type the graph and keep the Typer running on newly created nodes within
1525     // this scope; the Typer is automatically unlinked from the Graph once we
1526     // leave this scope below.
1527     Typer typer(isolate(), flags, data->graph());
1528     Run<TyperPhase>(&typer);
1529     RunPrintAndVerify("Typed");
1530 
1531     data->BeginPhaseKind("lowering");
1532 
1533     // Lower JSOperators where we can determine types.
1534     Run<TypedLoweringPhase>();
1535     RunPrintAndVerify("Lowered typed");
1536 
1537     if (FLAG_turbo_loop_peeling) {
1538       Run<LoopPeelingPhase>();
1539       RunPrintAndVerify("Loops peeled", true);
1540     } else {
1541       Run<LoopExitEliminationPhase>();
1542       RunPrintAndVerify("Loop exits eliminated", true);
1543     }
1544 
1545     if (FLAG_turbo_stress_loop_peeling) {
1546       Run<StressLoopPeelingPhase>();
1547       RunPrintAndVerify("Loop peeled");
1548     }
1549 
1550     if (!info()->shared_info()->asm_function()) {
1551       if (FLAG_turbo_load_elimination) {
1552         Run<LoadEliminationPhase>();
1553         RunPrintAndVerify("Load eliminated");
1554       }
1555 
1556       if (FLAG_turbo_escape) {
1557         Run<EscapeAnalysisPhase>();
1558         if (data->compilation_failed()) {
1559           info()->AbortOptimization(kCyclicObjectStateDetectedInEscapeAnalysis);
1560           data->EndPhaseKind();
1561           return false;
1562         }
1563         RunPrintAndVerify("Escape Analysed");
1564       }
1565     }
1566   }
1567 
1568   // Select representations. This has to run w/o the Typer decorator, because
1569   // we cannot compute meaningful types anyways, and the computed types might
1570   // even conflict with the representation/truncation logic.
1571   Run<RepresentationSelectionPhase>();
1572   RunPrintAndVerify("Representations selected", true);
1573 
1574 #ifdef DEBUG
1575   // From now on it is invalid to look at types on the nodes, because:
1576   //
1577   //  (a) The remaining passes (might) run concurrent to the main thread and
1578   //      therefore must not access the Heap or the Isolate in an uncontrolled
1579   //      way (as done by the type system), and
1580   //  (b) the types on the nodes might not make sense after representation
1581   //      selection due to the way we handle truncations; if we'd want to look
1582   //      at types afterwards we'd essentially need to re-type (large portions
1583   //      of) the graph.
1584   //
1585   // In order to catch bugs related to type access after this point we remove
1586   // the types from the nodes at this point (currently only in Debug builds).
1587   Run<UntyperPhase>();
1588   RunPrintAndVerify("Untyped", true);
1589 #endif
1590 
1591   // Run generic lowering pass.
1592   Run<GenericLoweringPhase>();
1593   RunPrintAndVerify("Generic lowering", true);
1594 
1595   data->EndPhaseKind();
1596 
1597   return true;
1598 }
1599 
OptimizeGraph(Linkage * linkage)1600 bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
1601   PipelineData* data = this->data_;
1602 
1603   data->BeginPhaseKind("block building");
1604 
1605   // Run early optimization pass.
1606   Run<EarlyOptimizationPhase>();
1607   RunPrintAndVerify("Early optimized", true);
1608 
1609   Run<EffectControlLinearizationPhase>();
1610   RunPrintAndVerify("Effect and control linearized", true);
1611 
1612   Run<DeadCodeEliminationPhase>();
1613   RunPrintAndVerify("Dead code elimination", true);
1614 
1615   if (FLAG_turbo_store_elimination) {
1616     Run<StoreStoreEliminationPhase>();
1617     RunPrintAndVerify("Store-store elimination", true);
1618   }
1619 
1620   // Optimize control flow.
1621   if (FLAG_turbo_cf_optimization) {
1622     Run<ControlFlowOptimizationPhase>();
1623     RunPrintAndVerify("Control flow optimized", true);
1624   }
1625 
1626   // Optimize memory access and allocation operations.
1627   Run<MemoryOptimizationPhase>();
1628   // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
1629   RunPrintAndVerify("Memory optimized", true);
1630 
1631   // Lower changes that have been inserted before.
1632   Run<LateOptimizationPhase>();
1633   // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
1634   RunPrintAndVerify("Late optimized", true);
1635 
1636   data->source_positions()->RemoveDecorator();
1637 
1638   return ScheduleAndSelectInstructions(linkage, true);
1639 }
1640 
GenerateCodeForCodeStub(Isolate * isolate,CallDescriptor * call_descriptor,Graph * graph,Schedule * schedule,Code::Flags flags,const char * debug_name)1641 Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
1642                                                CallDescriptor* call_descriptor,
1643                                                Graph* graph, Schedule* schedule,
1644                                                Code::Flags flags,
1645                                                const char* debug_name) {
1646   CompilationInfo info(CStrVector(debug_name), isolate, graph->zone(), flags);
1647   if (isolate->serializer_enabled()) info.PrepareForSerializing();
1648 
1649   // Construct a pipeline for scheduling and code generation.
1650   ZoneStats zone_stats(isolate->allocator());
1651   PipelineData data(&zone_stats, &info, graph, schedule);
1652   std::unique_ptr<PipelineStatistics> pipeline_statistics;
1653   if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
1654     pipeline_statistics.reset(new PipelineStatistics(&info, &zone_stats));
1655     pipeline_statistics->BeginPhaseKind("stub codegen");
1656   }
1657 
1658   PipelineImpl pipeline(&data);
1659   DCHECK_NOT_NULL(data.schedule());
1660 
1661   if (FLAG_trace_turbo) {
1662     {
1663       TurboJsonFile json_of(&info, std::ios_base::trunc);
1664       json_of << "{\"function\":\"" << info.GetDebugName().get()
1665               << "\", \"source\":\"\",\n\"phases\":[";
1666     }
1667     pipeline.Run<PrintGraphPhase>("Machine");
1668   }
1669 
1670   pipeline.Run<VerifyGraphPhase>(false, true);
1671   return pipeline.ScheduleAndGenerateCode(call_descriptor);
1672 }
1673 
// static
// Runs the complete pipeline (graph creation, optimization, code generation)
// for testing. Returns a null handle if graph creation or optimization fails.
Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info) {
  ZoneStats zone_stats(info->isolate()->allocator());
  std::unique_ptr<PipelineStatistics> pipeline_statistics(
      CreatePipelineStatistics(info, &zone_stats));
  PipelineData data(&zone_stats, info, pipeline_statistics.get());
  PipelineImpl pipeline(&data);

  Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));

  if (!pipeline.CreateGraph()) return Handle<Code>::null();
  if (!pipeline.OptimizeGraph(&linkage)) return Handle<Code>::null();
  return pipeline.GenerateCode(&linkage);
}
1688 
1689 // static
GenerateCodeForTesting(CompilationInfo * info,Graph * graph,Schedule * schedule)1690 Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
1691                                               Graph* graph,
1692                                               Schedule* schedule) {
1693   CallDescriptor* call_descriptor =
1694       Linkage::ComputeIncoming(info->zone(), info);
1695   return GenerateCodeForTesting(info, call_descriptor, graph, schedule);
1696 }
1697 
// static
// Generates code for testing from a pre-built (and possibly pre-scheduled)
// graph with an explicitly supplied call descriptor.
Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
                                              CallDescriptor* call_descriptor,
                                              Graph* graph,
                                              Schedule* schedule) {
  // Construct a pipeline for scheduling and code generation.
  ZoneStats zone_stats(info->isolate()->allocator());
  PipelineData data(&zone_stats, info, graph, schedule);
  std::unique_ptr<PipelineStatistics> pipeline_statistics;
  if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
    pipeline_statistics.reset(new PipelineStatistics(info, &zone_stats));
    pipeline_statistics->BeginPhaseKind("test codegen");
  }

  PipelineImpl pipeline(&data);

  if (FLAG_trace_turbo) {
    // Start a fresh JSON trace file (std::ios_base::trunc) for this run.
    TurboJsonFile json_of(info, std::ios_base::trunc);
    json_of << "{\"function\":\"" << info->GetDebugName().get()
            << "\", \"source\":\"\",\n\"phases\":[";
  }
  // TODO(rossberg): Should this really be untyped?
  pipeline.RunPrintAndVerify("Machine", true);

  return pipeline.ScheduleAndGenerateCode(call_descriptor);
}
1724 
1725 // static
NewCompilationJob(Handle<JSFunction> function)1726 CompilationJob* Pipeline::NewCompilationJob(Handle<JSFunction> function) {
1727   return new PipelineCompilationJob(function->GetIsolate(), function);
1728 }
1729 
1730 // static
NewWasmCompilationJob(CompilationInfo * info,Graph * graph,CallDescriptor * descriptor,SourcePositionTable * source_positions)1731 CompilationJob* Pipeline::NewWasmCompilationJob(
1732     CompilationInfo* info, Graph* graph, CallDescriptor* descriptor,
1733     SourcePositionTable* source_positions) {
1734   return new PipelineWasmCompilationJob(info, graph, descriptor,
1735                                         source_positions);
1736 }
1737 
AllocateRegistersForTesting(const RegisterConfiguration * config,InstructionSequence * sequence,bool run_verifier)1738 bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
1739                                            InstructionSequence* sequence,
1740                                            bool run_verifier) {
1741   CompilationInfo info(ArrayVector("testing"), sequence->isolate(),
1742                        sequence->zone(), Code::ComputeFlags(Code::STUB));
1743   ZoneStats zone_stats(sequence->isolate()->allocator());
1744   PipelineData data(&zone_stats, &info, sequence);
1745   PipelineImpl pipeline(&data);
1746   pipeline.data_->InitializeFrameData(nullptr);
1747   pipeline.AllocateRegisters(config, nullptr, run_verifier);
1748   return !data.compilation_failed();
1749 }
1750 
ScheduleAndSelectInstructions(Linkage * linkage,bool trim_graph)1751 bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage,
1752                                                  bool trim_graph) {
1753   CallDescriptor* call_descriptor = linkage->GetIncomingDescriptor();
1754   PipelineData* data = this->data_;
1755 
1756   DCHECK_NOT_NULL(data->graph());
1757 
1758   if (trim_graph) {
1759     Run<LateGraphTrimmingPhase>();
1760     RunPrintAndVerify("Late trimmed", true);
1761   }
1762   if (data->schedule() == nullptr) Run<ComputeSchedulePhase>();
1763   TraceSchedule(data->info(), data->schedule());
1764 
1765   if (FLAG_turbo_profiling) {
1766     data->set_profiler_data(BasicBlockInstrumentor::Instrument(
1767         info(), data->graph(), data->schedule()));
1768   }
1769 
1770   if (FLAG_turbo_verify_machine_graph != nullptr &&
1771       (!strcmp(FLAG_turbo_verify_machine_graph, "*") ||
1772        !strcmp(FLAG_turbo_verify_machine_graph,
1773                data->info()->GetDebugName().get()))) {
1774     Zone temp_zone(data->isolate()->allocator(), ZONE_NAME);
1775     MachineGraphVerifier::Run(data->graph(), data->schedule(), linkage,
1776                               &temp_zone);
1777   }
1778 
1779   data->InitializeInstructionSequence(call_descriptor);
1780 
1781   data->InitializeFrameData(call_descriptor);
1782   // Select and schedule instructions covering the scheduled graph.
1783   Run<InstructionSelectionPhase>(linkage);
1784   if (data->compilation_failed()) {
1785     info()->AbortOptimization(kCodeGenerationFailed);
1786     data->EndPhaseKind();
1787     return false;
1788   }
1789 
1790   if (FLAG_trace_turbo && !data->MayHaveUnverifiableGraph()) {
1791     AllowHandleDereference allow_deref;
1792     TurboCfgFile tcf(isolate());
1793     tcf << AsC1V("CodeGen", data->schedule(), data->source_positions(),
1794                  data->sequence());
1795   }
1796 
1797   if (FLAG_trace_turbo) {
1798     std::ostringstream source_position_output;
1799     // Output source position information before the graph is deleted.
1800     data_->source_positions()->Print(source_position_output);
1801     data_->set_source_position_output(source_position_output.str());
1802   }
1803 
1804   data->DeleteGraphZone();
1805 
1806   data->BeginPhaseKind("register allocation");
1807 
1808   bool run_verifier = FLAG_turbo_verify_allocation;
1809 
1810   // Allocate registers.
1811   AllocateRegisters(RegisterConfiguration::Turbofan(), call_descriptor,
1812                     run_verifier);
1813   Run<FrameElisionPhase>();
1814   if (data->compilation_failed()) {
1815     info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
1816     data->EndPhaseKind();
1817     return false;
1818   }
1819 
1820   // TODO(mtrofin): move this off to the register allocator.
1821   bool generate_frame_at_start =
1822       data_->sequence()->instruction_blocks().front()->must_construct_frame();
1823   // Optimimize jumps.
1824   if (FLAG_turbo_jt) {
1825     Run<JumpThreadingPhase>(generate_frame_at_start);
1826   }
1827 
1828   data->EndPhaseKind();
1829 
1830   return true;
1831 }
1832 
GenerateCode(Linkage * linkage)1833 Handle<Code> PipelineImpl::GenerateCode(Linkage* linkage) {
1834   PipelineData* data = this->data_;
1835 
1836   data->BeginPhaseKind("code generation");
1837 
1838   // Generate final machine code.
1839   Run<GenerateCodePhase>(linkage);
1840 
1841   Handle<Code> code = data->code();
1842   if (data->profiler_data()) {
1843 #if ENABLE_DISASSEMBLER
1844     std::ostringstream os;
1845     code->Disassemble(nullptr, os);
1846     data->profiler_data()->SetCode(&os);
1847 #endif
1848   }
1849 
1850   info()->SetCode(code);
1851   v8::internal::CodeGenerator::PrintCode(code, info());
1852 
1853   if (FLAG_trace_turbo) {
1854     TurboJsonFile json_of(info(), std::ios_base::app);
1855     json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\",\"data\":\"";
1856 #if ENABLE_DISASSEMBLER
1857     std::stringstream disassembly_stream;
1858     code->Disassemble(nullptr, disassembly_stream);
1859     std::string disassembly_string(disassembly_stream.str());
1860     for (const auto& c : disassembly_string) {
1861       json_of << AsEscapedUC16ForJSON(c);
1862     }
1863 #endif  // ENABLE_DISASSEMBLER
1864     json_of << "\"}\n],\n";
1865     json_of << "\"nodePositions\":";
1866     json_of << data->source_position_output();
1867     json_of << "}";
1868 
1869     CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
1870     OFStream os(tracing_scope.file());
1871     os << "---------------------------------------------------\n"
1872        << "Finished compiling method " << info()->GetDebugName().get()
1873        << " using Turbofan" << std::endl;
1874   }
1875 
1876   return code;
1877 }
1878 
ScheduleAndGenerateCode(CallDescriptor * call_descriptor)1879 Handle<Code> PipelineImpl::ScheduleAndGenerateCode(
1880     CallDescriptor* call_descriptor) {
1881   Linkage linkage(call_descriptor);
1882 
1883   // Schedule the graph, perform instruction selection and register allocation.
1884   if (!ScheduleAndSelectInstructions(&linkage, false)) return Handle<Code>();
1885 
1886   // Generate the final machine code.
1887   return GenerateCode(&linkage);
1888 }
1889 
AllocateRegisters(const RegisterConfiguration * config,CallDescriptor * descriptor,bool run_verifier)1890 void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
1891                                      CallDescriptor* descriptor,
1892                                      bool run_verifier) {
1893   PipelineData* data = this->data_;
1894   // Don't track usage for this zone in compiler stats.
1895   std::unique_ptr<Zone> verifier_zone;
1896   RegisterAllocatorVerifier* verifier = nullptr;
1897   if (run_verifier) {
1898     verifier_zone.reset(new Zone(isolate()->allocator(), ZONE_NAME));
1899     verifier = new (verifier_zone.get()) RegisterAllocatorVerifier(
1900         verifier_zone.get(), config, data->sequence());
1901   }
1902 
1903 #ifdef DEBUG
1904   data_->sequence()->ValidateEdgeSplitForm();
1905   data_->sequence()->ValidateDeferredBlockEntryPaths();
1906   data_->sequence()->ValidateDeferredBlockExitPaths();
1907 #endif
1908 
1909   data->InitializeRegisterAllocationData(config, descriptor);
1910   if (info()->is_osr()) {
1911     AllowHandleDereference allow_deref;
1912     OsrHelper osr_helper(info());
1913     osr_helper.SetupFrame(data->frame());
1914   }
1915 
1916   Run<MeetRegisterConstraintsPhase>();
1917   Run<ResolvePhisPhase>();
1918   Run<BuildLiveRangesPhase>();
1919   if (FLAG_trace_turbo_graph) {
1920     AllowHandleDereference allow_deref;
1921     CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
1922     OFStream os(tracing_scope.file());
1923     os << "----- Instruction sequence before register allocation -----\n"
1924        << PrintableInstructionSequence({config, data->sequence()});
1925   }
1926   if (verifier != nullptr) {
1927     CHECK(!data->register_allocation_data()->ExistsUseWithoutDefinition());
1928     CHECK(data->register_allocation_data()
1929               ->RangesDefinedInDeferredStayInDeferred());
1930   }
1931 
1932   if (FLAG_turbo_preprocess_ranges) {
1933     Run<SplinterLiveRangesPhase>();
1934   }
1935 
1936   Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
1937   Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
1938 
1939   if (FLAG_turbo_preprocess_ranges) {
1940     Run<MergeSplintersPhase>();
1941   }
1942 
1943   Run<AssignSpillSlotsPhase>();
1944 
1945   Run<CommitAssignmentPhase>();
1946   Run<PopulateReferenceMapsPhase>();
1947   Run<ConnectRangesPhase>();
1948   Run<ResolveControlFlowPhase>();
1949   if (FLAG_turbo_move_optimization) {
1950     Run<OptimizeMovesPhase>();
1951   }
1952 
1953   Run<LocateSpillSlotsPhase>();
1954 
1955   if (FLAG_trace_turbo_graph) {
1956     AllowHandleDereference allow_deref;
1957     CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
1958     OFStream os(tracing_scope.file());
1959     os << "----- Instruction sequence after register allocation -----\n"
1960        << PrintableInstructionSequence({config, data->sequence()});
1961   }
1962 
1963   if (verifier != nullptr) {
1964     verifier->VerifyAssignment();
1965     verifier->VerifyGapMoves();
1966   }
1967 
1968   if (FLAG_trace_turbo && !data->MayHaveUnverifiableGraph()) {
1969     TurboCfgFile tcf(data->isolate());
1970     tcf << AsC1VRegisterAllocationData("CodeGen",
1971                                        data->register_allocation_data());
1972   }
1973 
1974   data->DeleteRegisterAllocationZone();
1975 }
1976 
info() const1977 CompilationInfo* PipelineImpl::info() const { return data_->info(); }
1978 
isolate() const1979 Isolate* PipelineImpl::isolate() const { return info()->isolate(); }
1980 
1981 }  // namespace compiler
1982 }  // namespace internal
1983 }  // namespace v8
1984