1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/compiler/scheduler.h"
6 
7 #include <iomanip>
8 
9 #include "src/base/adapters.h"
10 #include "src/bit-vector.h"
11 #include "src/compiler/common-operator.h"
12 #include "src/compiler/control-equivalence.h"
13 #include "src/compiler/graph.h"
14 #include "src/compiler/node.h"
15 #include "src/compiler/node-marker.h"
16 #include "src/compiler/node-properties.h"
17 #include "src/zone-containers.h"
18 
19 namespace v8 {
20 namespace internal {
21 namespace compiler {
22 
23 #define TRACE(...)                                       \
24   do {                                                   \
25     if (FLAG_trace_turbo_scheduler) PrintF(__VA_ARGS__); \
26   } while (false)
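// The TRACE macro above prints only when --trace-turbo-scheduler is enabled,
// e.g. TRACE("Connect #%d:%s\n", node->id(), node->op()->mnemonic());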
27 
28 Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags)
29     : zone_(zone),
30       graph_(graph),
31       schedule_(schedule),
32       flags_(flags),
33       scheduled_nodes_(zone),
34       schedule_root_nodes_(zone),
35       schedule_queue_(zone),
36       node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone) {}
37 
38 
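// ComputeSchedule is the main entry point: it runs the phases below in order
// (CFG construction, special RPO numbering and dominator computation, use
// count preparation, schedule early, schedule late, sealing) and returns the
// completed Schedule. A sketch of a call site (illustrative only; kNoFlags is
// assumed to be the default Flags value from scheduler.h):
//
//   Schedule* schedule =
//       Scheduler::ComputeSchedule(temp_zone, graph, Scheduler::kNoFlags);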
39 Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) {
40   Schedule* schedule = new (graph->zone())
41       Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
42   Scheduler scheduler(zone, graph, schedule, flags);
43 
44   scheduler.BuildCFG();
45   scheduler.ComputeSpecialRPONumbering();
46   scheduler.GenerateImmediateDominatorTree();
47 
48   scheduler.PrepareUses();
49   scheduler.ScheduleEarly();
50   scheduler.ScheduleLate();
51 
52   scheduler.SealFinalSchedule();
53 
54   return schedule;
55 }
56 
57 
58 Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
59   SchedulerData def = {schedule_->start(), 0, kUnknown};
60   return def;
61 }
62 
63 
64 Scheduler::SchedulerData* Scheduler::GetData(Node* node) {
65   return &node_data_[node->id()];
66 }
67 
68 
69 Scheduler::Placement Scheduler::GetPlacement(Node* node) {
70   SchedulerData* data = GetData(node);
71   if (data->placement_ == kUnknown) {  // Compute placement, once, on demand.
72     switch (node->opcode()) {
73       case IrOpcode::kParameter:
74       case IrOpcode::kOsrValue:
75         // Parameters and OSR values are always fixed to the start block.
76         data->placement_ = kFixed;
77         break;
78       case IrOpcode::kPhi:
79       case IrOpcode::kEffectPhi: {
80         // Phis and effect phis are fixed if their control inputs are;
81         // otherwise they are coupled to a floating control node.
82         Placement p = GetPlacement(NodeProperties::GetControlInput(node));
83         data->placement_ = (p == kFixed ? kFixed : kCoupled);
84         break;
85       }
86 #define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
87       CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
88 #undef DEFINE_CONTROL_CASE
89       {
90         // Control nodes that were not control-reachable from end may float.
91         data->placement_ = kSchedulable;
92         break;
93       }
94       default:
95         data->placement_ = kSchedulable;
96         break;
97     }
98   }
99   return data->placement_;
100 }
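
// A rough summary of the placement transitions implemented by GetPlacement()
// above and UpdatePlacement() below:
//
//   kUnknown     -> kFixed        parameters, OSR values, CFG-built control
//   kUnknown     -> kCoupled      phis/effect phis on floating control
//   kUnknown     -> kSchedulable  all other floating nodes
//   kCoupled     -> kFixed        once the controlling node becomes fixed
//   kSchedulable -> kScheduled    once the node is placed in a basic block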
101 
102 
103 void Scheduler::UpdatePlacement(Node* node, Placement placement) {
104   SchedulerData* data = GetData(node);
105   if (data->placement_ != kUnknown) {  // Trap on mutation, not initialization.
106     switch (node->opcode()) {
107       case IrOpcode::kParameter:
108         // Parameters are fixed once and for all.
109         UNREACHABLE();
110         break;
111       case IrOpcode::kPhi:
112       case IrOpcode::kEffectPhi: {
113         // Phis and effect phis are coupled to their respective blocks.
114         DCHECK_EQ(Scheduler::kCoupled, data->placement_);
115         DCHECK_EQ(Scheduler::kFixed, placement);
116         Node* control = NodeProperties::GetControlInput(node);
117         BasicBlock* block = schedule_->block(control);
118         schedule_->AddNode(block, node);
119         break;
120       }
121 #define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
122       CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
123 #undef DEFINE_CONTROL_CASE
124       {
125         // Control nodes force coupled uses to be placed.
126         for (auto use : node->uses()) {
127           if (GetPlacement(use) == Scheduler::kCoupled) {
128             DCHECK_EQ(node, NodeProperties::GetControlInput(use));
129             UpdatePlacement(use, placement);
130           }
131         }
132         break;
133       }
134       default:
135         DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
136         DCHECK_EQ(Scheduler::kScheduled, placement);
137         break;
138     }
139     // Reduce the use count of the node's inputs to potentially make them
140     // schedulable. If all the uses of a node have been scheduled, then the node
141     // itself can be scheduled.
142     for (Edge const edge : node->input_edges()) {
143       DecrementUnscheduledUseCount(edge.to(), edge.index(), edge.from());
144     }
145   }
146   data->placement_ = placement;
147 }
148 
149 
150 bool Scheduler::IsCoupledControlEdge(Node* node, int index) {
151   return GetPlacement(node) == kCoupled &&
152          NodeProperties::FirstControlIndex(node) == index;
153 }
154 
155 
156 void Scheduler::IncrementUnscheduledUseCount(Node* node, int index,
157                                              Node* from) {
158   // Make sure that control edges from coupled nodes are not counted.
159   if (IsCoupledControlEdge(from, index)) return;
160 
161   // Tracking use counts for fixed nodes is useless.
162   if (GetPlacement(node) == kFixed) return;
163 
164   // Use count for coupled nodes is summed up on their control.
165   if (GetPlacement(node) == kCoupled) {
166     Node* control = NodeProperties::GetControlInput(node);
167     return IncrementUnscheduledUseCount(control, index, from);
168   }
169 
170   ++(GetData(node)->unscheduled_count_);
171   if (FLAG_trace_turbo_scheduler) {
172     TRACE("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(),
173           node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
174           GetData(node)->unscheduled_count_);
175   }
176 }
177 
178 
179 void Scheduler::DecrementUnscheduledUseCount(Node* node, int index,
180                                              Node* from) {
181   // Make sure that control edges from coupled nodes are not counted.
182   if (IsCoupledControlEdge(from, index)) return;
183 
184   // Tracking use counts for fixed nodes is useless.
185   if (GetPlacement(node) == kFixed) return;
186 
187   // Use count for coupled nodes is summed up on their control.
188   if (GetPlacement(node) == kCoupled) {
189     Node* control = NodeProperties::GetControlInput(node);
190     return DecrementUnscheduledUseCount(control, index, from);
191   }
192 
193   DCHECK(GetData(node)->unscheduled_count_ > 0);
194   --(GetData(node)->unscheduled_count_);
195   if (FLAG_trace_turbo_scheduler) {
196     TRACE("  Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(),
197           node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
198           GetData(node)->unscheduled_count_);
199   }
200   if (GetData(node)->unscheduled_count_ == 0) {
201     TRACE("    newly eligible #%d:%s\n", node->id(), node->op()->mnemonic());
202     schedule_queue_.push(node);
203   }
204 }
205 
206 
207 // -----------------------------------------------------------------------------
208 // Phase 1: Build control-flow graph.
209 
210 
211 // Internal class to build a control flow graph (i.e. the basic blocks and edges
212 // between them within a Schedule) from the node graph. Visits control edges of
213 // the graph backwards from an end node in order to find the connected control
214 // subgraph, needed for scheduling.
215 class CFGBuilder : public ZoneObject {
216  public:
217   CFGBuilder(Zone* zone, Scheduler* scheduler)
218       : zone_(zone),
219         scheduler_(scheduler),
220         schedule_(scheduler->schedule_),
221         queued_(scheduler->graph_, 2),
222         queue_(zone),
223         control_(zone),
224         component_entry_(nullptr),
225         component_start_(nullptr),
226         component_end_(nullptr) {}
227 
228   // Run the control flow graph construction algorithm by walking the graph
229   // backwards from end through control edges, building and connecting the
230   // basic blocks for control nodes.
231   void Run() {
232     ResetDataStructures();
233     Queue(scheduler_->graph_->end());
234 
235     while (!queue_.empty()) {  // Breadth-first backwards traversal.
236       Node* node = queue_.front();
237       queue_.pop();
238       int max = NodeProperties::PastControlIndex(node);
239       for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
240         Queue(node->InputAt(i));
241       }
242     }
243 
244     for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
245       ConnectBlocks(*i);  // Connect block to its predecessor/successors.
246     }
247   }
248 
249   // Run the control flow graph construction for a minimal control-connected
250   // component ending in {exit} and merge that component into an existing
251   // control flow graph at the bottom of {block}.
252   void Run(BasicBlock* block, Node* exit) {
253     ResetDataStructures();
254     Queue(exit);
255 
256     component_entry_ = nullptr;
257     component_start_ = block;
258     component_end_ = schedule_->block(exit);
259     scheduler_->equivalence_->Run(exit);
260     while (!queue_.empty()) {  // Breadth-first backwards traversal.
261       Node* node = queue_.front();
262       queue_.pop();
263 
264       // Use control dependence equivalence to find a canonical single-entry
265       // single-exit region that makes up a minimal component to be scheduled.
266       if (IsSingleEntrySingleExitRegion(node, exit)) {
267         TRACE("Found SESE at #%d:%s\n", node->id(), node->op()->mnemonic());
268         DCHECK(!component_entry_);
269         component_entry_ = node;
270         continue;
271       }
272 
273       int max = NodeProperties::PastControlIndex(node);
274       for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
275         Queue(node->InputAt(i));
276       }
277     }
278     DCHECK(component_entry_);
279 
280     for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
281       ConnectBlocks(*i);  // Connect block to its predecessor/successors.
282     }
283   }
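
  // Note: this second Run() overload is used when a floating control
  // structure receives its position during schedule-late (see
  // ScheduleFloatingControl() below); the minimal component ending in {exit}
  // is then spliced into the existing control flow graph at {block}.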
284 
285  private:
286   friend class ScheduleLateNodeVisitor;
287   friend class Scheduler;
288 
289   void FixNode(BasicBlock* block, Node* node) {
290     schedule_->AddNode(block, node);
291     scheduler_->UpdatePlacement(node, Scheduler::kFixed);
292   }
293 
294   void Queue(Node* node) {
295     // Mark the connected control nodes as they are queued.
296     if (!queued_.Get(node)) {
297       BuildBlocks(node);
298       queue_.push(node);
299       queued_.Set(node, true);
300       control_.push_back(node);
301     }
302   }
303 
304   void BuildBlocks(Node* node) {
305     switch (node->opcode()) {
306       case IrOpcode::kEnd:
307         FixNode(schedule_->end(), node);
308         break;
309       case IrOpcode::kStart:
310         FixNode(schedule_->start(), node);
311         break;
312       case IrOpcode::kLoop:
313       case IrOpcode::kMerge:
314         BuildBlockForNode(node);
315         break;
316       case IrOpcode::kTerminate: {
317         // Put Terminate in the loop to which it refers.
318         Node* loop = NodeProperties::GetControlInput(node);
319         BasicBlock* block = BuildBlockForNode(loop);
320         FixNode(block, node);
321         break;
322       }
323       case IrOpcode::kBranch:
324       case IrOpcode::kSwitch:
325         BuildBlocksForSuccessors(node);
326         break;
327       case IrOpcode::kCall:
328         if (NodeProperties::IsExceptionalCall(node)) {
329           BuildBlocksForSuccessors(node);
330         }
331         break;
332       default:
333         break;
334     }
335   }
336 
337   void ConnectBlocks(Node* node) {
338     switch (node->opcode()) {
339       case IrOpcode::kLoop:
340       case IrOpcode::kMerge:
341         ConnectMerge(node);
342         break;
343       case IrOpcode::kBranch:
344         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
345         ConnectBranch(node);
346         break;
347       case IrOpcode::kSwitch:
348         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
349         ConnectSwitch(node);
350         break;
351       case IrOpcode::kDeoptimize:
352         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
353         ConnectDeoptimize(node);
354         break;
355       case IrOpcode::kTailCall:
356         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
357         ConnectTailCall(node);
358         break;
359       case IrOpcode::kReturn:
360         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
361         ConnectReturn(node);
362         break;
363       case IrOpcode::kThrow:
364         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
365         ConnectThrow(node);
366         break;
367       case IrOpcode::kCall:
368         if (NodeProperties::IsExceptionalCall(node)) {
369           scheduler_->UpdatePlacement(node, Scheduler::kFixed);
370           ConnectCall(node);
371         }
372         break;
373       default:
374         break;
375     }
376   }
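
  // An illustrative sketch (not part of the algorithm): for the diamond
  //
  //              Branch
  //              /    \
  //         IfTrue    IfFalse
  //              \    /
  //              Merge
  //
  // BuildBlocks() creates a basic block for each of the IfTrue/IfFalse
  // projections and one for the Merge, while ConnectBlocks() emits the
  // two-way branch out of the predecessor block and a goto from each arm
  // into the merge block.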
377 
378   BasicBlock* BuildBlockForNode(Node* node) {
379     BasicBlock* block = schedule_->block(node);
380     if (block == nullptr) {
381       block = schedule_->NewBasicBlock();
382       TRACE("Create block id:%d for #%d:%s\n", block->id().ToInt(), node->id(),
383             node->op()->mnemonic());
384       FixNode(block, node);
385     }
386     return block;
387   }
388 
389   void BuildBlocksForSuccessors(Node* node) {
390     size_t const successor_cnt = node->op()->ControlOutputCount();
391     Node** successors = zone_->NewArray<Node*>(successor_cnt);
392     NodeProperties::CollectControlProjections(node, successors, successor_cnt);
393     for (size_t index = 0; index < successor_cnt; ++index) {
394       BuildBlockForNode(successors[index]);
395     }
396   }
397 
398   void CollectSuccessorBlocks(Node* node, BasicBlock** successor_blocks,
399                               size_t successor_cnt) {
400     Node** successors = reinterpret_cast<Node**>(successor_blocks);
401     NodeProperties::CollectControlProjections(node, successors, successor_cnt);
402     for (size_t index = 0; index < successor_cnt; ++index) {
403       successor_blocks[index] = schedule_->block(successors[index]);
404     }
405   }
406 
407   BasicBlock* FindPredecessorBlock(Node* node) {
408     BasicBlock* predecessor_block = nullptr;
409     while (true) {
410       predecessor_block = schedule_->block(node);
411       if (predecessor_block != nullptr) break;
412       node = NodeProperties::GetControlInput(node);
413     }
414     return predecessor_block;
415   }
416 
417   void ConnectCall(Node* call) {
418     BasicBlock* successor_blocks[2];
419     CollectSuccessorBlocks(call, successor_blocks, arraysize(successor_blocks));
420 
421     // Consider the exception continuation to be deferred.
422     successor_blocks[1]->set_deferred(true);
423 
424     Node* call_control = NodeProperties::GetControlInput(call);
425     BasicBlock* call_block = FindPredecessorBlock(call_control);
426     TraceConnect(call, call_block, successor_blocks[0]);
427     TraceConnect(call, call_block, successor_blocks[1]);
428     schedule_->AddCall(call_block, call, successor_blocks[0],
429                        successor_blocks[1]);
430   }
431 
432   void ConnectBranch(Node* branch) {
433     BasicBlock* successor_blocks[2];
434     CollectSuccessorBlocks(branch, successor_blocks,
435                            arraysize(successor_blocks));
436 
437     // Consider branch hints.
438     switch (BranchHintOf(branch->op())) {
439       case BranchHint::kNone:
440         break;
441       case BranchHint::kTrue:
442         successor_blocks[1]->set_deferred(true);
443         break;
444       case BranchHint::kFalse:
445         successor_blocks[0]->set_deferred(true);
446         break;
447     }
448 
449     if (branch == component_entry_) {
450       TraceConnect(branch, component_start_, successor_blocks[0]);
451       TraceConnect(branch, component_start_, successor_blocks[1]);
452       schedule_->InsertBranch(component_start_, component_end_, branch,
453                               successor_blocks[0], successor_blocks[1]);
454     } else {
455       Node* branch_control = NodeProperties::GetControlInput(branch);
456       BasicBlock* branch_block = FindPredecessorBlock(branch_control);
457       TraceConnect(branch, branch_block, successor_blocks[0]);
458       TraceConnect(branch, branch_block, successor_blocks[1]);
459       schedule_->AddBranch(branch_block, branch, successor_blocks[0],
460                            successor_blocks[1]);
461     }
462   }
463 
464   void ConnectSwitch(Node* sw) {
465     size_t const successor_count = sw->op()->ControlOutputCount();
466     BasicBlock** successor_blocks =
467         zone_->NewArray<BasicBlock*>(successor_count);
468     CollectSuccessorBlocks(sw, successor_blocks, successor_count);
469 
470     if (sw == component_entry_) {
471       for (size_t index = 0; index < successor_count; ++index) {
472         TraceConnect(sw, component_start_, successor_blocks[index]);
473       }
474       schedule_->InsertSwitch(component_start_, component_end_, sw,
475                               successor_blocks, successor_count);
476     } else {
477       Node* switch_control = NodeProperties::GetControlInput(sw);
478       BasicBlock* switch_block = FindPredecessorBlock(switch_control);
479       for (size_t index = 0; index < successor_count; ++index) {
480         TraceConnect(sw, switch_block, successor_blocks[index]);
481       }
482       schedule_->AddSwitch(switch_block, sw, successor_blocks, successor_count);
483     }
484   }
485 
486   void ConnectMerge(Node* merge) {
487     // Don't connect the special merge at the end to its predecessors.
488     if (IsFinalMerge(merge)) return;
489 
490     BasicBlock* block = schedule_->block(merge);
491     DCHECK_NOT_NULL(block);
492     // For all of the merge's control inputs, add a goto at the end to the
493     // merge's basic block.
494     for (Node* const input : merge->inputs()) {
495       BasicBlock* predecessor_block = FindPredecessorBlock(input);
496       TraceConnect(merge, predecessor_block, block);
497       schedule_->AddGoto(predecessor_block, block);
498     }
499   }
500 
501   void ConnectTailCall(Node* call) {
502     Node* call_control = NodeProperties::GetControlInput(call);
503     BasicBlock* call_block = FindPredecessorBlock(call_control);
504     TraceConnect(call, call_block, nullptr);
505     schedule_->AddTailCall(call_block, call);
506   }
507 
508   void ConnectReturn(Node* ret) {
509     Node* return_control = NodeProperties::GetControlInput(ret);
510     BasicBlock* return_block = FindPredecessorBlock(return_control);
511     TraceConnect(ret, return_block, nullptr);
512     schedule_->AddReturn(return_block, ret);
513   }
514 
515   void ConnectDeoptimize(Node* deopt) {
516     Node* deoptimize_control = NodeProperties::GetControlInput(deopt);
517     BasicBlock* deoptimize_block = FindPredecessorBlock(deoptimize_control);
518     TraceConnect(deopt, deoptimize_block, nullptr);
519     schedule_->AddDeoptimize(deoptimize_block, deopt);
520   }
521 
522   void ConnectThrow(Node* thr) {
523     Node* throw_control = NodeProperties::GetControlInput(thr);
524     BasicBlock* throw_block = FindPredecessorBlock(throw_control);
525     TraceConnect(thr, throw_block, nullptr);
526     schedule_->AddThrow(throw_block, thr);
527   }
528 
529   void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
530     DCHECK_NOT_NULL(block);
531     if (succ == nullptr) {
532       TRACE("Connect #%d:%s, id:%d -> end\n", node->id(),
533             node->op()->mnemonic(), block->id().ToInt());
534     } else {
535       TRACE("Connect #%d:%s, id:%d -> id:%d\n", node->id(),
536             node->op()->mnemonic(), block->id().ToInt(), succ->id().ToInt());
537     }
538   }
539 
540   bool IsFinalMerge(Node* node) {
541     return (node->opcode() == IrOpcode::kMerge &&
542             node == scheduler_->graph_->end()->InputAt(0));
543   }
544 
545   bool IsSingleEntrySingleExitRegion(Node* entry, Node* exit) const {
546     size_t entry_class = scheduler_->equivalence_->ClassOf(entry);
547     size_t exit_class = scheduler_->equivalence_->ClassOf(exit);
548     return entry != exit && entry_class == exit_class;
549   }
550 
551   void ResetDataStructures() {
552     control_.clear();
553     DCHECK(queue_.empty());
554     DCHECK(control_.empty());
555   }
556 
557   Zone* zone_;
558   Scheduler* scheduler_;
559   Schedule* schedule_;
560   NodeMarker<bool> queued_;      // Mark indicating whether node is queued.
561   ZoneQueue<Node*> queue_;       // Queue used for breadth-first traversal.
562   NodeVector control_;           // List of encountered control nodes.
563   Node* component_entry_;        // Component single-entry node.
564   BasicBlock* component_start_;  // Component single-entry block.
565   BasicBlock* component_end_;    // Component single-exit block.
566 };
567 
568 
569 void Scheduler::BuildCFG() {
570   TRACE("--- CREATING CFG -------------------------------------------\n");
571 
572   // Instantiate a new control equivalence algorithm for the graph.
573   equivalence_ = new (zone_) ControlEquivalence(zone_, graph_);
574 
575   // Build a control-flow graph for the main control-connected component that
576   // is being spanned by the graph's start and end nodes.
577   control_flow_builder_ = new (zone_) CFGBuilder(zone_, this);
578   control_flow_builder_->Run();
579 
580   // Initialize per-block data.
581   scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
582 }
583 
584 
585 // -----------------------------------------------------------------------------
586 // Phase 2: Compute special RPO and dominator tree.
587 
588 
589 // Compute the special reverse-post-order block ordering, which is essentially
590 // a RPO of the graph where loop bodies are contiguous. Properties:
591 // 1. If block A is a predecessor of B, then A appears before B in the order,
592 //    unless B is a loop header and A is in the loop headed at B
593 //    (i.e. A -> B is a backedge).
594 // => If block A dominates block B, then A appears before B in the order.
595 // => If block A is a loop header, A appears before all blocks in the loop
596 //    headed at A.
597 // 2. All loops are contiguous in the order (i.e. no intervening blocks that
598 //    do not belong to the loop.)
599 // Note a simple RPO traversal satisfies (1) but not (2).
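//
// For example (illustrative): in a CFG with the edges
//   B0 -> B1, B1 -> B2, B1 -> B3, B2 -> B1 (backedge),
// a plain RPO may produce [B0, B1, B3, B2], splitting the loop {B1, B2}
// around B3, whereas the special RPO computed here yields [B0, B1, B2, B3],
// keeping the loop body contiguous.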
600 class SpecialRPONumberer : public ZoneObject {
601  public:
602   SpecialRPONumberer(Zone* zone, Schedule* schedule)
603       : zone_(zone),
604         schedule_(schedule),
605         order_(nullptr),
606         beyond_end_(nullptr),
607         loops_(zone),
608         backedges_(zone),
609         stack_(zone),
610         previous_block_count_(0),
611         empty_(0, zone) {}
612 
613   // Computes the special reverse-post-order for the main control flow graph,
614   // that is for the graph spanned between the schedule's start and end blocks.
615   void ComputeSpecialRPO() {
616     DCHECK(schedule_->end()->SuccessorCount() == 0);
617     DCHECK(!order_);  // Main order does not exist yet.
618     ComputeAndInsertSpecialRPO(schedule_->start(), schedule_->end());
619   }
620 
621   // Computes the special reverse-post-order for a partial control flow graph,
622   // that is for the graph spanned between the given {entry} and {end} blocks,
623   // then updates the existing ordering with this new information.
624   void UpdateSpecialRPO(BasicBlock* entry, BasicBlock* end) {
625     DCHECK(order_);  // Main order to be updated is present.
626     ComputeAndInsertSpecialRPO(entry, end);
627   }
628 
629   // Serialize the previously computed order as a special reverse-post-order
630   // numbering for basic blocks into the final schedule.
631   void SerializeRPOIntoSchedule() {
632     int32_t number = 0;
633     for (BasicBlock* b = order_; b != nullptr; b = b->rpo_next()) {
634       b->set_rpo_number(number++);
635       schedule_->rpo_order()->push_back(b);
636     }
637     BeyondEndSentinel()->set_rpo_number(number);
638   }
639 
640   // Print and verify the special reverse-post-order.
641   void PrintAndVerifySpecialRPO() {
642 #if DEBUG
643     if (FLAG_trace_turbo_scheduler) PrintRPO();
644     VerifySpecialRPO();
645 #endif
646   }
647 
648   const ZoneVector<BasicBlock*>& GetOutgoingBlocks(BasicBlock* block) {
649     if (HasLoopNumber(block)) {
650       LoopInfo const& loop = loops_[GetLoopNumber(block)];
651       if (loop.outgoing) return *loop.outgoing;
652     }
653     return empty_;
654   }
655 
656  private:
657   typedef std::pair<BasicBlock*, size_t> Backedge;
658 
659   // Numbering for BasicBlock::rpo_number for this block traversal:
660   static const int kBlockOnStack = -2;
661   static const int kBlockVisited1 = -3;
662   static const int kBlockVisited2 = -4;
663   static const int kBlockUnvisited1 = -1;
664   static const int kBlockUnvisited2 = kBlockVisited1;
665 
666   struct SpecialRPOStackFrame {
667     BasicBlock* block;
668     size_t index;
669   };
670 
671   struct LoopInfo {
672     BasicBlock* header;
673     ZoneVector<BasicBlock*>* outgoing;
674     BitVector* members;
675     LoopInfo* prev;
676     BasicBlock* end;
677     BasicBlock* start;
678 
679     void AddOutgoing(Zone* zone, BasicBlock* block) {
680       if (outgoing == nullptr) {
681         outgoing = new (zone->New(sizeof(ZoneVector<BasicBlock*>)))
682             ZoneVector<BasicBlock*>(zone);
683       }
684       outgoing->push_back(block);
685     }
686   };
687 
688   int Push(ZoneVector<SpecialRPOStackFrame>& stack, int depth,
689            BasicBlock* child, int unvisited) {
690     if (child->rpo_number() == unvisited) {
691       stack[depth].block = child;
692       stack[depth].index = 0;
693       child->set_rpo_number(kBlockOnStack);
694       return depth + 1;
695     }
696     return depth;
697   }
698 
699   BasicBlock* PushFront(BasicBlock* head, BasicBlock* block) {
700     block->set_rpo_next(head);
701     return block;
702   }
703 
704   static int GetLoopNumber(BasicBlock* block) { return block->loop_number(); }
705   static void SetLoopNumber(BasicBlock* block, int loop_number) {
706     return block->set_loop_number(loop_number);
707   }
708   static bool HasLoopNumber(BasicBlock* block) {
709     return block->loop_number() >= 0;
710   }
711 
712   // TODO(mstarzinger): We only need this special sentinel because some tests
713   // use the schedule's end block in actual control flow (e.g. with end having
714   // successors). Once this has been cleaned up we can use the end block here.
715   BasicBlock* BeyondEndSentinel() {
716     if (beyond_end_ == nullptr) {
717       BasicBlock::Id id = BasicBlock::Id::FromInt(-1);
718       beyond_end_ = new (schedule_->zone()) BasicBlock(schedule_->zone(), id);
719     }
720     return beyond_end_;
721   }
722 
723   // Compute special RPO for the control flow graph between {entry} and {end},
724   // mutating any existing order so that the result is still valid.
725   void ComputeAndInsertSpecialRPO(BasicBlock* entry, BasicBlock* end) {
726     // RPO should not have been serialized for this schedule yet.
727     CHECK_EQ(kBlockUnvisited1, schedule_->start()->loop_number());
728     CHECK_EQ(kBlockUnvisited1, schedule_->start()->rpo_number());
729     CHECK_EQ(0, static_cast<int>(schedule_->rpo_order()->size()));
730 
731     // Find correct insertion point within existing order.
732     BasicBlock* insertion_point = entry->rpo_next();
733     BasicBlock* order = insertion_point;
734 
735     // Perform an iterative RPO traversal using an explicit stack,
736     // recording backedges that form cycles. O(|B|).
737     DCHECK_LT(previous_block_count_, schedule_->BasicBlockCount());
738     stack_.resize(schedule_->BasicBlockCount() - previous_block_count_);
739     previous_block_count_ = schedule_->BasicBlockCount();
740     int stack_depth = Push(stack_, 0, entry, kBlockUnvisited1);
741     int num_loops = static_cast<int>(loops_.size());
742 
743     while (stack_depth > 0) {
744       int current = stack_depth - 1;
745       SpecialRPOStackFrame* frame = &stack_[current];
746 
747       if (frame->block != end &&
748           frame->index < frame->block->SuccessorCount()) {
749         // Process the next successor.
750         BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
751         if (succ->rpo_number() == kBlockVisited1) continue;
752         if (succ->rpo_number() == kBlockOnStack) {
753           // The successor is on the stack, so this is a backedge (cycle).
754           backedges_.push_back(Backedge(frame->block, frame->index - 1));
755           if (!HasLoopNumber(succ)) {
756             // Assign a new loop number to the header if it doesn't have one.
757             SetLoopNumber(succ, num_loops++);
758           }
759         } else {
760           // Push the successor onto the stack.
761           DCHECK(succ->rpo_number() == kBlockUnvisited1);
762           stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited1);
763         }
764       } else {
765         // Finished with all successors; pop the stack and add the block.
766         order = PushFront(order, frame->block);
767         frame->block->set_rpo_number(kBlockVisited1);
768         stack_depth--;
769       }
770     }
771 
772     // If no loops were encountered, then the order we computed was correct.
773     if (num_loops > static_cast<int>(loops_.size())) {
774       // Otherwise, compute the loop information from the backedges in order
775       // to perform a traversal that groups loop bodies together.
776       ComputeLoopInfo(stack_, num_loops, &backedges_);
777 
778       // Initialize the "loop stack". Note the entry could be a loop header.
779       LoopInfo* loop =
780           HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : nullptr;
781       order = insertion_point;
782 
783       // Perform an iterative post-order traversal, visiting loop bodies before
784       // edges that lead out of loops. Visits each block once, but linking loop
785       // sections together is linear in the loop size, so overall is
786       // O(|B| + max(loop_depth) * max(|loop|))
787       stack_depth = Push(stack_, 0, entry, kBlockUnvisited2);
788       while (stack_depth > 0) {
789         SpecialRPOStackFrame* frame = &stack_[stack_depth - 1];
790         BasicBlock* block = frame->block;
791         BasicBlock* succ = nullptr;
792 
793         if (block != end && frame->index < block->SuccessorCount()) {
794           // Process the next normal successor.
795           succ = block->SuccessorAt(frame->index++);
796         } else if (HasLoopNumber(block)) {
797           // Process additional outgoing edges from the loop header.
798           if (block->rpo_number() == kBlockOnStack) {
799             // Finish the loop body the first time the header is left on the
800             // stack.
801             DCHECK(loop != nullptr && loop->header == block);
802             loop->start = PushFront(order, block);
803             order = loop->end;
804             block->set_rpo_number(kBlockVisited2);
805             // Pop the loop stack and continue visiting outgoing edges within
806             // the context of the outer loop, if any.
807             loop = loop->prev;
808             // We leave the loop header on the stack; the rest of this iteration
809             // and later iterations will go through its outgoing edges list.
810           }
811 
812           // Use the next outgoing edge if there are any.
813           size_t outgoing_index = frame->index - block->SuccessorCount();
814           LoopInfo* info = &loops_[GetLoopNumber(block)];
815           DCHECK(loop != info);
816           if (block != entry && info->outgoing != nullptr &&
817               outgoing_index < info->outgoing->size()) {
818             succ = info->outgoing->at(outgoing_index);
819             frame->index++;
820           }
821         }
822 
823         if (succ != nullptr) {
824           // Process the next successor.
825           if (succ->rpo_number() == kBlockOnStack) continue;
826           if (succ->rpo_number() == kBlockVisited2) continue;
827           DCHECK(succ->rpo_number() == kBlockUnvisited2);
828           if (loop != nullptr && !loop->members->Contains(succ->id().ToInt())) {
829             // The successor is not in the current loop or any nested loop.
830             // Add it to the outgoing edges of this loop and visit it later.
831             loop->AddOutgoing(zone_, succ);
832           } else {
833             // Push the successor onto the stack.
834             stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited2);
835             if (HasLoopNumber(succ)) {
836               // Push the inner loop onto the loop stack.
837               DCHECK(GetLoopNumber(succ) < num_loops);
838               LoopInfo* next = &loops_[GetLoopNumber(succ)];
839               next->end = order;
840               next->prev = loop;
841               loop = next;
842             }
843           }
844         } else {
845           // Finished with all successors of the current block.
846           if (HasLoopNumber(block)) {
847             // If we are going to pop a loop header, then add its entire body.
848             LoopInfo* info = &loops_[GetLoopNumber(block)];
849             for (BasicBlock* b = info->start; true; b = b->rpo_next()) {
850               if (b->rpo_next() == info->end) {
851                 b->set_rpo_next(order);
852                 info->end = order;
853                 break;
854               }
855             }
856             order = info->start;
857           } else {
858             // Pop a single node off the stack and add it to the order.
859             order = PushFront(order, block);
860             block->set_rpo_number(kBlockVisited2);
861           }
862           stack_depth--;
863         }
864       }
865     }
866 
867     // Publish new order the first time.
868     if (order_ == nullptr) order_ = order;
869 
870     // Compute the correct loop headers and set the correct loop ends.
871     LoopInfo* current_loop = nullptr;
872     BasicBlock* current_header = entry->loop_header();
873     int32_t loop_depth = entry->loop_depth();
874     if (entry->IsLoopHeader()) --loop_depth;  // Entry might be a loop header.
875     for (BasicBlock* b = order; b != insertion_point; b = b->rpo_next()) {
876       BasicBlock* current = b;
877 
878       // Reset BasicBlock::rpo_number again.
879       current->set_rpo_number(kBlockUnvisited1);
880 
881       // Finish the previous loop(s) if we just exited them.
882       while (current_header != nullptr &&
883              current == current_header->loop_end()) {
884         DCHECK(current_header->IsLoopHeader());
885         DCHECK_NOT_NULL(current_loop);
886         current_loop = current_loop->prev;
887         current_header =
888             current_loop == nullptr ? nullptr : current_loop->header;
889         --loop_depth;
890       }
891       current->set_loop_header(current_header);
892 
893       // Push a new loop onto the stack if this loop is a loop header.
894       if (HasLoopNumber(current)) {
895         ++loop_depth;
896         current_loop = &loops_[GetLoopNumber(current)];
897         BasicBlock* end = current_loop->end;
898         current->set_loop_end(end == nullptr ? BeyondEndSentinel() : end);
899         current_header = current_loop->header;
900         TRACE("id:%d is a loop header, increment loop depth to %d\n",
901               current->id().ToInt(), loop_depth);
902       }
903 
904       current->set_loop_depth(loop_depth);
905 
906       if (current->loop_header() == nullptr) {
907         TRACE("id:%d is not in a loop (depth == %d)\n", current->id().ToInt(),
908               current->loop_depth());
909       } else {
910         TRACE("id:%d has loop header id:%d, (depth == %d)\n",
911               current->id().ToInt(), current->loop_header()->id().ToInt(),
912               current->loop_depth());
913       }
914     }
915   }
916 
917   // Computes loop membership from the backedges of the control flow graph.
918   void ComputeLoopInfo(ZoneVector<SpecialRPOStackFrame>& queue,
919                        size_t num_loops, ZoneVector<Backedge>* backedges) {
920     // Extend existing loop membership vectors.
921     for (LoopInfo& loop : loops_) {
922       BitVector* new_members = new (zone_)
923           BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
924       new_members->CopyFrom(*loop.members);
925       loop.members = new_members;
926     }
927 
928     // Extend loop information vector.
929     loops_.resize(num_loops, LoopInfo());
930 
931     // Compute loop membership starting from backedges.
932     // O(max(loop_depth) * max(|loop|))
933     for (size_t i = 0; i < backedges->size(); i++) {
934       BasicBlock* member = backedges->at(i).first;
935       BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
936       size_t loop_num = GetLoopNumber(header);
937       if (loops_[loop_num].header == nullptr) {
938         loops_[loop_num].header = header;
939         loops_[loop_num].members = new (zone_)
940             BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
941       }
942 
943       int queue_length = 0;
944       if (member != header) {
945         // As long as the header doesn't have a backedge to itself, push
946         // the member onto the queue and process its predecessors.
947         if (!loops_[loop_num].members->Contains(member->id().ToInt())) {
948           loops_[loop_num].members->Add(member->id().ToInt());
949         }
950         queue[queue_length++].block = member;
951       }
952 
953       // Propagate loop membership backwards. All predecessors of M up to the
954       // loop header H are members of the loop too. O(|blocks between M and H|).
955       while (queue_length > 0) {
956         BasicBlock* block = queue[--queue_length].block;
957         for (size_t i = 0; i < block->PredecessorCount(); i++) {
958           BasicBlock* pred = block->PredecessorAt(i);
959           if (pred != header) {
960             if (!loops_[loop_num].members->Contains(pred->id().ToInt())) {
961               loops_[loop_num].members->Add(pred->id().ToInt());
962               queue[queue_length++].block = pred;
963             }
964           }
965         }
966       }
967     }
968   }
969 
970 #if DEBUG
971   void PrintRPO() {
972     OFStream os(stdout);
973     os << "RPO with " << loops_.size() << " loops";
974     if (loops_.size() > 0) {
975       os << " (";
976       for (size_t i = 0; i < loops_.size(); i++) {
977         if (i > 0) os << " ";
978         os << "id:" << loops_[i].header->id();
979       }
980       os << ")";
981     }
982     os << ":\n";
983 
984     for (BasicBlock* block = order_; block != nullptr;
985          block = block->rpo_next()) {
986       os << std::setw(5) << "B" << block->rpo_number() << ":";
987       for (size_t i = 0; i < loops_.size(); i++) {
988         bool range = loops_[i].header->LoopContains(block);
989         bool membership = loops_[i].header != block && range;
990         os << (membership ? " |" : "  ");
991         os << (range ? "x" : " ");
992       }
993       os << "  id:" << block->id() << ": ";
994       if (block->loop_end() != nullptr) {
995         os << " range: [B" << block->rpo_number() << ", B"
996            << block->loop_end()->rpo_number() << ")";
997       }
998       if (block->loop_header() != nullptr) {
999         os << " header: id:" << block->loop_header()->id();
1000       }
1001       if (block->loop_depth() > 0) {
1002         os << " depth: " << block->loop_depth();
1003       }
1004       os << "\n";
1005     }
1006   }
1007 
1008   void VerifySpecialRPO() {
1009     BasicBlockVector* order = schedule_->rpo_order();
1010     DCHECK(order->size() > 0);
1011     DCHECK((*order)[0]->id().ToInt() == 0);  // entry should be first.
1012 
1013     for (size_t i = 0; i < loops_.size(); i++) {
1014       LoopInfo* loop = &loops_[i];
1015       BasicBlock* header = loop->header;
1016       BasicBlock* end = header->loop_end();
1017 
1018       DCHECK_NOT_NULL(header);
1019       DCHECK(header->rpo_number() >= 0);
1020       DCHECK(header->rpo_number() < static_cast<int>(order->size()));
1021       DCHECK_NOT_NULL(end);
1022       DCHECK(end->rpo_number() <= static_cast<int>(order->size()));
1023       DCHECK(end->rpo_number() > header->rpo_number());
1024       DCHECK(header->loop_header() != header);
1025 
1026       // Verify the start ... end list relationship.
1027       int links = 0;
1028       BasicBlock* block = loop->start;
1029       DCHECK_EQ(header, block);
1030       bool end_found;
1031       while (true) {
1032         if (block == nullptr || block == loop->end) {
1033           end_found = (loop->end == block);
1034           break;
1035         }
1036         // The list should be in the same order as the final result.
1037         DCHECK(block->rpo_number() == links + header->rpo_number());
1038         links++;
1039         block = block->rpo_next();
1040         DCHECK_LT(links, static_cast<int>(2 * order->size()));  // cycle?
1041       }
1042       DCHECK(links > 0);
1043       DCHECK(links == end->rpo_number() - header->rpo_number());
1044       DCHECK(end_found);
1045 
1046       // Check loop depth of the header.
1047       int loop_depth = 0;
1048       for (LoopInfo* outer = loop; outer != nullptr; outer = outer->prev) {
1049         loop_depth++;
1050       }
1051       DCHECK_EQ(loop_depth, header->loop_depth());
1052 
1053       // Check the contiguousness of loops.
1054       int count = 0;
1055       for (int j = 0; j < static_cast<int>(order->size()); j++) {
1056         BasicBlock* block = order->at(j);
1057         DCHECK(block->rpo_number() == j);
1058         if (j < header->rpo_number() || j >= end->rpo_number()) {
1059           DCHECK(!header->LoopContains(block));
1060         } else {
1061           DCHECK(header->LoopContains(block));
1062           DCHECK_GE(block->loop_depth(), loop_depth);
1063           count++;
1064         }
1065       }
1066       DCHECK(links == count);
1067     }
1068   }
1069 #endif  // DEBUG
1070 
1071   Zone* zone_;
1072   Schedule* schedule_;
1073   BasicBlock* order_;
1074   BasicBlock* beyond_end_;
1075   ZoneVector<LoopInfo> loops_;
1076   ZoneVector<Backedge> backedges_;
1077   ZoneVector<SpecialRPOStackFrame> stack_;
1078   size_t previous_block_count_;
1079   ZoneVector<BasicBlock*> const empty_;
1080 };
1081 
1082 
1083 BasicBlockVector* Scheduler::ComputeSpecialRPO(Zone* zone, Schedule* schedule) {
1084   SpecialRPONumberer numberer(zone, schedule);
1085   numberer.ComputeSpecialRPO();
1086   numberer.SerializeRPOIntoSchedule();
1087   numberer.PrintAndVerifySpecialRPO();
1088   return schedule->rpo_order();
1089 }
1090 
1091 
1092 void Scheduler::ComputeSpecialRPONumbering() {
1093   TRACE("--- COMPUTING SPECIAL RPO ----------------------------------\n");
1094 
1095   // Compute the special reverse-post-order for basic blocks.
1096   special_rpo_ = new (zone_) SpecialRPONumberer(zone_, schedule_);
1097   special_rpo_->ComputeSpecialRPO();
1098 }
1099 
1100 
1101 void Scheduler::PropagateImmediateDominators(BasicBlock* block) {
1102   for (/*nop*/; block != nullptr; block = block->rpo_next()) {
1103     auto pred = block->predecessors().begin();
1104     auto end = block->predecessors().end();
1105     DCHECK(pred != end);  // All blocks except start have predecessors.
1106     BasicBlock* dominator = *pred;
1107     bool deferred = dominator->deferred();
1108     // For multiple predecessors, walk up the dominator tree until a common
1109     // dominator is found. Visitation order guarantees that all predecessors
1110     // except for backwards edges have been visited.
1111     for (++pred; pred != end; ++pred) {
1112       // Don't examine backwards edges.
1113       if ((*pred)->dominator_depth() < 0) continue;
1114       dominator = BasicBlock::GetCommonDominator(dominator, *pred);
1115       deferred = deferred & (*pred)->deferred();
1116     }
1117     block->set_dominator(dominator);
1118     block->set_dominator_depth(dominator->dominator_depth() + 1);
1119     block->set_deferred(deferred | block->deferred());
1120     TRACE("Block id:%d's idom is id:%d, depth = %d\n", block->id().ToInt(),
1121           dominator->id().ToInt(), block->dominator_depth());
1122   }
1123 }
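
// Illustrative example: for a diamond B0 -> {B1, B2} -> B3, the first
// predecessor of B3 seeds the dominator, and GetCommonDominator() then walks
// up from the other predecessor, so B3's immediate dominator ends up being B0
// rather than either arm of the diamond.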
1124 
1125 
1126 void Scheduler::GenerateImmediateDominatorTree() {
1127   TRACE("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");
1128 
1129   // Seed start block to be the first dominator.
1130   schedule_->start()->set_dominator_depth(0);
1131 
1132   // Build the block dominator tree resulting from the above seed.
1133   PropagateImmediateDominators(schedule_->start()->rpo_next());
1134 }
1135 
1136 
1137 // -----------------------------------------------------------------------------
1138 // Phase 3: Prepare use counts for nodes.
1139 
1140 
1141 class PrepareUsesVisitor {
1142  public:
1143   explicit PrepareUsesVisitor(Scheduler* scheduler)
1144       : scheduler_(scheduler), schedule_(scheduler->schedule_) {}
1145 
1146   void Pre(Node* node) {
1147     if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
1148       // Fixed nodes are always roots for schedule late.
1149       scheduler_->schedule_root_nodes_.push_back(node);
1150       if (!schedule_->IsScheduled(node)) {
1151         // Make sure root nodes are scheduled in their respective blocks.
1152         TRACE("Scheduling fixed position node #%d:%s\n", node->id(),
1153               node->op()->mnemonic());
1154         IrOpcode::Value opcode = node->opcode();
1155         BasicBlock* block =
1156             opcode == IrOpcode::kParameter
1157                 ? schedule_->start()
1158                 : schedule_->block(NodeProperties::GetControlInput(node));
1159         DCHECK_NOT_NULL(block);
1160         schedule_->AddNode(block, node);
1161       }
1162     }
1163   }
1164 
1165   void PostEdge(Node* from, int index, Node* to) {
1166     // If the edge is from an unscheduled node, then tally it in the use count
1167     // for all of its inputs. The same criterion will be used in ScheduleLate
1168     // for decrementing use counts.
1169     if (!schedule_->IsScheduled(from)) {
1170       DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
1171       scheduler_->IncrementUnscheduledUseCount(to, index, from);
1172     }
1173   }
1174 
1175  private:
1176   Scheduler* scheduler_;
1177   Schedule* schedule_;
1178 };
1179 
1180 
1181 void Scheduler::PrepareUses() {
1182   TRACE("--- PREPARE USES -------------------------------------------\n");
1183 
1184   // Count the uses of every node, which is used to ensure that all of a
1185   // node's uses are scheduled before the node itself.
1186   PrepareUsesVisitor prepare_uses(this);
1187 
1188   // TODO(turbofan): simplify the careful pre/post ordering here.
1189   BoolVector visited(graph_->NodeCount(), false, zone_);
1190   ZoneStack<Node::InputEdges::iterator> stack(zone_);
1191   Node* node = graph_->end();
1192   prepare_uses.Pre(node);
1193   visited[node->id()] = true;
1194   stack.push(node->input_edges().begin());
1195   while (!stack.empty()) {
1196     Edge edge = *stack.top();
1197     Node* node = edge.to();
1198     if (visited[node->id()]) {
1199       prepare_uses.PostEdge(edge.from(), edge.index(), edge.to());
1200       if (++stack.top() == edge.from()->input_edges().end()) stack.pop();
1201     } else {
1202       prepare_uses.Pre(node);
1203       visited[node->id()] = true;
1204       if (node->InputCount() > 0) stack.push(node->input_edges().begin());
1205     }
1206   }
1207 }
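
// Note: the unscheduled use counts established here are what
// DecrementUnscheduledUseCount() drives back to zero during schedule-late; a
// node only becomes eligible for placement (and enters schedule_queue_) once
// all of its uses have been scheduled.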
1208 
1209 
1210 // -----------------------------------------------------------------------------
1211 // Phase 4: Schedule nodes early.
1212 
1213 
1214 class ScheduleEarlyNodeVisitor {
1215  public:
1216   ScheduleEarlyNodeVisitor(Zone* zone, Scheduler* scheduler)
1217       : scheduler_(scheduler), schedule_(scheduler->schedule_), queue_(zone) {}
1218 
1219   // Run the schedule early algorithm on a set of fixed root nodes.
1220   void Run(NodeVector* roots) {
1221     for (Node* const root : *roots) {
1222       queue_.push(root);
1223       while (!queue_.empty()) {
1224         VisitNode(queue_.front());
1225         queue_.pop();
1226       }
1227     }
1228   }
1229 
1230  private:
1231   // Visits one node from the queue and propagates its current schedule early
1232   // position to all uses. This in turn might push more nodes onto the queue.
1233   void VisitNode(Node* node) {
1234     Scheduler::SchedulerData* data = scheduler_->GetData(node);
1235 
1236     // Fixed nodes already know their schedule early position.
1237     if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
1238       data->minimum_block_ = schedule_->block(node);
1239       TRACE("Fixing #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
1240             node->id(), node->op()->mnemonic(),
1241             data->minimum_block_->id().ToInt(),
1242             data->minimum_block_->dominator_depth());
1243     }
1244 
1245     // No need to propagate unconstrained schedule early positions.
1246     if (data->minimum_block_ == schedule_->start()) return;
1247 
1248     // Propagate schedule early position.
1249     DCHECK_NOT_NULL(data->minimum_block_);
1250     for (auto use : node->uses()) {
1251       PropagateMinimumPositionToNode(data->minimum_block_, use);
1252     }
1253   }
1254 
1255   // Propagates {block} as another minimum position into the given {node}. This
1256   // has the net effect of computing the minimum dominator block of {node} that
1257   // still post-dominates all inputs to {node} when the queue is processed.
1258   void PropagateMinimumPositionToNode(BasicBlock* block, Node* node) {
1259     Scheduler::SchedulerData* data = scheduler_->GetData(node);
1260 
1261     // No need to propagate to fixed node, it's guaranteed to be a root.
1262     if (scheduler_->GetPlacement(node) == Scheduler::kFixed) return;
1263 
1264     // Coupled nodes influence schedule early position of their control.
1265     if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
1266       Node* control = NodeProperties::GetControlInput(node);
1267       PropagateMinimumPositionToNode(block, control);
1268     }
1269 
1270     // Propagate new position if it is deeper down the dominator tree than the
1271     // current. Note that all inputs need to have minimum block position inside
1272     // the dominator chain of {node}'s minimum block position.
1273     DCHECK(InsideSameDominatorChain(block, data->minimum_block_));
1274     if (block->dominator_depth() > data->minimum_block_->dominator_depth()) {
1275       data->minimum_block_ = block;
1276       queue_.push(node);
1277       TRACE("Propagating #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
1278             node->id(), node->op()->mnemonic(),
1279             data->minimum_block_->id().ToInt(),
1280             data->minimum_block_->dominator_depth());
1281     }
1282   }
1283 
1284 #if DEBUG
1285   bool InsideSameDominatorChain(BasicBlock* b1, BasicBlock* b2) {
1286     BasicBlock* dominator = BasicBlock::GetCommonDominator(b1, b2);
1287     return dominator == b1 || dominator == b2;
1288   }
1289 #endif
1290 
1291   Scheduler* scheduler_;
1292   Schedule* schedule_;
1293   ZoneQueue<Node*> queue_;
1294 };
1295 
1296 
1297 void Scheduler::ScheduleEarly() {
1298   TRACE("--- SCHEDULE EARLY -----------------------------------------\n");
1299   if (FLAG_trace_turbo_scheduler) {
1300     TRACE("roots: ");
1301     for (Node* node : schedule_root_nodes_) {
1302       TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
1303     }
1304     TRACE("\n");
1305   }
1306 
1307   // Compute the minimum block for each node thereby determining the earliest
1308   // position each node could be placed within a valid schedule.
1309   ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
1310   schedule_early_visitor.Run(&schedule_root_nodes_);
1311 }
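
// Illustrative example: a floating node with one input fixed in the start
// block and another input fixed in a deeper block B ends up with
// minimum_block_ == B, i.e. the earliest position at which all of its inputs
// are available.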
1312 
1313 
1314 // -----------------------------------------------------------------------------
1315 // Phase 5: Schedule nodes late.
1316 
1317 
1318 class ScheduleLateNodeVisitor {
1319  public:
1320   ScheduleLateNodeVisitor(Zone* zone, Scheduler* scheduler)
1321       : scheduler_(scheduler),
1322         schedule_(scheduler_->schedule_),
1323         marked_(scheduler->zone_),
1324         marking_queue_(scheduler->zone_) {}
1325 
1326   // Run the schedule late algorithm on a set of fixed root nodes.
1327   void Run(NodeVector* roots) {
1328     for (Node* const root : *roots) {
1329       ProcessQueue(root);
1330     }
1331   }
1332 
1333  private:
1334   void ProcessQueue(Node* root) {
1335     ZoneQueue<Node*>* queue = &(scheduler_->schedule_queue_);
1336     for (Node* node : root->inputs()) {
1337       // Don't schedule coupled nodes on their own.
1338       if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
1339         node = NodeProperties::GetControlInput(node);
1340       }
1341 
1342       // Test schedulability condition by looking at unscheduled use count.
1343       if (scheduler_->GetData(node)->unscheduled_count_ != 0) continue;
1344 
1345       queue->push(node);
1346       do {
1347         Node* const node = queue->front();
1348         queue->pop();
1349         VisitNode(node);
1350       } while (!queue->empty());
1351     }
1352   }
1353 
1354   // Visits one node from the queue of schedulable nodes and determines its
1355   // schedule late position. Also hoists nodes out of loops to find a better
1356   // scheduling position.
1357   void VisitNode(Node* node) {
1358     DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
1359 
1360     // Don't schedule nodes that are already scheduled.
1361     if (schedule_->IsScheduled(node)) return;
1362     DCHECK_EQ(Scheduler::kSchedulable, scheduler_->GetPlacement(node));
1363 
1364     // Determine the dominating block for all of the uses of this node. It is
1365     // the latest block that this node can be scheduled in.
1366     TRACE("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic());
1367     BasicBlock* block = GetCommonDominatorOfUses(node);
1368     DCHECK_NOT_NULL(block);
1369 
1370     // The schedule early block dominates the schedule late block.
1371     BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_;
1372     DCHECK_EQ(min_block, BasicBlock::GetCommonDominator(block, min_block));
1373     TRACE(
1374         "Schedule late of #%d:%s is id:%d at loop depth %d, minimum = id:%d\n",
1375         node->id(), node->op()->mnemonic(), block->id().ToInt(),
1376         block->loop_depth(), min_block->id().ToInt());
1377 
1378     // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
1379     // into enclosing loop pre-headers until they would precede their schedule
1380     // early position.
1381     BasicBlock* hoist_block = GetHoistBlock(block);
1382     if (hoist_block &&
1383         hoist_block->dominator_depth() >= min_block->dominator_depth()) {
1384       do {
1385         TRACE("  hoisting #%d:%s to block id:%d\n", node->id(),
1386               node->op()->mnemonic(), hoist_block->id().ToInt());
1387         DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
1388         block = hoist_block;
1389         hoist_block = GetHoistBlock(hoist_block);
1390       } while (hoist_block &&
1391                hoist_block->dominator_depth() >= min_block->dominator_depth());
1392     } else if (scheduler_->flags_ & Scheduler::kSplitNodes) {
1393       // Split the {node} if beneficial and return the new {block} for it.
1394       block = SplitNode(block, node);
1395     }
1396 
1397     // Schedule the node or a floating control structure.
1398     if (IrOpcode::IsMergeOpcode(node->opcode())) {
1399       ScheduleFloatingControl(block, node);
1400     } else if (node->opcode() == IrOpcode::kFinishRegion) {
1401       ScheduleRegion(block, node);
1402     } else {
1403       ScheduleNode(block, node);
1404     }
1405   }
1406 
1407   // Mark {block} and push its non-marked predecessors onto the marking queue.
1408   void MarkBlock(BasicBlock* block) {
1409     DCHECK_LT(block->id().ToSize(), marked_.size());
1410     marked_[block->id().ToSize()] = true;
1411     for (BasicBlock* pred_block : block->predecessors()) {
1412       DCHECK_LT(pred_block->id().ToSize(), marked_.size());
1413       if (marked_[pred_block->id().ToSize()]) continue;
1414       marking_queue_.push_back(pred_block);
1415     }
1416   }
1417 
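  // Illustrative sketch (block names are hypothetical): consider a pure node
  // whose only uses live in B2 and B3 below. The common dominator of the uses
  // is B0, yet the path through B4 never needs the value.
  //
  //            B0
  //           /  \
  //          B1   B2 (use)
  //         /  \
  //        B3   B4
  //      (use)
  //
  // The marking closure leaves B0 unmarked, so splitting pushes the node down
  // into one use partition (e.g. B2) and places a clone in the other (B3); the
  // path through B4 no longer computes the value at all.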
1418   BasicBlock* SplitNode(BasicBlock* block, Node* node) {
1419     // For now, we limit splitting to pure nodes.
1420     if (!node->op()->HasProperty(Operator::kPure)) return block;
1421     // TODO(titzer): fix the special case of splitting of projections.
1422     if (node->opcode() == IrOpcode::kProjection) return block;
1423 
1424     // The {block} is the common dominator of all uses of {node}, so we cannot
1425     // split anything unless the {block} has at least two successors.
1426     DCHECK_EQ(block, GetCommonDominatorOfUses(node));
1427     if (block->SuccessorCount() < 2) return block;
1428 
1429     // Clear marking bits.
1430     DCHECK(marking_queue_.empty());
1431     std::fill(marked_.begin(), marked_.end(), false);
1432     marked_.resize(schedule_->BasicBlockCount() + 1, false);
1433 
1434     // Check whether the {node} has uses in {block} itself; mark all other use blocks.
1435     for (Edge edge : node->use_edges()) {
1436       BasicBlock* use_block = GetBlockForUse(edge);
1437       if (use_block == nullptr || marked_[use_block->id().ToSize()]) continue;
1438       if (use_block == block) {
1439         TRACE("  not splitting #%d:%s, it is used in id:%d\n", node->id(),
1440               node->op()->mnemonic(), block->id().ToInt());
1441         marking_queue_.clear();
1442         return block;
1443       }
1444       MarkBlock(use_block);
1445     }
1446 
1447     // Compute transitive marking closure; a block is marked if all its
1448     // successors are marked.
1449     do {
1450       BasicBlock* top_block = marking_queue_.front();
1451       marking_queue_.pop_front();
1452       if (marked_[top_block->id().ToSize()]) continue;
1453       bool marked = true;
1454       for (BasicBlock* successor : top_block->successors()) {
1455         if (!marked_[successor->id().ToSize()]) {
1456           marked = false;
1457           break;
1458         }
1459       }
1460       if (marked) MarkBlock(top_block);
1461     } while (!marking_queue_.empty());
1462 
1463     // If the (common dominator) {block} is marked, we know that all paths from
1464     // {block} to the end contain at least one use of {node}, and hence there's
1465     // no point in splitting the {node} in this case.
1466     if (marked_[block->id().ToSize()]) {
1467       TRACE("  not splitting #%d:%s, its common dominator id:%d is perfect\n",
1468             node->id(), node->op()->mnemonic(), block->id().ToInt());
1469       return block;
1470     }
1471 
1472     // Split {node} for uses according to the previously computed marking
1473     // closure. Every marking partition has a unique dominator, which gets a
1474     // copy of the {node} with the exception of the first partition, which gets
1475     // the {node} itself.
1476     ZoneMap<BasicBlock*, Node*> dominators(scheduler_->zone_);
1477     for (Edge edge : node->use_edges()) {
1478       BasicBlock* use_block = GetBlockForUse(edge);
1479       if (use_block == nullptr) continue;
1480       while (marked_[use_block->dominator()->id().ToSize()]) {
1481         use_block = use_block->dominator();
1482       }
1483       auto& use_node = dominators[use_block];
1484       if (use_node == nullptr) {
1485         if (dominators.size() == 1u) {
1486           // Place the {node} at {use_block}.
1487           block = use_block;
1488           use_node = node;
1489           TRACE("  pushing #%d:%s down to id:%d\n", node->id(),
1490                 node->op()->mnemonic(), block->id().ToInt());
1491         } else {
1492           // Place a copy of {node} at {use_block}.
1493           use_node = CloneNode(node);
1494           TRACE("  cloning #%d:%s for id:%d\n", use_node->id(),
1495                 use_node->op()->mnemonic(), use_block->id().ToInt());
1496           scheduler_->schedule_queue_.push(use_node);
1497         }
1498       }
1499       edge.UpdateTo(use_node);
1500     }
1501     return block;
1502   }
1503 
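  // Returns the block just outside the innermost loop enclosing {block} (the
  // dominator of the loop header) if nodes from {block} may be hoisted there,
  // or nullptr if {block} is not inside a loop or hoisting is not possible.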
1504   BasicBlock* GetHoistBlock(BasicBlock* block) {
1505     if (block->IsLoopHeader()) return block->dominator();
1506     // We have to check to make sure that the {block} dominates all
1507     // of the outgoing blocks.  If it doesn't, then there is a path
1508     // out of the loop which does not execute this {block}, so we
1509     // can't hoist operations from this {block} out of the loop, as
1510     // that would introduce additional computations.
1511     if (BasicBlock* header_block = block->loop_header()) {
1512       for (BasicBlock* outgoing_block :
1513            scheduler_->special_rpo_->GetOutgoingBlocks(header_block)) {
1514         if (BasicBlock::GetCommonDominator(block, outgoing_block) != block) {
1515           return nullptr;
1516         }
1517       }
1518       return header_block->dominator();
1519     }
1520     return nullptr;
1521   }
1522 
1523   BasicBlock* GetCommonDominatorOfUses(Node* node) {
1524     BasicBlock* block = nullptr;
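    // Fold the common dominator over the blocks of all uses; uses for which
    // no block can be determined yet are skipped.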
1525     for (Edge edge : node->use_edges()) {
1526       BasicBlock* use_block = GetBlockForUse(edge);
1527       block = block == nullptr
1528                   ? use_block
1529                   : use_block == nullptr
1530                         ? block
1531                         : BasicBlock::GetCommonDominator(block, use_block);
1532     }
1533     return block;
1534   }
1535 
1536   BasicBlock* FindPredecessorBlock(Node* node) {
1537     return scheduler_->control_flow_builder_->FindPredecessorBlock(node);
1538   }
1539 
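  // Determines the block that the value flowing over {edge} must dominate:
  // the predecessor block of the relevant control input for fixed phis and
  // merges, the common dominator of the phi's own uses for coupled phis, and
  // the block of the use itself otherwise.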
1540   BasicBlock* GetBlockForUse(Edge edge) {
1541     Node* use = edge.from();
1542     if (IrOpcode::IsPhiOpcode(use->opcode())) {
1543       // If the use is from a coupled (i.e. floating) phi, compute the common
1544       // dominator of its uses. This will not recurse more than one level.
1545       if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
1546         TRACE("  inspecting uses of coupled #%d:%s\n", use->id(),
1547               use->op()->mnemonic());
1548         DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
1549         return GetCommonDominatorOfUses(use);
1550       }
1551       // If the use is from a fixed (i.e. non-floating) phi, we use the
1552       // predecessor block of the corresponding control input to the merge.
1553       if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
1554         TRACE("  input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(),
1555               use->op()->mnemonic());
1556         Node* merge = NodeProperties::GetControlInput(use, 0);
1557         DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
1558         Node* input = NodeProperties::GetControlInput(merge, edge.index());
1559         return FindPredecessorBlock(input);
1560       }
1561     } else if (IrOpcode::IsMergeOpcode(use->opcode())) {
1562       // If the use is from a fixed (i.e. non-floating) merge, we use the
1563       // predecessor block of the current input to the merge.
1564       if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
1565         TRACE("  input@%d into a fixed merge #%d:%s\n", edge.index(), use->id(),
1566               use->op()->mnemonic());
1567         return FindPredecessorBlock(edge.to());
1568       }
1569     }
1570     BasicBlock* result = schedule_->block(use);
1571     if (result == nullptr) return nullptr;
1572     TRACE("  must dominate use #%d:%s in id:%d\n", use->id(),
1573           use->op()->mnemonic(), result->id().ToInt());
1574     return result;
1575   }
1576 
1577   void ScheduleFloatingControl(BasicBlock* block, Node* node) {
1578     scheduler_->FuseFloatingControl(block, node);
1579   }
1580 
1581   void ScheduleRegion(BasicBlock* block, Node* region_end) {
1582     // We only allow regions of instructions connected into a linear
1583     // effect chain. The only value a node in the chain is allowed to
1584     // produce is the value consumed by the FinishRegion node.
1585 
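    // Shape of a region we can schedule (nodes linked by effect edges):
    //   BeginRegion -> n1 -> ... -> nk -> FinishRegion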
1586     // We schedule back to front; we first schedule FinishRegion.
1587     CHECK_EQ(IrOpcode::kFinishRegion, region_end->opcode());
1588     ScheduleNode(block, region_end);
1589 
1590     // Schedule the chain.
1591     Node* node = NodeProperties::GetEffectInput(region_end);
1592     while (node->opcode() != IrOpcode::kBeginRegion) {
1593       DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
1594       DCHECK_EQ(1, node->op()->EffectInputCount());
1595       DCHECK_EQ(1, node->op()->EffectOutputCount());
1596       DCHECK_EQ(0, node->op()->ControlOutputCount());
1597       // The value output (if there is any) must be consumed
1598       // by the FinishRegion node.
1599       DCHECK(node->op()->ValueOutputCount() == 0 ||
1600              node == region_end->InputAt(0));
1601       ScheduleNode(block, node);
1602       node = NodeProperties::GetEffectInput(node);
1603     }
1604     // Schedule the BeginRegion node.
1605     DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
1606     ScheduleNode(block, node);
1607   }
1608 
1609   void ScheduleNode(BasicBlock* block, Node* node) {
1610     schedule_->PlanNode(block, node);
1611     scheduler_->scheduled_nodes_[block->id().ToSize()].push_back(node);
1612     scheduler_->UpdatePlacement(node, Scheduler::kScheduled);
1613   }
1614 
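  // Creates a copy of {node} for use-splitting: the unscheduled use counts of
  // its inputs are bumped for the new edges, and the copy inherits {node}'s
  // scheduler data, in particular its minimum block.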
1615   Node* CloneNode(Node* node) {
1616     int const input_count = node->InputCount();
1617     for (int index = 0; index < input_count; ++index) {
1618       Node* const input = node->InputAt(index);
1619       scheduler_->IncrementUnscheduledUseCount(input, index, node);
1620     }
1621     Node* const copy = scheduler_->graph_->CloneNode(node);
1622     TRACE(("clone #%d:%s -> #%d\n"), node->id(), node->op()->mnemonic(),
1623           copy->id());
1624     scheduler_->node_data_.resize(copy->id() + 1,
1625                                   scheduler_->DefaultSchedulerData());
1626     scheduler_->node_data_[copy->id()] = scheduler_->node_data_[node->id()];
1627     return copy;
1628   }
1629 
1630   Scheduler* scheduler_;
1631   Schedule* schedule_;
1632   BoolVector marked_;
1633   ZoneDeque<BasicBlock*> marking_queue_;
1634 };
1635 
1636 
1637 void Scheduler::ScheduleLate() {
1638   TRACE("--- SCHEDULE LATE ------------------------------------------\n");
1639   if (FLAG_trace_turbo_scheduler) {
1640     TRACE("roots: ");
1641     for (Node* node : schedule_root_nodes_) {
1642       TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
1643     }
1644     TRACE("\n");
1645   }
1646 
1647   // Schedule: Places each node in the common dominator block of all its uses.
1648   ScheduleLateNodeVisitor schedule_late_visitor(zone_, this);
1649   schedule_late_visitor.Run(&schedule_root_nodes_);
1650 }
1651 
1652 
1653 // -----------------------------------------------------------------------------
1654 // Phase 6: Seal the final schedule.
1655 
1656 
1657 void Scheduler::SealFinalSchedule() {
1658   TRACE("--- SEAL FINAL SCHEDULE ------------------------------------\n");
1659 
1660   // Serialize the assembly order and reverse-post-order numbering.
1661   special_rpo_->SerializeRPOIntoSchedule();
1662   special_rpo_->PrintAndVerifySpecialRPO();
1663 
1664   // Add the nodes collected for each basic block to that block in the right order.
1665   int block_num = 0;
1666   for (NodeVector& nodes : scheduled_nodes_) {
1667     BasicBlock::Id id = BasicBlock::Id::FromInt(block_num++);
1668     BasicBlock* block = schedule_->GetBlockById(id);
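    // Nodes were planned use-before-definition, so add them in reverse to
    // restore definition-before-use order within the block.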
1669     for (Node* node : base::Reversed(nodes)) {
1670       schedule_->AddNode(block, node);
1671     }
1672   }
1673 }
1674 
1675 
1676 // -----------------------------------------------------------------------------
1677 
1678 
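// Fuses a floating control structure ending at the merge {node} into the
// schedule at {block}: the control-flow graph, special RPO and dominator tree
// are updated for the newly created blocks, schedule-early is re-run from the
// new control nodes and their phis, and nodes already planned for {block} are
// moved into {node}'s block.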
1679 void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
1680   TRACE("--- FUSE FLOATING CONTROL ----------------------------------\n");
1681   if (FLAG_trace_turbo_scheduler) {
1682     OFStream os(stdout);
1683     os << "Schedule before control flow fusion:\n" << *schedule_;
1684   }
1685 
1686   // Iterate on phase 1: Build control-flow graph.
1687   control_flow_builder_->Run(block, node);
1688 
1689   // Iterate on phase 2: Compute special RPO and dominator tree.
1690   special_rpo_->UpdateSpecialRPO(block, schedule_->block(node));
1691   // TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that.
1692   for (BasicBlock* b = block->rpo_next(); b != nullptr; b = b->rpo_next()) {
1693     b->set_dominator_depth(-1);
1694     b->set_dominator(nullptr);
1695   }
1696   PropagateImmediateDominators(block->rpo_next());
1697 
1698   // Iterate on phase 4: Schedule nodes early.
1699   // TODO(mstarzinger): The following loop gathering the propagation roots is a
1700   // temporary solution and should be merged into the rest of the scheduler as
1701   // soon as the approach has settled for all floating loops.
1702   NodeVector propagation_roots(control_flow_builder_->control_);
1703   for (Node* node : control_flow_builder_->control_) {
1704     for (Node* use : node->uses()) {
1705       if (NodeProperties::IsPhi(use)) propagation_roots.push_back(use);
1706     }
1707   }
1708   if (FLAG_trace_turbo_scheduler) {
1709     TRACE("propagation roots: ");
1710     for (Node* node : propagation_roots) {
1711       TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
1712     }
1713     TRACE("\n");
1714   }
1715   ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
1716   schedule_early_visitor.Run(&propagation_roots);
1717 
1718   // Move previously planned nodes.
1719   // TODO(mstarzinger): Improve that by supporting bulk moves.
1720   scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
1721   MovePlannedNodes(block, schedule_->block(node));
1722 
1723   if (FLAG_trace_turbo_scheduler) {
1724     OFStream os(stdout);
1725     os << "Schedule after control flow fusion:\n" << *schedule_;
1726   }
1727 }
1728 
1729 
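// Reassigns every node currently planned for {from} to {to} and clears the
// pending-node list of {from}.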
1730 void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) {
1731   TRACE("Move planned nodes from id:%d to id:%d\n", from->id().ToInt(),
1732         to->id().ToInt());
1733   NodeVector* nodes = &(scheduled_nodes_[from->id().ToSize()]);
1734   for (Node* const node : *nodes) {
1735     schedule_->SetBlockForNode(to, node);
1736     scheduled_nodes_[to->id().ToSize()].push_back(node);
1737   }
1738   nodes->clear();
1739 }
1740 
1741 }  // namespace compiler
1742 }  // namespace internal
1743 }  // namespace v8
1744