//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include <queue>

using namespace llvm;

#define DEBUG_TYPE "misched"

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
                       cl::desc("Print critical path length to stdout"));
}

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

/// In some situations a few uninteresting nodes depend on nearly all other
/// nodes in the graph; provide a cutoff to hide them.
static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden,
  cl::desc("Hide nodes with more predecessors/successors than cutoff"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
  cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
  cl::desc("Only schedule this MBB#"));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableLoadCluster("misched-cluster", cl::Hidden,
  cl::desc("Enable load clustering."), cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}
void ScheduleDAGMutation::anchor() {}

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
    MF(nullptr), MLI(nullptr), MDT(nullptr), PassConfig(nullptr), AA(nullptr),
    LIS(nullptr) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// Base class for a machine scheduler class that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags);
};

/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};

/// PostMachineScheduler runs shortly before code emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createPostMachineScheduler();
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "machine-scheduler",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "machine-scheduler",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

char PostMachineScheduler::ID = 0;

char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;

INITIALIZE_PASS(PostMachineScheduler, "postmisched",
                "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler()
: MachineSchedulerBase(ID) {
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);
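
// Additional schedulers can register themselves the same way; they then become
// selectable with -misched=<name>. A sketch, where createMyCustomSched is a
// hypothetical factory function:
//   static MachineSchedRegistry
//   MySchedRegistry("my-sched", "My custom scheduler.", createMyCustomSched);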

static cl::opt<bool> EnableMachineSched(
    "enable-misched",
    cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
    cl::Hidden);

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C);
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C);

/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return const_cast<MachineInstr*>(
    &*priorNonDebug(MachineBasicBlock::const_iterator(I), Beg));
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  // Cast the return value to nonconst MachineInstr, then cast to an
  // instr_iterator, which does not check for null, finally return a
  // bundle_iterator.
  return MachineBasicBlock::instr_iterator(
    const_cast<MachineInstr*>(
      &*nextIfDebug(MachineBasicBlock::const_iterator(I), End)));
}

/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedLive(this);
}

/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (EnableMachineSched.getNumOccurrences()) {
    if (!EnableMachineSched)
      return false;
  } else if (!mf.getSubtarget().enableMachineScheduler())
    return false;

  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler, false);

  DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipOptnoneFunction(*mf.getFunction()))
    return false;

  if (!mf.getSubtarget().enablePostRAScheduler()) {
    DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  PassConfig = &getAnalysis<TargetPassConfig>();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler, true);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}

/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need the isCall check to
/// enforce the boundary, but there would be no benefit to postRA scheduling
/// across calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                            MachineBasicBlock *MBB,
                            MachineFunction *MF,
                            const TargetInstrInfo *TII) {
  return MI->isCall() || TII->isSchedulingBoundary(MI, MBB, *MF);
}

/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
                                           bool FixKillFlags) {
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(&*MBB);

#ifndef NDEBUG
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    //
    // MBB::size() uses instr_iterator to count. Here we need a bundle to count
    // as a single instruction.
    unsigned RemainingInstrs = std::distance(MBB->begin(), MBB->end());
    for (MachineBasicBlock::iterator RegionEnd = MBB->end();
         RegionEnd != MBB->begin(); RegionEnd = Scheduler.begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end() ||
          isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) {
        --RegionEnd;
        // Count the boundary instruction.
        --RemainingInstrs;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      unsigned NumRegionInstrs = 0;
      MachineBasicBlock::iterator I = RegionEnd;
      for (; I != MBB->begin(); --I, --RemainingInstrs) {
        if (isSchedBoundary(&*std::prev(I), &*MBB, MF, TII))
          break;
        if (!I->isDebugValue())
          ++NumRegionInstrs;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n  From: " << *I << "    To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " RegionInstrs: " << NumRegionInstrs
            << " Remaining: " << RemainingInstrs << "\n");
      if (DumpCriticalPathLength) {
        errs() << MF->getName();
        errs() << ":BB# " << MBB->getNumber();
        errs() << " " << MBB->getName() << " \n";
      }

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler.begin();
    }
    assert(RemainingInstrs == 0 && "Instruction count mismatch!");
    Scheduler.finishBlock();
    // FIXME: Ideally, no further passes should rely on kill flags. However,
    // thumb2 size reduction is currently an exception, so the PostMIScheduler
    // needs to do this.
    if (FixKillFlags)
      Scheduler.fixupKills(&*MBB);
  }
  Scheduler.finalizeSchedule();
}

void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

LLVM_DUMP_METHOD
void ReadyQueue::dump() {
  dbgs() << "Queue " << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
// virtual registers.
//===----------------------------------------------------------------------===//

// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() {
}

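/// Return true if an edge from \p PredSU to \p SuccSU can be added without
/// creating a cycle in the DAG.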
bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

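/// Add the dependency \p PredDep to \p SuccSU unless doing so would create a
/// cycle. Return true if the dependency is in place, whether or not a new
/// edge needed to be inserted.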
bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
    SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();

  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
    PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();

  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                     MachineBasicBlock::iterator begin,
                                     MachineBasicBlock::iterator end,
                                     unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}

/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
  MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals.
  if (LIS)
    LIS->handleMove(MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

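/// Return true if the scheduler may continue scheduling. In debug builds,
/// -misched-cutoff stops issuing instructions after the given count, which
/// helps isolate individual scheduling decisions when debugging.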
bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}

/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
  DEBUG(SchedImpl->dumpPolicy());

  // Build the DAG.
  buildSchedGraph(AA);

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    }
    else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
    }
    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}

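/// Collect the DAG roots: nodes with no unscheduled predecessors (top roots)
/// or no unscheduled successors (bottom roots), biasing each node's
/// predecessor order toward the critical path for use by DFSResult.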
void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU->biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft)
      TopRoots.push_back(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft)
      BotRoots.push_back(SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and set up scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = nullptr;
  NextClusterPred = nullptr;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
    SchedImpl->releaseTopNode(*I);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

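  // Walk the DbgValues list in reverse, splicing each DBG_VALUE back in
  // immediately after the instruction it originally followed.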
  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == std::prev(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
}

// Set up the register pressure trackers for the top scheduled and bottom
// scheduled regions.
void ScheduleDAGMILive::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    DEBUG(dbgs() << "Live Thru: ";
          dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<unsigned, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  DEBUG(
    dbgs() << "Top Pressure:\n";
    dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
    dbgs() << "Bottom Pressure:\n";
    dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
  );

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      DEBUG(dbgs() << TRI->getRegPressureSetName(i)
            << " Limit " << Limit
            << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].getPSet()) << " ";
        dbgs() << "\n");
}

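/// Record the new maximum pressure reached in the scheduled code for each
/// tracked excess pressure set, and emit debug output for sets near or above
/// their limit.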
void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
                        const std::vector<unsigned> &NewMaxPressure) {
  const PressureDiff &PDiff = getPressureDiff(SU);
  unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
  for (PressureDiff::const_iterator I = PDiff.begin(), E = PDiff.end();
       I != E; ++I) {
    if (!I->isValid())
      break;
    unsigned ID = I->getPSet();
    while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
      ++CritIdx;
    if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
      if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
          && NewMaxPressure[ID] <= INT16_MAX)
        RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
    }
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
    if (NewMaxPressure[ID] >= Limit - 2) {
      DEBUG(dbgs() << "  " << TRI->getRegPressureSetName(ID) << ": "
            << NewMaxPressure[ID]
            << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ") << Limit
            << "(+ " << BotRPTracker.getLiveThru()[ID] << " livethru)\n");
    }
  }
}

/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(ArrayRef<unsigned> LiveUses) {
  for (unsigned LUIdx = 0, LUEnd = LiveUses.size(); LUIdx != LUEnd; ++LUIdx) {
    /// FIXME: Currently assuming single-use physregs.
    unsigned Reg = LiveUses[LUIdx];
    DEBUG(dbgs() << "  LiveReg: " << PrintVRegOrUnit(Reg, TRI) << "\n");
    if (!TRI->isVirtualRegister(Reg))
      continue;

    // This may be called before CurrentBottom has been initialized. However,
    // BotRPTracker must have a valid position. We want the value live into the
    // instruction or live out of the block, so ask for the previous
    // instruction's live-out.
    const LiveInterval &LI = LIS->getInterval(Reg);
    VNInfo *VNI;
    MachineBasicBlock::const_iterator I =
      nextIfDebug(BotRPTracker.getPos(), BB->end());
    if (I == BB->end())
      VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    else {
      LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(I));
      VNI = LRQ.valueIn();
    }
    // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
    assert(VNI && "No live value at use.");
    for (const VReg2SUnit &V2SU
         : make_range(VRegUses.find(Reg), VRegUses.end())) {
      SUnit *SU = V2SU.SU;
      // If this use comes before the reaching def, it cannot be a last use, so
      // decrease its pressure change.
      if (!SU->isScheduled && SU != &ExitSU) {
        LiveQueryResult LRQ
          = LI.Query(LIS->getInstructionIndex(SU->getInstr()));
        if (LRQ.valueIn() == VNI) {
          PressureDiff &PDiff = getPressureDiff(SU);
          PDiff.addPressureChange(Reg, true, &MRI);
          DEBUG(
            dbgs() << "  UpdateRegP: SU(" << SU->NodeNum << ") "
                   << *SU->getInstr();
            dbgs() << "              to ";
            PDiff.dump(*TRI);
          );
        }
      }
    }
  }
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in order
/// to update any specialized state.
void ScheduleDAGMILive::schedule() {
  DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
  DEBUG(SchedImpl->dumpPolicy());
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(
    for (const SUnit &SU : SUnits) {
      SU.dumpAll(this);
      if (ShouldTrackPressure) {
        dbgs() << "  Pressure Diff      : ";
        getPressureDiff(&SU).dump(*TRI);
      }
      dbgs() << '\n';
    }
  );
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  if (ShouldTrackPressure) {
    assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
    TopRPTracker.setPos(CurrentTop);
  }

  bool IsTopNode = false;
  while (true) {
    DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    if (DFSResult) {
      unsigned SubtreeID = DFSResult->getSubtreeID(SU);
      if (!ScheduledTrees.test(SubtreeID)) {
        ScheduledTrees.set(SubtreeID);
        DFSResult->scheduleTree(SubtreeID);
        SchedImpl->scheduleTree(SubtreeID);
      }
    }

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Build the DAG and set up three register pressure trackers.
void ScheduleDAGMILive::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

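/// Compute or recompute the DFSResult used for subtree-based scheduling
/// priority, and reset the set of already-scheduled subtrees.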
void ScheduleDAGMILive::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops that
/// span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
///
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
  // This only applies to single block loops.
  if (!BB->isSuccessor(BB))
    return 0;

  unsigned MaxCyclicLatency = 0;
  // Visit each live out vreg def to find def/use pairs that cross iterations.
  ArrayRef<unsigned> LiveOuts = RPTracker.getPressure().LiveOutRegs;
  for (ArrayRef<unsigned>::iterator RI = LiveOuts.begin(), RE = LiveOuts.end();
       RI != RE; ++RI) {
    unsigned Reg = *RI;
    if (!TRI->isVirtualRegister(Reg))
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    if (!DefVNI)
      continue;

    MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
    const SUnit *DefSU = getSUnit(DefMI);
    if (!DefSU)
      continue;

    unsigned LiveOutHeight = DefSU->getHeight();
    unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
    // Visit all local users of the vreg def.
    for (const VReg2SUnit &V2SU
         : make_range(VRegUses.find(Reg), VRegUses.end())) {
      SUnit *SU = V2SU.SU;
      if (SU == &ExitSU)
        continue;

      // Only consider uses of the phi.
      LiveQueryResult LRQ =
        LI.Query(LIS->getInstructionIndex(SU->getInstr()));
      if (!LRQ.valueIn()->isPHIDef())
        continue;

      // Assume that a path spanning two iterations is a cycle, which could
      // overestimate in strange cases. This allows cyclic latency to be
      // estimated as the minimum slack of the vreg's depth or height.
      unsigned CyclicLatency = 0;
      if (LiveOutDepth > SU->getDepth())
        CyclicLatency = LiveOutDepth - SU->getDepth();

      unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
      if (LiveInHeight > LiveOutHeight) {
        if (LiveInHeight - LiveOutHeight < CyclicLatency)
          CyclicLatency = LiveInHeight - LiveOutHeight;
      }
      else
        CyclicLatency = 0;

      DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
            << SU->NodeNum << ") = " << CyclicLatency << "c\n");
      if (CyclicLatency > MaxCyclicLatency)
        MaxCyclicLatency = CyclicLatency;
    }
  }
  DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
  return MaxCyclicLatency;
}

/// Move an instruction and update register pressure.
void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    if (ShouldTrackPressure) {
      // Update top scheduled pressure.
      TopRPTracker.advance();
      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
      DEBUG(
        dbgs() << "Top Pressure:\n";
        dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
      );

      updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
    }
  }
  else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
    }
    if (ShouldTrackPressure) {
      // Update bottom scheduled pressure.
      SmallVector<unsigned, 8> LiveUses;
      BotRPTracker.recede(&LiveUses);
      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
      DEBUG(
        dbgs() << "Bottom Pressure:\n";
        dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
      );

      updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
      updatePressureDiffs(LiveUses);
    }
  }
}

//===----------------------------------------------------------------------===//
// LoadClusterMutation - DAG post-processing to cluster loads.
//===----------------------------------------------------------------------===//
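// Mutations such as this one are typically attached to the DAG with
// ScheduleDAGMI::addMutation() when the scheduler instance is constructed.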

namespace {
/// \brief Post-process the DAG to create cluster edges between neighboring
/// loads.
class LoadClusterMutation : public ScheduleDAGMutation {
  struct LoadInfo {
    SUnit *SU;
    unsigned BaseReg;
    unsigned Offset;
    LoadInfo(SUnit *su, unsigned reg, unsigned ofs)
      : SU(su), BaseReg(reg), Offset(ofs) {}

    bool operator<(const LoadInfo &RHS) const {
      return std::tie(BaseReg, Offset) < std::tie(RHS.BaseReg, RHS.Offset);
    }
  };

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
public:
  LoadClusterMutation(const TargetInstrInfo *tii,
                      const TargetRegisterInfo *tri)
    : TII(tii), TRI(tri) {}

  void apply(ScheduleDAGMI *DAG) override;
protected:
  void clusterNeighboringLoads(ArrayRef<SUnit*> Loads, ScheduleDAGMI *DAG);
};
} // anonymous

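/// Add cluster edges among the given loads. Loads are keyed and sorted by
/// (base register, offset), and TII->shouldClusterLoads decides whether each
/// adjacent pair in that order is worth clustering.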
clusterNeighboringLoads(ArrayRef<SUnit * > Loads,ScheduleDAGMI * DAG)1315 void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
1316                                                   ScheduleDAGMI *DAG) {
1317   SmallVector<LoadClusterMutation::LoadInfo,32> LoadRecords;
1318   for (unsigned Idx = 0, End = Loads.size(); Idx != End; ++Idx) {
1319     SUnit *SU = Loads[Idx];
1320     unsigned BaseReg;
1321     unsigned Offset;
1322     if (TII->getMemOpBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
1323       LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
1324   }
1325   if (LoadRecords.size() < 2)
1326     return;
1327   std::sort(LoadRecords.begin(), LoadRecords.end());
1328   unsigned ClusterLength = 1;
1329   for (unsigned Idx = 0, End = LoadRecords.size(); Idx < (End - 1); ++Idx) {
1330     if (LoadRecords[Idx].BaseReg != LoadRecords[Idx+1].BaseReg) {
1331       ClusterLength = 1;
1332       continue;
1333     }
1334 
1335     SUnit *SUa = LoadRecords[Idx].SU;
1336     SUnit *SUb = LoadRecords[Idx+1].SU;
1337     if (TII->shouldClusterLoads(SUa->getInstr(), SUb->getInstr(), ClusterLength)
1338         && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
1339 
1340       DEBUG(dbgs() << "Cluster loads SU(" << SUa->NodeNum << ") - SU("
1341             << SUb->NodeNum << ")\n");
1342       // Copy successor edges from SUa to SUb. Interleaving computation
1343       // dependent on SUa can prevent load combining due to register reuse.
1344       // Predecessor edges do not need to be copied from SUb to SUa since nearby
1345       // loads should have effectively the same inputs.
1346       for (SUnit::const_succ_iterator
1347              SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
1348         if (SI->getSUnit() == SUb)
1349           continue;
1350         DEBUG(dbgs() << "  Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
1351         DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
1352       }
1353       ++ClusterLength;
1354     }
1355     else
1356       ClusterLength = 1;
1357   }
1358 }
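
// Hedged worked example of the clustering loop above (hypothetical vregs and
// offsets, not from any particular target): suppose the sorted LoadRecords
// are (%vreg0,+0), (%vreg0,+8), and (%vreg1,+0). The first pair shares a base
// register, so if the target's shouldClusterLoads() accepts it, a Cluster
// edge from SU(a) to SU(b) is added and ClusterLength grows to 2. The third
// record has a different base register, so ClusterLength resets to 1 and no
// edge is created.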
1359 
1360 /// \brief Callback from DAG postProcessing to create cluster edges for loads.
1361 void LoadClusterMutation::apply(ScheduleDAGMI *DAG) {
1362   // Map DAG NodeNum to store chain ID.
1363   DenseMap<unsigned, unsigned> StoreChainIDs;
1364   // Map each store chain to a set of dependent loads.
1365   SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
1366   for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
1367     SUnit *SU = &DAG->SUnits[Idx];
1368     if (!SU->getInstr()->mayLoad())
1369       continue;
1370     unsigned ChainPredID = DAG->SUnits.size();
1371     for (SUnit::const_pred_iterator
1372            PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
1373       if (PI->isCtrl()) {
1374         ChainPredID = PI->getSUnit()->NodeNum;
1375         break;
1376       }
1377     }
1378     // Check if this chain-like pred has been seen before. ChainPredID is the
1379     // sentinel DAG->SUnits.size() for loads at the top of the schedule.
1380     unsigned NumChains = StoreChainDependents.size();
1381     std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
1382       StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
1383     if (Result.second)
1384       StoreChainDependents.resize(NumChains + 1);
1385     StoreChainDependents[Result.first->second].push_back(SU);
1386   }
1387   // Iterate over the store chains.
1388   for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
1389     clusterNeighboringLoads(StoreChainDependents[Idx], DAG);
1390 }
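
// Sketch of the partitioning above, under assumed dependencies: loads whose
// first control (chain) predecessor is the same store SU land in the same
// StoreChainDependents bucket, while loads with no control predecessor share
// the sentinel ID and form their own bucket. Each bucket is clustered
// independently, so loads separated by different store chains are never
// clustered with each other.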
1391 
1392 //===----------------------------------------------------------------------===//
1393 // MacroFusion - DAG post-processing to encourage fusion of macro ops.
1394 //===----------------------------------------------------------------------===//
1395 
1396 namespace {
1397 /// \brief Post-process the DAG to create cluster edges between instructions
1398 /// that may be fused by the processor into a single operation.
1399 class MacroFusion : public ScheduleDAGMutation {
1400   const TargetInstrInfo &TII;
1401   const TargetRegisterInfo &TRI;
1402 public:
1403   MacroFusion(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI)
1404     : TII(TII), TRI(TRI) {}
1405 
1406   void apply(ScheduleDAGMI *DAG) override;
1407 };
1408 } // anonymous
1409 
1410 /// Returns true if \p MI reads a register written by \p Other.
1411 static bool HasDataDep(const TargetRegisterInfo &TRI, const MachineInstr &MI,
1412                        const MachineInstr &Other) {
1413   for (const MachineOperand &MO : MI.uses()) {
1414     if (!MO.isReg() || !MO.readsReg())
1415       continue;
1416 
1417     unsigned Reg = MO.getReg();
1418     if (Other.modifiesRegister(Reg, &TRI))
1419       return true;
1420   }
1421   return false;
1422 }
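
// For example (assuming an x86-like target): with Other being a compare that
// defines EFLAGS and MI a conditional branch that reads EFLAGS, HasDataDep
// returns true because the branch's flags use is written by the compare.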
1423 
1424 /// \brief Callback from DAG postProcessing to create cluster edges to encourage
1425 /// fused operations.
1426 void MacroFusion::apply(ScheduleDAGMI *DAG) {
1427   // For now, assume targets can only fuse with the branch.
1428   SUnit &ExitSU = DAG->ExitSU;
1429   MachineInstr *Branch = ExitSU.getInstr();
1430   if (!Branch)
1431     return;
1432 
1433   for (SUnit &SU : DAG->SUnits) {
1434     // SUnits with successors can't be scheduled in front of the ExitSU.
1435     if (!SU.Succs.empty())
1436       continue;
1437     // We only care if the node writes to a register that the branch reads.
1438     MachineInstr *Pred = SU.getInstr();
1439     if (!HasDataDep(TRI, *Branch, *Pred))
1440       continue;
1441 
1442     if (!TII.shouldScheduleAdjacent(Pred, Branch))
1443       continue;
1444 
1445     // Create a single weak edge from SU to ExitSU. The only effect is to cause
1446     // bottom-up scheduling to heavily prioritize the clustered SU.  There is no
1447     // need to copy predecessor edges from ExitSU to SU, since top-down
1448     // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
1449     // of SU, we could create an artificial edge from the deepest root, but it
1450     // hasn't been needed yet.
1451     bool Success = DAG->addEdge(&ExitSU, SDep(&SU, SDep::Cluster));
1452     (void)Success;
1453     assert(Success && "No DAG nodes should be reachable from ExitSU");
1454 
1455     DEBUG(dbgs() << "Macro Fuse SU(" << SU.NodeNum << ")\n");
1456     break;
1457   }
1458 }
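
// Illustrative trace (hypothetical target): if shouldScheduleAdjacent() fuses
// compare+branch, a compare that defines the flags read by the exit branch
// and has no other successors receives a weak Cluster edge to ExitSU. The
// bottom-up scheduler then strongly prefers to pick the compare immediately
// after the branch, emitting the pair adjacently so the processor can fuse
// them.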
1459 
1460 //===----------------------------------------------------------------------===//
1461 // CopyConstrain - DAG post-processing to encourage copy elimination.
1462 //===----------------------------------------------------------------------===//
1463 
1464 namespace {
1465 /// \brief Post-process the DAG to create weak edges from all uses of a copy to
1466 /// the one use that defines the copy's source vreg, most likely an induction
1467 /// variable increment.
1468 class CopyConstrain : public ScheduleDAGMutation {
1469   // Transient state.
1470   SlotIndex RegionBeginIdx;
1471   // RegionEndIdx is the slot index of the last non-debug instruction in the
1472   // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
1473   SlotIndex RegionEndIdx;
1474 public:
1475   CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}
1476 
1477   void apply(ScheduleDAGMI *DAG) override;
1478 
1479 protected:
1480   void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
1481 };
1482 } // anonymous
1483 
1484 /// constrainLocalCopy handles two possibilities:
1485 /// 1) Local src:
1486 /// I0:     = dst
1487 /// I1: src = ...
1488 /// I2:     = dst
1489 /// I3: dst = src (copy)
1490 /// (create pred->succ edges I0->I1, I2->I1)
1491 ///
1492 /// 2) Local copy:
1493 /// I0: dst = src (copy)
1494 /// I1:     = dst
1495 /// I2: src = ...
1496 /// I3:     = dst
1497 /// (create pred->succ edges I1->I2, I3->I2)
1498 ///
1499 /// Although the MachineScheduler is currently constrained to single blocks,
1500 /// this algorithm should handle extended blocks. An EBB is a set of
1501 /// contiguously numbered blocks such that the previous block in the EBB is
1502 /// always the single predecessor.
1503 void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
1504   LiveIntervals *LIS = DAG->getLIS();
1505   MachineInstr *Copy = CopySU->getInstr();
1506 
1507   // Check for pure vreg copies.
1508   unsigned SrcReg = Copy->getOperand(1).getReg();
1509   if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
1510     return;
1511 
1512   unsigned DstReg = Copy->getOperand(0).getReg();
1513   if (!TargetRegisterInfo::isVirtualRegister(DstReg))
1514     return;
1515 
1516   // Check if either the dest or source is local. If it's live across a back
1517   // edge, it's not local. Note that if both vregs are live across the back
1518   // edge, we cannot successfully constrain the copy without cyclic scheduling.
1519   // If both the copy's source and dest are local live intervals, then we
1520   // should treat the dest as the global for the purpose of adding
1521   // constraints. This adds edges from source's other uses to the copy.
1522   unsigned LocalReg = SrcReg;
1523   unsigned GlobalReg = DstReg;
1524   LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
1525   if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
1526     LocalReg = DstReg;
1527     GlobalReg = SrcReg;
1528     LocalLI = &LIS->getInterval(LocalReg);
1529     if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
1530       return;
1531   }
1532   LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);
1533 
1534   // Find the global segment after the start of the local LI.
1535   LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
1536   // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
1537   // local live range. We could create edges from other global uses to the local
1538   // start, but the coalescer should have already eliminated these cases, so
1539   // don't bother dealing with it.
1540   if (GlobalSegment == GlobalLI->end())
1541     return;
1542 
1543   // If GlobalSegment is killed at the LocalLI->start, the call to find()
1544   // returned the next global segment. But if GlobalSegment overlaps with
1545   // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
1546   // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
1547   if (GlobalSegment->contains(LocalLI->beginIndex()))
1548     ++GlobalSegment;
1549 
1550   if (GlobalSegment == GlobalLI->end())
1551     return;
1552 
1553   // Check if GlobalLI contains a hole in the vicinity of LocalLI.
1554   if (GlobalSegment != GlobalLI->begin()) {
1555     // Two address defs have no hole.
1556     if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
1557                                GlobalSegment->start)) {
1558       return;
1559     }
1560     // If the prior global segment may be defined by the same two-address
1561     // instruction that also defines LocalLI, then we can't make a hole here.
1562     if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
1563                                LocalLI->beginIndex())) {
1564       return;
1565     }
1566     // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
1567     // it would be a disconnected component in the live range.
1568     assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
1569            "Disconnected LRG within the scheduling region.");
1570   }
1571   MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
1572   if (!GlobalDef)
1573     return;
1574 
1575   SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
1576   if (!GlobalSU)
1577     return;
1578 
1579   // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
1580   // constraining the uses of the last local def to precede GlobalDef.
1581   SmallVector<SUnit*,8> LocalUses;
1582   const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
1583   MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
1584   SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
1585   for (SUnit::const_succ_iterator
1586          I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
1587        I != E; ++I) {
1588     if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
1589       continue;
1590     if (I->getSUnit() == GlobalSU)
1591       continue;
1592     if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
1593       return;
1594     LocalUses.push_back(I->getSUnit());
1595   }
1596   // Open the top of the GlobalLI hole by constraining any earlier global uses
1597   // to precede the start of LocalLI.
1598   SmallVector<SUnit*,8> GlobalUses;
1599   MachineInstr *FirstLocalDef =
1600     LIS->getInstructionFromIndex(LocalLI->beginIndex());
1601   SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
1602   for (SUnit::const_pred_iterator
1603          I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
1604     if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
1605       continue;
1606     if (I->getSUnit() == FirstLocalSU)
1607       continue;
1608     if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
1609       return;
1610     GlobalUses.push_back(I->getSUnit());
1611   }
1612   DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
1613   // Add the weak edges.
1614   for (SmallVectorImpl<SUnit*>::const_iterator
1615          I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
1616     DEBUG(dbgs() << "  Local use SU(" << (*I)->NodeNum << ") -> SU("
1617           << GlobalSU->NodeNum << ")\n");
1618     DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
1619   }
1620   for (SmallVectorImpl<SUnit*>::const_iterator
1621          I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
1622     DEBUG(dbgs() << "  Global use SU(" << (*I)->NodeNum << ") -> SU("
1623           << FirstLocalSU->NodeNum << ")\n");
1624     DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
1625   }
1626 }
1627 
1628 /// \brief Callback from DAG postProcessing to create weak edges to encourage
1629 /// copy elimination.
1630 void CopyConstrain::apply(ScheduleDAGMI *DAG) {
1631   assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");
1632 
1633   MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
1634   if (FirstPos == DAG->end())
1635     return;
1636   RegionBeginIdx = DAG->getLIS()->getInstructionIndex(&*FirstPos);
1637   RegionEndIdx = DAG->getLIS()->getInstructionIndex(
1638     &*priorNonDebug(DAG->end(), DAG->begin()));
1639 
1640   for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
1641     SUnit *SU = &DAG->SUnits[Idx];
1642     if (!SU->getInstr()->isCopy())
1643       continue;
1644 
1645     constrainLocalCopy(SU, static_cast<ScheduleDAGMILive*>(DAG));
1646   }
1647 }
1648 
1649 //===----------------------------------------------------------------------===//
1650 // MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
1651 // and possibly other custom schedulers.
1652 //===----------------------------------------------------------------------===//
1653 
1654 static const unsigned InvalidCycle = ~0U;
1655 
1656 SchedBoundary::~SchedBoundary() { delete HazardRec; }
1657 
1658 void SchedBoundary::reset() {
1659   // A new HazardRec is created for each DAG and owned by SchedBoundary.
1660   // Destroying and reconstructing it is very expensive though. So keep
1661   // invalid, placeholder HazardRecs.
1662   if (HazardRec && HazardRec->isEnabled()) {
1663     delete HazardRec;
1664     HazardRec = nullptr;
1665   }
1666   Available.clear();
1667   Pending.clear();
1668   CheckPending = false;
1669   NextSUs.clear();
1670   CurrCycle = 0;
1671   CurrMOps = 0;
1672   MinReadyCycle = UINT_MAX;
1673   ExpectedLatency = 0;
1674   DependentLatency = 0;
1675   RetiredMOps = 0;
1676   MaxExecutedResCount = 0;
1677   ZoneCritResIdx = 0;
1678   IsResourceLimited = false;
1679   ReservedCycles.clear();
1680 #ifndef NDEBUG
1681   // Track the maximum number of stall cycles that could arise either from the
1682   // latency of a DAG edge or the number of cycles that a processor resource is
1683   // reserved (SchedBoundary::ReservedCycles).
1684   MaxObservedStall = 0;
1685 #endif
1686   // Reserve a zero-count for invalid CritResIdx.
1687   ExecutedResCounts.resize(1);
1688   assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
1689 }
1690 
1691 void SchedRemainder::
1692 init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
1693   reset();
1694   if (!SchedModel->hasInstrSchedModel())
1695     return;
1696   RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
1697   for (std::vector<SUnit>::iterator
1698          I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
1699     const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
1700     RemIssueCount += SchedModel->getNumMicroOps(I->getInstr(), SC)
1701       * SchedModel->getMicroOpFactor();
1702     for (TargetSchedModel::ProcResIter
1703            PI = SchedModel->getWriteProcResBegin(SC),
1704            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1705       unsigned PIdx = PI->ProcResourceIdx;
1706       unsigned Factor = SchedModel->getResourceFactor(PIdx);
1707       RemainingCounts[PIdx] += (Factor * PI->Cycles);
1708     }
1709   }
1710 }
1711 
1712 void SchedBoundary::
1713 init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
1714   reset();
1715   DAG = dag;
1716   SchedModel = smodel;
1717   Rem = rem;
1718   if (SchedModel->hasInstrSchedModel()) {
1719     ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds());
1720     ReservedCycles.resize(SchedModel->getNumProcResourceKinds(), InvalidCycle);
1721   }
1722 }
1723 
1724 /// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
1725 /// these "soft stalls" differently than the hard stall cycles based on CPU
1726 /// resources and computed by checkHazard(). A fully in-order model
1727 /// (MicroOpBufferSize==0) will not make use of this since instructions are not
1728 /// available for scheduling until they are ready. However, a weaker in-order
1729 /// model may use this for heuristics. For example, if a processor has in-order
1730 /// behavior when reading certain resources, this may come into play.
1731 unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
1732   if (!SU->isUnbuffered)
1733     return 0;
1734 
1735   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
1736   if (ReadyCycle > CurrCycle)
1737     return ReadyCycle - CurrCycle;
1738   return 0;
1739 }
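
// Worked example (assumed numbers): scheduling bottom-up with CurrCycle == 3,
// an unbuffered SU with BotReadyCycle == 5 reports 2 soft-stall cycles; a
// buffered SU (isUnbuffered == false) always reports 0.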
1740 
1741 /// Compute the next cycle at which the given processor resource can be
1742 /// scheduled.
1743 unsigned SchedBoundary::
1744 getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
1745   unsigned NextUnreserved = ReservedCycles[PIdx];
1746   // If this resource has never been used, always return cycle zero.
1747   if (NextUnreserved == InvalidCycle)
1748     return 0;
1749   // For bottom-up scheduling add the cycles needed for the current operation.
1750   if (!isTop())
1751     NextUnreserved += Cycles;
1752   return NextUnreserved;
1753 }
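
// Worked example (assumed numbers): if ReservedCycles[PIdx] == 7, a top-down
// query returns 7, while a bottom-up query for an operation holding the
// resource for Cycles == 2 returns 9, since the reservation must also cover
// the current operation's occupancy.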
1754 
1755 /// Does this SU have a hazard within the current instruction group.
1756 ///
1757 /// The scheduler supports two modes of hazard recognition. The first is the
1758 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
1759 /// supports highly complicated in-order reservation tables
1760 /// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
1761 ///
1762 /// The second is a streamlined mechanism that checks for hazards based on
1763 /// simple counters that the scheduler itself maintains. It explicitly checks
1764 /// for instruction dispatch limitations, including the number of micro-ops that
1765 /// can dispatch per cycle.
1766 ///
1767 /// TODO: Also check whether the SU must start a new group.
1768 bool SchedBoundary::checkHazard(SUnit *SU) {
1769   if (HazardRec->isEnabled()
1770       && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
1771     return true;
1772   }
1773   unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
1774   if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
1775     DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
1776           << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
1777     return true;
1778   }
1779   if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
1780     const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1781     for (TargetSchedModel::ProcResIter
1782            PI = SchedModel->getWriteProcResBegin(SC),
1783            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1784       unsigned NRCycle = getNextResourceCycle(PI->ProcResourceIdx, PI->Cycles);
1785       if (NRCycle > CurrCycle) {
1786 #ifndef NDEBUG
1787         MaxObservedStall = std::max(PI->Cycles, MaxObservedStall);
1788 #endif
1789         DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") "
1790               << SchedModel->getResourceName(PI->ProcResourceIdx)
1791               << "=" << NRCycle << "c\n");
1792         return true;
1793       }
1794     }
1795   }
1796   return false;
1797 }
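
// Worked example of the micro-op check (assumed numbers): with an issue width
// of 4 and CurrMOps == 3, an SU needing 2 micro-ops is a hazard (3 + 2 > 4)
// and stays pending until bumpCycle() resets CurrMOps; the same SU issues
// freely at the start of a cycle because the check requires CurrMOps > 0.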
1798 
1799 // Find the unscheduled node in ReadySUs with the highest latency.
1800 unsigned SchedBoundary::
1801 findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
1802   SUnit *LateSU = nullptr;
1803   unsigned RemLatency = 0;
1804   for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end();
1805        I != E; ++I) {
1806     unsigned L = getUnscheduledLatency(*I);
1807     if (L > RemLatency) {
1808       RemLatency = L;
1809       LateSU = *I;
1810     }
1811   }
1812   if (LateSU) {
1813     DEBUG(dbgs() << Available.getName() << " RemLatency SU("
1814           << LateSU->NodeNum << ") " << RemLatency << "c\n");
1815   }
1816   return RemLatency;
1817 }
1818 
1819 // Count resources in this zone and the remaining unscheduled
1820 // instructions. Return the max count, scaled. Set OtherCritIdx to the critical
1821 // resource index, or zero if the zone is issue limited.
1822 unsigned SchedBoundary::
1823 getOtherResourceCount(unsigned &OtherCritIdx) {
1824   OtherCritIdx = 0;
1825   if (!SchedModel->hasInstrSchedModel())
1826     return 0;
1827 
1828   unsigned OtherCritCount = Rem->RemIssueCount
1829     + (RetiredMOps * SchedModel->getMicroOpFactor());
1830   DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
1831         << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
1832   for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
1833        PIdx != PEnd; ++PIdx) {
1834     unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
1835     if (OtherCount > OtherCritCount) {
1836       OtherCritCount = OtherCount;
1837       OtherCritIdx = PIdx;
1838     }
1839   }
1840   if (OtherCritIdx) {
1841     DEBUG(dbgs() << "  " << Available.getName() << " + Remain CritRes: "
1842           << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
1843           << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
1844   }
1845   return OtherCritCount;
1846 }
1847 
1848 void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) {
1849   assert(SU->getInstr() && "Scheduled SUnit must have instr");
1850 
1851 #ifndef NDEBUG
1852   // ReadyCycle has been bumped up to CurrCycle when this node was
1853   // scheduled, but CurrCycle may have been eagerly advanced immediately after
1854   // scheduling, so may now be greater than ReadyCycle.
1855   if (ReadyCycle > CurrCycle)
1856     MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
1857 #endif
1858 
1859   if (ReadyCycle < MinReadyCycle)
1860     MinReadyCycle = ReadyCycle;
1861 
1862   // Check for interlocks first. For the purpose of other heuristics, an
1863   // instruction that cannot issue appears as if it's not in the ReadyQueue.
1864   bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
1865   if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU))
1866     Pending.push(SU);
1867   else
1868     Available.push(SU);
1869 
1870   // Record this node as an immediate dependent of the scheduled node.
1871   NextSUs.insert(SU);
1872 }
1873 
1874 void SchedBoundary::releaseTopNode(SUnit *SU) {
1875   if (SU->isScheduled)
1876     return;
1877 
1878   releaseNode(SU, SU->TopReadyCycle);
1879 }
1880 
1881 void SchedBoundary::releaseBottomNode(SUnit *SU) {
1882   if (SU->isScheduled)
1883     return;
1884 
1885   releaseNode(SU, SU->BotReadyCycle);
1886 }
1887 
1888 /// Move the boundary of scheduled code by one cycle.
1889 void SchedBoundary::bumpCycle(unsigned NextCycle) {
1890   if (SchedModel->getMicroOpBufferSize() == 0) {
1891     assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
1892     if (MinReadyCycle > NextCycle)
1893       NextCycle = MinReadyCycle;
1894   }
1895   // Update the current micro-ops, which will issue in the next cycle.
1896   unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
1897   CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;
1898 
1899   // Decrement DependentLatency based on the next cycle.
1900   if ((NextCycle - CurrCycle) > DependentLatency)
1901     DependentLatency = 0;
1902   else
1903     DependentLatency -= (NextCycle - CurrCycle);
1904 
1905   if (!HazardRec->isEnabled()) {
1906     // Bypass HazardRec virtual calls.
1907     CurrCycle = NextCycle;
1908   }
1909   else {
1910     // Bypass getHazardType calls in case of long latency.
1911     for (; CurrCycle != NextCycle; ++CurrCycle) {
1912       if (isTop())
1913         HazardRec->AdvanceCycle();
1914       else
1915         HazardRec->RecedeCycle();
1916     }
1917   }
1918   CheckPending = true;
1919   unsigned LFactor = SchedModel->getLatencyFactor();
1920   IsResourceLimited =
1921     (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
1922     > (int)LFactor;
1923 
1924   DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
1925 }
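
// Worked example (assumed numbers): with an issue width of 4, advancing from
// CurrCycle == 2 to NextCycle == 4 gives DecMOps == 8, so any CurrMOps value
// up to 8 drains to zero, and a DependentLatency of 5 decays to 3.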
1926 
1927 void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
1928   ExecutedResCounts[PIdx] += Count;
1929   if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
1930     MaxExecutedResCount = ExecutedResCounts[PIdx];
1931 }
1932 
1933 /// Add the given processor resource to this scheduled zone.
1934 ///
1935 /// \param Cycles indicates the number of consecutive (non-pipelined) cycles
1936 /// during which this resource is consumed.
1937 ///
1938 /// \return the next cycle at which the instruction may execute without
1939 /// oversubscribing resources.
1940 unsigned SchedBoundary::
1941 countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
1942   unsigned Factor = SchedModel->getResourceFactor(PIdx);
1943   unsigned Count = Factor * Cycles;
1944   DEBUG(dbgs() << "  " << SchedModel->getResourceName(PIdx)
1945         << " +" << Cycles << "x" << Factor << "u\n");
1946 
1947   // Update Executed resources counts.
1948   incExecutedResources(PIdx, Count);
1949   assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
1950   Rem->RemainingCounts[PIdx] -= Count;
1951 
1952   // Check if this resource exceeds the current critical resource. If so, it
1953   // becomes the critical resource.
1954   if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
1955     ZoneCritResIdx = PIdx;
1956     DEBUG(dbgs() << "  *** Critical resource "
1957           << SchedModel->getResourceName(PIdx) << ": "
1958           << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
1959   }
1960   // For reserved resources, record the highest cycle using the resource.
1961   unsigned NextAvailable = getNextResourceCycle(PIdx, Cycles);
1962   if (NextAvailable > CurrCycle) {
1963     DEBUG(dbgs() << "  Resource conflict: "
1964           << SchedModel->getProcResource(PIdx)->Name << " reserved until @"
1965           << NextAvailable << "\n");
1966   }
1967   return NextAvailable;
1968 }
1969 
1970 /// Move the boundary of scheduled code by one SUnit.
1971 void SchedBoundary::bumpNode(SUnit *SU) {
1972   // Update the reservation table.
1973   if (HazardRec->isEnabled()) {
1974     if (!isTop() && SU->isCall) {
1975       // Calls are scheduled with their preceding instructions. For bottom-up
1976       // scheduling, clear the pipeline state before emitting.
1977       HazardRec->Reset();
1978     }
1979     HazardRec->EmitInstruction(SU);
1980   }
1981   // checkHazard should prevent scheduling multiple instructions per cycle that
1982   // exceed the issue width.
1983   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1984   unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
1985   assert(
1986       (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
1987       "Cannot schedule this instruction's MicroOps in the current cycle.");
1988 
1989   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
1990   DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");
1991 
1992   unsigned NextCycle = CurrCycle;
1993   switch (SchedModel->getMicroOpBufferSize()) {
1994   case 0:
1995     assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
1996     break;
1997   case 1:
1998     if (ReadyCycle > NextCycle) {
1999       NextCycle = ReadyCycle;
2000       DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
2001     }
2002     break;
2003   default:
2004     // We don't currently model the OOO reorder buffer, so consider all
2005     // scheduled MOps to be "retired". We do loosely model in-order resource
2006     // latency. If this instruction uses an in-order resource, account for any
2007     // likely stall cycles.
2008     if (SU->isUnbuffered && ReadyCycle > NextCycle)
2009       NextCycle = ReadyCycle;
2010     break;
2011   }
2012   RetiredMOps += IncMOps;
2013 
2014   // Update resource counts and critical resource.
2015   if (SchedModel->hasInstrSchedModel()) {
2016     unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
2017     assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
2018     Rem->RemIssueCount -= DecRemIssue;
2019     if (ZoneCritResIdx) {
2020       // Scale scheduled micro-ops for comparing with the critical resource.
2021       unsigned ScaledMOps =
2022         RetiredMOps * SchedModel->getMicroOpFactor();
2023 
2024       // If scaled micro-ops are now more than the previous critical resource by
2025       // a full cycle, then micro-ops issue becomes critical.
2026       if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
2027           >= (int)SchedModel->getLatencyFactor()) {
2028         ZoneCritResIdx = 0;
2029         DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
2030               << ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
2031       }
2032     }
2033     for (TargetSchedModel::ProcResIter
2034            PI = SchedModel->getWriteProcResBegin(SC),
2035            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2036       unsigned RCycle =
2037         countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
2038       if (RCycle > NextCycle)
2039         NextCycle = RCycle;
2040     }
2041     if (SU->hasReservedResource) {
2042       // For reserved resources, record the highest cycle using the resource.
2043       // For top-down scheduling, this is the cycle in which we schedule this
2044   // instruction plus the number of cycles the operation reserves the
2045   // resource. For bottom-up, it is simply the instruction's cycle.
2046       for (TargetSchedModel::ProcResIter
2047              PI = SchedModel->getWriteProcResBegin(SC),
2048              PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2049         unsigned PIdx = PI->ProcResourceIdx;
2050         if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
2051           if (isTop()) {
2052             ReservedCycles[PIdx] =
2053               std::max(getNextResourceCycle(PIdx, 0), NextCycle + PI->Cycles);
2054           }
2055           else
2056             ReservedCycles[PIdx] = NextCycle;
2057         }
2058       }
2059     }
2060   }
2061   // Update ExpectedLatency and DependentLatency.
2062   unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
2063   unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
2064   if (SU->getDepth() > TopLatency) {
2065     TopLatency = SU->getDepth();
2066     DEBUG(dbgs() << "  " << Available.getName()
2067           << " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
2068   }
2069   if (SU->getHeight() > BotLatency) {
2070     BotLatency = SU->getHeight();
2071     DEBUG(dbgs() << "  " << Available.getName()
2072           << " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
2073   }
2074   // If we stall for any reason, bump the cycle.
2075   if (NextCycle > CurrCycle) {
2076     bumpCycle(NextCycle);
2077   }
2078   else {
2079     // After updating ZoneCritResIdx and ExpectedLatency, check if we're
2080     // resource limited. If a stall occurred, bumpCycle does this.
2081     unsigned LFactor = SchedModel->getLatencyFactor();
2082     IsResourceLimited =
2083       (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
2084       > (int)LFactor;
2085   }
2086   // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
2087   // resets CurrMOps. Loop to handle instructions with more MOps than can issue
2088   // in one cycle. Since we commonly reach the max MOps here, opportunistically
2089   // bump the cycle to avoid uselessly checking everything in the readyQ.
2090   CurrMOps += IncMOps;
2091   while (CurrMOps >= SchedModel->getIssueWidth()) {
2092     DEBUG(dbgs() << "  *** Max MOps " << CurrMOps
2093           << " at cycle " << CurrCycle << '\n');
2094     bumpCycle(++NextCycle);
2095   }
2096   DEBUG(dumpScheduledState());
2097 }
2098 
2099 /// Release pending ready nodes into the available queue. This makes them
2100 /// visible to heuristics.
2101 void SchedBoundary::releasePending() {
2102   // If the available queue is empty, it is safe to reset MinReadyCycle.
2103   if (Available.empty())
2104     MinReadyCycle = UINT_MAX;
2105 
2106   // Check to see if any of the pending instructions are ready to issue.  If
2107   // so, add them to the available queue.
2108   bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
2109   for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
2110     SUnit *SU = *(Pending.begin()+i);
2111     unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
2112 
2113     if (ReadyCycle < MinReadyCycle)
2114       MinReadyCycle = ReadyCycle;
2115 
2116     if (!IsBuffered && ReadyCycle > CurrCycle)
2117       continue;
2118 
2119     if (checkHazard(SU))
2120       continue;
2121 
2122     Available.push(SU);
2123     Pending.remove(Pending.begin()+i);
2124     --i; --e;
2125   }
2126   DEBUG(if (!Pending.empty()) Pending.dump());
2127   CheckPending = false;
2128 }
2129 
2130 /// Remove SU from the ready set for this boundary.
2131 void SchedBoundary::removeReady(SUnit *SU) {
2132   if (Available.isInQueue(SU))
2133     Available.remove(Available.find(SU));
2134   else {
2135     assert(Pending.isInQueue(SU) && "bad ready count");
2136     Pending.remove(Pending.find(SU));
2137   }
2138 }
2139 
2140 /// If this queue only has one ready candidate, return it. As a side effect,
2141 /// defer any nodes that now hit a hazard, and advance the cycle until at least
2142 /// one node is ready. If multiple instructions are ready, return NULL.
2143 SUnit *SchedBoundary::pickOnlyChoice() {
2144   if (CheckPending)
2145     releasePending();
2146 
2147   if (CurrMOps > 0) {
2148     // Defer any ready instrs that now have a hazard.
2149     for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
2150       if (checkHazard(*I)) {
2151         Pending.push(*I);
2152         I = Available.remove(I);
2153         continue;
2154       }
2155       ++I;
2156     }
2157   }
2158   for (unsigned i = 0; Available.empty(); ++i) {
2159 //  FIXME: Re-enable assert once PR20057 is resolved.
2160 //    assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
2161 //           "permanent hazard");
2162     (void)i;
2163     bumpCycle(CurrCycle + 1);
2164     releasePending();
2165   }
2166   if (Available.size() == 1)
2167     return *Available.begin();
2168   return nullptr;
2169 }
2170 
2171 #ifndef NDEBUG
2172 // This is useful information to dump after bumpNode.
2173 // Note that the Queue contents are more useful before pickNodeFromQueue.
2174 void SchedBoundary::dumpScheduledState() {
2175   unsigned ResFactor;
2176   unsigned ResCount;
2177   if (ZoneCritResIdx) {
2178     ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
2179     ResCount = getResourceCount(ZoneCritResIdx);
2180   }
2181   else {
2182     ResFactor = SchedModel->getMicroOpFactor();
2183     ResCount = RetiredMOps * SchedModel->getMicroOpFactor();
2184   }
2185   unsigned LFactor = SchedModel->getLatencyFactor();
2186   dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
2187          << "  Retired: " << RetiredMOps;
2188   dbgs() << "\n  Executed: " << getExecutedCount() / LFactor << "c";
2189   dbgs() << "\n  Critical: " << ResCount / LFactor << "c, "
2190          << ResCount / ResFactor << " "
2191          << SchedModel->getResourceName(ZoneCritResIdx)
2192          << "\n  ExpectedLatency: " << ExpectedLatency << "c\n"
2193          << (IsResourceLimited ? "  - Resource" : "  - Latency")
2194          << " limited.\n";
2195 }
2196 #endif
2197 
2198 //===----------------------------------------------------------------------===//
2199 // GenericScheduler - Generic implementation of MachineSchedStrategy.
2200 //===----------------------------------------------------------------------===//
2201 
2202 void GenericSchedulerBase::SchedCandidate::
2203 initResourceDelta(const ScheduleDAGMI *DAG,
2204                   const TargetSchedModel *SchedModel) {
2205   if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
2206     return;
2207 
2208   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2209   for (TargetSchedModel::ProcResIter
2210          PI = SchedModel->getWriteProcResBegin(SC),
2211          PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2212     if (PI->ProcResourceIdx == Policy.ReduceResIdx)
2213       ResDelta.CritResources += PI->Cycles;
2214     if (PI->ProcResourceIdx == Policy.DemandResIdx)
2215       ResDelta.DemandedResources += PI->Cycles;
2216   }
2217 }
2218 
2219 /// Set the CandPolicy for a scheduling zone, given the current resources and
2220 /// latencies inside and outside the zone.
2221 void GenericSchedulerBase::setPolicy(CandPolicy &Policy,
2222                                      bool IsPostRA,
2223                                      SchedBoundary &CurrZone,
2224                                      SchedBoundary *OtherZone) {
2225   // Apply preemptive heuristics based on the total latency and resources
2226   // inside and outside this zone. Potential stalls should be considered before
2227   // following this policy.
2228 
2229   // Compute remaining latency. We need this both to determine whether the
2230   // overall schedule has become latency-limited and whether the instructions
2231   // outside this zone are resource or latency limited.
2232   //
2233   // The "dependent" latency is updated incrementally during scheduling as the
2234   // max height/depth of scheduled nodes minus the cycles since it was
2235   // scheduled:
2236   //   DLat = max(N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
2237   //
2238   // The "independent" latency is the max ready queue depth:
2239   //   ILat = max N.depth for N in Available|Pending
2240   //
2241   // RemainingLatency is the greater of independent and dependent latency.
2242   unsigned RemLatency = CurrZone.getDependentLatency();
2243   RemLatency = std::max(RemLatency,
2244                         CurrZone.findMaxLatency(CurrZone.Available.elements()));
2245   RemLatency = std::max(RemLatency,
2246                         CurrZone.findMaxLatency(CurrZone.Pending.elements()));
2247 
2248   // Compute the critical resource outside the zone.
2249   unsigned OtherCritIdx = 0;
2250   unsigned OtherCount =
2251     OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;
2252 
2253   bool OtherResLimited = false;
2254   if (SchedModel->hasInstrSchedModel()) {
2255     unsigned LFactor = SchedModel->getLatencyFactor();
2256     OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
2257   }
2258   // Schedule aggressively for latency in PostRA mode. We don't check for
2259   // acyclic latency during PostRA, and highly out-of-order processors will
2260   // skip PostRA scheduling.
2261   if (!OtherResLimited) {
2262     if (IsPostRA || (RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath)) {
2263       Policy.ReduceLatency |= true;
2264       DEBUG(dbgs() << "  " << CurrZone.Available.getName()
2265             << " RemainingLatency " << RemLatency << " + "
2266             << CurrZone.getCurrCycle() << "c > CritPath "
2267             << Rem.CriticalPath << "\n");
2268     }
2269   }
2270   // If the same resource is limiting inside and outside the zone, do nothing.
2271   if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
2272     return;
2273 
2274   DEBUG(
2275     if (CurrZone.isResourceLimited()) {
2276       dbgs() << "  " << CurrZone.Available.getName() << " ResourceLimited: "
2277              << SchedModel->getResourceName(CurrZone.getZoneCritResIdx())
2278              << "\n";
2279     }
2280     if (OtherResLimited)
2281       dbgs() << "  RemainingLimit: "
2282              << SchedModel->getResourceName(OtherCritIdx) << "\n";
2283     if (!CurrZone.isResourceLimited() && !OtherResLimited)
2284       dbgs() << "  Latency limited both directions.\n");
2285 
2286   if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
2287     Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();
2288 
2289   if (OtherResLimited)
2290     Policy.DemandResIdx = OtherCritIdx;
2291 }
2292 
2293 #ifndef NDEBUG
2294 const char *GenericSchedulerBase::getReasonStr(
2295   GenericSchedulerBase::CandReason Reason) {
2296   switch (Reason) {
2297   case NoCand:         return "NOCAND    ";
2298   case PhysRegCopy:    return "PREG-COPY ";
2299   case RegExcess:      return "REG-EXCESS";
2300   case RegCritical:    return "REG-CRIT  ";
2301   case Stall:          return "STALL     ";
2302   case Cluster:        return "CLUSTER   ";
2303   case Weak:           return "WEAK      ";
2304   case RegMax:         return "REG-MAX   ";
2305   case ResourceReduce: return "RES-REDUCE";
2306   case ResourceDemand: return "RES-DEMAND";
2307   case TopDepthReduce: return "TOP-DEPTH ";
2308   case TopPathReduce:  return "TOP-PATH  ";
2309   case BotHeightReduce:return "BOT-HEIGHT";
2310   case BotPathReduce:  return "BOT-PATH  ";
2311   case NextDefUse:     return "DEF-USE   ";
2312   case NodeOrder:      return "ORDER     ";
2313   }
2314   llvm_unreachable("Unknown reason!");
2315 }
2316 
2317 void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
2318   PressureChange P;
2319   unsigned ResIdx = 0;
2320   unsigned Latency = 0;
2321   switch (Cand.Reason) {
2322   default:
2323     break;
2324   case RegExcess:
2325     P = Cand.RPDelta.Excess;
2326     break;
2327   case RegCritical:
2328     P = Cand.RPDelta.CriticalMax;
2329     break;
2330   case RegMax:
2331     P = Cand.RPDelta.CurrentMax;
2332     break;
2333   case ResourceReduce:
2334     ResIdx = Cand.Policy.ReduceResIdx;
2335     break;
2336   case ResourceDemand:
2337     ResIdx = Cand.Policy.DemandResIdx;
2338     break;
2339   case TopDepthReduce:
2340     Latency = Cand.SU->getDepth();
2341     break;
2342   case TopPathReduce:
2343     Latency = Cand.SU->getHeight();
2344     break;
2345   case BotHeightReduce:
2346     Latency = Cand.SU->getHeight();
2347     break;
2348   case BotPathReduce:
2349     Latency = Cand.SU->getDepth();
2350     break;
2351   }
2352   dbgs() << "  Cand SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
2353   if (P.isValid())
2354     dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
2355            << ":" << P.getUnitInc() << " ";
2356   else
2357     dbgs() << "      ";
2358   if (ResIdx)
2359     dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
2360   else
2361     dbgs() << "         ";
2362   if (Latency)
2363     dbgs() << " " << Latency << " cycles ";
2364   else
2365     dbgs() << "          ";
2366   dbgs() << '\n';
2367 }
2368 #endif
2369 
2370 /// Return true if this heuristic determines order.
2371 static bool tryLess(int TryVal, int CandVal,
2372                     GenericSchedulerBase::SchedCandidate &TryCand,
2373                     GenericSchedulerBase::SchedCandidate &Cand,
2374                     GenericSchedulerBase::CandReason Reason) {
2375   if (TryVal < CandVal) {
2376     TryCand.Reason = Reason;
2377     return true;
2378   }
2379   if (TryVal > CandVal) {
2380     if (Cand.Reason > Reason)
2381       Cand.Reason = Reason;
2382     return true;
2383   }
2384   Cand.setRepeat(Reason);
2385   return false;
2386 }
2387 
2388 static bool tryGreater(int TryVal, int CandVal,
2389                        GenericSchedulerBase::SchedCandidate &TryCand,
2390                        GenericSchedulerBase::SchedCandidate &Cand,
2391                        GenericSchedulerBase::CandReason Reason) {
2392   if (TryVal > CandVal) {
2393     TryCand.Reason = Reason;
2394     return true;
2395   }
2396   if (TryVal < CandVal) {
2397     if (Cand.Reason > Reason)
2398       Cand.Reason = Reason;
2399     return true;
2400   }
2401   Cand.setRepeat(Reason);
2402   return false;
2403 }
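
// Usage note: tryLess/tryGreater return true whenever the heuristic decides
// the comparison, in either direction. E.g. tryLess(0, 1, ...) marks TryCand
// with Reason and wins; tryLess(1, 0, ...) leaves TryCand.Reason unset (so
// Cand keeps winning) but lowers Cand.Reason to record which heuristic was
// decisive. A tie records the repeat via setRepeat() and falls through to the
// next heuristic.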
2404 
2405 static bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
2406                        GenericSchedulerBase::SchedCandidate &Cand,
2407                        SchedBoundary &Zone) {
2408   if (Zone.isTop()) {
2409     if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
2410       if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2411                   TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
2412         return true;
2413     }
2414     if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2415                    TryCand, Cand, GenericSchedulerBase::TopPathReduce))
2416       return true;
2417   }
2418   else {
2419     if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
2420       if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2421                   TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
2422         return true;
2423     }
2424     if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2425                    TryCand, Cand, GenericSchedulerBase::BotPathReduce))
2426       return true;
2427   }
2428   return false;
2429 }
2430 
2431 static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand,
2432                       bool IsTop) {
2433   DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
2434         << GenericSchedulerBase::getReasonStr(Cand.Reason) << '\n');
2435 }
2436 
2437 void GenericScheduler::initialize(ScheduleDAGMI *dag) {
2438   assert(dag->hasVRegLiveness() &&
2439          "(PreRA)GenericScheduler needs vreg liveness");
2440   DAG = static_cast<ScheduleDAGMILive*>(dag);
2441   SchedModel = DAG->getSchedModel();
2442   TRI = DAG->TRI;
2443 
2444   Rem.init(DAG, SchedModel);
2445   Top.init(DAG, SchedModel, &Rem);
2446   Bot.init(DAG, SchedModel, &Rem);
2447 
2448   // Initialize resource counts.
2449 
2450   // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
2451   // are disabled, then these HazardRecs will be disabled.
2452   const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
2453   if (!Top.HazardRec) {
2454     Top.HazardRec =
2455         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
2456             Itin, DAG);
2457   }
2458   if (!Bot.HazardRec) {
2459     Bot.HazardRec =
2460         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
2461             Itin, DAG);
2462   }
2463 }
2464 
2465 /// Initialize the per-region scheduling policy.
2466 void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
2467                                   MachineBasicBlock::iterator End,
2468                                   unsigned NumRegionInstrs) {
2469   const MachineFunction &MF = *Begin->getParent()->getParent();
2470   const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();
2471 
2472   // Avoid setting up the register pressure tracker for small regions to save
2473   // compile time. As a rough heuristic, only track pressure when the number of
2474   // schedulable instructions exceeds half the integer register file.
2475   RegionPolicy.ShouldTrackPressure = true;
2476   for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
2477     MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
2478     if (TLI->isTypeLegal(LegalIntVT)) {
2479       unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
2480         TLI->getRegClassFor(LegalIntVT));
2481       RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
2482     }
2483   }
2484 
2485   // For generic targets, we default to bottom-up, because it's simpler and more
2486   // compile-time optimizations have been implemented in that direction.
2487   RegionPolicy.OnlyBottomUp = true;
2488 
2489   // Allow the subtarget to override default policy.
2490   MF.getSubtarget().overrideSchedPolicy(RegionPolicy, Begin, End,
2491                                         NumRegionInstrs);
2492 
2493   // After subtarget overrides, apply command line options.
2494   if (!EnableRegPressure)
2495     RegionPolicy.ShouldTrackPressure = false;
2496 
2497   // Check if -misched-topdown/bottomup can force or unforce the direction.
2498   // e.g. -misched-bottomup=false allows scheduling in both directions.
2499   assert((!ForceTopDown || !ForceBottomUp) &&
2500          "-misched-topdown incompatible with -misched-bottomup");
2501   if (ForceBottomUp.getNumOccurrences() > 0) {
2502     RegionPolicy.OnlyBottomUp = ForceBottomUp;
2503     if (RegionPolicy.OnlyBottomUp)
2504       RegionPolicy.OnlyTopDown = false;
2505   }
2506   if (ForceTopDown.getNumOccurrences() > 0) {
2507     RegionPolicy.OnlyTopDown = ForceTopDown;
2508     if (RegionPolicy.OnlyTopDown)
2509       RegionPolicy.OnlyBottomUp = false;
2510   }
2511 }
2512 
2513 void GenericScheduler::dumpPolicy() {
2514   dbgs() << "GenericScheduler RegionPolicy: "
2515          << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure
2516          << " OnlyTopDown=" << RegionPolicy.OnlyTopDown
2517          << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp
2518          << "\n";
2519 }
2520 
2521 /// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
2522 /// critical path by more cycles than it takes to drain the instruction buffer.
2523 /// We estimate an upper bound on in-flight instructions as:
2524 ///
2525 /// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
2526 /// InFlightIterations = AcyclicPath / CyclesPerIteration
2527 /// InFlightResources = InFlightIterations * LoopResources
2528 ///
2529 /// TODO: Check execution resources in addition to IssueCount.
2530 void GenericScheduler::checkAcyclicLatency() {
2531   if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
2532     return;
2533 
2534   // Scaled number of cycles per loop iteration.
2535   unsigned IterCount =
2536     std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
2537              Rem.RemIssueCount);
2538   // Scaled acyclic critical path.
2539   unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
2540   // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
2541   unsigned InFlightCount =
2542     (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
2543   unsigned BufferLimit =
2544     SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();
2545 
2546   Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;
2547 
2548   DEBUG(dbgs() << "IssueCycles="
2549         << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
2550         << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
2551         << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
2552         << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
2553         << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
2554         if (Rem.IsAcyclicLatencyLimited)
2555           dbgs() << "  ACYCLIC LATENCY LIMIT\n");
2556 }
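
// Worked example (hypothetical machine with latency and micro-op factors of
// 1): CyclicCritPath = 10c, CriticalPath = 40c, RemIssueCount = 20 gives
// IterCount = max(10, 20) = 20, AcyclicCount = 40, and InFlightCount =
// ceil(40 * 20 / 20) = 40 in-flight micro-ops. With MicroOpBufferSize = 32
// the limit is exceeded (40 > 32), so IsAcyclicLatencyLimited is set.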
2557 
2558 void GenericScheduler::registerRoots() {
2559   Rem.CriticalPath = DAG->ExitSU.getDepth();
2560 
2561   // Some roots may not feed into ExitSU. Check all of them in case.
2562   for (std::vector<SUnit*>::const_iterator
2563          I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
2564     if ((*I)->getDepth() > Rem.CriticalPath)
2565       Rem.CriticalPath = (*I)->getDepth();
2566   }
2567   DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
2568   if (DumpCriticalPathLength) {
2569     errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
2570   }
2571 
2572   if (EnableCyclicPath) {
2573     Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
2574     checkAcyclicLatency();
2575   }
2576 }
2577 
2578 static bool tryPressure(const PressureChange &TryP,
2579                         const PressureChange &CandP,
2580                         GenericSchedulerBase::SchedCandidate &TryCand,
2581                         GenericSchedulerBase::SchedCandidate &Cand,
2582                         GenericSchedulerBase::CandReason Reason,
2583                         const TargetRegisterInfo *TRI,
2584                         const MachineFunction &MF) {
2585   unsigned TryPSet = TryP.getPSetOrMax();
2586   unsigned CandPSet = CandP.getPSetOrMax();
2587   // If both candidates affect the same set, go with the smallest increase.
2588   if (TryPSet == CandPSet) {
2589     return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
2590                    Reason);
2591   }
2592   // If one candidate decreases and the other increases, go with it.
2593   // Invalid candidates have UnitInc==0.
2594   if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
2595                  Reason)) {
2596     return true;
2597   }
2598 
2599   int TryRank = TryP.isValid() ? TRI->getRegPressureSetScore(MF, TryPSet) :
2600                                  std::numeric_limits<int>::max();
2601 
2602   int CandRank = CandP.isValid() ? TRI->getRegPressureSetScore(MF, CandPSet) :
2603                                    std::numeric_limits<int>::max();
2604 
2605   // If the candidates are decreasing pressure, reverse priority.
2606   if (TryP.getUnitInc() < 0)
2607     std::swap(TryRank, CandRank);
2608   return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
2609 }
2610 
2611 static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
2612   return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
2613 }
2614 
2615 /// Minimize physical register live ranges. Regalloc wants them adjacent to
2616 /// their physreg def/use.
2617 ///
2618 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
2619 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
2620 /// with the operation that produces or consumes the physreg. We'll do this when
2621 /// regalloc has support for parallel copies.
2622 static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
2623   const MachineInstr *MI = SU->getInstr();
2624   if (!MI->isCopy())
2625     return 0;
2626 
2627   unsigned ScheduledOper = isTop ? 1 : 0;
2628   unsigned UnscheduledOper = isTop ? 0 : 1;
2629   // If we have already scheduled the physreg produce/consumer, immediately
2630   // schedule the copy.
2631   if (TargetRegisterInfo::isPhysicalRegister(
2632         MI->getOperand(ScheduledOper).getReg()))
2633     return 1;
2634   // If the physreg is at the boundary, defer it. Otherwise schedule it
2635   // immediately to free the dependent. We can hoist the copy later.
2636   bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
2637   if (TargetRegisterInfo::isPhysicalRegister(
2638         MI->getOperand(UnscheduledOper).getReg()))
2639     return AtBoundary ? -1 : 1;
2640   return 0;
2641 }
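
// Example of the bias in action (the copy below is hypothetical): when
// scheduling top-down, "%vreg = COPY %physreg" exposes the physreg in
// operand 1, the already-scheduled side, so the function returns 1 and the
// copy issues right after its physreg def. Scheduling the same copy
// bottom-up sees the physreg in the unscheduled operand and returns -1
// (defer) only when the copy sits at the region boundary.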

/// Apply a set of heuristics to a new candidate. Heuristics are currently
/// hierarchical. This may be more efficient than a graduated cost model because
/// we don't need to evaluate all aspects of the model for each node in the
/// queue. But it's really done to make the heuristics easier to debug and
/// statistically analyze.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
/// \param Zone describes the scheduled zone that we are extending.
/// \param RPTracker describes reg pressure within the scheduled zone.
/// \param TempTracker is a scratch pressure tracker to reuse in queries.
void GenericScheduler::tryCandidate(SchedCandidate &Cand,
                                    SchedCandidate &TryCand,
                                    SchedBoundary &Zone,
                                    const RegPressureTracker &RPTracker,
                                    RegPressureTracker &TempTracker) {

  if (DAG->isTrackingPressure()) {
    // Always initialize TryCand's RPDelta.
    if (Zone.isTop()) {
      TempTracker.getMaxDownwardPressureDelta(
        TryCand.SU->getInstr(),
        TryCand.RPDelta,
        DAG->getRegionCriticalPSets(),
        DAG->getRegPressure().MaxSetPressure);
    }
    else {
      if (VerifyScheduling) {
        TempTracker.getMaxUpwardPressureDelta(
          TryCand.SU->getInstr(),
          &DAG->getPressureDiff(TryCand.SU),
          TryCand.RPDelta,
          DAG->getRegionCriticalPSets(),
          DAG->getRegPressure().MaxSetPressure);
      }
      else {
        RPTracker.getUpwardPressureDelta(
          TryCand.SU->getInstr(),
          DAG->getPressureDiff(TryCand.SU),
          TryCand.RPDelta,
          DAG->getRegionCriticalPSets(),
          DAG->getRegPressure().MaxSetPressure);
      }
    }
  }
  DEBUG(if (TryCand.RPDelta.Excess.isValid())
          dbgs() << "  Try  SU(" << TryCand.SU->NodeNum << ") "
                 << TRI->getRegPressureSetName(TryCand.RPDelta.Excess.getPSet())
                 << ":" << TryCand.RPDelta.Excess.getUnitInc() << "\n");

  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return;
  }

  if (tryGreater(biasPhysRegCopy(TryCand.SU, Zone.isTop()),
                 biasPhysRegCopy(Cand.SU, Zone.isTop()),
                 TryCand, Cand, PhysRegCopy))
    return;

  // Avoid exceeding the target's limit.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
                                               Cand.RPDelta.Excess,
                                               TryCand, Cand, RegExcess, TRI,
                                               DAG->MF))
    return;

  // Avoid increasing the max critical pressure in the scheduled region.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
                                               Cand.RPDelta.CriticalMax,
                                               TryCand, Cand, RegCritical, TRI,
                                               DAG->MF))
    return;

  // For loops that are acyclic path limited, aggressively schedule for latency.
  // This can result in very long dependence chains scheduled in sequence, so
  // once every cycle (when CurrMOps == 0), switch to normal heuristics.
  if (Rem.IsAcyclicLatencyLimited && !Zone.getCurrMOps()
      && tryLatency(TryCand, Cand, Zone))
    return;

  // Prioritize instructions that read unbuffered resources by stall cycles.
  if (tryLess(Zone.getLatencyStallCycles(TryCand.SU),
              Zone.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
    return;

  // Keep clustered nodes together to encourage downstream peephole
  // optimizations which may reduce resource requirements.
  //
  // This is a best effort to set things up for a post-RA pass. Optimizations
  // like generating loads of multiple registers should ideally be done within
  // the scheduler pass by combining the loads during DAG postprocessing.
  const SUnit *NextClusterSU =
    Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
  if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU,
                 TryCand, Cand, Cluster))
    return;

  // Weak edges are for clustering and other constraints.
  if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()),
              getWeakLeft(Cand.SU, Zone.isTop()),
              TryCand, Cand, Weak)) {
    return;
  }
  // Avoid increasing the max pressure of the entire region.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
                                               Cand.RPDelta.CurrentMax,
                                               TryCand, Cand, RegMax, TRI,
                                               DAG->MF))
    return;

  // Avoid critical resource consumption and balance the schedule.
  TryCand.initResourceDelta(DAG, SchedModel);
  if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
              TryCand, Cand, ResourceReduce))
    return;
  if (tryGreater(TryCand.ResDelta.DemandedResources,
                 Cand.ResDelta.DemandedResources,
                 TryCand, Cand, ResourceDemand))
    return;

  // Avoid serializing long latency dependence chains.
  // For acyclic path limited loops, latency was already checked above.
  if (!RegionPolicy.DisableLatencyHeuristic && Cand.Policy.ReduceLatency &&
      !Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, Zone)) {
    return;
  }

  // Prefer immediate defs/users of the last scheduled instruction. This is a
  // local pressure avoidance strategy that also makes the machine code
  // readable.
  if (tryGreater(Zone.isNextSU(TryCand.SU), Zone.isNextSU(Cand.SU),
                 TryCand, Cand, NextDefUse))
    return;

  // Fall through to original instruction order.
  if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
      || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
    TryCand.Reason = NodeOrder;
  }
}

/// Pick the best candidate from the queue.
///
/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
/// DAG building. To adjust for the current scheduling location we need to
/// maintain the number of vreg uses remaining to be top-scheduled.
void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
                                         const RegPressureTracker &RPTracker,
                                         SchedCandidate &Cand) {
  ReadyQueue &Q = Zone.Available;

  DEBUG(Q.dump());

  // getMaxPressureDelta temporarily modifies the tracker.
  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);

  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {

    SchedCandidate TryCand(Cand.Policy);
    TryCand.SU = *I;
    tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker);
    if (TryCand.Reason != NoCand) {
      // Initialize resource delta if needed in case future heuristics query it.
      if (TryCand.ResDelta == SchedResourceDelta())
        TryCand.initResourceDelta(DAG, SchedModel);
      Cand.setBest(TryCand);
      DEBUG(traceCandidate(Cand));
    }
  }
}

/// Pick the best candidate node from either the top or bottom queue.
SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    IsTopNode = false;
    DEBUG(dbgs() << "Pick Bot ONLY1\n");
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    IsTopNode = true;
    DEBUG(dbgs() << "Pick Top ONLY1\n");
    return SU;
  }
  CandPolicy NoPolicy;
  SchedCandidate BotCand(NoPolicy);
  SchedCandidate TopCand(NoPolicy);
  // Set the bottom-up policy based on the state of the current bottom zone and
  // the instructions outside the zone, including the top zone.
  setPolicy(BotCand.Policy, /*IsPostRA=*/false, Bot, &Top);
  // Set the top-down policy based on the state of the current top zone and
  // the instructions outside the zone, including the bottom zone.
  setPolicy(TopCand.Policy, /*IsPostRA=*/false, Top, &Bot);

  // Prefer bottom scheduling when heuristics are silent.
  pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
  assert(BotCand.Reason != NoCand && "failed to find the first candidate");

  // If either Q has a single candidate that provides the least increase in
  // Excess pressure, we can immediately schedule from that Q.
  //
  // RegionCriticalPSets summarizes the pressure within the scheduled region and
  // affects picking from either Q. If scheduling in one direction must
  // increase pressure for one of the excess PSets, then schedule in that
  // direction first to provide more freedom in the other direction.
  if ((BotCand.Reason == RegExcess && !BotCand.isRepeat(RegExcess))
      || (BotCand.Reason == RegCritical
          && !BotCand.isRepeat(RegCritical)))
  {
    IsTopNode = false;
    tracePick(BotCand, IsTopNode);
    return BotCand.SU;
  }
  // Check if the top Q has a better candidate.
  pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
  assert(TopCand.Reason != NoCand && "failed to find the first candidate");

  // Choose the queue with the most important (lowest enum) reason.
  if (TopCand.Reason < BotCand.Reason) {
    IsTopNode = true;
    tracePick(TopCand, IsTopNode);
    return TopCand.SU;
  }
  // Otherwise prefer the bottom candidate, in node order if all else failed.
  IsTopNode = false;
  tracePick(BotCand, IsTopNode);
  return BotCand.SU;
}

/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    if (RegionPolicy.OnlyTopDown) {
      SU = Top.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        SchedCandidate TopCand(NoPolicy);
        pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
        assert(TopCand.Reason != NoCand && "failed to find a candidate");
        tracePick(TopCand, true);
        SU = TopCand.SU;
      }
      IsTopNode = true;
    }
    else if (RegionPolicy.OnlyBottomUp) {
      SU = Bot.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        SchedCandidate BotCand(NoPolicy);
        pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
        assert(BotCand.Reason != NoCand && "failed to find a candidate");
        tracePick(BotCand, false);
        SU = BotCand.SU;
      }
      IsTopNode = false;
    }
    else {
      SU = pickNodeBidirectional(IsTopNode);
    }
  } while (SU->isScheduled);

  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
  return SU;
}

void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {

  MachineBasicBlock::iterator InsertPos = SU->getInstr();
  if (!isTop)
    ++InsertPos;
  SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;

  // Find already scheduled copies with a single physreg dependence and move
  // them just above the scheduled instruction.
  for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end();
       I != E; ++I) {
    if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg()))
      continue;
    SUnit *DepSU = I->getSUnit();
    if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
      continue;
    MachineInstr *Copy = DepSU->getInstr();
    if (!Copy->isCopy())
      continue;
    DEBUG(dbgs() << "  Rescheduling physreg copy ";
          I->getSUnit()->dump(DAG));
    DAG->moveInstruction(Copy, InsertPos);
  }
}

/// Update the scheduler's state after scheduling a node. This is the same node
/// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
/// update its state based on the current cycle before MachineSchedStrategy
/// does.
///
/// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
/// them here. See comments in biasPhysRegCopy.
void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  if (IsTopNode) {
    SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
    Top.bumpNode(SU);
    if (SU->hasPhysRegUses)
      reschedulePhysRegCopies(SU, true);
  }
  else {
    SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
    Bot.bumpNode(SU);
    if (SU->hasPhysRegDefs)
      reschedulePhysRegCopies(SU, false);
  }
}

/// Create the standard converging machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
      new ScheduleDAGMILive(C, make_unique<GenericScheduler>(C));
  // Register DAG post-processors.
  //
  // FIXME: extend the mutation API to allow earlier mutations to instantiate
  // data and pass it to later mutations. Have a single mutation that gathers
  // the interesting nodes in one pass.
  DAG->addMutation(make_unique<CopyConstrain>(DAG->TII, DAG->TRI));
  if (EnableLoadCluster && DAG->TII->enableClusterLoads())
    DAG->addMutation(make_unique<LoadClusterMutation>(DAG->TII, DAG->TRI));
  if (EnableMacroFusion)
    DAG->addMutation(make_unique<MacroFusion>(*DAG->TII, *DAG->TRI));
  return DAG;
}

static MachineSchedRegistry
GenericSchedRegistry("converge", "Standard converging scheduler.",
                     createGenericSchedLive);
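
// Out-of-tree or experimental strategies can hook in the same way; a minimal
// sketch (the "my-sched" name and factory are hypothetical, mirroring the
// ILPMaxRegistry and ShufflerRegistry registrations later in this file):
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, make_unique<GenericScheduler>(C));
//   }
//   static MachineSchedRegistry
//   MySchedRegistry("my-sched", "Example custom scheduler.", createMySched);
//
// The registered name then becomes selectable on the command line via
// -misched=my-sched.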

//===----------------------------------------------------------------------===//
// PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//

void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
  DAG = Dag;
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  BotRoots.clear();

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
  // or are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  if (!Top.HazardRec) {
    Top.HazardRec =
        DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
            Itin, DAG);
  }
}


void PostGenericScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();

  // Some roots may not feed into ExitSU. Check all of them in case.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = BotRoots.begin(), E = BotRoots.end(); I != E; ++I) {
    if ((*I)->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = (*I)->getDepth();
  }
  DEBUG(dbgs() << "Critical Path: (PGS-RR) " << Rem.CriticalPath << '\n');
  if (DumpCriticalPathLength) {
    errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n";
  }
}

/// Apply a set of heuristics to a new candidate for PostRA scheduling.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
                                        SchedCandidate &TryCand) {

  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return;
  }

  // Prioritize instructions that read unbuffered resources by stall cycles.
  if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
              Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
    return;

  // Avoid critical resource consumption and balance the schedule.
  if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
              TryCand, Cand, ResourceReduce))
    return;
  if (tryGreater(TryCand.ResDelta.DemandedResources,
                 Cand.ResDelta.DemandedResources,
                 TryCand, Cand, ResourceDemand))
    return;

  // Avoid serializing long latency dependence chains.
  if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
    return;
  }

  // Fall through to original instruction order.
  if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
    TryCand.Reason = NodeOrder;
}

void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
  ReadyQueue &Q = Top.Available;

  DEBUG(Q.dump());

  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
    SchedCandidate TryCand(Cand.Policy);
    TryCand.SU = *I;
    TryCand.initResourceDelta(DAG, SchedModel);
    tryCandidate(Cand, TryCand);
    if (TryCand.Reason != NoCand) {
      Cand.setBest(TryCand);
      DEBUG(traceCandidate(Cand));
    }
  }
}

/// Pick the next node to schedule.
SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    SU = Top.pickOnlyChoice();
    if (!SU) {
      CandPolicy NoPolicy;
      SchedCandidate TopCand(NoPolicy);
      // Set the top-down policy based on the state of the current top zone and
      // the instructions outside the zone, including the bottom zone.
      setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
      pickNodeFromQueue(TopCand);
      assert(TopCand.Reason != NoCand && "failed to find a candidate");
      tracePick(TopCand, true);
      SU = TopCand.SU;
    }
  } while (SU->isScheduled);

  IsTopNode = true;
  Top.removeReady(SU);

  DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
  return SU;
}

/// Called after ScheduleDAGMI has scheduled an instruction and updated
/// scheduled/remaining flags in the DAG nodes.
void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
  Top.bumpNode(SU);
}

/// Create a generic scheduler with no vreg liveness or DAG mutation passes.
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C) {
  return new ScheduleDAGMI(C, make_unique<PostGenericScheduler>(C),
                           /*IsPostRA=*/true);
}

//===----------------------------------------------------------------------===//
// ILP Scheduler. Currently for experimental analysis of heuristics.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Order nodes by the ILP metric.
struct ILPOrder {
  const SchedDFSResult *DFSResult;
  const BitVector *ScheduledTrees;
  bool MaximizeILP;

  ILPOrder(bool MaxILP)
    : DFSResult(nullptr), ScheduledTrees(nullptr), MaximizeILP(MaxILP) {}

  /// \brief Apply a less-than relation on node priority.
  ///
  /// (Return true if A comes after B in the Q.)
  bool operator()(const SUnit *A, const SUnit *B) const {
    unsigned SchedTreeA = DFSResult->getSubtreeID(A);
    unsigned SchedTreeB = DFSResult->getSubtreeID(B);
    if (SchedTreeA != SchedTreeB) {
      // Unscheduled trees have lower priority.
      if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
        return ScheduledTrees->test(SchedTreeB);

      // Trees with shallower connections have lower priority.
      if (DFSResult->getSubtreeLevel(SchedTreeA)
          != DFSResult->getSubtreeLevel(SchedTreeB)) {
        return DFSResult->getSubtreeLevel(SchedTreeA)
          < DFSResult->getSubtreeLevel(SchedTreeB);
      }
    }
    if (MaximizeILP)
      return DFSResult->getILP(A) < DFSResult->getILP(B);
    else
      return DFSResult->getILP(A) > DFSResult->getILP(B);
  }
};
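
// Priority sketch for the comparator above (all values hypothetical): with
// MaximizeILP set, a node in an already-started subtree beats one in an
// untouched subtree; among distinct unstarted subtrees, the more deeply
// connected one wins; within a subtree, the node with the larger ILP value
// pops first from the heap.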

/// \brief Schedule based on the ILP metric.
class ILPScheduler : public MachineSchedStrategy {
  ScheduleDAGMILive *DAG;
  ILPOrder Cmp;

  std::vector<SUnit*> ReadyQ;
public:
  ILPScheduler(bool MaximizeILP): DAG(nullptr), Cmp(MaximizeILP) {}

  void initialize(ScheduleDAGMI *dag) override {
    assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
    DAG = static_cast<ScheduleDAGMILive*>(dag);
    DAG->computeDFSResult();
    Cmp.DFSResult = DAG->getDFSResult();
    Cmp.ScheduledTrees = &DAG->getScheduledTrees();
    ReadyQ.clear();
  }

  void registerRoots() override {
    // Restore the heap in ReadyQ with the updated DFS results.
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  /// Callback to select the highest priority node from the ready Q.
  SUnit *pickNode(bool &IsTopNode) override {
    if (ReadyQ.empty()) return nullptr;
    std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
    SUnit *SU = ReadyQ.back();
    ReadyQ.pop_back();
    IsTopNode = false;
    DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") "
          << " ILP: " << DAG->getDFSResult()->getILP(SU)
          << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
          << DAG->getDFSResult()->getSubtreeLevel(
            DAG->getDFSResult()->getSubtreeID(SU)) << '\n'
          << "Scheduling " << *SU->getInstr());
    return SU;
  }

  /// \brief Scheduler callback to notify that a new subtree is scheduled.
  void scheduleTree(unsigned SubtreeID) override {
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Callback after a node is scheduled. Mark a newly scheduled tree, notify
  /// DFSResults, and resort the priority Q.
  void schedNode(SUnit *SU, bool IsTopNode) override {
    assert(!IsTopNode && "SchedDFSResult needs bottom-up");
  }

  void releaseTopNode(SUnit *) override { /*only called for top roots*/ }

  void releaseBottomNode(SUnit *SU) override {
    ReadyQ.push_back(SU);
    std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }
};
} // namespace

static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(true));
}
static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(false));
}
static MachineSchedRegistry ILPMaxRegistry(
  "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
static MachineSchedRegistry ILPMinRegistry(
  "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);

//===----------------------------------------------------------------------===//
// Machine Instruction Shuffler for Correctness Testing
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace {
/// Apply a less-than relation on the node order, which corresponds to the
/// instruction order prior to scheduling. IsReverse implements greater-than.
template<bool IsReverse>
struct SUnitOrder {
  bool operator()(SUnit *A, SUnit *B) const {
    if (IsReverse)
      return A->NodeNum > B->NodeNum;
    else
      return A->NodeNum < B->NodeNum;
  }
};

/// Reorder instructions as much as possible.
class InstructionShuffler : public MachineSchedStrategy {
  bool IsAlternating;
  bool IsTopDown;

  // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
  // gives nodes with a higher number higher priority, causing the latest
  // instructions to be scheduled first.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
    TopQ;
  // When scheduling bottom-up, use greater-than as the queue priority.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
    BottomQ;
public:
  InstructionShuffler(bool alternate, bool topdown)
    : IsAlternating(alternate), IsTopDown(topdown) {}

  void initialize(ScheduleDAGMI*) override {
    TopQ.clear();
    BottomQ.clear();
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  SUnit *pickNode(bool &IsTopNode) override {
    SUnit *SU;
    if (IsTopDown) {
      do {
        if (TopQ.empty()) return nullptr;
        SU = TopQ.top();
        TopQ.pop();
      } while (SU->isScheduled);
      IsTopNode = true;
    }
    else {
      do {
        if (BottomQ.empty()) return nullptr;
        SU = BottomQ.top();
        BottomQ.pop();
      } while (SU->isScheduled);
      IsTopNode = false;
    }
    if (IsAlternating)
      IsTopDown = !IsTopDown;
    return SU;
  }

  void schedNode(SUnit *SU, bool IsTopNode) override {}

  void releaseTopNode(SUnit *SU) override {
    TopQ.push(SU);
  }
  void releaseBottomNode(SUnit *SU) override {
    BottomQ.push(SU);
  }
};
} // namespace

static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
  bool Alternate = !ForceTopDown && !ForceBottomUp;
  bool TopDown = !ForceBottomUp;
  assert((TopDown || !ForceTopDown) &&
         "-misched-topdown incompatible with -misched-bottomup");
  return new ScheduleDAGMILive(C, make_unique<InstructionShuffler>(Alternate,
                                                                   TopDown));
}
static MachineSchedRegistry ShufflerRegistry(
  "shuffle", "Shuffle machine instructions alternating directions",
  createInstructionShuffler);
#endif // !NDEBUG

//===----------------------------------------------------------------------===//
// GraphWriter support for ScheduleDAGMILive.
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace llvm {

template<> struct GraphTraits<
  ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};

template<>
struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {

  DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getGraphName(const ScheduleDAG *G) {
    return G->MF.getName();
  }

  static bool renderGraphFromBottomUp() {
    return true;
  }

  static bool isNodeHidden(const SUnit *Node) {
    if (ViewMISchedCutoff == 0)
      return false;
    return (Node->Preds.size() > ViewMISchedCutoff
         || Node->Succs.size() > ViewMISchedCutoff);
  }

  /// If you want to override the dot attributes printed for a particular
  /// edge, override this method.
  static std::string getEdgeAttributes(const SUnit *Node,
                                       SUnitIterator EI,
                                       const ScheduleDAG *Graph) {
    if (EI.isArtificialDep())
      return "color=cyan,style=dashed";
    if (EI.isCtrlDep())
      return "color=blue,style=dashed";
    return "";
  }

  static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
    std::string Str;
    raw_string_ostream SS(Str);
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    SS << "SU:" << SU->NodeNum;
    if (DFS)
      SS << " I:" << DFS->getNumInstrs(SU);
    return SS.str();
  }
  static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
    return G->getGraphNodeLabel(SU);
  }

  static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
    std::string Str("shape=Mrecord");
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    if (DFS) {
      Str += ",style=filled,fillcolor=\"#";
      Str += DOT::getColorString(DFS->getSubtreeID(N));
      Str += '"';
    }
    return Str;
  }
};
} // namespace llvm
#endif // NDEBUG

/// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
/// rendered using 'dot'.
///
void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
#ifndef NDEBUG
  ViewGraph(this, Name, false, Title);
#else
  errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif  // NDEBUG
}

/// Out-of-line implementation with no arguments is handy for gdb.
void ScheduleDAGMI::viewGraph() {
  viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
}
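// For instance, from a gdb session stopped inside the scheduler with a live
// ScheduleDAGMI pointer named DAG, one might render the current DAG with:
//
//   (gdb) call DAG->viewGraph()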