//===- MachineScheduler.cpp - Machine Instruction Scheduler --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include <queue>

using namespace llvm;

#define DEBUG_TYPE "misched"

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
                       cl::desc("Print critical path length to stdout"));
}
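
// Note: ForceTopDown, ForceBottomUp, and DumpCriticalPathLength are declared
// in the llvm namespace (not file-static) so that scheduling strategies
// defined outside this file can honor these flags as well.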

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
  cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
  cl::desc("Only schedule this MBB#"));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableLoadCluster("misched-cluster", cl::Hidden,
  cl::desc("Enable load clustering."), cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}
void ScheduleDAGMutation::anchor() {}

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
    MF(nullptr), MLI(nullptr), MDT(nullptr), PassConfig(nullptr), AA(nullptr),
    LIS(nullptr) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// Base class for machine schedulers, which can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler);
};

/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};

/// PostMachineScheduler runs shortly before code emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createPostMachineScheduler();
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "machine-scheduler",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "machine-scheduler",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

char PostMachineScheduler::ID = 0;

char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;

INITIALIZE_PASS(PostMachineScheduler, "postmisched",
                "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler()
: MachineSchedulerBase(ID) {
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);
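
// A target or plugin can make its own scheduler selectable via
// -misched=<name> by registering a factory the same way. A minimal sketch,
// assuming a MachineSchedStrategy subclass; the names here are hypothetical:
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, make_unique<MyStrategy>(C));
//   }
//   static MachineSchedRegistry
//   MySchedRegistry("my-sched", "Run MyStrategy.", createMySched);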

static cl::opt<bool> EnableMachineSched(
    "enable-misched",
    cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
    cl::Hidden);

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C);
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C);

/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return const_cast<MachineInstr*>(
    &*priorNonDebug(MachineBasicBlock::const_iterator(I), Beg));
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  // Cast the return value to nonconst MachineInstr, then cast to an
  // instr_iterator, which does not check for null, finally return a
  // bundle_iterator.
  return MachineBasicBlock::instr_iterator(
    const_cast<MachineInstr*>(
      &*nextIfDebug(MachineBasicBlock::const_iterator(I), End)));
}

/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedLive(this);
}

/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (EnableMachineSched.getNumOccurrences()) {
    if (!EnableMachineSched)
      return false;
  } else if (!mf.getSubtarget().enableMachineScheduler())
    return false;

  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AliasAnalysis>();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler);

  DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipOptnoneFunction(*mf.getFunction()))
    return false;

  if (!mf.getSubtarget().enablePostMachineScheduler()) {
    DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  PassConfig = &getAnalysis<TargetPassConfig>();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}

/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need the isCall check to
/// enforce the boundary, but there would be no benefit to postRA scheduling
/// across calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                            MachineBasicBlock *MBB,
                            MachineFunction *MF,
                            const TargetInstrInfo *TII,
                            bool IsPostRA) {
  return MI->isCall() || TII->isSchedulingBoundary(MI, MBB, *MF);
}

/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler) {
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  bool IsPostRA = Scheduler.isPostRA();

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(MBB);

#ifndef NDEBUG
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    //
    // MBB::size() uses instr_iterator to count. Here we need a bundle to count
    // as a single instruction.
    unsigned RemainingInstrs = std::distance(MBB->begin(), MBB->end());
    for (MachineBasicBlock::iterator RegionEnd = MBB->end();
         RegionEnd != MBB->begin(); RegionEnd = Scheduler.begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end() ||
          isSchedBoundary(std::prev(RegionEnd), MBB, MF, TII, IsPostRA)) {
        --RegionEnd;
        // Count the boundary instruction.
        --RemainingInstrs;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      unsigned NumRegionInstrs = 0;
      MachineBasicBlock::iterator I = RegionEnd;
      for (; I != MBB->begin(); --I, --RemainingInstrs) {
        if (isSchedBoundary(std::prev(I), MBB, MF, TII, IsPostRA))
          break;
        if (!I->isDebugValue())
          ++NumRegionInstrs;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** " << ((Scheduler.isPostRA()) ? "PostRA " : "")
            << "MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n  From: " << *I << "    To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " RegionInstrs: " << NumRegionInstrs
            << " Remaining: " << RemainingInstrs << "\n");
      if (DumpCriticalPathLength) {
        errs() << MF->getName();
        errs() << ":BB# " << MBB->getNumber();
        errs() << " " << MBB->getName() << " \n";
      }

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler.begin();
    }
    assert(RemainingInstrs == 0 && "Instruction count mismatch!");
    Scheduler.finishBlock();
    if (Scheduler.isPostRA()) {
      // FIXME: Ideally, no further passes should rely on kill flags. However,
      // thumb2 size reduction is currently an exception.
      Scheduler.fixupKills(MBB);
    }
  }
  Scheduler.finalizeSchedule();
}

void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

LLVM_DUMP_METHOD
void ReadyQueue::dump() {
  dbgs() << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
// virtual registers.
//===----------------------------------------------------------------------===//

// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() {
}

bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}
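
// addEdge() is the hook DAG mutations use to add extra edges after the DAG is
// built. For example, LoadClusterMutation below calls
//   DAG->addEdge(SUb, SDep(SUa, SDep::Cluster));
// and relies on the reachability check above to reject cycle-creating edges.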

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
    SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();

  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
    PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();

  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}

/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
  MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals
  if (LIS)
    LIS->handleMove(MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}
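
// checkSchedLimit() exists purely for debugging: in asserts builds,
// -misched-cutoff=N stops scheduling after N instructions, which helps
// bisect a misscheduled region down to an individual scheduling decision.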

/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  // Build the DAG.
  buildSchedGraph(AA);

  Topo.InitDAGTopologicalSorting();
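  // This topological ordering backs the Topo.IsReachable() cycle checks in
  // canAddEdge() and addEdge() when the postprocessDAG() mutations below add
  // cluster or artificial edges.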

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    }
    else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
    }
    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}

void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU->biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft)
      TopRoots.push_back(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft)
      BotRoots.push_back(SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = nullptr;
  NextClusterPred = nullptr;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
    SchedImpl->releaseTopNode(*I);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == std::prev(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
}

// Setup the register pressure trackers for the top-scheduled and
// bottom-scheduled regions.
void ScheduleDAGMILive::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    DEBUG(dbgs() << "Live Thru: ";
          dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<unsigned, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      DEBUG(dbgs() << TRI->getRegPressureSetName(i)
            << " Limit " << Limit
            << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].getPSet()) << " ";
        dbgs() << "\n");
}

void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
                        const std::vector<unsigned> &NewMaxPressure) {
  const PressureDiff &PDiff = getPressureDiff(SU);
  unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
  for (PressureDiff::const_iterator I = PDiff.begin(), E = PDiff.end();
       I != E; ++I) {
    if (!I->isValid())
      break;
    unsigned ID = I->getPSet();
    while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
      ++CritIdx;
    if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
      if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
          && NewMaxPressure[ID] <= INT16_MAX)
        RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
    }
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
    if (NewMaxPressure[ID] >= Limit - 2) {
      DEBUG(dbgs() << "  " << TRI->getRegPressureSetName(ID) << ": "
            << NewMaxPressure[ID] << " > " << Limit << "(+ "
            << BotRPTracker.getLiveThru()[ID] << " livethru)\n");
    }
  }
}

/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(ArrayRef<unsigned> LiveUses) {
  for (unsigned LUIdx = 0, LUEnd = LiveUses.size(); LUIdx != LUEnd; ++LUIdx) {
    // FIXME: Currently assuming single-use physregs.
    unsigned Reg = LiveUses[LUIdx];
    DEBUG(dbgs() << "  LiveReg: " << PrintVRegOrUnit(Reg, TRI) << "\n");
    if (!TRI->isVirtualRegister(Reg))
      continue;

    // This may be called before CurrentBottom has been initialized. However,
    // BotRPTracker must have a valid position. We want the value live into the
    // instruction or live out of the block, so ask for the previous
    // instruction's live-out.
    const LiveInterval &LI = LIS->getInterval(Reg);
    VNInfo *VNI;
    MachineBasicBlock::const_iterator I =
      nextIfDebug(BotRPTracker.getPos(), BB->end());
    if (I == BB->end())
      VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    else {
      LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(I));
      VNI = LRQ.valueIn();
    }
    // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
    assert(VNI && "No live value at use.");
    for (VReg2UseMap::iterator
           UI = VRegUses.find(Reg); UI != VRegUses.end(); ++UI) {
      SUnit *SU = UI->SU;
      DEBUG(dbgs() << "  UpdateRegP: SU(" << SU->NodeNum << ") "
            << *SU->getInstr());
      // If this use comes before the reaching def, it cannot be a last use, so
      // decrease its pressure change.
      if (!SU->isScheduled && SU != &ExitSU) {
        LiveQueryResult LRQ
          = LI.Query(LIS->getInstructionIndex(SU->getInstr()));
        if (LRQ.valueIn() == VNI)
          getPressureDiff(SU).addPressureChange(Reg, true, &MRI);
      }
    }
  }
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in order
/// to update any specialized state.
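///
/// The MachineSchedStrategy hooks exercised by this driver are the ones
/// invoked below and in initQueues(): initPolicy(), initialize(),
/// registerRoots(), releaseTopNode()/releaseBottomNode(), pickNode(),
/// schedNode(), and scheduleTree(). A strategy implementing those hooks can
/// be plugged in without touching this driver; GenericScheduler is the
/// in-tree example.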
void ScheduleDAGMILive::schedule() {
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  if (ShouldTrackPressure) {
    assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
    TopRPTracker.setPos(CurrentTop);
  }

  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    if (DFSResult) {
      unsigned SubtreeID = DFSResult->getSubtreeID(SU);
      if (!ScheduledTrees.test(SubtreeID)) {
        ScheduledTrees.set(SubtreeID);
        DFSResult->scheduleTree(SubtreeID);
        SchedImpl->scheduleTree(SubtreeID);
      }
    }

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Build the DAG and setup three register pressure trackers.
void ScheduleDAGMILive::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

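/// Lazily construct, then (re)compute, the SchedDFSResult used by
/// subtree-based scheduling heuristics. MinSubtreeSize (defined above) is
/// the subtree size limit passed to SchedDFSResult, which keeps it from
/// producing trivially small subtrees.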
void ScheduleDAGMILive::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops that
/// span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
///
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
  // This only applies to single-block loops.
  if (!BB->isSuccessor(BB))
    return 0;

  unsigned MaxCyclicLatency = 0;
  // Visit each live out vreg def to find def/use pairs that cross iterations.
  ArrayRef<unsigned> LiveOuts = RPTracker.getPressure().LiveOutRegs;
  for (ArrayRef<unsigned>::iterator RI = LiveOuts.begin(), RE = LiveOuts.end();
       RI != RE; ++RI) {
    unsigned Reg = *RI;
    if (!TRI->isVirtualRegister(Reg))
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    if (!DefVNI)
      continue;

    MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
    const SUnit *DefSU = getSUnit(DefMI);
    if (!DefSU)
      continue;

    unsigned LiveOutHeight = DefSU->getHeight();
    unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
    // Visit all local users of the vreg def.
    for (VReg2UseMap::iterator
           UI = VRegUses.find(Reg); UI != VRegUses.end(); ++UI) {
      if (UI->SU == &ExitSU)
        continue;

      // Only consider uses of the phi.
      LiveQueryResult LRQ =
        LI.Query(LIS->getInstructionIndex(UI->SU->getInstr()));
      if (!LRQ.valueIn()->isPHIDef())
        continue;

      // Assume that a path spanning two iterations is a cycle, which could
      // overestimate in strange cases. This allows cyclic latency to be
      // estimated as the minimum slack of the vreg's depth or height.
      unsigned CyclicLatency = 0;
      if (LiveOutDepth > UI->SU->getDepth())
        CyclicLatency = LiveOutDepth - UI->SU->getDepth();

      unsigned LiveInHeight = UI->SU->getHeight() + DefSU->Latency;
      if (LiveInHeight > LiveOutHeight) {
        if (LiveInHeight - LiveOutHeight < CyclicLatency)
          CyclicLatency = LiveInHeight - LiveOutHeight;
      }
      else
        CyclicLatency = 0;

      DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
            << UI->SU->NodeNum << ") = " << CyclicLatency << "c\n");
      if (CyclicLatency > MaxCyclicLatency)
        MaxCyclicLatency = CyclicLatency;
    }
  }
  DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
  return MaxCyclicLatency;
}

/// Move an instruction and update register pressure.
void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    if (ShouldTrackPressure) {
      // Update top scheduled pressure.
      TopRPTracker.advance();
      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
      updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
    }
  }
  else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
    }
    if (ShouldTrackPressure) {
      // Update bottom scheduled pressure.
      SmallVector<unsigned, 8> LiveUses;
      BotRPTracker.recede(&LiveUses);
      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
      updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
      updatePressureDiffs(LiveUses);
    }
  }
}

//===----------------------------------------------------------------------===//
// LoadClusterMutation - DAG post-processing to cluster loads.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between neighboring
/// loads.
class LoadClusterMutation : public ScheduleDAGMutation {
  struct LoadInfo {
    SUnit *SU;
    unsigned BaseReg;
    unsigned Offset;
    LoadInfo(SUnit *su, unsigned reg, unsigned ofs)
      : SU(su), BaseReg(reg), Offset(ofs) {}

    bool operator<(const LoadInfo &RHS) const {
      return std::tie(BaseReg, Offset) < std::tie(RHS.BaseReg, RHS.Offset);
    }
  };

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
public:
  LoadClusterMutation(const TargetInstrInfo *tii,
                      const TargetRegisterInfo *tri)
    : TII(tii), TRI(tri) {}

  void apply(ScheduleDAGMI *DAG) override;
protected:
  void clusterNeighboringLoads(ArrayRef<SUnit*> Loads, ScheduleDAGMI *DAG);
};
} // anonymous

void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
                                                  ScheduleDAGMI *DAG) {
  SmallVector<LoadClusterMutation::LoadInfo, 32> LoadRecords;
  for (unsigned Idx = 0, End = Loads.size(); Idx != End; ++Idx) {
    SUnit *SU = Loads[Idx];
    unsigned BaseReg;
    unsigned Offset;
    if (TII->getLdStBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
      LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
  }
  if (LoadRecords.size() < 2)
    return;
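  // Sorting by (BaseReg, Offset) via LoadInfo::operator< brings loads off the
  // same base register together in ascending offset order, so candidate
  // clusters are simply adjacent entries in LoadRecords.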
  std::sort(LoadRecords.begin(), LoadRecords.end());
  unsigned ClusterLength = 1;
  for (unsigned Idx = 0, End = LoadRecords.size(); Idx < (End - 1); ++Idx) {
    if (LoadRecords[Idx].BaseReg != LoadRecords[Idx+1].BaseReg) {
      ClusterLength = 1;
      continue;
    }

    SUnit *SUa = LoadRecords[Idx].SU;
    SUnit *SUb = LoadRecords[Idx+1].SU;
    if (TII->shouldClusterLoads(SUa->getInstr(), SUb->getInstr(), ClusterLength)
        && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {

      DEBUG(dbgs() << "Cluster loads SU(" << SUa->NodeNum << ") - SU("
            << SUb->NodeNum << ")\n");
      // Copy successor edges from SUa to SUb. Interleaving computation
      // dependent on SUa can prevent load combining due to register reuse.
      // Predecessor edges do not need to be copied from SUb to SUa since nearby
      // loads should have effectively the same inputs.
      for (SUnit::const_succ_iterator
             SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
        if (SI->getSUnit() == SUb)
          continue;
        DEBUG(dbgs() << "  Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
        DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
      }
      ++ClusterLength;
    }
    else
      ClusterLength = 1;
  }
}

/// \brief Callback from DAG postProcessing to create cluster edges for loads.
void LoadClusterMutation::apply(ScheduleDAGMI *DAG) {
  // Map DAG NodeNum to store chain ID.
  DenseMap<unsigned, unsigned> StoreChainIDs;
  // Map each store chain to a set of dependent loads.
  SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->mayLoad())
      continue;
    unsigned ChainPredID = DAG->SUnits.size();
    for (SUnit::const_pred_iterator
           PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
      if (PI->isCtrl()) {
        ChainPredID = PI->getSUnit()->NodeNum;
        break;
      }
    }
    // Check if this chain-like pred has been seen before. ChainPredID stays
    // at the sentinel DAG->SUnits.size() (an otherwise unused ID) for loads
    // at the top of the schedule.
    unsigned NumChains = StoreChainDependents.size();
    std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
      StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
    if (Result.second)
      StoreChainDependents.resize(NumChains + 1);
    StoreChainDependents[Result.first->second].push_back(SU);
  }
  // Iterate over the store chains.
  for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
    clusterNeighboringLoads(StoreChainDependents[Idx], DAG);
}
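
// Illustrative example (hypothetical SUnits): loads are partitioned by their
// first control-flow (chain) predecessor before clustering, e.g.
//   SU(2): load a  (ctrl pred: SU(0), a store)
//   SU(3): load b  (ctrl pred: SU(0), a store)
//   SU(5): load c  (ctrl pred: SU(4), a store)
// yields the sets {SU(2), SU(3)} and {SU(5)}; only loads within the same set
// are considered neighbors by clusterNeighboringLoads() above.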

//===----------------------------------------------------------------------===//
// MacroFusion - DAG post-processing to encourage fusion of macro ops.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between instructions
/// that may be fused by the processor into a single operation.
class MacroFusion : public ScheduleDAGMutation {
  const TargetInstrInfo *TII;
public:
  MacroFusion(const TargetInstrInfo *tii): TII(tii) {}

  void apply(ScheduleDAGMI *DAG) override;
};
} // anonymous

/// \brief Callback from DAG postProcessing to create cluster edges to encourage
/// fused operations.
void MacroFusion::apply(ScheduleDAGMI *DAG) {
  // For now, assume targets can only fuse with the branch.
  MachineInstr *Branch = DAG->ExitSU.getInstr();
  if (!Branch)
    return;

  for (unsigned Idx = DAG->SUnits.size(); Idx > 0;) {
    SUnit *SU = &DAG->SUnits[--Idx];
    if (!TII->shouldScheduleAdjacent(SU->getInstr(), Branch))
      continue;

    // Create a single weak edge from SU to ExitSU. The only effect is to cause
    // bottom-up scheduling to heavily prioritize the clustered SU.  There is no
    // need to copy predecessor edges from ExitSU to SU, since top-down
    // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
    // of SU, we could create an artificial edge from the deepest root, but it
    // hasn't been needed yet.
    bool Success = DAG->addEdge(&DAG->ExitSU, SDep(SU, SDep::Cluster));
    (void)Success;
    assert(Success && "No DAG nodes should be reachable from ExitSU");

    DEBUG(dbgs() << "Macro Fuse SU(" << SU->NodeNum << ")\n");
    break;
  }
}
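
// Illustrative example (hypothetical instructions): on a target whose
// shouldScheduleAdjacent() fuses compare and branch, a region ending in
//   SU(7):  CMP %vreg0, 0
//   ExitSU: Bcc <bb>
// gains a weak Cluster edge SU(7)->ExitSU, so bottom-up scheduling picks
// SU(7) immediately after the branch and the pair stays adjacent for the
// processor's macro-op fusion.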

//===----------------------------------------------------------------------===//
// CopyConstrain - DAG post-processing to encourage copy elimination.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create weak edges from all uses of a copy to
/// the one use that defines the copy's source vreg, most likely an induction
/// variable increment.
class CopyConstrain : public ScheduleDAGMutation {
  // Transient state.
  SlotIndex RegionBeginIdx;
  // RegionEndIdx is the slot index of the last non-debug instruction in the
  // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
  SlotIndex RegionEndIdx;
public:
  CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}

  void apply(ScheduleDAGMI *DAG) override;

protected:
  void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
};
} // anonymous

/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
/// I0:     = dst
/// I1: src = ...
/// I2:     = dst
/// I3: dst = src (copy)
/// (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
/// I0: dst = src (copy)
/// I1:     = dst
/// I2: src = ...
/// I3:     = dst
/// (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
  LiveIntervals *LIS = DAG->getLIS();
  MachineInstr *Copy = CopySU->getInstr();

  // Check for pure vreg copies.
  unsigned SrcReg = Copy->getOperand(1).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
    return;

  unsigned DstReg = Copy->getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(DstReg))
    return;

  // Check if either the dest or source is local. If it's live across a back
  // edge, it's not local. Note that if both vregs are live across the back
  // edge, we cannot successfully constrain the copy without cyclic scheduling.
  // If both the copy's source and dest are local live intervals, then we
  // should treat the dest as the global for the purpose of adding
  // constraints. This adds edges from the source's other uses to the copy.
  unsigned LocalReg = SrcReg;
  unsigned GlobalReg = DstReg;
  LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
  if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
    LocalReg = DstReg;
    GlobalReg = SrcReg;
    LocalLI = &LIS->getInterval(LocalReg);
    if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
      return;
  }
  LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);

  // Find the global segment after the start of the local LI.
  LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
  // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
  // local live range. We could create edges from other global uses to the local
  // start, but the coalescer should have already eliminated these cases, so
  // don't bother dealing with it.
  if (GlobalSegment == GlobalLI->end())
    return;

  // If GlobalSegment is killed at the LocalLI->start, the call to find()
  // returned the next global segment. But if GlobalSegment overlaps with
  // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
  // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
  if (GlobalSegment->contains(LocalLI->beginIndex()))
    ++GlobalSegment;

  if (GlobalSegment == GlobalLI->end())
    return;

  // Check if GlobalLI contains a hole in the vicinity of LocalLI.
  if (GlobalSegment != GlobalLI->begin()) {
    // Two address defs have no hole.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
                               GlobalSegment->start)) {
      return;
    }
    // If the prior global segment may be defined by the same two-address
    // instruction that also defines LocalLI, then we cannot make a hole here.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
                               LocalLI->beginIndex())) {
      return;
    }
    // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
    // it would be a disconnected component in the live range.
    assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
           "Disconnected LRG within the scheduling region.");
  }
  MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
  if (!GlobalDef)
    return;

  SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
  if (!GlobalSU)
    return;

  // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
  // constraining the uses of the last local def to precede GlobalDef.
  SmallVector<SUnit*,8> LocalUses;
  const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
  MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
  SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
  for (SUnit::const_succ_iterator
         I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
       I != E; ++I) {
    if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
      continue;
    if (I->getSUnit() == GlobalSU)
      continue;
    if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
      return;
    LocalUses.push_back(I->getSUnit());
  }
  // Open the top of the GlobalLI hole by constraining any earlier global uses
  // to precede the start of LocalLI.
  SmallVector<SUnit*,8> GlobalUses;
  MachineInstr *FirstLocalDef =
    LIS->getInstructionFromIndex(LocalLI->beginIndex());
  SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
  for (SUnit::const_pred_iterator
         I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
    if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
      continue;
    if (I->getSUnit() == FirstLocalSU)
      continue;
    if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
      return;
    GlobalUses.push_back(I->getSUnit());
  }
  DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
  // Add the weak edges.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Local use SU(" << (*I)->NodeNum << ") -> SU("
          << GlobalSU->NodeNum << ")\n");
    DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
  }
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Global use SU(" << (*I)->NodeNum << ") -> SU("
          << FirstLocalSU->NodeNum << ")\n");
    DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
  }
}
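
// Recap (an informal reading, not from the original source): in example (1)
// of the comment above, LocalUses are the data users of src and GlobalSU is
// the def that starts dst's next live segment. Because the edges added here
// are SDep::Weak, they only bias the heuristics; if honoring them is
// infeasible the region still schedules, just without shrinking the overlap
// between the copy's source and dest live ranges.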

/// \brief Callback from DAG postProcessing to create weak edges to encourage
/// copy elimination.
void CopyConstrain::apply(ScheduleDAGMI *DAG) {
  assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");

  MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
  if (FirstPos == DAG->end())
    return;
  RegionBeginIdx = DAG->getLIS()->getInstructionIndex(&*FirstPos);
  RegionEndIdx = DAG->getLIS()->getInstructionIndex(
    &*priorNonDebug(DAG->end(), DAG->begin()));

  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->isCopy())
      continue;

    constrainLocalCopy(SU, static_cast<ScheduleDAGMILive*>(DAG));
  }
}

//===----------------------------------------------------------------------===//
// MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
// and possibly other custom schedulers.
//===----------------------------------------------------------------------===//

static const unsigned InvalidCycle = ~0U;

SchedBoundary::~SchedBoundary() { delete HazardRec; }

void SchedBoundary::reset() {
  // A new HazardRec is created for each DAG and owned by SchedBoundary.
  // Destroying and reconstructing it is very expensive though. So keep
  // invalid, placeholder HazardRecs.
  if (HazardRec && HazardRec->isEnabled()) {
    delete HazardRec;
    HazardRec = nullptr;
  }
  Available.clear();
  Pending.clear();
  CheckPending = false;
  NextSUs.clear();
  CurrCycle = 0;
  CurrMOps = 0;
  MinReadyCycle = UINT_MAX;
  ExpectedLatency = 0;
  DependentLatency = 0;
  RetiredMOps = 0;
  MaxExecutedResCount = 0;
  ZoneCritResIdx = 0;
  IsResourceLimited = false;
  ReservedCycles.clear();
#ifndef NDEBUG
  // Track the maximum number of stall cycles that could arise either from the
  // latency of a DAG edge or the number of cycles that a processor resource is
  // reserved (SchedBoundary::ReservedCycles).
  MaxObservedStall = 0;
#endif
  // Reserve a zero-count for invalid CritResIdx.
  ExecutedResCounts.resize(1);
  assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
}

void SchedRemainder::
init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
  reset();
  if (!SchedModel->hasInstrSchedModel())
    return;
  RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
  for (std::vector<SUnit>::iterator
         I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
    RemIssueCount += SchedModel->getNumMicroOps(I->getInstr(), SC)
      * SchedModel->getMicroOpFactor();
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned PIdx = PI->ProcResourceIdx;
      unsigned Factor = SchedModel->getResourceFactor(PIdx);
      RemainingCounts[PIdx] += (Factor * PI->Cycles);
    }
  }
}

void SchedBoundary::
init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
  reset();
  DAG = dag;
  SchedModel = smodel;
  Rem = rem;
  if (SchedModel->hasInstrSchedModel()) {
    ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds());
    ReservedCycles.resize(SchedModel->getNumProcResourceKinds(), InvalidCycle);
  }
}

/// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
/// these "soft stalls" differently than the hard stall cycles based on CPU
/// resources and computed by checkHazard(). A fully in-order model
/// (MicroOpBufferSize==0) will not make use of this since instructions are not
/// available for scheduling until they are ready. However, a weaker in-order
/// model may use this for heuristics. For example, if a processor has in-order
/// behavior when reading certain resources, this may come into play.
unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
  if (!SU->isUnbuffered)
    return 0;

  unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
  if (ReadyCycle > CurrCycle)
    return ReadyCycle - CurrCycle;
  return 0;
}

/// Compute the next cycle at which the given processor resource can be
/// scheduled.
unsigned SchedBoundary::
getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
  unsigned NextUnreserved = ReservedCycles[PIdx];
  // If this resource has never been used, always return cycle zero.
  if (NextUnreserved == InvalidCycle)
    return 0;
  // For bottom-up scheduling add the cycles needed for the current operation.
  if (!isTop())
    NextUnreserved += Cycles;
  return NextUnreserved;
}
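
// Worked example (illustrative numbers): if ReservedCycles[PIdx] == 10 and
// the current operation needs Cycles == 2, a top-down zone may use the
// resource at cycle 10, while a bottom-up zone returns 12 because the new
// instruction must also finish before the previously recorded user. An
// InvalidCycle entry always yields 0, i.e. "never used, no constraint".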

/// Does this SU have a hazard within the current instruction group.
///
/// The scheduler supports two modes of hazard recognition. The first is the
/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
/// supports highly complicated in-order reservation tables
/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
///
/// The second is a streamlined mechanism that checks for hazards based on
/// simple counters that the scheduler itself maintains. It explicitly checks
/// for instruction dispatch limitations, including the number of micro-ops that
/// can dispatch per cycle.
///
/// TODO: Also check whether the SU must start a new group.
bool SchedBoundary::checkHazard(SUnit *SU) {
  if (HazardRec->isEnabled()
      && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
    return true;
  }
  unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
  if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
    DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
          << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
    return true;
  }
  if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned NRCycle = getNextResourceCycle(PI->ProcResourceIdx, PI->Cycles);
      if (NRCycle > CurrCycle) {
#ifndef NDEBUG
        MaxObservedStall = std::max(PI->Cycles, MaxObservedStall);
#endif
        DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") "
              << SchedModel->getResourceName(PI->ProcResourceIdx)
              << "=" << NRCycle << "c\n");
        return true;
      }
    }
  }
  return false;
}
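
// Worked example (illustrative numbers): with IssueWidth == 4, CurrMOps == 3,
// and an SU that expands to 2 micro-ops, 3 + 2 > 4 flags a dispatch-limit
// hazard, so the node waits in Pending until bumpCycle() resets CurrMOps.
// The reserved-resource check is analogous: an SU using a BufferSize==0
// resource whose getNextResourceCycle() is beyond CurrCycle must stall.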

// Find the unscheduled node in ReadySUs with the highest latency.
unsigned SchedBoundary::
findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
  SUnit *LateSU = nullptr;
  unsigned RemLatency = 0;
  for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end();
       I != E; ++I) {
    unsigned L = getUnscheduledLatency(*I);
    if (L > RemLatency) {
      RemLatency = L;
      LateSU = *I;
    }
  }
  if (LateSU) {
    DEBUG(dbgs() << Available.getName() << " RemLatency SU("
          << LateSU->NodeNum << ") " << RemLatency << "c\n");
  }
  return RemLatency;
}

// Count resources in this zone and the remaining unscheduled
// instructions. Return the max count, scaled. Set OtherCritIdx to the critical
// resource index, or zero if the zone is issue limited.
unsigned SchedBoundary::
getOtherResourceCount(unsigned &OtherCritIdx) {
  OtherCritIdx = 0;
  if (!SchedModel->hasInstrSchedModel())
    return 0;

  unsigned OtherCritCount = Rem->RemIssueCount
    + (RetiredMOps * SchedModel->getMicroOpFactor());
  DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
        << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
  for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
       PIdx != PEnd; ++PIdx) {
    unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
    if (OtherCount > OtherCritCount) {
      OtherCritCount = OtherCount;
      OtherCritIdx = PIdx;
    }
  }
  if (OtherCritIdx) {
    DEBUG(dbgs() << "  " << Available.getName() << " + Remain CritRes: "
          << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
          << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
  }
  return OtherCritCount;
}

void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) {
  assert(SU->getInstr() && "Scheduled SUnit must have instr");

#ifndef NDEBUG
  // ReadyCycle has been bumped up to the CurrCycle when this node was
  // scheduled, but CurrCycle may have been eagerly advanced immediately after
  // scheduling, so may now be greater than ReadyCycle.
  if (ReadyCycle > CurrCycle)
    MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
#endif

  if (ReadyCycle < MinReadyCycle)
    MinReadyCycle = ReadyCycle;

  // Check for interlocks first. For the purpose of other heuristics, an
  // instruction that cannot issue appears as if it's not in the ReadyQueue.
  bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
  if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU))
    Pending.push(SU);
  else
    Available.push(SU);

  // Record this node as an immediate dependent of the scheduled node.
  NextSUs.insert(SU);
}

void SchedBoundary::releaseTopNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  releaseNode(SU, SU->TopReadyCycle);
}

void SchedBoundary::releaseBottomNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  releaseNode(SU, SU->BotReadyCycle);
}

/// Move the boundary of scheduled code by one cycle.
void SchedBoundary::bumpCycle(unsigned NextCycle) {
  if (SchedModel->getMicroOpBufferSize() == 0) {
    assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
    if (MinReadyCycle > NextCycle)
      NextCycle = MinReadyCycle;
  }
  // Update the current micro-ops, which will issue in the next cycle.
  unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
  CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;

  // Decrement DependentLatency based on the next cycle.
  if ((NextCycle - CurrCycle) > DependentLatency)
    DependentLatency = 0;
  else
    DependentLatency -= (NextCycle - CurrCycle);

  if (!HazardRec->isEnabled()) {
    // Bypass HazardRec virtual calls.
    CurrCycle = NextCycle;
  }
  else {
    // Bypass getHazardType calls in case of long latency.
    for (; CurrCycle != NextCycle; ++CurrCycle) {
      if (isTop())
        HazardRec->AdvanceCycle();
      else
        HazardRec->RecedeCycle();
    }
  }
  CheckPending = true;
  unsigned LFactor = SchedModel->getLatencyFactor();
  IsResourceLimited =
    (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
    > (int)LFactor;

  DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
}
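
// Worked example (illustrative numbers): advancing from CurrCycle == 5 to
// NextCycle == 7 with IssueWidth == 4 gives DecMOps == 4 * (7 - 5) == 8, so
// any CurrMOps value up to 8 drains to zero, and DependentLatency likewise
// shrinks by the two skipped cycles, saturating at zero.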

void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
  ExecutedResCounts[PIdx] += Count;
  if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
    MaxExecutedResCount = ExecutedResCounts[PIdx];
}

/// Add the given processor resource to this scheduled zone.
///
/// \param Cycles indicates the number of consecutive (non-pipelined) cycles
/// during which this resource is consumed.
///
/// \return the next cycle at which the instruction may execute without
/// oversubscribing resources.
unsigned SchedBoundary::
countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
  unsigned Factor = SchedModel->getResourceFactor(PIdx);
  unsigned Count = Factor * Cycles;
  DEBUG(dbgs() << "  " << SchedModel->getResourceName(PIdx)
        << " +" << Cycles << "x" << Factor << "u\n");

  // Update Executed resources counts.
  incExecutedResources(PIdx, Count);
  assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
  Rem->RemainingCounts[PIdx] -= Count;

  // Check if this resource exceeds the current critical resource. If so, it
  // becomes the critical resource.
  if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
    ZoneCritResIdx = PIdx;
    DEBUG(dbgs() << "  *** Critical resource "
          << SchedModel->getResourceName(PIdx) << ": "
          << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
  }
  // For reserved resources, record the highest cycle using the resource.
  unsigned NextAvailable = getNextResourceCycle(PIdx, Cycles);
  if (NextAvailable > CurrCycle) {
    DEBUG(dbgs() << "  Resource conflict: "
          << SchedModel->getProcResource(PIdx)->Name << " reserved until @"
          << NextAvailable << "\n");
  }
  return NextAvailable;
}
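
// Worked example (illustrative numbers, assuming the factor is the
// scheduling model's resource LCM divided by this resource's unit count):
// a 2-unit resource on a machine whose scale LCM is 4 has Factor == 2, so
// consuming it for Cycles == 3 adds Count == 6 scaled units. Scaling keeps
// comparisons against getCriticalCount() independent of unit counts.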

/// Move the boundary of scheduled code by one SUnit.
void SchedBoundary::bumpNode(SUnit *SU) {
  // Update the reservation table.
  if (HazardRec->isEnabled()) {
    if (!isTop() && SU->isCall) {
      // Calls are scheduled with their preceding instructions. For bottom-up
      // scheduling, clear the pipeline state before emitting.
      HazardRec->Reset();
    }
    HazardRec->EmitInstruction(SU);
  }
  // checkHazard should prevent scheduling multiple instructions per cycle that
  // exceed the issue width.
  const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
  unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
  assert(
      (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
      "Cannot schedule this instruction's MicroOps in the current cycle.");

  unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
  DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");

  unsigned NextCycle = CurrCycle;
  switch (SchedModel->getMicroOpBufferSize()) {
  case 0:
    assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
    break;
  case 1:
    if (ReadyCycle > NextCycle) {
      NextCycle = ReadyCycle;
      DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
    }
    break;
  default:
    // We don't currently model the OOO reorder buffer, so consider all
    // scheduled MOps to be "retired". We do loosely model in-order resource
    // latency. If this instruction uses an in-order resource, account for any
    // likely stall cycles.
    if (SU->isUnbuffered && ReadyCycle > NextCycle)
      NextCycle = ReadyCycle;
    break;
  }
  RetiredMOps += IncMOps;

  // Update resource counts and critical resource.
  if (SchedModel->hasInstrSchedModel()) {
    unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
    assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
    Rem->RemIssueCount -= DecRemIssue;
    if (ZoneCritResIdx) {
      // Scale scheduled micro-ops for comparing with the critical resource.
      unsigned ScaledMOps =
        RetiredMOps * SchedModel->getMicroOpFactor();

      // If scaled micro-ops are now more than the previous critical resource by
      // a full cycle, then micro-ops issue becomes critical.
      if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
          >= (int)SchedModel->getLatencyFactor()) {
        ZoneCritResIdx = 0;
        DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
              << ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
      }
    }
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned RCycle =
        countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
      if (RCycle > NextCycle)
        NextCycle = RCycle;
    }
    if (SU->hasReservedResource) {
      // For reserved resources, record the highest cycle using the resource.
      // For top-down scheduling, this is the cycle in which we schedule this
      // instruction plus the number of cycles the operation reserves the
      // resource. For bottom-up, it is simply the instruction's cycle.
      for (TargetSchedModel::ProcResIter
             PI = SchedModel->getWriteProcResBegin(SC),
             PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
        unsigned PIdx = PI->ProcResourceIdx;
        if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
          if (isTop()) {
            ReservedCycles[PIdx] =
              std::max(getNextResourceCycle(PIdx, 0), NextCycle + PI->Cycles);
          }
          else
            ReservedCycles[PIdx] = NextCycle;
        }
      }
    }
  }
  // Update ExpectedLatency and DependentLatency.
  unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
  unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
  if (SU->getDepth() > TopLatency) {
    TopLatency = SU->getDepth();
    DEBUG(dbgs() << "  " << Available.getName()
          << " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
  }
  if (SU->getHeight() > BotLatency) {
    BotLatency = SU->getHeight();
    DEBUG(dbgs() << "  " << Available.getName()
          << " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
  }
  // If we stall for any reason, bump the cycle.
  if (NextCycle > CurrCycle) {
    bumpCycle(NextCycle);
  }
  else {
    // After updating ZoneCritResIdx and ExpectedLatency, check if we're
    // resource limited. If a stall occurred, bumpCycle does this.
    unsigned LFactor = SchedModel->getLatencyFactor();
    IsResourceLimited =
      (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
      > (int)LFactor;
  }
  // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
  // resets CurrMOps. Loop to handle instructions with more MOps than issue in
  // one cycle.  Since we commonly reach the max MOps here, opportunistically
  // bump the cycle to avoid uselessly checking everything in the readyQ.
  CurrMOps += IncMOps;
  while (CurrMOps >= SchedModel->getIssueWidth()) {
    DEBUG(dbgs() << "  *** Max MOps " << CurrMOps
          << " at cycle " << CurrCycle << '\n');
    bumpCycle(++NextCycle);
  }
  DEBUG(dumpScheduledState());
}

/// Release pending ready nodes into the available queue. This makes them
/// visible to heuristics.
void SchedBoundary::releasePending() {
  // If the available queue is empty, it is safe to reset MinReadyCycle.
  if (Available.empty())
    MinReadyCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue.  If
  // so, add them to the available queue.
  bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
  for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
    SUnit *SU = *(Pending.begin()+i);
    unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;

    if (ReadyCycle < MinReadyCycle)
      MinReadyCycle = ReadyCycle;

    if (!IsBuffered && ReadyCycle > CurrCycle)
      continue;

    if (checkHazard(SU))
      continue;

    Available.push(SU);
    Pending.remove(Pending.begin()+i);
    --i; --e;
  }
  DEBUG(if (!Pending.empty()) Pending.dump());
  CheckPending = false;
}

/// Remove SU from the ready set for this boundary.
void SchedBoundary::removeReady(SUnit *SU) {
  if (Available.isInQueue(SU))
    Available.remove(Available.find(SU));
  else {
    assert(Pending.isInQueue(SU) && "bad ready count");
    Pending.remove(Pending.find(SU));
  }
}

/// If this queue only has one ready candidate, return it. As a side effect,
/// defer any nodes that now hit a hazard, and advance the cycle until at least
/// one node is ready. If multiple instructions are ready, return NULL.
SUnit *SchedBoundary::pickOnlyChoice() {
  if (CheckPending)
    releasePending();

  if (CurrMOps > 0) {
    // Defer any ready instrs that now have a hazard.
    for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
      if (checkHazard(*I)) {
        Pending.push(*I);
        I = Available.remove(I);
        continue;
      }
      ++I;
    }
  }
  for (unsigned i = 0; Available.empty(); ++i) {
//  FIXME: Re-enable assert once PR20057 is resolved.
//    assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
//           "permanent hazard");
    (void)i;
    bumpCycle(CurrCycle + 1);
    releasePending();
  }
  if (Available.size() == 1)
    return *Available.begin();
  return nullptr;
}

#ifndef NDEBUG
// This is useful information to dump after bumpNode.
// Note that the Queue contents are more useful before pickNodeFromQueue.
void SchedBoundary::dumpScheduledState() {
  unsigned ResFactor;
  unsigned ResCount;
  if (ZoneCritResIdx) {
    ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
    ResCount = getResourceCount(ZoneCritResIdx);
  }
  else {
    ResFactor = SchedModel->getMicroOpFactor();
    ResCount = RetiredMOps * SchedModel->getMicroOpFactor();
  }
  unsigned LFactor = SchedModel->getLatencyFactor();
  dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
         << "  Retired: " << RetiredMOps;
  dbgs() << "\n  Executed: " << getExecutedCount() / LFactor << "c";
  dbgs() << "\n  Critical: " << ResCount / LFactor << "c, "
         << ResCount / ResFactor << " "
         << SchedModel->getResourceName(ZoneCritResIdx)
         << "\n  ExpectedLatency: " << ExpectedLatency << "c\n"
         << (IsResourceLimited ? "  - Resource" : "  - Latency")
         << " limited.\n";
}
#endif

//===----------------------------------------------------------------------===//
// GenericScheduler - Generic implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//

void GenericSchedulerBase::SchedCandidate::
initResourceDelta(const ScheduleDAGMI *DAG,
                  const TargetSchedModel *SchedModel) {
  if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
    return;

  const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
  for (TargetSchedModel::ProcResIter
         PI = SchedModel->getWriteProcResBegin(SC),
         PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
    if (PI->ProcResourceIdx == Policy.ReduceResIdx)
      ResDelta.CritResources += PI->Cycles;
    if (PI->ProcResourceIdx == Policy.DemandResIdx)
      ResDelta.DemandedResources += PI->Cycles;
  }
}

/// Set the CandPolicy for a scheduling zone given the current resources and
/// latencies inside and outside the zone.
void GenericSchedulerBase::setPolicy(CandPolicy &Policy,
                                     bool IsPostRA,
                                     SchedBoundary &CurrZone,
                                     SchedBoundary *OtherZone) {
  // Apply preemptive heuristics based on the total latency and resources
  // inside and outside this zone. Potential stalls should be considered before
  // following this policy.

  // Compute remaining latency. We need this both to determine whether the
  // overall schedule has become latency-limited and whether the instructions
  // outside this zone are resource or latency limited.
  //
  // The "dependent" latency is updated incrementally during scheduling as the
  // max height/depth of scheduled nodes minus the cycles since it was
  // scheduled:
  //   DLat = max(N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
  //
  // The "independent" latency is the max ready queue depth:
  //   ILat = max N.depth for N in Available|Pending
  //
  // RemainingLatency is the greater of independent and dependent latency.
  unsigned RemLatency = CurrZone.getDependentLatency();
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Available.elements()));
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Pending.elements()));

  // Compute the critical resource outside the zone.
  unsigned OtherCritIdx = 0;
  unsigned OtherCount =
    OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;

  bool OtherResLimited = false;
  if (SchedModel->hasInstrSchedModel()) {
    unsigned LFactor = SchedModel->getLatencyFactor();
    OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
  }
  // Schedule aggressively for latency in PostRA mode. We don't check for
  // acyclic latency during PostRA, and highly out-of-order processors will
  // skip PostRA scheduling.
  if (!OtherResLimited) {
    if (IsPostRA || (RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath)) {
      Policy.ReduceLatency |= true;
      DEBUG(dbgs() << "  " << CurrZone.Available.getName()
            << " RemainingLatency " << RemLatency << " + "
            << CurrZone.getCurrCycle() << "c > CritPath "
            << Rem.CriticalPath << "\n");
    }
  }
  // If the same resource is limiting inside and outside the zone, do nothing.
  if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
    return;

  DEBUG(
    if (CurrZone.isResourceLimited()) {
      dbgs() << "  " << CurrZone.Available.getName() << " ResourceLimited: "
             << SchedModel->getResourceName(CurrZone.getZoneCritResIdx())
             << "\n";
    }
    if (OtherResLimited)
      dbgs() << "  RemainingLimit: "
             << SchedModel->getResourceName(OtherCritIdx) << "\n";
    if (!CurrZone.isResourceLimited() && !OtherResLimited)
      dbgs() << "  Latency limited both directions.\n");

  if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
    Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();

  if (OtherResLimited)
    Policy.DemandResIdx = OtherCritIdx;
}
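
// Worked example (illustrative numbers): suppose the current zone has
// DependentLatency == 8 while its deepest unscheduled node contributes 12,
// with CurrCycle == 6 and Rem.CriticalPath == 15. Then RemLatency == 12 and
// 12 + 6 > 15, so unless the other zone is resource limited the policy sets
// ReduceLatency and candidate selection favors the critical path.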

#ifndef NDEBUG
const char *GenericSchedulerBase::getReasonStr(
  GenericSchedulerBase::CandReason Reason) {
  switch (Reason) {
  case NoCand:         return "NOCAND    ";
  case PhysRegCopy:    return "PREG-COPY ";
  case RegExcess:      return "REG-EXCESS";
  case RegCritical:    return "REG-CRIT  ";
  case Stall:          return "STALL     ";
  case Cluster:        return "CLUSTER   ";
  case Weak:           return "WEAK      ";
  case RegMax:         return "REG-MAX   ";
  case ResourceReduce: return "RES-REDUCE";
  case ResourceDemand: return "RES-DEMAND";
  case TopDepthReduce: return "TOP-DEPTH ";
  case TopPathReduce:  return "TOP-PATH  ";
  case BotHeightReduce:return "BOT-HEIGHT";
  case BotPathReduce:  return "BOT-PATH  ";
  case NextDefUse:     return "DEF-USE   ";
  case NodeOrder:      return "ORDER     ";
  }
  llvm_unreachable("Unknown reason!");
}

void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
  PressureChange P;
  unsigned ResIdx = 0;
  unsigned Latency = 0;
  switch (Cand.Reason) {
  default:
    break;
  case RegExcess:
    P = Cand.RPDelta.Excess;
    break;
  case RegCritical:
    P = Cand.RPDelta.CriticalMax;
    break;
  case RegMax:
    P = Cand.RPDelta.CurrentMax;
    break;
  case ResourceReduce:
    ResIdx = Cand.Policy.ReduceResIdx;
    break;
  case ResourceDemand:
    ResIdx = Cand.Policy.DemandResIdx;
    break;
  case TopDepthReduce:
    Latency = Cand.SU->getDepth();
    break;
  case TopPathReduce:
    Latency = Cand.SU->getHeight();
    break;
  case BotHeightReduce:
    Latency = Cand.SU->getHeight();
    break;
  case BotPathReduce:
    Latency = Cand.SU->getDepth();
    break;
  }
  dbgs() << "  SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
  if (P.isValid())
    dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
           << ":" << P.getUnitInc() << " ";
  else
    dbgs() << "      ";
  if (ResIdx)
    dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
  else
    dbgs() << "         ";
  if (Latency)
    dbgs() << " " << Latency << " cycles ";
  else
    dbgs() << "          ";
  dbgs() << '\n';
}
#endif

/// Return true if this heuristic determines order.
static bool tryLess(int TryVal, int CandVal,
                    GenericSchedulerBase::SchedCandidate &TryCand,
                    GenericSchedulerBase::SchedCandidate &Cand,
                    GenericSchedulerBase::CandReason Reason) {
  if (TryVal < CandVal) {
    TryCand.Reason = Reason;
    return true;
  }
  if (TryVal > CandVal) {
    if (Cand.Reason > Reason)
      Cand.Reason = Reason;
    return true;
  }
  Cand.setRepeat(Reason);
  return false;
}

static bool tryGreater(int TryVal, int CandVal,
                       GenericSchedulerBase::SchedCandidate &TryCand,
                       GenericSchedulerBase::SchedCandidate &Cand,
                       GenericSchedulerBase::CandReason Reason) {
  if (TryVal > CandVal) {
    TryCand.Reason = Reason;
    return true;
  }
  if (TryVal < CandVal) {
    if (Cand.Reason > Reason)
      Cand.Reason = Reason;
    return true;
  }
  Cand.setRepeat(Reason);
  return false;
}
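
// Usage sketch (hypothetical call site): callers compare one attribute per
// heuristic level, e.g.
//   if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()),
//               getWeakLeft(Cand.SU, Zone.isTop()),
//               TryCand, Cand, GenericSchedulerBase::Weak))
//     return;
// A true result means the comparison was decisive either way: TryCand won
// (its Reason is set) or lost (Cand's Reason may be lowered); false means a
// tie, so evaluation falls through to the next, lower-priority heuristic.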

static bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
                       GenericSchedulerBase::SchedCandidate &Cand,
                       SchedBoundary &Zone) {
  if (Zone.isTop()) {
    if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
      if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
                  TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
        return true;
    }
    if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
                   TryCand, Cand, GenericSchedulerBase::TopPathReduce))
      return true;
  }
  else {
    if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
      if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
                  TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
        return true;
    }
    if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
                   TryCand, Cand, GenericSchedulerBase::BotPathReduce))
      return true;
  }
  return false;
}

static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand,
                      bool IsTop) {
  DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
        << GenericSchedulerBase::getReasonStr(Cand.Reason) << '\n');
}

void GenericScheduler::initialize(ScheduleDAGMI *dag) {
  assert(dag->hasVRegLiveness() &&
         "(PreRA)GenericScheduler needs vreg liveness");
  DAG = static_cast<ScheduleDAGMILive*>(dag);
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  Bot.init(DAG, SchedModel, &Rem);

  // Initialize resource counts.

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
  // are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  if (!Top.HazardRec) {
    Top.HazardRec =
        DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
            Itin, DAG);
  }
  if (!Bot.HazardRec) {
    Bot.HazardRec =
        DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
            Itin, DAG);
  }
}

/// Initialize the per-region scheduling policy.
void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
                                  MachineBasicBlock::iterator End,
                                  unsigned NumRegionInstrs) {
  const MachineFunction &MF = *Begin->getParent()->getParent();
  const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();

  // Avoid setting up the register pressure tracker for small regions to save
  // compile time. As a rough heuristic, only track pressure when the number of
  // schedulable instructions exceeds half the integer register file.
  RegionPolicy.ShouldTrackPressure = true;
  for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
    MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
    if (TLI->isTypeLegal(LegalIntVT)) {
      unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
        TLI->getRegClassFor(LegalIntVT));
      RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
    }
  }

  // For generic targets, we default to bottom-up, because it's simpler and
  // more compile-time optimizations have been implemented in that direction.
  RegionPolicy.OnlyBottomUp = true;

  // Allow the subtarget to override the default policy.
  MF.getSubtarget().overrideSchedPolicy(RegionPolicy, Begin, End,
                                        NumRegionInstrs);

  // After subtarget overrides, apply command line options.
  if (!EnableRegPressure)
    RegionPolicy.ShouldTrackPressure = false;

  // Check whether -misched-topdown/bottomup can force or unforce the
  // scheduling direction. e.g. -misched-bottomup=false allows scheduling in
  // both directions.
  assert((!ForceTopDown || !ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
  if (ForceBottomUp.getNumOccurrences() > 0) {
    RegionPolicy.OnlyBottomUp = ForceBottomUp;
    if (RegionPolicy.OnlyBottomUp)
      RegionPolicy.OnlyTopDown = false;
  }
  if (ForceTopDown.getNumOccurrences() > 0) {
    RegionPolicy.OnlyTopDown = ForceTopDown;
    if (RegionPolicy.OnlyTopDown)
      RegionPolicy.OnlyBottomUp = false;
  }
}

/// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
/// critical path by more cycles than it takes to drain the instruction buffer.
/// We estimate an upper bound on in-flight instructions as:
///
/// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
/// InFlightIterations = AcyclicPath / CyclesPerIteration
/// InFlightResources = InFlightIterations * LoopResources
///
/// TODO: Check execution resources in addition to IssueCount.
void GenericScheduler::checkAcyclicLatency() {
  if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
    return;

  // Scaled number of cycles per loop iteration.
  unsigned IterCount =
    std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
             Rem.RemIssueCount);
  // Scaled acyclic critical path.
  unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
  // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
  unsigned InFlightCount =
    (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
  unsigned BufferLimit =
    SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();

  Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;

  DEBUG(dbgs() << "IssueCycles="
        << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
        << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
        << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
        << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
        << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
        if (Rem.IsAcyclicLatencyLimited)
          dbgs() << "  ACYCLIC LATENCY LIMIT\n");
}
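
// Worked example (illustrative numbers, with both scale factors == 1): for
// CyclicCritPath == 4, CriticalPath == 20, and RemIssueCount == 8,
// IterCount == max(4, 8) == 8 and InFlightCount == ceil(20 * 8 / 8) == 20.
// With a 16 micro-op buffer, 20 > 16, so the loop is flagged acyclic-latency
// limited and tryCandidate() below schedules aggressively for latency.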

void GenericScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();

  // Some roots may not feed into ExitSU. Check all of them just in case.
  for (std::vector<SUnit*>::const_iterator
         I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
    if ((*I)->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = (*I)->getDepth();
  }
  DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
  if (DumpCriticalPathLength) {
    errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
  }

  if (EnableCyclicPath) {
    Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
    checkAcyclicLatency();
  }
}

static bool tryPressure(const PressureChange &TryP,
                        const PressureChange &CandP,
                        GenericSchedulerBase::SchedCandidate &TryCand,
                        GenericSchedulerBase::SchedCandidate &Cand,
                        GenericSchedulerBase::CandReason Reason) {
  int TryRank = TryP.getPSetOrMax();
  int CandRank = CandP.getPSetOrMax();
  // If both candidates affect the same set, go with the smallest increase.
  if (TryRank == CandRank) {
    return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
                   Reason);
  }
  // If one candidate decreases and the other increases, go with it.
  // Invalid candidates have UnitInc==0.
  if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
                 Reason)) {
    return true;
  }
  // If the candidates are decreasing pressure, reverse priority.
  if (TryP.getUnitInc() < 0)
    std::swap(TryRank, CandRank);
  return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
}
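
// Usage sketch (hypothetical deltas): if TryP raises the same pressure set
// by +1 that CandP raises by +3, tryLess() prefers TryCand with the given
// Reason. If the sets differ and exactly one delta is a decrease, the
// decreasing candidate wins; otherwise the higher PSetOrMax rank wins, so an
// invalid delta (INT_MAX, i.e. no pressure change) beats any increase.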

static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
  return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
}

/// Minimize physical register live ranges. Regalloc wants them adjacent to
/// their physreg def/use.
///
/// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
/// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
/// with the operation that produces or consumes the physreg. We'll do this when
/// regalloc has support for parallel copies.
static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
  const MachineInstr *MI = SU->getInstr();
  if (!MI->isCopy())
    return 0;

  unsigned ScheduledOper = isTop ? 1 : 0;
  unsigned UnscheduledOper = isTop ? 0 : 1;
  // If we have already scheduled the physreg producer/consumer, immediately
  // schedule the copy.
  if (TargetRegisterInfo::isPhysicalRegister(
        MI->getOperand(ScheduledOper).getReg()))
    return 1;
  // If the physreg is at the boundary, defer it. Otherwise schedule it
  // immediately to free the dependent. We can hoist the copy later.
  bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
  if (TargetRegisterInfo::isPhysicalRegister(
        MI->getOperand(UnscheduledOper).getReg()))
    return AtBoundary ? -1 : 1;
  return 0;
}
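
// Illustrative example (hypothetical operands): for a copy such as
//   %vreg5 = COPY %EAX
// considered bottom-up (isTop == false), operand 0 is the already-scheduled
// side. %vreg5 is virtual, so the physreg source %EAX decides: at the region
// boundary the copy is deferred (-1) so it lands next to the physreg def,
// otherwise it is favored (+1) to quickly free its dependent instructions.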

/// Apply a set of heuristics to a new candidate. Heuristics are currently
/// hierarchical. This may be more efficient than a graduated cost model because
/// we don't need to evaluate all aspects of the model for each node in the
/// queue. But it's really done to make the heuristics easier to debug and
/// statistically analyze.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
/// \param Zone describes the scheduled zone that we are extending.
/// \param RPTracker describes reg pressure within the scheduled zone.
/// \param TempTracker is a scratch pressure tracker to reuse in queries.
void GenericScheduler::tryCandidate(SchedCandidate &Cand,
                                    SchedCandidate &TryCand,
                                    SchedBoundary &Zone,
                                    const RegPressureTracker &RPTracker,
                                    RegPressureTracker &TempTracker) {

  if (DAG->isTrackingPressure()) {
    // Always initialize TryCand's RPDelta.
    if (Zone.isTop()) {
      TempTracker.getMaxDownwardPressureDelta(
        TryCand.SU->getInstr(),
        TryCand.RPDelta,
        DAG->getRegionCriticalPSets(),
        DAG->getRegPressure().MaxSetPressure);
    }
    else {
      if (VerifyScheduling) {
        TempTracker.getMaxUpwardPressureDelta(
          TryCand.SU->getInstr(),
          &DAG->getPressureDiff(TryCand.SU),
          TryCand.RPDelta,
          DAG->getRegionCriticalPSets(),
          DAG->getRegPressure().MaxSetPressure);
      }
      else {
        RPTracker.getUpwardPressureDelta(
          TryCand.SU->getInstr(),
          DAG->getPressureDiff(TryCand.SU),
          TryCand.RPDelta,
          DAG->getRegionCriticalPSets(),
          DAG->getRegPressure().MaxSetPressure);
      }
    }
  }
  DEBUG(if (TryCand.RPDelta.Excess.isValid())
          dbgs() << "  SU(" << TryCand.SU->NodeNum << ") "
                 << TRI->getRegPressureSetName(TryCand.RPDelta.Excess.getPSet())
                 << ":" << TryCand.RPDelta.Excess.getUnitInc() << "\n");

  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return;
  }

  if (tryGreater(biasPhysRegCopy(TryCand.SU, Zone.isTop()),
                 biasPhysRegCopy(Cand.SU, Zone.isTop()),
                 TryCand, Cand, PhysRegCopy))
    return;

  // Avoid exceeding the target's limit. If signed PSetID is negative, it is
  // invalid; convert it to INT_MAX to give it lowest priority.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
                                               Cand.RPDelta.Excess,
                                               TryCand, Cand, RegExcess))
    return;

  // Avoid increasing the max critical pressure in the scheduled region.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
                                               Cand.RPDelta.CriticalMax,
                                               TryCand, Cand, RegCritical))
    return;

  // For loops that are acyclic path limited, aggressively schedule for latency.
  // This can result in very long dependence chains scheduled in sequence, so
  // once every cycle (when CurrMOps == 0), switch to normal heuristics.
  if (Rem.IsAcyclicLatencyLimited && !Zone.getCurrMOps()
      && tryLatency(TryCand, Cand, Zone))
    return;

  // Prioritize instructions that read unbuffered resources by stall cycles.
  if (tryLess(Zone.getLatencyStallCycles(TryCand.SU),
              Zone.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
    return;

  // Keep clustered nodes together to encourage downstream peephole
  // optimizations which may reduce resource requirements.
  //
  // This is a best effort to set things up for a post-RA pass. Optimizations
  // like generating loads of multiple registers should ideally be done within
  // the scheduler pass by combining the loads during DAG postprocessing.
  const SUnit *NextClusterSU =
    Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
  if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU,
                 TryCand, Cand, Cluster))
    return;

  // Weak edges are for clustering and other constraints.
  if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()),
              getWeakLeft(Cand.SU, Zone.isTop()),
              TryCand, Cand, Weak)) {
    return;
  }
  // Avoid increasing the max pressure of the entire region.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
                                               Cand.RPDelta.CurrentMax,
                                               TryCand, Cand, RegMax))
    return;

  // Avoid critical resource consumption and balance the schedule.
  TryCand.initResourceDelta(DAG, SchedModel);
  if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
              TryCand, Cand, ResourceReduce))
    return;
  if (tryGreater(TryCand.ResDelta.DemandedResources,
                 Cand.ResDelta.DemandedResources,
                 TryCand, Cand, ResourceDemand))
    return;

  // Avoid serializing long latency dependence chains.
  // For acyclic path limited loops, latency was already checked above.
  if (Cand.Policy.ReduceLatency && !Rem.IsAcyclicLatencyLimited
      && tryLatency(TryCand, Cand, Zone)) {
    return;
  }

  // Prefer immediate defs/users of the last scheduled instruction. This is a
  // local pressure avoidance strategy that also makes the machine code
  // readable.
  if (tryGreater(Zone.isNextSU(TryCand.SU), Zone.isNextSU(Cand.SU),
                 TryCand, Cand, NextDefUse))
    return;

  // Fall through to original instruction order.
2688   if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
2689       || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
2690     TryCand.Reason = NodeOrder;
2691   }
2692 }
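// For reference, the tryLess/tryGreater helpers used throughout tryCandidate()
// follow the shape below. This is a simplified editorial sketch of the helpers
// defined earlier in this file (the real versions also track repeated reasons
// for pickNodeBidirectional via SchedCandidate::setRepeat):
//
//   static bool tryLess(int TryVal, int CandVal,
//                       GenericSchedulerBase::SchedCandidate &TryCand,
//                       GenericSchedulerBase::SchedCandidate &Cand,
//                       GenericSchedulerBase::CandReason Reason) {
//     if (TryVal < CandVal) {
//       TryCand.Reason = Reason;  // TryCand wins this criterion; caller stops.
//       return true;
//     }
//     if (TryVal > CandVal) {
//       if (Cand.Reason > Reason)
//         Cand.Reason = Reason;   // Cand wins; record the stronger reason.
//       return true;
//     }
//     return false;               // Tie: fall through to the next heuristic.
//   }
//
// tryGreater() is symmetric with the comparisons reversed, which is why each
// heuristic above can simply 'return' as soon as one helper reports a winner.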
2693 
2694 /// Pick the best candidate from the queue.
2695 ///
2696 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
2697 /// DAG building. To adjust for the current scheduling location we need to
2698 /// maintain the number of vreg uses remaining to be top-scheduled.
2699 void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
2700                                          const RegPressureTracker &RPTracker,
2701                                          SchedCandidate &Cand) {
2702   ReadyQueue &Q = Zone.Available;
2703 
2704   DEBUG(Q.dump());
2705 
2706   // getMaxPressureDelta temporarily modifies the tracker.
2707   RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
2708 
2709   for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
2710 
2711     SchedCandidate TryCand(Cand.Policy);
2712     TryCand.SU = *I;
2713     tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker);
2714     if (TryCand.Reason != NoCand) {
2715       // Initialize resource delta if needed in case future heuristics query it.
2716       if (TryCand.ResDelta == SchedResourceDelta())
2717         TryCand.initResourceDelta(DAG, SchedModel);
2718       Cand.setBest(TryCand);
2719       DEBUG(traceCandidate(Cand));
2720     }
2721   }
2722 }
2723 
2724 /// Pick the best candidate node from either the top or bottom queue.
2725 SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
2726   // Schedule as far as possible in the direction of no choice. This is most
2727   // efficient, but also provides the best heuristics for CriticalPSets.
2728   if (SUnit *SU = Bot.pickOnlyChoice()) {
2729     IsTopNode = false;
2730     DEBUG(dbgs() << "Pick Bot NOCAND\n");
2731     return SU;
2732   }
2733   if (SUnit *SU = Top.pickOnlyChoice()) {
2734     IsTopNode = true;
2735     DEBUG(dbgs() << "Pick Top NOCAND\n");
2736     return SU;
2737   }
2738   CandPolicy NoPolicy;
2739   SchedCandidate BotCand(NoPolicy);
2740   SchedCandidate TopCand(NoPolicy);
2741   // Set the bottom-up policy based on the state of the current bottom zone and
2742   // the instructions outside the zone, including the top zone.
2743   setPolicy(BotCand.Policy, /*IsPostRA=*/false, Bot, &Top);
2744   // Set the top-down policy based on the state of the current top zone and
2745   // the instructions outside the zone, including the bottom zone.
2746   setPolicy(TopCand.Policy, /*IsPostRA=*/false, Top, &Bot);
2747 
2748   // Prefer bottom scheduling when heuristics are silent.
2749   pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2750   assert(BotCand.Reason != NoCand && "failed to find the first candidate");
2751 
2752   // If either Q has a single candidate that provides the least increase in
2753   // Excess pressure, we can immediately schedule from that Q.
2754   //
2755   // RegionCriticalPSets summarizes the pressure within the scheduled region and
2756   // affects picking from either Q. If scheduling in one direction must
2757   // increase pressure for one of the excess PSets, then schedule in that
2758   // direction first to provide more freedom in the other direction.
2759   if ((BotCand.Reason == RegExcess && !BotCand.isRepeat(RegExcess))
2760       || (BotCand.Reason == RegCritical
2761           && !BotCand.isRepeat(RegCritical)))
2762   {
2763     IsTopNode = false;
2764     tracePick(BotCand, IsTopNode);
2765     return BotCand.SU;
2766   }
2767   // Check if the top Q has a better candidate.
2768   pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2769   assert(TopCand.Reason != NoCand && "failed to find the first candidate");
2770 
2771   // Choose the queue with the most important (lowest enum) reason.
2772   if (TopCand.Reason < BotCand.Reason) {
2773     IsTopNode = true;
2774     tracePick(TopCand, IsTopNode);
2775     return TopCand.SU;
2776   }
2777   // Otherwise prefer the bottom candidate, in node order if all else failed.
2778   IsTopNode = false;
2779   tracePick(BotCand, IsTopNode);
2780   return BotCand.SU;
2781 }
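// Editorial summary of the pick order above: forced single choices win first
// (Bot, then Top); next, a bottom candidate chosen for a first-time
// RegExcess/RegCritical reason; then a top candidate with a strictly more
// important (lower enum) reason; otherwise the bottom candidate by default.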
2782 
2783 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
2784 SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
2785   if (DAG->top() == DAG->bottom()) {
2786     assert(Top.Available.empty() && Top.Pending.empty() &&
2787            Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
2788     return nullptr;
2789   }
2790   SUnit *SU;
2791   do {
2792     if (RegionPolicy.OnlyTopDown) {
2793       SU = Top.pickOnlyChoice();
2794       if (!SU) {
2795         CandPolicy NoPolicy;
2796         SchedCandidate TopCand(NoPolicy);
2797         pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2798         assert(TopCand.Reason != NoCand && "failed to find a candidate");
2799         tracePick(TopCand, true);
2800         SU = TopCand.SU;
2801       }
2802       IsTopNode = true;
2803     }
2804     else if (RegionPolicy.OnlyBottomUp) {
2805       SU = Bot.pickOnlyChoice();
2806       if (!SU) {
2807         CandPolicy NoPolicy;
2808         SchedCandidate BotCand(NoPolicy);
2809         pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2810         assert(BotCand.Reason != NoCand && "failed to find a candidate");
2811         tracePick(BotCand, false);
2812         SU = BotCand.SU;
2813       }
2814       IsTopNode = false;
2815     }
2816     else {
2817       SU = pickNodeBidirectional(IsTopNode);
2818     }
2819   } while (SU->isScheduled);
2820 
2821   if (SU->isTopReady())
2822     Top.removeReady(SU);
2823   if (SU->isBottomReady())
2824     Bot.removeReady(SU);
2825 
2826   DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
2827   return SU;
2828 }
2829 
2830 void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
2831 
2832   MachineBasicBlock::iterator InsertPos = SU->getInstr();
2833   if (!isTop)
2834     ++InsertPos;
2835   SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;
2836 
2837   // Find already scheduled copies with a single physreg dependence and move
2838   // them just above the scheduled instruction.
2839   for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end();
2840        I != E; ++I) {
2841     if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg()))
2842       continue;
2843     SUnit *DepSU = I->getSUnit();
2844     if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
2845       continue;
2846     MachineInstr *Copy = DepSU->getInstr();
2847     if (!Copy->isCopy())
2848       continue;
2849     DEBUG(dbgs() << "  Rescheduling physreg copy ";
2850           I->getSUnit()->dump(DAG));
2851     DAG->moveInstruction(Copy, InsertPos);
2852   }
2853 }
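// Hypothetical bottom-up example (register names invented for illustration):
// after scheduling the def below, its already-scheduled single-dependence
// physreg copy is moved to sit immediately after it.
//
//   Before:                          After:
//     %vreg5 = ADD ...                 %vreg5 = ADD ...
//     <other scheduled instrs>         %EAX = COPY %vreg5
//     %EAX = COPY %vreg5               <other scheduled instrs>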
2854 
2855 /// Update the scheduler's state after scheduling a node. This is the same node
2856 /// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
2857 /// update its state based on the current cycle before MachineSchedStrategy
2858 /// does.
2859 ///
2860 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
2861 /// them here. See comments in biasPhysRegCopy.
2862 void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
2863   if (IsTopNode) {
2864     SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
2865     Top.bumpNode(SU);
2866     if (SU->hasPhysRegUses)
2867       reschedulePhysRegCopies(SU, true);
2868   }
2869   else {
2870     SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
2871     Bot.bumpNode(SU);
2872     if (SU->hasPhysRegDefs)
2873       reschedulePhysRegCopies(SU, false);
2874   }
2875 }
2876 
2877 /// Create the standard converging machine scheduler. This will be used as the
2878 /// default scheduler if the target does not set a default.
2879 static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C) {
2880   ScheduleDAGMILive *DAG = new ScheduleDAGMILive(C, make_unique<GenericScheduler>(C));
2881   // Register DAG post-processors.
2882   //
2883   // FIXME: extend the mutation API to allow earlier mutations to instantiate
2884   // data and pass it to later mutations. Have a single mutation that gathers
2885   // the interesting nodes in one pass.
2886   DAG->addMutation(make_unique<CopyConstrain>(DAG->TII, DAG->TRI));
2887   if (EnableLoadCluster && DAG->TII->enableClusterLoads())
2888     DAG->addMutation(make_unique<LoadClusterMutation>(DAG->TII, DAG->TRI));
2889   if (EnableMacroFusion)
2890     DAG->addMutation(make_unique<MacroFusion>(DAG->TII));
2891   return DAG;
2892 }
2893 
2894 static MachineSchedRegistry
2895 GenericSchedRegistry("converge", "Standard converging scheduler.",
2896                      createGenericSchedLive);
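// Usage note (editorial assumption based on the registry above): with the
// machine scheduler enabled, this strategy can be requested by name, e.g.:
//
//   llc -enable-misched -misched=converge foo.ll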
2897 
2898 //===----------------------------------------------------------------------===//
2899 // PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
2900 //===----------------------------------------------------------------------===//
2901 
2902 void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
2903   DAG = Dag;
2904   SchedModel = DAG->getSchedModel();
2905   TRI = DAG->TRI;
2906 
2907   Rem.init(DAG, SchedModel);
2908   Top.init(DAG, SchedModel, &Rem);
2909   BotRoots.clear();
2910 
2911   // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
2912   // or are disabled, then these HazardRecs will be disabled.
2913   const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
2914   if (!Top.HazardRec) {
2915     Top.HazardRec =
2916         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
2917             Itin, DAG);
2918   }
2919 }
2920 
2921 
2922 void PostGenericScheduler::registerRoots() {
2923   Rem.CriticalPath = DAG->ExitSU.getDepth();
2924 
2925   // Some roots may not feed into ExitSU. Check all of them just in case.
2926   for (SmallVectorImpl<SUnit*>::const_iterator
2927          I = BotRoots.begin(), E = BotRoots.end(); I != E; ++I) {
2928     if ((*I)->getDepth() > Rem.CriticalPath)
2929       Rem.CriticalPath = (*I)->getDepth();
2930   }
2931   DEBUG(dbgs() << "Critical Path: (PGS-RR) " << Rem.CriticalPath << '\n');
2932   if (DumpCriticalPathLength) {
2933     errs() << "Critical Path(PGS-RR): " << Rem.CriticalPath << "\n";
2934   }
2935 }
2936 
2937 /// Apply a set of heuristics to a new candidate for PostRA scheduling.
2938 ///
2939 /// \param Cand provides the policy and current best candidate.
2940 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
2941 void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
2942                                         SchedCandidate &TryCand) {
2943 
2944   // Initialize the candidate if needed.
2945   if (!Cand.isValid()) {
2946     TryCand.Reason = NodeOrder;
2947     return;
2948   }
2949 
2950   // Prioritize instructions that read unbuffered resources by stall cycles.
2951   if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
2952               Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
2953     return;
2954 
2955   // Avoid critical resource consumption and balance the schedule.
2956   if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
2957               TryCand, Cand, ResourceReduce))
2958     return;
2959   if (tryGreater(TryCand.ResDelta.DemandedResources,
2960                  Cand.ResDelta.DemandedResources,
2961                  TryCand, Cand, ResourceDemand))
2962     return;
2963 
2964   // Avoid serializing long latency dependence chains.
2965   if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
2966     return;
2967   }
2968 
2969   // Fall through to original instruction order.
2970   if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
2971     TryCand.Reason = NodeOrder;
2972 }
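// Editorial note: this is a strict subset of GenericScheduler::tryCandidate
// above; the register-pressure, physreg-copy, clustering, and weak-edge
// heuristics are omitted because this strategy runs without vreg liveness.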
2973 
2974 void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
2975   ReadyQueue &Q = Top.Available;
2976 
2977   DEBUG(Q.dump());
2978 
2979   for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
2980     SchedCandidate TryCand(Cand.Policy);
2981     TryCand.SU = *I;
2982     TryCand.initResourceDelta(DAG, SchedModel);
2983     tryCandidate(Cand, TryCand);
2984     if (TryCand.Reason != NoCand) {
2985       Cand.setBest(TryCand);
2986       DEBUG(traceCandidate(Cand));
2987     }
2988   }
2989 }
2990 
2991 /// Pick the next node to schedule.
2992 SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
2993   if (DAG->top() == DAG->bottom()) {
2994     assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
2995     return nullptr;
2996   }
2997   SUnit *SU;
2998   do {
2999     SU = Top.pickOnlyChoice();
3000     if (!SU) {
3001       CandPolicy NoPolicy;
3002       SchedCandidate TopCand(NoPolicy);
3003       // Set the top-down policy based on the state of the current top zone and
3004       // the instructions outside the zone, including the bottom zone.
3005       setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
3006       pickNodeFromQueue(TopCand);
3007       assert(TopCand.Reason != NoCand && "failed to find a candidate");
3008       tracePick(TopCand, true);
3009       SU = TopCand.SU;
3010     }
3011   } while (SU->isScheduled);
3012 
3013   IsTopNode = true;
3014   Top.removeReady(SU);
3015 
3016   DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
3017   return SU;
3018 }
3019 
3020 /// Called after ScheduleDAGMI has scheduled an instruction and updated
3021 /// scheduled/remaining flags in the DAG nodes.
3022 void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3023   SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3024   Top.bumpNode(SU);
3025 }
3026 
3027 /// Create a generic scheduler with no vreg liveness or DAG mutation passes.
3028 static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C) {
3029   return new ScheduleDAGMI(C, make_unique<PostGenericScheduler>(C), /*IsPostRA=*/true);
3030 }
3031 
3032 //===----------------------------------------------------------------------===//
3033 // ILP Scheduler. Currently for experimental analysis of heuristics.
3034 //===----------------------------------------------------------------------===//
3035 
3036 namespace {
3037 /// \brief Order nodes by the ILP metric.
3038 struct ILPOrder {
3039   const SchedDFSResult *DFSResult;
3040   const BitVector *ScheduledTrees;
3041   bool MaximizeILP;
3042 
3043   ILPOrder(bool MaxILP)
3044     : DFSResult(nullptr), ScheduledTrees(nullptr), MaximizeILP(MaxILP) {}
3045 
3046   /// \brief Apply a less-than relation on node priority.
3047   ///
3048   /// (Return true if A comes after B in the Q.)
3049   bool operator()(const SUnit *A, const SUnit *B) const {
3050     unsigned SchedTreeA = DFSResult->getSubtreeID(A);
3051     unsigned SchedTreeB = DFSResult->getSubtreeID(B);
3052     if (SchedTreeA != SchedTreeB) {
3053       // Unscheduled trees have lower priority.
3054       if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
3055         return ScheduledTrees->test(SchedTreeB);
3056 
3057       // Trees with shallower connections have lower priority.
3058       if (DFSResult->getSubtreeLevel(SchedTreeA)
3059           != DFSResult->getSubtreeLevel(SchedTreeB)) {
3060         return DFSResult->getSubtreeLevel(SchedTreeA)
3061           < DFSResult->getSubtreeLevel(SchedTreeB);
3062       }
3063     }
3064     if (MaximizeILP)
3065       return DFSResult->getILP(A) < DFSResult->getILP(B);
3066     else
3067       return DFSResult->getILP(A) > DFSResult->getILP(B);
3068   }
3069 };
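// Editorial worked example: with MaximizeILP and two nodes in the same
// subtree where getILP(A) == 3 and getILP(B) == 5, operator()(A, B) returns
// true, so A sorts after B in the queue and B is picked first.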
3070 
3071 /// \brief Schedule based on the ILP metric.
3072 class ILPScheduler : public MachineSchedStrategy {
3073   ScheduleDAGMILive *DAG;
3074   ILPOrder Cmp;
3075 
3076   std::vector<SUnit*> ReadyQ;
3077 public:
3078   ILPScheduler(bool MaximizeILP): DAG(nullptr), Cmp(MaximizeILP) {}
3079 
3080   void initialize(ScheduleDAGMI *dag) override {
3081     assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
3082     DAG = static_cast<ScheduleDAGMILive*>(dag);
3083     DAG->computeDFSResult();
3084     Cmp.DFSResult = DAG->getDFSResult();
3085     Cmp.ScheduledTrees = &DAG->getScheduledTrees();
3086     ReadyQ.clear();
3087   }
3088 
3089   void registerRoots() override {
3090     // Restore the heap in ReadyQ with the updated DFS results.
3091     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3092   }
3093 
3094   /// Implement MachineSchedStrategy interface.
3095   /// -----------------------------------------
3096 
3097   /// Callback to select the highest priority node from the ready Q.
3098   SUnit *pickNode(bool &IsTopNode) override {
3099     if (ReadyQ.empty()) return nullptr;
3100     std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3101     SUnit *SU = ReadyQ.back();
3102     ReadyQ.pop_back();
3103     IsTopNode = false;
3104     DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") "
3105           << " ILP: " << DAG->getDFSResult()->getILP(SU)
3106           << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
3107           << DAG->getDFSResult()->getSubtreeLevel(
3108             DAG->getDFSResult()->getSubtreeID(SU)) << '\n'
3109           << "Scheduling " << *SU->getInstr());
3110     return SU;
3111   }
3112 
3113   /// \brief Scheduler callback to notify that a new subtree is scheduled.
3114   void scheduleTree(unsigned SubtreeID) override {
3115     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3116   }
3117 
3118   /// Callback after a node is scheduled. Mark a newly scheduled tree, notify
3119   /// DFSResults, and resort the priority Q.
3120   void schedNode(SUnit *SU, bool IsTopNode) override {
3121     assert(!IsTopNode && "SchedDFSResult needs bottom-up");
3122   }
3123 
3124   void releaseTopNode(SUnit *) override { /*only called for top roots*/ }
3125 
3126   void releaseBottomNode(SUnit *SU) override {
3127     ReadyQ.push_back(SU);
3128     std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3129   }
3130 };
3131 } // namespace
3132 
3133 static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
3134   return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(true));
3135 }
3136 static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
3137   return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(false));
3138 }
3139 static MachineSchedRegistry ILPMaxRegistry(
3140   "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
3141 static MachineSchedRegistry ILPMinRegistry(
3142   "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
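// Usage note (editorial assumption): the registrations above expose the
// experimental ILP strategies on the command line, e.g.:
//
//   llc -enable-misched -misched=ilpmax foo.ll
//   llc -enable-misched -misched=ilpmin foo.ll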
3143 
3144 //===----------------------------------------------------------------------===//
3145 // Machine Instruction Shuffler for Correctness Testing
3146 //===----------------------------------------------------------------------===//
3147 
3148 #ifndef NDEBUG
3149 namespace {
3150 /// Apply a less-than relation on the node order, which corresponds to the
3151 /// instruction order prior to scheduling. IsReverse implements greater-than.
3152 template<bool IsReverse>
3153 struct SUnitOrder {
3154   bool operator()(SUnit *A, SUnit *B) const {
3155     if (IsReverse)
3156       return A->NodeNum > B->NodeNum;
3157     else
3158       return A->NodeNum < B->NodeNum;
3159   }
3160 };
3161 
3162 /// Reorder instructions as much as possible.
3163 class InstructionShuffler : public MachineSchedStrategy {
3164   bool IsAlternating;
3165   bool IsTopDown;
3166 
3167   // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
3168   // gives nodes with a higher number higher priority, causing the latest
3169   // instructions to be scheduled first.
3170   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
3171     TopQ;
3172   // When scheduling bottom-up, use greater-than as the queue priority.
3173   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
3174     BottomQ;
3175 public:
3176   InstructionShuffler(bool alternate, bool topdown)
3177     : IsAlternating(alternate), IsTopDown(topdown) {}
3178 
3179   void initialize(ScheduleDAGMI*) override {
3180     TopQ.clear();
3181     BottomQ.clear();
3182   }
3183 
3184   /// Implement MachineSchedStrategy interface.
3185   /// -----------------------------------------
3186 
3187   SUnit *pickNode(bool &IsTopNode) override {
3188     SUnit *SU;
3189     if (IsTopDown) {
3190       do {
3191         if (TopQ.empty()) return nullptr;
3192         SU = TopQ.top();
3193         TopQ.pop();
3194       } while (SU->isScheduled);
3195       IsTopNode = true;
3196     }
3197     else {
3198       do {
3199         if (BottomQ.empty()) return nullptr;
3200         SU = BottomQ.top();
3201         BottomQ.pop();
3202       } while (SU->isScheduled);
3203       IsTopNode = false;
3204     }
3205     if (IsAlternating)
3206       IsTopDown = !IsTopDown;
3207     return SU;
3208   }
3209 
3210   void schedNode(SUnit *SU, bool IsTopNode) override {}
3211 
3212   void releaseTopNode(SUnit *SU) override {
3213     TopQ.push(SU);
3214   }
3215   void releaseBottomNode(SUnit *SU) override {
3216     BottomQ.push(SU);
3217   }
3218 };
3219 } // namespace
3220 
3221 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
3222   bool Alternate = !ForceTopDown && !ForceBottomUp;
3223   bool TopDown = !ForceBottomUp;
3224   assert((TopDown || !ForceTopDown) &&
3225          "-misched-topdown incompatible with -misched-bottomup");
3226   return new ScheduleDAGMILive(C, make_unique<InstructionShuffler>(Alternate, TopDown));
3227 }
3228 static MachineSchedRegistry ShufflerRegistry(
3229   "shuffle", "Shuffle machine instructions alternating directions",
3230   createInstructionShuffler);
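// Usage note (editorial assumption; available in debug builds only):
//
//   llc -enable-misched -misched=shuffle foo.ll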
3231 #endif // !NDEBUG
3232 
3233 //===----------------------------------------------------------------------===//
3234 // GraphWriter support for ScheduleDAGMILive.
3235 //===----------------------------------------------------------------------===//
3236 
3237 #ifndef NDEBUG
3238 namespace llvm {
3239 
3240 template<> struct GraphTraits<
3241   ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};
3242 
3243 template<>
3244 struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
3245 
3246   DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
3247 
3248   static std::string getGraphName(const ScheduleDAG *G) {
3249     return G->MF.getName();
3250   }
3251 
3252   static bool renderGraphFromBottomUp() {
3253     return true;
3254   }
3255 
3256   static bool isNodeHidden(const SUnit *Node) {
3257     return (Node->Preds.size() > 10 || Node->Succs.size() > 10);
3258   }
3259 
3260   static bool hasNodeAddressLabel(const SUnit *Node,
3261                                   const ScheduleDAG *Graph) {
3262     return false;
3263   }
3264 
3265   /// If you want to override the dot attributes printed for a particular
3266   /// edge, override this method.
3267   static std::string getEdgeAttributes(const SUnit *Node,
3268                                        SUnitIterator EI,
3269                                        const ScheduleDAG *Graph) {
3270     if (EI.isArtificialDep())
3271       return "color=cyan,style=dashed";
3272     if (EI.isCtrlDep())
3273       return "color=blue,style=dashed";
3274     return "";
3275   }
3276 
3277   static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
3278     std::string Str;
3279     raw_string_ostream SS(Str);
3280     const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
3281     const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
3282       static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
3283     SS << "SU:" << SU->NodeNum;
3284     if (DFS)
3285       SS << " I:" << DFS->getNumInstrs(SU);
3286     return SS.str();
3287   }
3288   static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
3289     return G->getGraphNodeLabel(SU);
3290   }
3291 
3292   static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
3293     std::string Str("shape=Mrecord");
3294     const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
3295     const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
3296       static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
3297     if (DFS) {
3298       Str += ",style=filled,fillcolor=\"#";
3299       Str += DOT::getColorString(DFS->getSubtreeID(N));
3300       Str += '"';
3301     }
3302     return Str;
3303   }
3304 };
3305 } // namespace llvm
3306 #endif // NDEBUG
3307 
3308 /// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
3309 /// rendered using 'dot'.
3310 ///
3311 void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
3312 #ifndef NDEBUG
3313   ViewGraph(this, Name, false, Title);
3314 #else
3315   errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
3316          << "systems with Graphviz or gv!\n";
3317 #endif  // NDEBUG
3318 }
3319 
3320 /// Out-of-line implementation with no arguments is handy for gdb.
3321 void ScheduleDAGMI::viewGraph() {
3322   viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
3323 }
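// Editorial note: because this overload takes no arguments it is convenient
// to call from a debugger while stopped inside the scheduler, e.g.
// (hypothetical gdb session):
//
//   (gdb) call this->viewGraph()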
3324