//===------- llvm/CodeGen/ScheduleDAG.h - Common Base Class ------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the ScheduleDAG class, which is used as the common
// base class for instruction schedulers. This encapsulates the scheduling DAG,
// which is shared between SelectionDAG and MachineInstr scheduling.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_SCHEDULEDAG_H
#define LLVM_CODEGEN_SCHEDULEDAG_H

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetLowering.h"

namespace llvm {
  class AliasAnalysis;
  class SUnit;
  class MachineConstantPool;
  class MachineFunction;
  class MachineRegisterInfo;
  class MachineInstr;
  struct MCSchedClassDesc;
  class TargetRegisterInfo;
  class ScheduleDAG;
  class SDNode;
  class TargetInstrInfo;
  class MCInstrDesc;
  class TargetMachine;
  class TargetRegisterClass;
  template<class Graph> class GraphWriter;

  /// SDep - Scheduling dependency. This represents one direction of an
  /// edge in the scheduling DAG.
  class SDep {
  public:
    /// Kind - These are the different kinds of scheduling dependencies.
    enum Kind {
      Data,        ///< Regular data dependence (aka true-dependence).
      Anti,        ///< A register anti-dependence (aka WAR).
      Output,      ///< A register output-dependence (aka WAW).
      Order        ///< Any other ordering dependency.
    };

    // Strong dependencies must be respected by the scheduler. Artificial
    // dependencies may be removed only if they are redundant with another
    // strong dependence.
    //
    // Weak dependencies may be violated by the scheduling strategy, but only if
    // the strategy can prove it is correct to do so.
    //
    // Strong OrderKinds must occur before "Weak".
    // Weak OrderKinds must occur after "Weak".
    enum OrderKind {
      Barrier,      ///< An unknown scheduling barrier.
      MayAliasMem,  ///< Nonvolatile load/store instructions that may alias.
      MustAliasMem, ///< Nonvolatile load/store instructions that must alias.
      Artificial,   ///< Arbitrary strong DAG edge (no real dependence).
      Weak,         ///< Arbitrary weak DAG edge.
      Cluster       ///< Weak DAG edge linking a chain of clustered instrs.
    };

  private:
    /// Dep - A pointer to the depending/depended-on SUnit, and an enum
    /// indicating the kind of the dependency.
    PointerIntPair<SUnit *, 2, Kind> Dep;

    /// Contents - A union discriminated by the dependence kind.
    union {
      /// Reg - For Data, Anti, and Output dependencies, the associated
      /// register. For Data dependencies that don't currently have a register
      /// assigned, this is set to zero.
      unsigned Reg;

      /// Order - Additional information about Order dependencies.
      unsigned OrdKind; // enum OrderKind
    } Contents;

    /// Latency - The time associated with this edge. Often this is just
    /// the value of the Latency field of the predecessor; however, advanced
    /// models may provide additional information about specific edges.
    unsigned Latency;

  public:
    /// SDep - Construct a null SDep. This is only for use by container
    /// classes which require default constructors. SUnits may not
    /// have null SDep edges.
    SDep() : Dep(nullptr, Data) {}

    /// SDep - Construct an SDep with the specified values.
    SDep(SUnit *S, Kind kind, unsigned Reg)
      : Dep(S, kind), Contents() {
      switch (kind) {
      default:
        llvm_unreachable("Reg given for non-register dependence!");
      case Anti:
      case Output:
        assert(Reg != 0 &&
               "SDep::Anti and SDep::Output must use a non-zero Reg!");
        Contents.Reg = Reg;
        Latency = 0;
        break;
      case Data:
        Contents.Reg = Reg;
        Latency = 1;
        break;
      }
    }

    SDep(SUnit *S, OrderKind kind)
      : Dep(S, Order), Contents(), Latency(0) {
      Contents.OrdKind = kind;
    }
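
    // Illustrative sketch only (not part of the original interface): DefSU and
    // StoreSU below are hypothetical SUnit pointers. A scheduler typically
    // builds edges along these lines:
    //
    //   SDep DataEdge(DefSU, SDep::Data, /*Reg=*/0); // data edge, latency defaults to 1
    //   DataEdge.setLatency(3);                      // refine from the machine model
    //   SDep ChainEdge(StoreSU, SDep::MayAliasMem);  // memory ordering edge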

    /// Return true if the specified SDep is equivalent except for latency.
    bool overlaps(const SDep &Other) const {
      if (Dep != Other.Dep) return false;
      switch (Dep.getInt()) {
      case Data:
      case Anti:
      case Output:
        return Contents.Reg == Other.Contents.Reg;
      case Order:
        return Contents.OrdKind == Other.Contents.OrdKind;
      }
      llvm_unreachable("Invalid dependency kind!");
    }

    bool operator==(const SDep &Other) const {
      return overlaps(Other) && Latency == Other.Latency;
    }

    bool operator!=(const SDep &Other) const {
      return !operator==(Other);
    }

    /// getLatency - Return the latency value for this edge, which roughly
    /// means the minimum number of cycles that must elapse between the
    /// predecessor and the successor, given that they have this edge
    /// between them.
    unsigned getLatency() const {
      return Latency;
    }

    /// setLatency - Set the latency for this edge.
    void setLatency(unsigned Lat) {
      Latency = Lat;
    }

    /// getSUnit - Return the SUnit to which this edge points.
    SUnit *getSUnit() const {
      return Dep.getPointer();
    }

    /// setSUnit - Assign the SUnit to which this edge points.
    void setSUnit(SUnit *SU) {
      Dep.setPointer(SU);
    }

    /// getKind - Return an enum value representing the kind of the dependence.
    Kind getKind() const {
      return Dep.getInt();
    }

    /// isCtrl - Shorthand for getKind() != SDep::Data.
    bool isCtrl() const {
      return getKind() != Data;
    }

    /// isNormalMemory - Test if this is an Order dependence between two
    /// memory accesses where both sides of the dependence access memory
    /// in non-volatile and fully modeled ways.
    bool isNormalMemory() const {
      return getKind() == Order && (Contents.OrdKind == MayAliasMem
                                    || Contents.OrdKind == MustAliasMem);
    }

    /// isBarrier - Test if this is an Order dependence that is marked
    /// as a barrier.
    bool isBarrier() const {
      return getKind() == Order && Contents.OrdKind == Barrier;
    }

    /// isNormalMemoryOrBarrier - Test if this could be any kind of memory
    /// dependence.
    bool isNormalMemoryOrBarrier() const {
      return (isNormalMemory() || isBarrier());
    }

    /// isMustAlias - Test if this is an Order dependence that is marked
    /// as "must alias", meaning that the SUnits at either end of the edge
    /// have a memory dependence on a known memory location.
    bool isMustAlias() const {
      return getKind() == Order && Contents.OrdKind == MustAliasMem;
    }

    /// isWeak - Test if this is a weak dependence. Weak dependencies are
    /// considered DAG edges for height computation and other heuristics, but do
    /// not force ordering. Breaking a weak edge may require the scheduler to
    /// compensate, for example by inserting a copy.
    bool isWeak() const {
      return getKind() == Order && Contents.OrdKind >= Weak;
    }

    /// isArtificial - Test if this is an Order dependence that is marked
    /// as "artificial", meaning it isn't necessary for correctness.
    bool isArtificial() const {
      return getKind() == Order && Contents.OrdKind == Artificial;
    }

    /// isCluster - Test if this is an Order dependence that is marked
    /// as "cluster", meaning it is artificial and wants to be adjacent.
    bool isCluster() const {
      return getKind() == Order && Contents.OrdKind == Cluster;
    }

    /// isAssignedRegDep - Test if this is a Data dependence that is
    /// associated with a register.
    bool isAssignedRegDep() const {
      return getKind() == Data && Contents.Reg != 0;
    }

    /// getReg - Return the register associated with this edge. This is
    /// only valid on Data, Anti, and Output edges. On Data edges, this
    /// value may be zero, meaning there is no associated register.
    unsigned getReg() const {
      assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
             "getReg called on non-register dependence edge!");
      return Contents.Reg;
    }

    /// setReg - Assign the associated register for this edge. This is
    /// only valid on Data, Anti, and Output edges. On Anti and Output
    /// edges, this value must not be zero. On Data edges, the value may
    /// be zero, which would mean that no specific register is associated
    /// with this edge.
    void setReg(unsigned Reg) {
      assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
             "setReg called on non-register dependence edge!");
      assert((getKind() != Anti || Reg != 0) &&
             "SDep::Anti edge cannot use the zero register!");
      assert((getKind() != Output || Reg != 0) &&
             "SDep::Output edge cannot use the zero register!");
      Contents.Reg = Reg;
    }
  };

  template <>
  struct isPodLike<SDep> { static const bool value = true; };

  /// SUnit - Scheduling unit. This is a node in the scheduling DAG.
  class SUnit {
  private:
    enum : unsigned { BoundaryID = ~0u };

    SDNode *Node;                       // Representative node.
    MachineInstr *Instr;                // Alternatively, a MachineInstr.
  public:
    SUnit *OrigNode;                    // If not this, the node from which
                                        // this node was cloned.
                                        // (SD scheduling only)

    const MCSchedClassDesc *SchedClass; // NULL or resolved SchedClass.

    // Preds/Succs - The SUnits before/after us in the graph.
    SmallVector<SDep, 4> Preds;  // All sunit predecessors.
    SmallVector<SDep, 4> Succs;  // All sunit successors.

    typedef SmallVectorImpl<SDep>::iterator pred_iterator;
    typedef SmallVectorImpl<SDep>::iterator succ_iterator;
    typedef SmallVectorImpl<SDep>::const_iterator const_pred_iterator;
    typedef SmallVectorImpl<SDep>::const_iterator const_succ_iterator;

    unsigned NodeNum;                   // Entry # of node in the node vector.
    unsigned NodeQueueId;               // Queue id of node.
    unsigned NumPreds;                  // # of SDep::Data preds.
    unsigned NumSuccs;                  // # of SDep::Data succs.
    unsigned NumPredsLeft;              // # of preds not scheduled.
    unsigned NumSuccsLeft;              // # of succs not scheduled.
    unsigned WeakPredsLeft;             // # of weak preds not scheduled.
    unsigned WeakSuccsLeft;             // # of weak succs not scheduled.
    unsigned short NumRegDefsLeft;      // # of reg defs with no scheduled use.
    unsigned short Latency;             // Node latency.
    bool isVRegCycle      : 1;          // May use and def the same vreg.
    bool isCall           : 1;          // Is a function call.
    bool isCallOp         : 1;          // Is a function call operand.
    bool isTwoAddress     : 1;          // Is a two-address instruction.
    bool isCommutable     : 1;          // Is a commutable instruction.
    bool hasPhysRegUses   : 1;          // Has physreg uses.
    bool hasPhysRegDefs   : 1;          // Has physreg defs that are being used.
    bool hasPhysRegClobbers : 1;        // Has any physreg defs, used or not.
    bool isPending        : 1;          // True once pending.
    bool isAvailable      : 1;          // True once available.
    bool isScheduled      : 1;          // True once scheduled.
    bool isScheduleHigh   : 1;          // True if preferable to schedule high.
    bool isScheduleLow    : 1;          // True if preferable to schedule low.
    bool isCloned         : 1;          // True if this node has been cloned.
    bool isUnbuffered     : 1;          // Uses an unbuffered resource.
    bool hasReservedResource : 1;       // Uses a reserved resource.
    Sched::Preference SchedulingPref;   // Scheduling preference.

  private:
    bool isDepthCurrent   : 1;          // True if Depth is current.
    bool isHeightCurrent  : 1;          // True if Height is current.
    unsigned Depth;                     // Node depth.
    unsigned Height;                    // Node height.
  public:
    unsigned TopReadyCycle; // Cycle relative to start when node is ready.
    unsigned BotReadyCycle; // Cycle relative to end when node is ready.

    const TargetRegisterClass *CopyDstRC; // Is a special copy node if not null.
    const TargetRegisterClass *CopySrcRC;

    /// SUnit - Construct an SUnit for pre-regalloc scheduling to represent
    /// an SDNode and any nodes flagged to it.
    SUnit(SDNode *node, unsigned nodenum)
      : Node(node), Instr(nullptr), OrigNode(nullptr), SchedClass(nullptr),
        NodeNum(nodenum), NodeQueueId(0), NumPreds(0), NumSuccs(0),
        NumPredsLeft(0), NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0),
        NumRegDefsLeft(0), Latency(0), isVRegCycle(false), isCall(false),
        isCallOp(false), isTwoAddress(false), isCommutable(false),
        hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
        isPending(false), isAvailable(false), isScheduled(false),
        isScheduleHigh(false), isScheduleLow(false), isCloned(false),
        isUnbuffered(false), hasReservedResource(false),
        SchedulingPref(Sched::None), isDepthCurrent(false),
        isHeightCurrent(false), Depth(0), Height(0), TopReadyCycle(0),
        BotReadyCycle(0), CopyDstRC(nullptr), CopySrcRC(nullptr) {}

    /// SUnit - Construct an SUnit for post-regalloc scheduling to represent
    /// a MachineInstr.
    SUnit(MachineInstr *instr, unsigned nodenum)
      : Node(nullptr), Instr(instr), OrigNode(nullptr), SchedClass(nullptr),
        NodeNum(nodenum), NodeQueueId(0), NumPreds(0), NumSuccs(0),
        NumPredsLeft(0), NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0),
        NumRegDefsLeft(0), Latency(0), isVRegCycle(false), isCall(false),
        isCallOp(false), isTwoAddress(false), isCommutable(false),
        hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
        isPending(false), isAvailable(false), isScheduled(false),
        isScheduleHigh(false), isScheduleLow(false), isCloned(false),
        isUnbuffered(false), hasReservedResource(false),
        SchedulingPref(Sched::None), isDepthCurrent(false),
        isHeightCurrent(false), Depth(0), Height(0), TopReadyCycle(0),
        BotReadyCycle(0), CopyDstRC(nullptr), CopySrcRC(nullptr) {}

    /// SUnit - Construct a placeholder SUnit.
    SUnit()
      : Node(nullptr), Instr(nullptr), OrigNode(nullptr), SchedClass(nullptr),
        NodeNum(BoundaryID), NodeQueueId(0), NumPreds(0), NumSuccs(0),
        NumPredsLeft(0), NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0),
        NumRegDefsLeft(0), Latency(0), isVRegCycle(false), isCall(false),
        isCallOp(false), isTwoAddress(false), isCommutable(false),
        hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
        isPending(false), isAvailable(false), isScheduled(false),
        isScheduleHigh(false), isScheduleLow(false), isCloned(false),
        isUnbuffered(false), hasReservedResource(false),
        SchedulingPref(Sched::None), isDepthCurrent(false),
        isHeightCurrent(false), Depth(0), Height(0), TopReadyCycle(0),
        BotReadyCycle(0), CopyDstRC(nullptr), CopySrcRC(nullptr) {}

    /// \brief Boundary nodes are placeholders for the boundary of the
    /// scheduling region.
    ///
    /// BoundaryNodes can have DAG edges, including Data edges, but they do not
    /// correspond to schedulable entities (e.g. instructions) and do not have a
    /// valid ID. Consequently, always check for boundary nodes before accessing
    /// an associative data structure keyed on node ID.
    bool isBoundaryNode() const { return NodeNum == BoundaryID; }

    /// setNode - Assign the representative SDNode for this SUnit.
    /// This may be used during pre-regalloc scheduling.
    void setNode(SDNode *N) {
      assert(!Instr && "Setting SDNode of SUnit with MachineInstr!");
      Node = N;
    }

    /// getNode - Return the representative SDNode for this SUnit.
    /// This may be used during pre-regalloc scheduling.
    SDNode *getNode() const {
      assert(!Instr && "Reading SDNode of SUnit with MachineInstr!");
      return Node;
    }

    /// isInstr - Return true if this SUnit refers to a machine instruction as
    /// opposed to an SDNode.
    bool isInstr() const { return Instr; }

    /// setInstr - Assign the instruction for the SUnit.
    /// This may be used during post-regalloc scheduling.
    void setInstr(MachineInstr *MI) {
      assert(!Node && "Setting MachineInstr of SUnit with SDNode!");
      Instr = MI;
    }

    /// getInstr - Return the representative MachineInstr for this SUnit.
    /// This may be used during post-regalloc scheduling.
    MachineInstr *getInstr() const {
      assert(!Node && "Reading MachineInstr of SUnit with SDNode!");
      return Instr;
    }

    /// addPred - This adds the specified edge as a pred of the current node if
    /// it is not already present.  It also adds the current node as a successor
    /// of the specified node.
    bool addPred(const SDep &D, bool Required = true);

    /// removePred - This removes the specified edge as a pred of the current
    /// node if it exists.  It also removes the current node as a successor of
    /// the specified node.
    void removePred(const SDep &D);
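
    // Illustrative sketch only (SU and PredSU are hypothetical nodes), assuming
    // addPred reports whether a new edge was actually inserted rather than
    // being a duplicate of an existing one:
    //
    //   if (SU->addPred(SDep(PredSU, SDep::Artificial)))
    //     ; // a new artificial ordering edge PredSU -> SU was added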

    /// getDepth - Return the depth of this node, which is the length of the
    /// maximum path up to any node which has no predecessors.
    unsigned getDepth() const {
      if (!isDepthCurrent)
        const_cast<SUnit *>(this)->ComputeDepth();
      return Depth;
    }

    /// getHeight - Return the height of this node, which is the length of the
    /// maximum path down to any node which has no successors.
    unsigned getHeight() const {
      if (!isHeightCurrent)
        const_cast<SUnit *>(this)->ComputeHeight();
      return Height;
    }

    /// setDepthToAtLeast - If NewDepth is greater than this node's
    /// depth value, set it to be the new depth value. This also
    /// recursively marks successor nodes dirty.
    void setDepthToAtLeast(unsigned NewDepth);

    /// setHeightToAtLeast - If NewHeight is greater than this node's
    /// height value, set it to be the new height value. This also
    /// recursively marks predecessor nodes dirty.
    void setHeightToAtLeast(unsigned NewHeight);

    /// setDepthDirty - Set a flag in this node to indicate that its
    /// stored Depth value will require recomputation the next time
    /// getDepth() is called.
    void setDepthDirty();

    /// setHeightDirty - Set a flag in this node to indicate that its
    /// stored Height value will require recomputation the next time
    /// getHeight() is called.
    void setHeightDirty();

    /// isPred - Test if node N is a predecessor of this node.
    bool isPred(SUnit *N) {
      for (unsigned i = 0, e = (unsigned)Preds.size(); i != e; ++i)
        if (Preds[i].getSUnit() == N)
          return true;
      return false;
    }

    /// isSucc - Test if node N is a successor of this node.
    bool isSucc(SUnit *N) {
      for (unsigned i = 0, e = (unsigned)Succs.size(); i != e; ++i)
        if (Succs[i].getSUnit() == N)
          return true;
      return false;
    }

    bool isTopReady() const {
      return NumPredsLeft == 0;
    }
    bool isBottomReady() const {
      return NumSuccsLeft == 0;
    }

    /// \brief Order this node's predecessor edges such that the critical path
    /// edge occurs first.
    void biasCriticalPath();

    void dump(const ScheduleDAG *G) const;
    void dumpAll(const ScheduleDAG *G) const;
    void print(raw_ostream &O, const ScheduleDAG *G) const;

  private:
    void ComputeDepth();
    void ComputeHeight();
  };

  //===--------------------------------------------------------------------===//
  /// SchedulingPriorityQueue - This interface is used to plug different
  /// priority computation algorithms into the list scheduler. It implements
  /// the interface of a standard priority queue, where nodes are inserted in
  /// arbitrary order and returned in priority order.  The computation of the
  /// priority and the representation of the queue are totally up to the
  /// implementation to decide.
  ///
  class SchedulingPriorityQueue {
    virtual void anchor();
    unsigned CurCycle;
    bool HasReadyFilter;
  public:
    SchedulingPriorityQueue(bool rf = false):
      CurCycle(0), HasReadyFilter(rf) {}
    virtual ~SchedulingPriorityQueue() {}

    virtual bool isBottomUp() const = 0;

    virtual void initNodes(std::vector<SUnit> &SUnits) = 0;
    virtual void addNode(const SUnit *SU) = 0;
    virtual void updateNode(const SUnit *SU) = 0;
    virtual void releaseState() = 0;

    virtual bool empty() const = 0;

    bool hasReadyFilter() const { return HasReadyFilter; }

    virtual bool tracksRegPressure() const { return false; }

    virtual bool isReady(SUnit *) const {
      assert(!HasReadyFilter && "The ready filter must override isReady()");
      return true;
    }
    virtual void push(SUnit *U) = 0;

    void push_all(const std::vector<SUnit *> &Nodes) {
      for (std::vector<SUnit *>::const_iterator I = Nodes.begin(),
           E = Nodes.end(); I != E; ++I)
        push(*I);
    }

    virtual SUnit *pop() = 0;

    virtual void remove(SUnit *SU) = 0;

    virtual void dump(ScheduleDAG *) const {}

    /// scheduledNode - As each node is scheduled, this method is invoked.  This
    /// allows the priority function to adjust the priority of related
    /// unscheduled nodes, for example.
    ///
    virtual void scheduledNode(SUnit *) {}

    virtual void unscheduledNode(SUnit *) {}

    void setCurCycle(unsigned Cycle) {
      CurCycle = Cycle;
    }

    unsigned getCurCycle() const {
      return CurCycle;
    }
  };
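
  // A minimal illustrative subclass (hypothetical, not part of this header; it
  // schedules nodes in simple FIFO order and ignores priorities). It only
  // shows how the pure virtual interface above fits together:
  //
  //   struct FIFOQueue : public SchedulingPriorityQueue {
  //     std::vector<SUnit *> Queue;
  //     bool isBottomUp() const override { return false; }
  //     void initNodes(std::vector<SUnit> &) override {}
  //     void addNode(const SUnit *) override {}
  //     void updateNode(const SUnit *) override {}
  //     void releaseState() override { Queue.clear(); }
  //     bool empty() const override { return Queue.empty(); }
  //     void push(SUnit *U) override { Queue.push_back(U); }
  //     SUnit *pop() override {
  //       SUnit *SU = Queue.front();
  //       Queue.erase(Queue.begin());
  //       return SU;
  //     }
  //     void remove(SUnit *SU) override {
  //       Queue.erase(std::find(Queue.begin(), Queue.end(), SU));
  //     }
  //   };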

  class ScheduleDAG {
  public:
    const TargetMachine &TM;              // Target processor
    const TargetInstrInfo *TII;           // Target instruction information
    const TargetRegisterInfo *TRI;        // Target processor register info
    MachineFunction &MF;                  // Machine function
    MachineRegisterInfo &MRI;             // Virtual/real register map
    std::vector<SUnit> SUnits;            // The scheduling units.
    SUnit EntrySU;                        // Special node for the region entry.
    SUnit ExitSU;                         // Special node for the region exit.

#ifdef NDEBUG
    static const bool StressSched = false;
#else
    bool StressSched;
#endif

    explicit ScheduleDAG(MachineFunction &mf);

    virtual ~ScheduleDAG();

    /// clearDAG - clear the DAG state (between regions).
    void clearDAG();

    /// getInstrDesc - Return the MCInstrDesc of this SUnit.
    /// Return NULL for SDNodes without a machine opcode.
    const MCInstrDesc *getInstrDesc(const SUnit *SU) const {
      if (SU->isInstr()) return &SU->getInstr()->getDesc();
      return getNodeDesc(SU->getNode());
    }

    /// viewGraph - Pop up a GraphViz/gv window with the ScheduleDAG rendered
    /// using 'dot'.
    ///
    virtual void viewGraph(const Twine &Name, const Twine &Title);
    virtual void viewGraph();

    virtual void dumpNode(const SUnit *SU) const = 0;

    /// getGraphNodeLabel - Return a label for an SUnit node in a visualization
    /// of the ScheduleDAG.
    virtual std::string getGraphNodeLabel(const SUnit *SU) const = 0;

    /// getDAGName - Return a label for the region of code covered by the DAG.
    virtual std::string getDAGName() const = 0;

    /// addCustomGraphFeatures - Add custom features for a visualization of
    /// the ScheduleDAG.
    virtual void addCustomGraphFeatures(GraphWriter<ScheduleDAG*> &) const {}

#ifndef NDEBUG
    /// VerifyScheduledDAG - Verify that all SUnits were scheduled and that
    /// their state is consistent. Return the number of scheduled SUnits.
    unsigned VerifyScheduledDAG(bool isBottomUp);
#endif

  private:
    // Return the MCInstrDesc of this SDNode or NULL.
    const MCInstrDesc *getNodeDesc(const SDNode *Node) const;
  };

  class SUnitIterator : public std::iterator<std::forward_iterator_tag,
                                             SUnit, ptrdiff_t> {
    SUnit *Node;
    unsigned Operand;

    SUnitIterator(SUnit *N, unsigned Op) : Node(N), Operand(Op) {}
  public:
    bool operator==(const SUnitIterator& x) const {
      return Operand == x.Operand;
    }
    bool operator!=(const SUnitIterator& x) const { return !operator==(x); }

    pointer operator*() const {
      return Node->Preds[Operand].getSUnit();
    }
    pointer operator->() const { return operator*(); }

    SUnitIterator& operator++() {                // Preincrement
      ++Operand;
      return *this;
    }
    SUnitIterator operator++(int) { // Postincrement
      SUnitIterator tmp = *this; ++*this; return tmp;
    }

    static SUnitIterator begin(SUnit *N) { return SUnitIterator(N, 0); }
    static SUnitIterator end  (SUnit *N) {
      return SUnitIterator(N, (unsigned)N->Preds.size());
    }

    unsigned getOperand() const { return Operand; }
    const SUnit *getNode() const { return Node; }
    /// isCtrlDep - Test if this is not an SDep::Data dependence.
    bool isCtrlDep() const {
      return getSDep().isCtrl();
    }
    bool isArtificialDep() const {
      return getSDep().isArtificial();
    }
    const SDep &getSDep() const {
      return Node->Preds[Operand];
    }
  };

  template <> struct GraphTraits<SUnit*> {
    typedef SUnit NodeType;
    typedef SUnitIterator ChildIteratorType;
    static inline NodeType *getEntryNode(SUnit *N) { return N; }
    static inline ChildIteratorType child_begin(NodeType *N) {
      return SUnitIterator::begin(N);
    }
    static inline ChildIteratorType child_end(NodeType *N) {
      return SUnitIterator::end(N);
    }
  };

  template <> struct GraphTraits<ScheduleDAG*> : public GraphTraits<SUnit*> {
    typedef std::vector<SUnit>::iterator nodes_iterator;
    static nodes_iterator nodes_begin(ScheduleDAG *G) {
      return G->SUnits.begin();
    }
    static nodes_iterator nodes_end(ScheduleDAG *G) {
      return G->SUnits.end();
    }
  };
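
  // Illustrative sketch only: with the GraphTraits specializations above and
  // llvm/ADT/DepthFirstIterator.h (an extra include, not pulled in by this
  // header), a walk over a node's transitive predecessors might look like:
  //
  //   for (SUnit *SU : depth_first(Root))   // Root is a hypothetical SUnit*
  //     dbgs() << "visiting SU(" << SU->NodeNum << ")\n";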

  /// ScheduleDAGTopologicalSort is a class that computes a topological
  /// ordering for SUnits and provides methods for dynamically updating
  /// the ordering as new edges are added.
  ///
  /// This allows a very fast implementation of IsReachable, for example.
  ///
  class ScheduleDAGTopologicalSort {
    /// SUnits - A reference to the ScheduleDAG's SUnits.
    std::vector<SUnit> &SUnits;
    SUnit *ExitSU;

    /// Index2Node - Maps topological index to the node number.
    std::vector<int> Index2Node;
    /// Node2Index - Maps the node number to its topological index.
    std::vector<int> Node2Index;
    /// Visited - a set of nodes visited during a DFS traversal.
    BitVector Visited;

    /// DFS - make a DFS traversal and mark all nodes affected by the
    /// edge insertion. These nodes will later get new topological indexes
    /// by means of the Shift method.
    void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);

    /// Shift - reassign topological indexes for the nodes in the DAG
    /// to preserve the topological ordering.
    void Shift(BitVector& Visited, int LowerBound, int UpperBound);

    /// Allocate - assign the topological index to the node n.
    void Allocate(int n, int index);

  public:
    ScheduleDAGTopologicalSort(std::vector<SUnit> &SUnits, SUnit *ExitSU);

    /// InitDAGTopologicalSorting - create the initial topological
    /// ordering from the DAG to be scheduled.
    void InitDAGTopologicalSorting();

    /// IsReachable - Checks if SU is reachable from TargetSU.
    bool IsReachable(const SUnit *SU, const SUnit *TargetSU);

    /// WillCreateCycle - Return true if addPred(TargetSU, SU) creates a cycle.
    bool WillCreateCycle(SUnit *TargetSU, SUnit *SU);

    /// AddPred - Updates the topological ordering to accommodate an edge
    /// to be added from SUnit X to SUnit Y.
    void AddPred(SUnit *Y, SUnit *X);

    /// RemovePred - Updates the topological ordering to accommodate the
    /// removal of the edge from the specified node N to the current node M,
    /// i.e. N is removed from the predecessors of M.
    void RemovePred(SUnit *M, SUnit *N);
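
    // Illustrative sketch only (SU and TargetSU are hypothetical nodes; Topo
    // is an instance of this class): a scheduler that wants to add an edge
    // from SU to TargetSU without creating a cycle might do:
    //
    //   if (!Topo.WillCreateCycle(TargetSU, SU)) {
    //     TargetSU->addPred(SDep(SU, SDep::Artificial));
    //     Topo.AddPred(TargetSU, SU); // keep the topological order up to date
    //   }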

    typedef std::vector<int>::iterator iterator;
    typedef std::vector<int>::const_iterator const_iterator;
    iterator begin() { return Index2Node.begin(); }
    const_iterator begin() const { return Index2Node.begin(); }
    iterator end() { return Index2Node.end(); }
    const_iterator end() const { return Index2Node.end(); }

    typedef std::vector<int>::reverse_iterator reverse_iterator;
    typedef std::vector<int>::const_reverse_iterator const_reverse_iterator;
    reverse_iterator rbegin() { return Index2Node.rbegin(); }
    const_reverse_iterator rbegin() const { return Index2Node.rbegin(); }
    reverse_iterator rend() { return Index2Node.rend(); }
    const_reverse_iterator rend() const { return Index2Node.rend(); }
  };
}

#endif