//===---- LatencyPriorityQueue.cpp - A latency-oriented priority queue ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LatencyPriorityQueue class, which is a
// SchedulingPriorityQueue that schedules using latency information to
// reduce the length of the critical path through the basic block.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "scheduler"

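/// operator() - Comparison predicate used to order the ready queue.  It
/// returns true when RHS should be scheduled before LHS: nodes flagged
/// isScheduleHigh win first, then the node with the greater latency to the
/// end of the block (the critical path), then the node that solely blocks
/// more successors, and finally the lower node number for a stable order.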
bool latency_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
  // The isScheduleHigh flag allows nodes with wraparound dependencies that
  // cannot easily be modeled as edges with latencies to be scheduled as
  // soon as possible in a top-down schedule.
  if (LHS->isScheduleHigh && !RHS->isScheduleHigh)
    return false;
  if (!LHS->isScheduleHigh && RHS->isScheduleHigh)
    return true;

  unsigned LHSNum = LHS->NodeNum;
  unsigned RHSNum = RHS->NodeNum;

  // The most important heuristic is scheduling the critical path.
  unsigned LHSLatency = PQ->getLatency(LHSNum);
  unsigned RHSLatency = PQ->getLatency(RHSNum);
  if (LHSLatency < RHSLatency) return true;
  if (LHSLatency > RHSLatency) return false;

  // After that, if two nodes have identical latencies, look to see if one will
  // unblock more other nodes than the other.
  unsigned LHSBlocked = PQ->getNumSolelyBlockNodes(LHSNum);
  unsigned RHSBlocked = PQ->getNumSolelyBlockNodes(RHSNum);
  if (LHSBlocked < RHSBlocked) return true;
  if (LHSBlocked > RHSBlocked) return false;

  // Finally, just to provide a stable ordering, use the node number as a
  // deciding factor.
  return RHSNum < LHSNum;
}


/// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor
/// of SU, return it, otherwise return null.
SUnit *LatencyPriorityQueue::getSingleUnscheduledPred(SUnit *SU) {
  SUnit *OnlyAvailablePred = nullptr;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    SUnit &Pred = *I->getSUnit();
    if (!Pred.isScheduled) {
      // We found an available, but not scheduled, predecessor.  If it's the
      // only one we have found, keep track of it... otherwise give up.
      if (OnlyAvailablePred && OnlyAvailablePred != &Pred)
        return nullptr;
      OnlyAvailablePred = &Pred;
    }
  }

  return OnlyAvailablePred;
}

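/// push - Add SU to the ready queue, and record how many of its successors
/// have SU as their only remaining unscheduled predecessor.  That count is
/// what getNumSolelyBlockNodes() reports to the latency_sort comparator.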
void LatencyPriorityQueue::push(SUnit *SU) {
  // Look at all of the successors of this node.  Count the number of successors
  // that this node is the sole unscheduled predecessor for.
  unsigned NumNodesBlocking = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (getSingleUnscheduledPred(I->getSUnit()) == SU)
      ++NumNodesBlocking;
  }
  NumNodesSolelyBlocking[SU->NodeNum] = NumNodesBlocking;

  Queue.push_back(SU);
}


// scheduledNode - As nodes are scheduled, we look to see if there are any
// successor nodes that have a single unscheduled predecessor.  If so, that
// single predecessor has a higher priority, since scheduling it will make
// the node available.
void LatencyPriorityQueue::scheduledNode(SUnit *SU) {
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    AdjustPriorityOfUnscheduledPreds(I->getSUnit());
  }
}

/// AdjustPriorityOfUnscheduledPreds - One of the predecessors of SU was just
/// scheduled.  If SU is not itself available, then there is at least one
/// predecessor node that has not been scheduled yet.  If SU has exactly ONE
/// unscheduled predecessor, we want to increase its priority: it getting
/// scheduled will make this node available, so it is better than some other
/// node of the same priority that will not make a node available.
void LatencyPriorityQueue::AdjustPriorityOfUnscheduledPreds(SUnit *SU) {
  if (SU->isAvailable) return;  // All preds scheduled.

  SUnit *OnlyAvailablePred = getSingleUnscheduledPred(SU);
  if (!OnlyAvailablePred || !OnlyAvailablePred->isAvailable) return;

  // Okay, we found a single predecessor that is available, but not scheduled.
  // Since it is available, it must be in the priority queue.  First remove it.
  remove(OnlyAvailablePred);

  // Reinsert the node into the priority queue, which recomputes its
  // NumNodesSolelyBlocking value.
  push(OnlyAvailablePred);
}

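/// pop - Remove and return the highest-priority node from the ready queue, as
/// chosen by a linear scan with the latency_sort Picker, or null if the queue
/// is empty.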
SUnit *LatencyPriorityQueue::pop() {
  if (empty()) return nullptr;
  std::vector<SUnit *>::iterator Best = Queue.begin();
  for (std::vector<SUnit *>::iterator I = std::next(Queue.begin()),
       E = Queue.end(); I != E; ++I)
    if (Picker(*Best, *I))
      Best = I;
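  // Remove the chosen node by swapping it with the last element and popping
  // the back, which avoids shifting the rest of the vector.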
  SUnit *V = *Best;
  if (Best != std::prev(Queue.end()))
    std::swap(*Best, Queue.back());
  Queue.pop_back();
  return V;
}

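/// remove - Remove SU from the ready queue.  SU must currently be in the
/// queue; like pop(), removal swaps the entry with the back of the vector
/// before popping so no elements need to be shifted.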
void LatencyPriorityQueue::remove(SUnit *SU) {
  assert(!Queue.empty() && "Queue is empty!");
  std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(), SU);
  assert(I != Queue.end() && "Queue doesn't contain the SU being removed!");
  if (I != std::prev(Queue.end()))
    std::swap(*I, Queue.back());
  Queue.pop_back();
}