//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a bottom-up register pressure reduction list scheduler,
// using standard algorithms. The basic approach uses a priority queue of
// available nodes to schedule. One at a time, nodes are taken from the
// priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
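//
// A rough sketch of the main scheduling loop implemented below (see
// ListScheduleBottomUp; hazard handling, physreg interferences, and
// backtracking are elided here):
//
//   while (!AvailableQueue->empty() || !Interferences.empty()) {
//     SUnit *SU = PickNodeToScheduleBottomUp(); // highest-priority legal node
//     AdvancePastStalls(SU);                    // satisfy latency and hazards
//     ScheduleNodeBottomUp(SU);                 // emit SU, release its preds
//   }
//   std::reverse(Sequence.begin(), Sequence.end()); // schedule was bottom-up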
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SchedulerRegistry.h"
#include "ScheduleDAGSDNodes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <climits>
using namespace llvm;

#define DEBUG_TYPE "pre-RA-sched"

STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumPRCopies,   "Number of physical register copies");

static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  sourceListDAGScheduler("source",
                         "Similar to list-burr but schedules in source "
                         "order when possible",
                         createSourceListDAGScheduler);

static RegisterScheduler
  hybridListDAGScheduler("list-hybrid",
                         "Bottom-up register pressure aware list scheduling "
                         "which tries to balance latency and register pressure",
                         createHybridListDAGScheduler);

static RegisterScheduler
  ILPListDAGScheduler("list-ilp",
                      "Bottom-up register pressure aware list scheduling "
                      "which tries to balance ILP and register pressure",
                      createILPListDAGScheduler);

static cl::opt<bool> DisableSchedCycles(
  "disable-sched-cycles", cl::Hidden, cl::init(false),
  cl::desc("Disable cycle-level precision during preRA scheduling"));

// Temporary sched=list-ilp flags until the heuristics are robust.
// Some options are also available under sched=list-hybrid.
static cl::opt<bool> DisableSchedRegPressure(
  "disable-sched-reg-pressure", cl::Hidden, cl::init(false),
  cl::desc("Disable regpressure priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedLiveUses(
  "disable-sched-live-uses", cl::Hidden, cl::init(true),
  cl::desc("Disable live use priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedVRegCycle(
  "disable-sched-vrcycle", cl::Hidden, cl::init(false),
  cl::desc("Disable virtual register cycle interference checks"));
static cl::opt<bool> DisableSchedPhysRegJoin(
  "disable-sched-physreg-join", cl::Hidden, cl::init(false),
  cl::desc("Disable physreg def-use affinity"));
static cl::opt<bool> DisableSchedStalls(
  "disable-sched-stalls", cl::Hidden, cl::init(true),
  cl::desc("Disable no-stall priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedCriticalPath(
  "disable-sched-critical-path", cl::Hidden, cl::init(false),
  cl::desc("Disable critical path priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedHeight(
  "disable-sched-height", cl::Hidden, cl::init(false),
  cl::desc("Disable scheduled-height priority in sched=list-ilp"));
static cl::opt<bool> Disable2AddrHack(
  "disable-2addr-hack", cl::Hidden, cl::init(true),
  cl::desc("Disable scheduler's two-address hack"));

static cl::opt<int> MaxReorderWindow(
  "max-sched-reorder", cl::Hidden, cl::init(6),
  cl::desc("Number of instructions to allow ahead of the critical path "
           "in sched=list-ilp"));

static cl::opt<unsigned> AvgIPC(
  "sched-avg-ipc", cl::Hidden, cl::init(1),
  cl::desc("Average inst/cycle when no target itinerary exists."));

namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation. This supports bottom-up scheduling.
///
class ScheduleDAGRRList : public ScheduleDAGSDNodes {
private:
  /// NeedLatency - True if the scheduler will make use of latency information.
  ///
  bool NeedLatency;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  SchedulingPriorityQueue *AvailableQueue;

  /// PendingQueue - This contains all of the instructions whose operands have
  /// been issued, but their results are not ready yet (due to the latency of
  /// the operation). Once the operands become available, the instruction is
  /// added to the AvailableQueue.
  std::vector<SUnit*> PendingQueue;

  /// HazardRec - The hazard recognizer to use.
  ScheduleHazardRecognizer *HazardRec;

  /// CurCycle - The current scheduler state corresponds to this cycle.
  unsigned CurCycle;

  /// MinAvailableCycle - Cycle of the soonest available instruction.
  unsigned MinAvailableCycle;

  /// IssueCount - Count instructions issued in this cycle.
  /// Currently valid only for bottom-up scheduling.
  unsigned IssueCount;

  /// LiveRegDefs - A set of physical registers and their definitions that are
  /// "live". These nodes must be scheduled before any other nodes that modify
  /// the registers can be scheduled.
  unsigned NumLiveRegs;
  std::unique_ptr<SUnit*[]> LiveRegDefs;
  std::unique_ptr<SUnit*[]> LiveRegGens;

  // Collect interferences between physical register use/defs.
  // Each interference is an SUnit and set of physical registers.
  SmallVector<SUnit*, 4> Interferences;
  typedef DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMapT;
  LRegsMapT LRegsMap;

  /// Topo - A topological ordering for SUnits which permits fast IsReachable
  /// and similar queries.
  ScheduleDAGTopologicalSort Topo;

  // Hack to keep track of the inverse of FindCallSeqStart without more crazy
  // DAG crawling.
  DenseMap<SUnit*, SUnit*> CallSeqEndForStart;

public:
  ScheduleDAGRRList(MachineFunction &mf, bool needlatency,
                    SchedulingPriorityQueue *availqueue,
                    CodeGenOpt::Level OptLevel)
    : ScheduleDAGSDNodes(mf),
      NeedLatency(needlatency), AvailableQueue(availqueue), CurCycle(0),
      Topo(SUnits, nullptr) {

    const TargetSubtargetInfo &STI = mf.getSubtarget();
    if (DisableSchedCycles || !NeedLatency)
      HazardRec = new ScheduleHazardRecognizer();
    else
      HazardRec = STI.getInstrInfo()->CreateTargetHazardRecognizer(&STI, this);
  }

  ~ScheduleDAGRRList() override {
    delete HazardRec;
    delete AvailableQueue;
  }

  void Schedule() override;

  ScheduleHazardRecognizer *getHazardRec() { return HazardRec; }

  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
    return Topo.IsReachable(SU, TargetSU);
  }

  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
  /// create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
    return Topo.WillCreateCycle(SU, TargetSU);
  }

  /// AddPred - adds a predecessor edge to SUnit SU.
  /// This returns true if this is a new predecessor.
  /// Updates the topological ordering if required.
  void AddPred(SUnit *SU, const SDep &D) {
    Topo.AddPred(SU, D.getSUnit());
    SU->addPred(D);
  }

  /// RemovePred - removes a predecessor edge from SUnit SU.
  /// This returns true if an edge was removed.
  /// Updates the topological ordering if required.
  void RemovePred(SUnit *SU, const SDep &D) {
    Topo.RemovePred(SU, D.getSUnit());
    SU->removePred(D);
  }

private:
  bool isReady(SUnit *SU) {
    return DisableSchedCycles || !AvailableQueue->hasReadyFilter() ||
      AvailableQueue->isReady(SU);
  }

  void ReleasePred(SUnit *SU, const SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU);
  void ReleasePending();
  void AdvanceToCycle(unsigned NextCycle);
  void AdvancePastStalls(SUnit *SU);
  void EmitNode(SUnit *SU);
  void ScheduleNodeBottomUp(SUnit*);
  void CapturePred(SDep *PredEdge);
  void UnscheduleNodeBottomUp(SUnit*);
  void RestoreHazardCheckerBottomUp();
  void BacktrackBottomUp(SUnit*, SUnit*);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVectorImpl<SUnit*>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVectorImpl<unsigned>&);

  void releaseInterferences(unsigned Reg = 0);

  SUnit *PickNodeToScheduleBottomUp();
  void ListScheduleBottomUp();

  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = newSUnit(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = Clone(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// forceUnitLatencies - Register-pressure-reducing scheduling doesn't
  /// need actual latency information but the hybrid scheduler does.
  bool forceUnitLatencies() const override {
    return !NeedLatency;
  }
};
}  // end anonymous namespace

/// GetCostForDef - Looks up the register class and cost for a given definition.
/// Typically this just means looking up the representative register class,
/// but for untyped values (MVT::Untyped) it means inspecting the node's
/// opcode to determine what register class is being generated.
static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
                          const TargetLowering *TLI,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          unsigned &RegClass, unsigned &Cost,
                          const MachineFunction &MF) {
  MVT VT = RegDefPos.GetValue();

  // Special handling for untyped values. These values can only come from
  // the expansion of custom DAG-to-DAG patterns.
  if (VT == MVT::Untyped) {
    const SDNode *Node = RegDefPos.GetNode();

    // Special handling for CopyFromReg of untyped values.
    if (!Node->isMachineOpcode() && Node->getOpcode() == ISD::CopyFromReg) {
      unsigned Reg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
      const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(Reg);
      RegClass = RC->getID();
      Cost = 1;
      return;
    }

    unsigned Opcode = Node->getMachineOpcode();
    if (Opcode == TargetOpcode::REG_SEQUENCE) {
      unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
      const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
      RegClass = RC->getID();
      Cost = 1;
      return;
    }

    unsigned Idx = RegDefPos.GetIdx();
    const MCInstrDesc &Desc = TII->get(Opcode);
    const TargetRegisterClass *RC = TII->getRegClass(Desc, Idx, TRI, MF);
    RegClass = RC->getID();
    // FIXME: Cost arbitrarily set to 1 because there doesn't seem to be a
    // better way to determine it.
    Cost = 1;
  } else {
    RegClass = TLI->getRepRegClassFor(VT)->getID();
    Cost = TLI->getRepRegClassCostFor(VT);
  }
}

/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DEBUG(dbgs()
        << "********** List Scheduling BB#" << BB->getNumber()
        << " '" << BB->getName() << "' **********\n");

  CurCycle = 0;
  IssueCount = 0;
  MinAvailableCycle = DisableSchedCycles ? 0 : UINT_MAX;
  NumLiveRegs = 0;
  // Allocate slots for each physical register, plus one for a special register
  // to track the virtual resource of a calling sequence.
  LiveRegDefs.reset(new SUnit*[TRI->getNumRegs() + 1]());
  LiveRegGens.reset(new SUnit*[TRI->getNumRegs() + 1]());
  CallSeqEndForStart.clear();
  assert(Interferences.empty() && LRegsMap.empty() && "stale Interferences");

  // Build the scheduling graph.
  BuildSchedGraph(nullptr);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  Topo.InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnits);

  HazardRec->Reset();

  // Execute the actual scheduling loop.
  ListScheduleBottomUp();

  AvailableQueue->releaseState();

  DEBUG({
      dbgs() << "*** Final schedule ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  --PredSU->NumSuccsLeft;

  if (!forceUnitLatencies()) {
    // Updating predecessor's height. This is now the cycle when the
    // predecessor can be scheduled without causing a pipeline stall.
    PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency());
  }

  // If all the node's successors are scheduled, this node is ready
  // to be scheduled. Ignore the special EntrySU node.
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
    PredSU->isAvailable = true;

    unsigned Height = PredSU->getHeight();
    if (Height < MinAvailableCycle)
      MinAvailableCycle = Height;

    if (isReady(PredSU)) {
      AvailableQueue->push(PredSU);
    }
    // CapturePred and others may have left the node in the pending queue, avoid
    // adding it twice.
    else if (!PredSU->isPending) {
      PredSU->isPending = true;
      PendingQueue.push_back(PredSU);
    }
  }
}

/// IsChainDependent - Test if Outer is reachable from Inner through
/// chain dependencies.
static bool IsChainDependent(SDNode *Outer, SDNode *Inner,
                             unsigned NestLevel,
                             const TargetInstrInfo *TII) {
  SDNode *N = Outer;
  for (;;) {
    if (N == Inner)
      return true;
    // For a TokenFactor, examine each operand. There may be multiple ways
    // to get to the CALLSEQ_BEGIN, but we need to find the path with the
    // most nesting in order to ensure that we find the corresponding match.
    if (N->getOpcode() == ISD::TokenFactor) {
      for (const SDValue &Op : N->op_values())
        if (IsChainDependent(Op.getNode(), Inner, NestLevel, TII))
          return true;
      return false;
    }
    // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
    if (N->isMachineOpcode()) {
      if (N->getMachineOpcode() ==
          (unsigned)TII->getCallFrameDestroyOpcode()) {
        ++NestLevel;
      } else if (N->getMachineOpcode() ==
                 (unsigned)TII->getCallFrameSetupOpcode()) {
        if (NestLevel == 0)
          return false;
        --NestLevel;
      }
    }
    // Otherwise, find the chain and continue climbing.
    for (const SDValue &Op : N->op_values())
      if (Op.getValueType() == MVT::Other) {
        N = Op.getNode();
        goto found_chain_operand;
      }
    return false;
  found_chain_operand:;
    if (N->getOpcode() == ISD::EntryToken)
      return false;
  }
}

/// FindCallSeqStart - Starting from the (lowered) CALLSEQ_END node, locate
/// the corresponding (lowered) CALLSEQ_BEGIN node.
///
/// NestLevel and MaxNest are used in recursion to indicate the current level
/// of nesting of CALLSEQ_BEGIN and CALLSEQ_END pairs, as well as the maximum
/// level seen so far.
///
/// TODO: It would be better to give CALLSEQ_END an explicit operand to point
/// to the corresponding CALLSEQ_BEGIN to avoid needing to search for it.
static SDNode *
FindCallSeqStart(SDNode *N, unsigned &NestLevel, unsigned &MaxNest,
                 const TargetInstrInfo *TII) {
  for (;;) {
    // For a TokenFactor, examine each operand. There may be multiple ways
    // to get to the CALLSEQ_BEGIN, but we need to find the path with the
    // most nesting in order to ensure that we find the corresponding match.
    if (N->getOpcode() == ISD::TokenFactor) {
      SDNode *Best = nullptr;
      unsigned BestMaxNest = MaxNest;
      for (const SDValue &Op : N->op_values()) {
        unsigned MyNestLevel = NestLevel;
        unsigned MyMaxNest = MaxNest;
        if (SDNode *New = FindCallSeqStart(Op.getNode(),
                                           MyNestLevel, MyMaxNest, TII))
          if (!Best || (MyMaxNest > BestMaxNest)) {
            Best = New;
            BestMaxNest = MyMaxNest;
          }
      }
      assert(Best);
      MaxNest = BestMaxNest;
      return Best;
    }
    // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
    if (N->isMachineOpcode()) {
      if (N->getMachineOpcode() ==
          (unsigned)TII->getCallFrameDestroyOpcode()) {
        ++NestLevel;
        MaxNest = std::max(MaxNest, NestLevel);
      } else if (N->getMachineOpcode() ==
                 (unsigned)TII->getCallFrameSetupOpcode()) {
        assert(NestLevel != 0);
        --NestLevel;
        if (NestLevel == 0)
          return N;
      }
    }
    // Otherwise, find the chain and continue climbing.
    for (const SDValue &Op : N->op_values())
      if (Op.getValueType() == MVT::Other) {
        N = Op.getNode();
        goto found_chain_operand;
      }
    return nullptr;
  found_chain_operand:;
    if (N->getOpcode() == ISD::EntryToken)
      return nullptr;
  }
}
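
// A rough worked example of the walk above: starting at the outer
// CALLSEQ_END of
//
//   CALLSEQ_BEGIN (outer)   <- NestLevel 1 -> 0: this node is returned
//     CALLSEQ_BEGIN (inner) <- NestLevel 2 -> 1
//     CALLSEQ_END (inner)   <- NestLevel 1 -> 2, MaxNest becomes 2
//   CALLSEQ_END (outer)     <- start here: NestLevel 0 -> 1, MaxNest = 1
//
// the loop climbs chain operands, visiting the nodes bottom-up, and stops
// at the CALLSEQ_BEGIN that balances the nesting back to zero.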

/// Call ReleasePred for each predecessor, then update register live def/gen.
/// Always update LiveRegDefs for a register dependence even if the current SU
/// also defines the register. This effectively creates one large live range
/// across a sequence of two-address nodes. This is important because the
/// entire chain must be scheduled together. Example:
///
/// flags = (3) add
/// flags = (2) addc flags
/// flags = (1) addc flags
///
/// results in
///
/// LiveRegDefs[flags] = 3
/// LiveRegGens[flags] = 1
///
/// If (2) addc is unscheduled, then (1) addc must also be unscheduled to avoid
/// interference on flags.
void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
  // Bottom up: release predecessors
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(SU, &*I);
    if (I->isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      SUnit *RegDef = LiveRegDefs[I->getReg()]; (void)RegDef;
      assert((!RegDef || RegDef == SU || RegDef == I->getSUnit()) &&
             "interference on register dependence");
      LiveRegDefs[I->getReg()] = I->getSUnit();
      if (!LiveRegGens[I->getReg()]) {
        ++NumLiveRegs;
        LiveRegGens[I->getReg()] = SU;
      }
    }
  }

  // If we're scheduling a lowered CALLSEQ_END, find the corresponding
  // CALLSEQ_BEGIN. Inject an artificial physical register dependence between
  // these nodes, to prevent other calls from being interscheduled with them.
  unsigned CallResource = TRI->getNumRegs();
  if (!LiveRegDefs[CallResource])
    for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode())
      if (Node->isMachineOpcode() &&
          Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        unsigned NestLevel = 0;
        unsigned MaxNest = 0;
        SDNode *N = FindCallSeqStart(Node, NestLevel, MaxNest, TII);

        SUnit *Def = &SUnits[N->getNodeId()];
        CallSeqEndForStart[Def] = SU;

        ++NumLiveRegs;
        LiveRegDefs[CallResource] = Def;
        LiveRegGens[CallResource] = SU;
        break;
      }
}

/// Check to see if any of the pending instructions are ready to issue. If
/// so, add them to the available queue.
void ScheduleDAGRRList::ReleasePending() {
  if (DisableSchedCycles) {
    assert(PendingQueue.empty() && "pending instrs not allowed in this mode");
    return;
  }

  // If the available queue is empty, it is safe to reset MinAvailableCycle.
  if (AvailableQueue->empty())
    MinAvailableCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
    unsigned ReadyCycle = PendingQueue[i]->getHeight();
    if (ReadyCycle < MinAvailableCycle)
      MinAvailableCycle = ReadyCycle;

    if (PendingQueue[i]->isAvailable) {
      if (!isReady(PendingQueue[i]))
        continue;
      AvailableQueue->push(PendingQueue[i]);
    }
    PendingQueue[i]->isPending = false;
    PendingQueue[i] = PendingQueue.back();
    PendingQueue.pop_back();
    --i; --e;
  }
}

/// Move the scheduler state forward by the specified number of Cycles.
void ScheduleDAGRRList::AdvanceToCycle(unsigned NextCycle) {
  if (NextCycle <= CurCycle)
    return;

  IssueCount = 0;
  AvailableQueue->setCurCycle(NextCycle);
  if (!HazardRec->isEnabled()) {
    // Bypass lots of virtual calls in case of long latency.
    CurCycle = NextCycle;
  }
  else {
    for (; CurCycle != NextCycle; ++CurCycle) {
      HazardRec->RecedeCycle();
    }
  }
  // FIXME: Instead of visiting the pending Q each time, set a dirty flag on the
  // available Q to release pending nodes at least once before popping.
  ReleasePending();
}

/// Move the scheduler state forward until the specified node's dependents are
/// ready and can be scheduled with no resource conflicts.
void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
  if (DisableSchedCycles)
    return;

  // FIXME: Nodes such as CopyFromReg probably should not advance the current
  // cycle. Otherwise, we can wrongly mask real stalls. If the non-machine node
  // has predecessors the cycle will be advanced when they are scheduled.
  // But given the crude nature of modeling latency though such nodes, we
  // currently need to treat these nodes like real instructions.
  // if (!SU->getNode() || !SU->getNode()->isMachineOpcode()) return;

  unsigned ReadyCycle = SU->getHeight();

  // Bump CurCycle to account for latency. We assume the latency of other
  // available instructions may be hidden by the stall (not a full pipe stall).
  // This updates the hazard recognizer's cycle before reserving resources for
  // this instruction.
  AdvanceToCycle(ReadyCycle);

  // Calls are scheduled in their preceding cycle, so don't conflict with
  // hazards from instructions after the call. EmitNode will reset the
  // scoreboard state before emitting the call.
  if (SU->isCall)
    return;

  // FIXME: For resource conflicts in very long non-pipelined stages, we
  // should probably skip ahead here to avoid useless scoreboard checks.
  int Stalls = 0;
  while (true) {
    ScheduleHazardRecognizer::HazardType HT =
      HazardRec->getHazardType(SU, -Stalls);

    if (HT == ScheduleHazardRecognizer::NoHazard)
      break;

    ++Stalls;
  }
  AdvanceToCycle(CurCycle + Stalls);
}

/// Record this SUnit in the HazardRecognizer.
/// Does not update CurCycle.
void ScheduleDAGRRList::EmitNode(SUnit *SU) {
  if (!HazardRec->isEnabled())
    return;

  // Check for phys reg copy.
  if (!SU->getNode())
    return;

  switch (SU->getNode()->getOpcode()) {
  default:
    assert(SU->getNode()->isMachineOpcode() &&
           "This target-independent node should not be scheduled.");
    break;
  case ISD::MERGE_VALUES:
  case ISD::TokenFactor:
  case ISD::LIFETIME_START:
  case ISD::LIFETIME_END:
  case ISD::CopyToReg:
  case ISD::CopyFromReg:
  case ISD::EH_LABEL:
    // Noops don't affect the scoreboard state. Copies are likely to be
    // removed.
    return;
  case ISD::INLINEASM:
    // For inline asm, clear the pipeline state.
    HazardRec->Reset();
    return;
  }
  if (SU->isCall) {
    // Calls are scheduled with their preceding instructions. For bottom-up
    // scheduling, clear the pipeline state before emitting.
    HazardRec->Reset();
  }

  HazardRec->EmitInstruction(SU);
}

static void resetVRegCycle(SUnit *SU);

/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

#ifndef NDEBUG
  if (CurCycle < SU->getHeight())
    DEBUG(dbgs() << "   Height [" << SU->getHeight()
          << "] pipeline stall!\n");
#endif

  // FIXME: Do not modify node height. It may interfere with
  // backtracking. Instead add a "ready cycle" to SUnit. Before scheduling the
  // node its ready cycle can aid heuristics, and after scheduling it can
  // indicate the scheduled cycle.
  SU->setHeightToAtLeast(CurCycle);

  // Reserve resources for the scheduled instruction.
  EmitNode(SU);

  Sequence.push_back(SU);

  AvailableQueue->scheduledNode(SU);

  // If HazardRec is disabled, and each inst counts as one cycle, then
  // advance CurCycle before ReleasePredecessors to avoid useless pushes to
  // PendingQueue for schedulers that implement HasReadyFilter.
  if (!HazardRec->isEnabled() && AvgIPC < 2)
    AdvanceToCycle(CurCycle + 1);

  // Update liveness of predecessors before successors to avoid treating a
  // two-address node as a live range def.
  ReleasePredecessors(SU);

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    // LiveRegDefs[I->getReg()] != SU when SU is a two-address node.
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = nullptr;
      LiveRegGens[I->getReg()] = nullptr;
      releaseInterferences(I->getReg());
    }
  }
  // Release the special call resource dependence, if this is the beginning
  // of a call.
  unsigned CallResource = TRI->getNumRegs();
  if (LiveRegDefs[CallResource] == SU)
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        --NumLiveRegs;
        LiveRegDefs[CallResource] = nullptr;
        LiveRegGens[CallResource] = nullptr;
        releaseInterferences(CallResource);
      }
    }

  resetVRegCycle(SU);

  SU->isScheduled = true;

  // Conditions under which the scheduler should eagerly advance the cycle:
  // (1) No available instructions
  // (2) All pipelines full, so available instructions must have hazards.
  //
  // If HazardRec is disabled, the cycle was pre-advanced before calling
  // ReleasePredecessors. In that case, IssueCount should remain 0.
  //
  // Check AvailableQueue after ReleasePredecessors in case of zero latency.
  if (HazardRec->isEnabled() || AvgIPC > 1) {
    if (SU->getNode() && SU->getNode()->isMachineOpcode())
      ++IssueCount;
    if ((HazardRec->isEnabled() && HazardRec->atIssueLimit())
        || (!HazardRec->isEnabled() && IssueCount == AvgIPC))
      AdvanceToCycle(CurCycle + 1);
  }
}

/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();
  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  assert(PredSU->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
  ++PredSU->NumSuccsLeft;
}

/// UnscheduleNodeBottomUp - Remove the node from the schedule, and update its
/// state and its predecessors' states to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
  DEBUG(SU->dump(this));

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(&*I);
    if (I->isAssignedRegDep() && SU == LiveRegGens[I->getReg()]) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
             "Physical register dependency violated?");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = nullptr;
      LiveRegGens[I->getReg()] = nullptr;
      releaseInterferences(I->getReg());
    }
  }

  // Reclaim the special call resource dependence, if this is the beginning
  // of a call.
  unsigned CallResource = TRI->getNumRegs();
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isMachineOpcode() &&
        SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
      ++NumLiveRegs;
      LiveRegDefs[CallResource] = SU;
      LiveRegGens[CallResource] = CallSeqEndForStart[SU];
    }
  }

  // Release the special call resource dependence, if this is the end
  // of a call.
  if (LiveRegGens[CallResource] == SU)
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        --NumLiveRegs;
        LiveRegDefs[CallResource] = nullptr;
        LiveRegGens[CallResource] = nullptr;
        releaseInterferences(CallResource);
      }
    }

  for (auto &Succ : SU->Succs) {
    if (Succ.isAssignedRegDep()) {
      auto Reg = Succ.getReg();
      if (!LiveRegDefs[Reg])
        ++NumLiveRegs;
      // This becomes the nearest def. Note that an earlier def may still be
      // pending if this is a two-address node.
      LiveRegDefs[Reg] = SU;

      // Update LiveRegGens only if it was empty before this unscheduling.
      // This avoids incorrectly overwriting an entry set by a previous
      // unscheduling in this backtracking run.
      if (!LiveRegGens[Reg]) {
        // Find the successor with the lowest height.
        LiveRegGens[Reg] = Succ.getSUnit();
        for (auto &Succ2 : SU->Succs) {
          if (Succ2.isAssignedRegDep() && Succ2.getReg() == Reg &&
              Succ2.getSUnit()->getHeight() < LiveRegGens[Reg]->getHeight())
            LiveRegGens[Reg] = Succ2.getSUnit();
        }
      }
    }
  }
  if (SU->getHeight() < MinAvailableCycle)
    MinAvailableCycle = SU->getHeight();

  SU->setHeightDirty();
  SU->isScheduled = false;
  SU->isAvailable = true;
  if (!DisableSchedCycles && AvailableQueue->hasReadyFilter()) {
    // Don't make available until backtracking is complete.
    SU->isPending = true;
    PendingQueue.push_back(SU);
  }
  else {
    AvailableQueue->push(SU);
  }
  AvailableQueue->unscheduledNode(SU);
}

/// After backtracking, the hazard checker needs to be restored to a state
/// corresponding to the current cycle.
void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
  HazardRec->Reset();

  unsigned LookAhead = std::min((unsigned)Sequence.size(),
                                HazardRec->getMaxLookAhead());
  if (LookAhead == 0)
    return;

  std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
  unsigned HazardCycle = (*I)->getHeight();
  for (std::vector<SUnit*>::const_iterator E = Sequence.end(); I != E; ++I) {
    SUnit *SU = *I;
    for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
      HazardRec->RecedeCycle();
    }
    EmitNode(SU);
  }
}
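
// For example (illustrative): with getMaxLookAhead() == 4 and ten nodes
// already in Sequence, only the last four are replayed into the freshly
// reset recognizer. Between replayed nodes the recognizer recedes one cycle
// per unit of height gained, so its scoreboard ends up consistent with
// CurCycle without replaying the entire schedule.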

/// BacktrackBottomUp - Backtrack scheduling by unscheduling nodes up to and
/// including BtSU, so that a specific node can be scheduled.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, SUnit *BtSU) {
  SUnit *OldSU = Sequence.back();
  while (true) {
    Sequence.pop_back();
    // FIXME: use ready cycle instead of height
    CurCycle = OldSU->getHeight();
    UnscheduleNodeBottomUp(OldSU);
    AvailableQueue->setCurCycle(CurCycle);
    if (OldSU == BtSU)
      break;
    OldSU = Sequence.back();
  }

  assert(!SU->isSucc(OldSU) && "Something is wrong!");

  RestoreHazardCheckerBottomUp();

  ReleasePending();

  ++NumBacktracks;
}

static bool isOperandOf(const SUnit *SU, SDNode *N) {
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isOperandOf(N))
      return true;
  }
  return false;
}

/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  SDNode *N = SU->getNode();
  if (!N)
    return nullptr;

  if (SU->getNode()->getGluedNode())
    return nullptr;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    MVT VT = N->getSimpleValueType(i);
    if (VT == MVT::Glue)
      return nullptr;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (const SDValue &Op : N->op_values()) {
    MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());
    if (VT == MVT::Glue)
      return nullptr;
  }

  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return nullptr;

    // Unfolding an x86 DEC64m operation results in a store, a dec, and a
    // load, which can't be handled here, so quit.
    if (NewNodes.size() == 3)
      return nullptr;

    DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));

    // LoadNode may already exist. This can happen when there is another
    // load from the same location that produces the same type of value
    // but with different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = CreateNewSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);

      InitNumRegDefsLeft(LoadSU);
      computeLatency(LoadSU);
    }

    NewSU = CreateNewSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
      if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (MCID.isCommutable())
      NewSU->isCommutable = true;

    InitNumRegDefsLeft(NewSU);
    computeLatency(NewSU);

    // Record all the edges to and from the old SU, by category.
    SmallVector<SDep, 4> ChainPreds;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainPreds.push_back(*I);
      else if (isOperandOf(I->getSUnit(), LoadNode))
        LoadPreds.push_back(*I);
      else
        NodePreds.push_back(*I);
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainSuccs.push_back(*I);
      else
        NodeSuccs.push_back(*I);
    }

    // Now assign edges to the newly-created nodes.
    for (unsigned i = 0, e = ChainPreds.size(); i != e; ++i) {
      const SDep &Pred = ChainPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      D.setSUnit(NewSU);
      AddPred(SuccDep, D);
      // Balance register pressure.
      if (AvailableQueue->tracksRegPressure() && SuccDep->isScheduled
          && !D.isCtrl() && NewSU->NumRegDefsLeft > 0)
        --NewSU->NumRegDefsLeft;
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      if (isNewLoad) {
        D.setSUnit(LoadSU);
        AddPred(SuccDep, D);
      }
    }

    // Add a data dependency to reflect that NewSU reads the value defined
    // by LoadSU.
    SDep D(LoadSU, SDep::Data, 0);
    D.setLatency(LoadSU->Latency);
    AddPred(NewSU, D);

    if (isNewLoad)
      AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DEBUG(dbgs() << "    Duplicating SU #" << SU->NodeNum << "\n");
  NewSU = CreateClone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isArtificial())
      AddPred(NewSU, *I);

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(NewSU);
      AddPred(SuccSU, D);
      D.setSUnit(SU);
      DelDeps.push_back(std::make_pair(SuccSU, D));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  ++NumDups;
  return NewSU;
}

/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                                 const TargetRegisterClass *DestRC,
                                                 const TargetRegisterClass *SrcRC,
                                                 SmallVectorImpl<SUnit*> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(nullptr);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = CreateNewSUnit(nullptr);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(CopyToSU);
      AddPred(SuccSU, D);
      DelDeps.push_back(std::make_pair(SuccSU, *I));
    }
    else {
      // Avoid scheduling the def-side copy before other successors. Otherwise
      // we could introduce another physreg interference on the copy and
      // continue inserting copies indefinitely.
      AddPred(SuccSU, SDep(CopyFromSU, SDep::Artificial));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  SDep FromDep(SU, SDep::Data, Reg);
  FromDep.setLatency(SU->Latency);
  AddPred(CopyFromSU, FromDep);
  SDep ToDep(CopyFromSU, SDep::Data, 0);
  ToDep.setLatency(CopyFromSU->Latency);
  AddPred(CopyToSU, ToDep);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumPRCopies;
}
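
// Roughly, the transformation above: where SU previously fed its scheduled
// successors directly through physreg Reg, the graph now contains
//
//   SU --(Reg)--> CopyFromSU (SrcRC -> DestRC) --> CopyToSU (DestRC -> SrcRC)
//
// and the already-scheduled successors read from CopyToSU instead, so Reg
// itself is no longer live across them.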

/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  unsigned NumRes;
  if (N->getOpcode() == ISD::CopyFromReg) {
    // CopyFromReg has: chain, Val, glue.
    NumRes = 1;
  } else {
    const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
    assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
    NumRes = MCID.getNumDefs();
    for (const MCPhysReg *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
      if (Reg == *ImpDef)
        break;
      ++NumRes;
    }
  }
  return N->getSimpleValueType(NumRes);
}
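
// For example (illustrative register names): for a machine node whose
// MCInstrDesc has two explicit defs and implicit defs {EFLAGS, EAX}, a query
// for Reg == EAX makes the loop compute NumRes == 3, so the node's fourth
// result value is taken as EAX's type.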

/// CheckForLiveRegDef - If the specified register def of the specified SUnit
/// clobbers any "live" registers, add the interfering registers to RegAdded
/// and LRegs.
static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                               SUnit **LiveRegDefs,
                               SmallSet<unsigned, 4> &RegAdded,
                               SmallVectorImpl<unsigned> &LRegs,
                               const TargetRegisterInfo *TRI) {
  for (MCRegAliasIterator AliasI(Reg, TRI, true); AliasI.isValid(); ++AliasI) {

    // Check if this alias of Reg is live.
    if (!LiveRegDefs[*AliasI]) continue;

    // Allow multiple uses of the same def.
    if (LiveRegDefs[*AliasI] == SU) continue;

    // Add Reg to the set of interfering live regs.
    if (RegAdded.insert(*AliasI).second) {
      LRegs.push_back(*AliasI);
    }
  }
}
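
// For example (x86, purely illustrative): if LiveRegDefs[EAX] holds some
// SUnit other than SU, then a def of AX by SU interferes, because the alias
// iterator above visits EAX when Reg == AX and records it in LRegs.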

/// CheckForLiveRegDefMasked - Check for any live physregs that are clobbered
/// by RegMask, and add them to LRegs.
static void CheckForLiveRegDefMasked(SUnit *SU, const uint32_t *RegMask,
                                     ArrayRef<SUnit*> LiveRegDefs,
                                     SmallSet<unsigned, 4> &RegAdded,
                                     SmallVectorImpl<unsigned> &LRegs) {
  // Look at all live registers. Skip Reg0 and the special CallResource.
  for (unsigned i = 1, e = LiveRegDefs.size()-1; i != e; ++i) {
    if (!LiveRegDefs[i]) continue;
    if (LiveRegDefs[i] == SU) continue;
    if (!MachineOperand::clobbersPhysReg(RegMask, i)) continue;
    if (RegAdded.insert(i).second)
      LRegs.push_back(i);
  }
}

/// getNodeRegMask - Returns the register mask attached to an SDNode, if any.
static const uint32_t *getNodeRegMask(const SDNode *N) {
  for (const SDValue &Op : N->op_values())
    if (const auto *RegOp = dyn_cast<RegisterMaskSDNode>(Op.getNode()))
      return RegOp->getRegMask();
  return nullptr;
}

/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specified node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGRRList::
DelayForLiveRegsBottomUp(SUnit *SU, SmallVectorImpl<unsigned> &LRegs) {
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  //
  // If SU is the currently live definition of the same register that it uses,
  // then we are free to schedule it.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] != SU)
      CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs.get(),
                         RegAdded, LRegs, TRI);
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
    if (Node->getOpcode() == ISD::INLINEASM) {
      // Inline asm can clobber physical defs.
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
        --NumOps;  // Ignore the glue operand.

      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

        ++i; // Skip the ID value.
        if (InlineAsm::isRegDefKind(Flags) ||
            InlineAsm::isRegDefEarlyClobberKind(Flags) ||
            InlineAsm::isClobberKind(Flags)) {
          // Check for def of register or earlyclobber register.
          for (; NumVals; --NumVals, ++i) {
            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
            if (TargetRegisterInfo::isPhysicalRegister(Reg))
              CheckForLiveRegDef(SU, Reg, LiveRegDefs.get(), RegAdded, LRegs, TRI);
          }
        } else
          i += NumVals;
      }
      continue;
    }

    if (!Node->isMachineOpcode())
      continue;
    // If we're in the middle of scheduling a call, don't begin scheduling
    // another call. Also, don't allow any physical registers to be live across
    // the call.
    if (Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
      // Check the special calling-sequence resource.
      unsigned CallResource = TRI->getNumRegs();
      if (LiveRegDefs[CallResource]) {
        SDNode *Gen = LiveRegGens[CallResource]->getNode();
        while (SDNode *Glued = Gen->getGluedNode())
          Gen = Glued;
        if (!IsChainDependent(Gen, Node, 0, TII) &&
            RegAdded.insert(CallResource).second)
          LRegs.push_back(CallResource);
      }
    }
    if (const uint32_t *RegMask = getNodeRegMask(Node))
      CheckForLiveRegDefMasked(SU, RegMask,
                               makeArrayRef(LiveRegDefs.get(), TRI->getNumRegs()),
                               RegAdded, LRegs);

    const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
    if (!MCID.ImplicitDefs)
      continue;
    for (const MCPhysReg *Reg = MCID.getImplicitDefs(); *Reg; ++Reg)
      CheckForLiveRegDef(SU, *Reg, LiveRegDefs.get(), RegAdded, LRegs, TRI);
  }

  return !LRegs.empty();
}

void ScheduleDAGRRList::releaseInterferences(unsigned Reg) {
  // Add the nodes that aren't ready back onto the available list.
  for (unsigned i = Interferences.size(); i > 0; --i) {
    SUnit *SU = Interferences[i-1];
    LRegsMapT::iterator LRegsPos = LRegsMap.find(SU);
    if (Reg) {
      SmallVectorImpl<unsigned> &LRegs = LRegsPos->second;
      if (std::find(LRegs.begin(), LRegs.end(), Reg) == LRegs.end())
        continue;
    }
    SU->isPending = false;
    // The interfering node may no longer be available due to backtracking.
    // Furthermore, it may have been made available again, in which case it is
    // now already in the AvailableQueue.
    if (SU->isAvailable && !SU->NodeQueueId) {
      DEBUG(dbgs() << "    Repushing SU #" << SU->NodeNum << '\n');
      AvailableQueue->push(SU);
    }
    if (i < Interferences.size())
      Interferences[i-1] = Interferences.back();
    Interferences.pop_back();
    LRegsMap.erase(LRegsPos);
  }
}

/// Return a node that can be scheduled in this cycle. Requirements:
/// (1) Ready: latency has been satisfied
/// (2) No Hazards: resources are available
/// (3) No Interferences: may unschedule to break register interferences.
SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
  SUnit *CurSU = AvailableQueue->empty() ? nullptr : AvailableQueue->pop();
  while (CurSU) {
    SmallVector<unsigned, 4> LRegs;
    if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
      break;
    DEBUG(dbgs() << "    Interfering reg " <<
          (LRegs[0] == TRI->getNumRegs() ? "CallResource"
           : TRI->getName(LRegs[0]))
          << " SU #" << CurSU->NodeNum << '\n');
    std::pair<LRegsMapT::iterator, bool> LRegsPair =
      LRegsMap.insert(std::make_pair(CurSU, LRegs));
    if (LRegsPair.second) {
      CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
      Interferences.push_back(CurSU);
    }
    else {
      assert(CurSU->isPending && "Interferences are pending");
      // Update the interference with current live regs.
      LRegsPair.first->second = LRegs;
    }
    CurSU = AvailableQueue->pop();
  }
  if (CurSU)
    return CurSU;

  // All candidates are delayed due to live physical reg dependencies.
  // Try backtracking, code duplication, or inserting cross class copies
  // to resolve it.
  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    SUnit *TrySU = Interferences[i];
    SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];

    // Try unscheduling up to the point where it's safe to schedule
    // this node.
    SUnit *BtSU = nullptr;
    unsigned LiveCycle = UINT_MAX;
    for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
      unsigned Reg = LRegs[j];
      if (LiveRegGens[Reg]->getHeight() < LiveCycle) {
        BtSU = LiveRegGens[Reg];
        LiveCycle = BtSU->getHeight();
      }
    }
    if (!WillCreateCycle(TrySU, BtSU)) {
      // BacktrackBottomUp mutates Interferences!
      BacktrackBottomUp(TrySU, BtSU);

      // Force the current node to be scheduled before the node that
      // requires the physical reg dep.
      if (BtSU->isAvailable) {
        BtSU->isAvailable = false;
        if (!BtSU->isPending)
          AvailableQueue->remove(BtSU);
      }
      DEBUG(dbgs() << "ARTIFICIAL edge from SU(" << BtSU->NodeNum << ") to SU("
            << TrySU->NodeNum << ")\n");
      AddPred(TrySU, SDep(BtSU, SDep::Artificial));

      // If one or more successors has been unscheduled, then the current
      // node is no longer available.
      if (!TrySU->isAvailable || !TrySU->NodeQueueId)
        CurSU = AvailableQueue->pop();
      else {
        // Available and in AvailableQueue
        AvailableQueue->remove(TrySU);
        CurSU = TrySU;
      }
      // Interferences has been mutated. We must break.
      break;
    }
  }

  if (!CurSU) {
    // Can't backtrack. If it's too expensive to copy the value, then try to
    // duplicate the nodes that produce these "too expensive to copy"
    // values to break the dependency. In case even that doesn't work,
    // insert cross class copies.
    // If it's not too expensive, i.e. cost != -1, issue copies.
    SUnit *TrySU = Interferences[0];
    SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
    assert(LRegs.size() == 1 && "Can't handle this yet!");
    unsigned Reg = LRegs[0];
    SUnit *LRDef = LiveRegDefs[Reg];
    MVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
    const TargetRegisterClass *RC =
      TRI->getMinimalPhysRegClass(Reg, VT);
    const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);

    // If the cross-copy register class is the same as RC, then it must be
    // possible to copy the value directly; do not try to duplicate the def.
    // If the cross-copy register class is not the same as RC, then it is
    // possible to copy the value, but it requires cross-register-class copies
    // and it is expensive.
    // If the cross-copy register class is null, then it is not possible to
    // copy the value at all.
    SUnit *NewDef = nullptr;
    if (DestRC != RC) {
      NewDef = CopyAndMoveSuccessors(LRDef);
      if (!DestRC && !NewDef)
        report_fatal_error("Can't handle live physical register dependency!");
    }
    if (!NewDef) {
      // Issue copies, these can be expensive cross register class copies.
      SmallVector<SUnit*, 2> Copies;
      InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
      DEBUG(dbgs() << "    Adding an edge from SU #" << TrySU->NodeNum
            << " to SU #" << Copies.front()->NodeNum << "\n");
      AddPred(TrySU, SDep(Copies.front(), SDep::Artificial));
      NewDef = Copies.back();
    }

    DEBUG(dbgs() << "    Adding an edge from SU #" << NewDef->NodeNum
          << " to SU #" << TrySU->NodeNum << "\n");
    LiveRegDefs[Reg] = NewDef;
    AddPred(NewDef, SDep(TrySU, SDep::Artificial));
    TrySU->isAvailable = false;
    CurSU = NewDef;
  }
  assert(CurSU && "Unable to resolve live physical register dependencies!");
  return CurSU;
}

/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
  // Release any predecessors of the special Exit node.
  ReleasePredecessors(&ExitSU);

  // Add root to Available queue.
  if (!SUnits.empty()) {
    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    RootSU->isAvailable = true;
    AvailableQueue->push(RootSU);
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty() || !Interferences.empty()) {
    DEBUG(dbgs() << "\nExamining Available:\n";
          AvailableQueue->dump(this));

    // Pick the best node to schedule taking all constraints into
    // consideration.
    SUnit *SU = PickNodeToScheduleBottomUp();

    AdvancePastStalls(SU);

    ScheduleNodeBottomUp(SU);

    while (AvailableQueue->empty() && !PendingQueue.empty()) {
      // Advance the cycle to free resources. Skip ahead to the next ready SU.
      assert(MinAvailableCycle < UINT_MAX && "MinAvailableCycle uninitialized");
      AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));
    }
  }

1533 // Reverse the order if it is bottom up.
1534 std::reverse(Sequence.begin(), Sequence.end());
1535
1536 #ifndef NDEBUG
1537 VerifyScheduledSequence(/*isBottomUp=*/true);
1538 #endif
1539 }
1540
1541 //===----------------------------------------------------------------------===//
1542 // RegReductionPriorityQueue Definition
1543 //===----------------------------------------------------------------------===//
1544 //
1545 // This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
1546 // to reduce register pressure.
1547 //
1548 namespace {
1549 class RegReductionPQBase;
1550
1551 struct queue_sort : public std::binary_function<SUnit*, SUnit*, bool> {
1552 bool isReady(SUnit* SU, unsigned CurCycle) const { return true; }
1553 };
1554
1555 #ifndef NDEBUG
1556 template<class SF>
1557 struct reverse_sort : public queue_sort {
1558 SF &SortFunc;
1559 reverse_sort(SF &sf) : SortFunc(sf) {}
1560
1561 bool operator()(SUnit* left, SUnit* right) const {
1562 // reverse left/right rather than simply !SortFunc(left, right)
1563 // to expose different paths in the comparison logic.
1564 return SortFunc(right, left);
1565 }
1566 };
1567 #endif // NDEBUG
1568
1569 /// bu_ls_rr_sort - Priority function for bottom up register pressure
1570 /// reduction scheduler.
1571 struct bu_ls_rr_sort : public queue_sort {
1572 enum {
1573 IsBottomUp = true,
1574 HasReadyFilter = false
1575 };
1576
1577 RegReductionPQBase *SPQ;
1578 bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
1579
1580 bool operator()(SUnit* left, SUnit* right) const;
1581 };
1582
1583 // src_ls_rr_sort - Priority function for source order scheduler.
1584 struct src_ls_rr_sort : public queue_sort {
1585 enum {
1586 IsBottomUp = true,
1587 HasReadyFilter = false
1588 };
1589
1590 RegReductionPQBase *SPQ;
1591 src_ls_rr_sort(RegReductionPQBase *spq)
1592 : SPQ(spq) {}
1593
1594 bool operator()(SUnit* left, SUnit* right) const;
1595 };
1596
1597 // hybrid_ls_rr_sort - Priority function for hybrid scheduler.
1598 struct hybrid_ls_rr_sort : public queue_sort {
1599 enum {
1600 IsBottomUp = true,
1601 HasReadyFilter = false
1602 };
1603
1604 RegReductionPQBase *SPQ;
1605 hybrid_ls_rr_sort(RegReductionPQBase *spq)
1606 : SPQ(spq) {}
1607
1608 bool isReady(SUnit *SU, unsigned CurCycle) const;
1609
1610 bool operator()(SUnit* left, SUnit* right) const;
1611 };
1612
1613 // ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism)
1614 // scheduler.
1615 struct ilp_ls_rr_sort : public queue_sort {
1616 enum {
1617 IsBottomUp = true,
1618 HasReadyFilter = false
1619 };
1620
1621 RegReductionPQBase *SPQ;
1622 ilp_ls_rr_sort(RegReductionPQBase *spq)
1623 : SPQ(spq) {}
1624
1625 bool isReady(SUnit *SU, unsigned CurCycle) const;
1626
1627 bool operator()(SUnit* left, SUnit* right) const;
1628 };
1629
1630 class RegReductionPQBase : public SchedulingPriorityQueue {
1631 protected:
1632 std::vector<SUnit*> Queue;
1633 unsigned CurQueueId;
1634 bool TracksRegPressure;
1635 bool SrcOrder;
1636
1637 // SUnits - The SUnits for the current graph.
1638 std::vector<SUnit> *SUnits;
1639
1640 MachineFunction &MF;
1641 const TargetInstrInfo *TII;
1642 const TargetRegisterInfo *TRI;
1643 const TargetLowering *TLI;
1644 ScheduleDAGRRList *scheduleDAG;
1645
1646 // SethiUllmanNumbers - The SethiUllman number for each node.
1647 std::vector<unsigned> SethiUllmanNumbers;
1648
1649 /// RegPressure - Tracking current reg pressure per register class.
1650 ///
1651 std::vector<unsigned> RegPressure;
1652
1653 /// RegLimit - Tracking the number of allocatable registers per register
1654 /// class.
1655 std::vector<unsigned> RegLimit;
1656
1657 public:
1658 RegReductionPQBase(MachineFunction &mf,
1659 bool hasReadyFilter,
1660 bool tracksrp,
1661 bool srcorder,
1662 const TargetInstrInfo *tii,
1663 const TargetRegisterInfo *tri,
1664 const TargetLowering *tli)
1665 : SchedulingPriorityQueue(hasReadyFilter),
1666 CurQueueId(0), TracksRegPressure(tracksrp), SrcOrder(srcorder),
1667 MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(nullptr) {
1668 if (TracksRegPressure) {
1669 unsigned NumRC = TRI->getNumRegClasses();
1670 RegLimit.resize(NumRC);
1671 RegPressure.resize(NumRC);
1672 std::fill(RegLimit.begin(), RegLimit.end(), 0);
1673 std::fill(RegPressure.begin(), RegPressure.end(), 0);
1674 for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
1675 E = TRI->regclass_end(); I != E; ++I)
1676 RegLimit[(*I)->getID()] = tri->getRegPressureLimit(*I, MF);
1677 }
1678 }
1679
1680 void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
1681 scheduleDAG = scheduleDag;
1682 }
1683
1684 ScheduleHazardRecognizer* getHazardRec() {
1685 return scheduleDAG->getHazardRec();
1686 }
1687
1688 void initNodes(std::vector<SUnit> &sunits) override;
1689
1690 void addNode(const SUnit *SU) override;
1691
1692 void updateNode(const SUnit *SU) override;
1693
1694 void releaseState() override {
1695 SUnits = nullptr;
1696 SethiUllmanNumbers.clear();
1697 std::fill(RegPressure.begin(), RegPressure.end(), 0);
1698 }
1699
1700 unsigned getNodePriority(const SUnit *SU) const;
1701
1702 unsigned getNodeOrdering(const SUnit *SU) const {
1703 if (!SU->getNode()) return 0;
1704
1705 return SU->getNode()->getIROrder();
1706 }
1707
1708 bool empty() const override { return Queue.empty(); }
1709
1710 void push(SUnit *U) override {
1711 assert(!U->NodeQueueId && "Node in the queue already");
1712 U->NodeQueueId = ++CurQueueId;
1713 Queue.push_back(U);
1714 }
1715
1716 void remove(SUnit *SU) override {
1717 assert(!Queue.empty() && "Queue is empty!");
1718 assert(SU->NodeQueueId != 0 && "Not in queue!");
1719 std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
1720 SU);
1721 if (I != std::prev(Queue.end()))
1722 std::swap(*I, Queue.back());
1723 Queue.pop_back();
1724 SU->NodeQueueId = 0;
1725 }
1726
1727 bool tracksRegPressure() const override { return TracksRegPressure; }
1728
1729 void dumpRegPressure() const;
1730
1731 bool HighRegPressure(const SUnit *SU) const;
1732
1733 bool MayReduceRegPressure(SUnit *SU) const;
1734
1735 int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const;
1736
1737 void scheduledNode(SUnit *SU) override;
1738
1739 void unscheduledNode(SUnit *SU) override;
1740
1741 protected:
1742 bool canClobber(const SUnit *SU, const SUnit *Op);
1743 void AddPseudoTwoAddrDeps();
1744 void PrescheduleNodesWithMultipleUses();
1745 void CalculateSethiUllmanNumbers();
1746 };
1747
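// Note: Picker(A, B) returning true means B is preferred, so the linear scan
// in popFromQueueImpl tracks the most-preferred unit; swapping the winner with
// the back element before pop_back makes removal O(1), since the order of the
// remaining queue entries is irrelevant.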
1748 template<class SF>
1749 static SUnit *popFromQueueImpl(std::vector<SUnit*> &Q, SF &Picker) {
1750 std::vector<SUnit *>::iterator Best = Q.begin();
1751 for (std::vector<SUnit *>::iterator I = std::next(Q.begin()),
1752 E = Q.end(); I != E; ++I)
1753 if (Picker(*Best, *I))
1754 Best = I;
1755 SUnit *V = *Best;
1756 if (Best != std::prev(Q.end()))
1757 std::swap(*Best, Q.back());
1758 Q.pop_back();
1759 return V;
1760 }
1761
1762 template<class SF>
1763 SUnit *popFromQueue(std::vector<SUnit*> &Q, SF &Picker, ScheduleDAG *DAG) {
1764 #ifndef NDEBUG
1765 if (DAG->StressSched) {
1766 reverse_sort<SF> RPicker(Picker);
1767 return popFromQueueImpl(Q, RPicker);
1768 }
1769 #endif
1770 (void)DAG;
1771 return popFromQueueImpl(Q, Picker);
1772 }
1773
1774 template<class SF>
1775 class RegReductionPriorityQueue : public RegReductionPQBase {
1776 SF Picker;
1777
1778 public:
1779 RegReductionPriorityQueue(MachineFunction &mf,
1780 bool tracksrp,
1781 bool srcorder,
1782 const TargetInstrInfo *tii,
1783 const TargetRegisterInfo *tri,
1784 const TargetLowering *tli)
1785 : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, srcorder,
1786 tii, tri, tli),
1787 Picker(this) {}
1788
1789 bool isBottomUp() const override { return SF::IsBottomUp; }
1790
1791 bool isReady(SUnit *U) const override {
1792 return Picker.HasReadyFilter && Picker.isReady(U, getCurCycle());
1793 }
1794
1795 SUnit *pop() override {
1796 if (Queue.empty()) return nullptr;
1797
1798 SUnit *V = popFromQueue(Queue, Picker, scheduleDAG);
1799 V->NodeQueueId = 0;
1800 return V;
1801 }
1802
1803 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1804 void dump(ScheduleDAG *DAG) const override {
1805 // Emulate pop() without clobbering NodeQueueIds.
1806 std::vector<SUnit*> DumpQueue = Queue;
1807 SF DumpPicker = Picker;
1808 while (!DumpQueue.empty()) {
1809 SUnit *SU = popFromQueue(DumpQueue, DumpPicker, scheduleDAG);
1810 dbgs() << "Height " << SU->getHeight() << ": ";
1811 SU->dump(DAG);
1812 }
1813 }
1814 #endif
1815 };
1816
1817 typedef RegReductionPriorityQueue<bu_ls_rr_sort>
1818 BURegReductionPriorityQueue;
1819
1820 typedef RegReductionPriorityQueue<src_ls_rr_sort>
1821 SrcRegReductionPriorityQueue;
1822
1823 typedef RegReductionPriorityQueue<hybrid_ls_rr_sort>
1824 HybridBURRPriorityQueue;
1825
1826 typedef RegReductionPriorityQueue<ilp_ls_rr_sort>
1827 ILPBURRPriorityQueue;
1828 } // end anonymous namespace
1829
1830 //===----------------------------------------------------------------------===//
1831 // Static Node Priority for Register Pressure Reduction
1832 //===----------------------------------------------------------------------===//
1833
1834 // Check for special nodes that bypass scheduling heuristics.
1835 // Currently this pushes TokenFactor nodes down, but may be used for other
1836 // pseudo-ops as well.
1837 //
1838 // Return -1 to schedule right above left, 1 for left above right.
1839 // Return 0 if no bias exists.
1840 static int checkSpecialNodes(const SUnit *left, const SUnit *right) {
1841 bool LSchedLow = left->isScheduleLow;
1842 bool RSchedLow = right->isScheduleLow;
1843 if (LSchedLow != RSchedLow)
1844 return LSchedLow < RSchedLow ? 1 : -1;
1845 return 0;
1846 }
1847
1848 /// CalcNodeSethiUllmanNumber - Compute Sethi Ullman number.
1849 /// Smaller number is the higher priority.
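/// For example: given c = op(a, b) where a and b are leaf nodes, both leaves
/// get number 1; c then sees two equal predecessor numbers, so Extra becomes
/// 1 and c's number is 1 + 1 = 2, reflecting the extra value that stays live
/// while c's second operand is evaluated.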
1850 static unsigned
1851 CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
1852 unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
1853 if (SethiUllmanNumber != 0)
1854 return SethiUllmanNumber;
1855
1856 unsigned Extra = 0;
1857 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1858 I != E; ++I) {
1859 if (I->isCtrl()) continue; // ignore chain preds
1860 SUnit *PredSU = I->getSUnit();
1861 unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers);
1862 if (PredSethiUllman > SethiUllmanNumber) {
1863 SethiUllmanNumber = PredSethiUllman;
1864 Extra = 0;
1865 } else if (PredSethiUllman == SethiUllmanNumber)
1866 ++Extra;
1867 }
1868
1869 SethiUllmanNumber += Extra;
1870
1871 if (SethiUllmanNumber == 0)
1872 SethiUllmanNumber = 1;
1873
1874 return SethiUllmanNumber;
1875 }
1876
1877 /// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1878 /// scheduling units.
1879 void RegReductionPQBase::CalculateSethiUllmanNumbers() {
1880 SethiUllmanNumbers.assign(SUnits->size(), 0);
1881
1882 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1883 CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
1884 }
1885
1886 void RegReductionPQBase::addNode(const SUnit *SU) {
1887 unsigned SUSize = SethiUllmanNumbers.size();
1888 if (SUnits->size() > SUSize)
1889 SethiUllmanNumbers.resize(SUSize*2, 0);
1890 CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
1891 }
1892
1893 void RegReductionPQBase::updateNode(const SUnit *SU) {
1894 SethiUllmanNumbers[SU->NodeNum] = 0;
1895 CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
1896 }
1897
1898 // Lower priority means schedule further down. For bottom-up scheduling, lower
1899 // priority SUs are scheduled before higher priority SUs.
1900 unsigned RegReductionPQBase::getNodePriority(const SUnit *SU) const {
1901 assert(SU->NodeNum < SethiUllmanNumbers.size());
1902 unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
1903 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
1904 // CopyToReg should be close to its uses to facilitate coalescing and
1905 // avoid spilling.
1906 return 0;
1907 if (Opc == TargetOpcode::EXTRACT_SUBREG ||
1908 Opc == TargetOpcode::SUBREG_TO_REG ||
1909 Opc == TargetOpcode::INSERT_SUBREG)
1910 // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
1911 // close to their uses to facilitate coalescing.
1912 return 0;
1913 if (SU->NumSuccs == 0 && SU->NumPreds != 0)
1914 // If SU does not have a register use, i.e. it doesn't produce a value
1915 // that would be consumed (e.g. store), then it terminates a chain of
1916 // computation. Give it a large SethiUllman number so it will be
1917 // scheduled right before its predecessors, so that it doesn't lengthen
1918 // their live ranges.
1919 return 0xffff;
1920 if (SU->NumPreds == 0 && SU->NumSuccs != 0)
1921 // If SU does not have a register def, schedule it close to its uses
1922 // because it does not lengthen any live ranges.
1923 return 0;
1924 #if 1
1925 return SethiUllmanNumbers[SU->NodeNum];
1926 #else
1927 unsigned Priority = SethiUllmanNumbers[SU->NodeNum];
1928 if (SU->isCallOp) {
1929 // FIXME: This assumes all of the defs are used as call operands.
1930 int NP = (int)Priority - SU->getNode()->getNumValues();
1931 return (NP > 0) ? NP : 0;
1932 }
1933 return Priority;
1934 #endif
1935 }
1936
1937 //===----------------------------------------------------------------------===//
1938 // Register Pressure Tracking
1939 //===----------------------------------------------------------------------===//
1940
1941 void RegReductionPQBase::dumpRegPressure() const {
1942 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1943 for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
1944 E = TRI->regclass_end(); I != E; ++I) {
1945 const TargetRegisterClass *RC = *I;
1946 unsigned Id = RC->getID();
1947 unsigned RP = RegPressure[Id];
1948 if (!RP) continue;
1949 DEBUG(dbgs() << TRI->getRegClassName(RC) << ": " << RP << " / "
1950 << RegLimit[Id] << '\n');
1951 }
1952 #endif
1953 }
1954
1955 bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const {
1956 if (!TLI)
1957 return false;
1958
1959 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
1960 I != E; ++I) {
1961 if (I->isCtrl())
1962 continue;
1963 SUnit *PredSU = I->getSUnit();
1964 // NumRegDefsLeft is zero when enough uses of this node have been scheduled
1965 // to cover the number of registers defined (they are all live).
1966 if (PredSU->NumRegDefsLeft == 0) {
1967 continue;
1968 }
1969 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
1970 RegDefPos.IsValid(); RegDefPos.Advance()) {
1971 unsigned RCId, Cost;
1972 GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
1973
1974 if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
1975 return true;
1976 }
1977 }
1978 return false;
1979 }
1980
1981 bool RegReductionPQBase::MayReduceRegPressure(SUnit *SU) const {
1982 const SDNode *N = SU->getNode();
1983
1984 if (!N->isMachineOpcode() || !SU->NumSuccs)
1985 return false;
1986
1987 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
1988 for (unsigned i = 0; i != NumDefs; ++i) {
1989 MVT VT = N->getSimpleValueType(i);
1990 if (!N->hasAnyUseOfValue(i))
1991 continue;
1992 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1993 if (RegPressure[RCId] >= RegLimit[RCId])
1994 return true;
1995 }
1996 return false;
1997 }
1998
1999 // Compute the register pressure contribution of this instruction by counting
2000 // up for uses that are not live and down for defs. Only count register classes
2001 // that are already under high pressure. As a side effect, compute the number of
2002 // uses of registers that are already live.
2003 //
2004 // FIXME: This encompasses the logic in HighRegPressure and MayReduceRegPressure
2005 // so could probably be factored.
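//
// Note: a positive PDiff means scheduling SU bottom-up would make more
// already-pressured values live (its operand defs) than it would kill (its
// own defs); the list-ilp comparator therefore prefers the candidate with
// the smaller PDiff.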
2006 int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const {
2007 LiveUses = 0;
2008 int PDiff = 0;
2009 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
2010 I != E; ++I) {
2011 if (I->isCtrl())
2012 continue;
2013 SUnit *PredSU = I->getSUnit();
2014 // NumRegDefsLeft is zero when enough uses of this node have been scheduled
2015 // to cover the number of registers defined (they are all live).
2016 if (PredSU->NumRegDefsLeft == 0) {
2017 if (PredSU->getNode()->isMachineOpcode())
2018 ++LiveUses;
2019 continue;
2020 }
2021 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
2022 RegDefPos.IsValid(); RegDefPos.Advance()) {
2023 MVT VT = RegDefPos.GetValue();
2024 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2025 if (RegPressure[RCId] >= RegLimit[RCId])
2026 ++PDiff;
2027 }
2028 }
2029 const SDNode *N = SU->getNode();
2030
2031 if (!N || !N->isMachineOpcode() || !SU->NumSuccs)
2032 return PDiff;
2033
2034 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
2035 for (unsigned i = 0; i != NumDefs; ++i) {
2036 MVT VT = N->getSimpleValueType(i);
2037 if (!N->hasAnyUseOfValue(i))
2038 continue;
2039 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2040 if (RegPressure[RCId] >= RegLimit[RCId])
2041 --PDiff;
2042 }
2043 return PDiff;
2044 }
2045
2046 void RegReductionPQBase::scheduledNode(SUnit *SU) {
2047 if (!TracksRegPressure)
2048 return;
2049
2050 if (!SU->getNode())
2051 return;
2052
2053 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2054 I != E; ++I) {
2055 if (I->isCtrl())
2056 continue;
2057 SUnit *PredSU = I->getSUnit();
2058 // NumRegDefsLeft is zero when enough uses of this node have been scheduled
2059 // to cover the number of registers defined (they are all live).
2060 if (PredSU->NumRegDefsLeft == 0) {
2061 continue;
2062 }
2063 // FIXME: The ScheduleDAG currently loses information about which of a
2064 // node's values is consumed by each dependence. Consequently, if the node
2065 // defines multiple register classes, we don't know which to pressurize
2066 // here. Instead the following loop consumes the register defs in an
2067 // arbitrary order. At least it handles the common case of clustered loads
2068 // to the same class. For precise liveness, each SDep needs to indicate the
2069 // result number. But that tightly couples the ScheduleDAG with the
2070 // SelectionDAG making updates tricky. A simpler hack would be to attach a
2071 // value type or register class to SDep.
2072 //
2073 // The most important aspect of register tracking is balancing the increase
2074 // here with the reduction further below. Note that this SU may use multiple
2075 // defs in PredSU. They can't be determined here, but we've already
2076 // compensated by reducing NumRegDefsLeft in PredSU during
2077 // ScheduleDAGSDNodes::AddSchedEdges.
2078 --PredSU->NumRegDefsLeft;
2079 unsigned SkipRegDefs = PredSU->NumRegDefsLeft;
2080 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
2081 RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
2082 if (SkipRegDefs)
2083 continue;
2084
2085 unsigned RCId, Cost;
2086 GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
2087 RegPressure[RCId] += Cost;
2088 break;
2089 }
2090 }
2091
2092 // We should have this assert, but there may be dead SDNodes that never
2093 // materialize as SUnits, so they don't appear to generate liveness.
2094 //assert(SU->NumRegDefsLeft == 0 && "not all regdefs have scheduled uses");
2095 int SkipRegDefs = (int)SU->NumRegDefsLeft;
2096 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, scheduleDAG);
2097 RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
2098 if (SkipRegDefs > 0)
2099 continue;
2100 unsigned RCId, Cost;
2101 GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
2102 if (RegPressure[RCId] < Cost) {
2103 // Register pressure tracking is imprecise. This can happen. But we try
2104 // hard not to let it happen because it likely results in poor scheduling.
2105 DEBUG(dbgs() << " SU(" << SU->NodeNum << ") has too many regdefs\n");
2106 RegPressure[RCId] = 0;
2107 }
2108 else {
2109 RegPressure[RCId] -= Cost;
2110 }
2111 }
2112 dumpRegPressure();
2113 }
2114
2115 void RegReductionPQBase::unscheduledNode(SUnit *SU) {
2116 if (!TracksRegPressure)
2117 return;
2118
2119 const SDNode *N = SU->getNode();
2120 if (!N) return;
2121
2122 if (!N->isMachineOpcode()) {
2123 if (N->getOpcode() != ISD::CopyToReg)
2124 return;
2125 } else {
2126 unsigned Opc = N->getMachineOpcode();
2127 if (Opc == TargetOpcode::EXTRACT_SUBREG ||
2128 Opc == TargetOpcode::INSERT_SUBREG ||
2129 Opc == TargetOpcode::SUBREG_TO_REG ||
2130 Opc == TargetOpcode::REG_SEQUENCE ||
2131 Opc == TargetOpcode::IMPLICIT_DEF)
2132 return;
2133 }
2134
2135 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2136 I != E; ++I) {
2137 if (I->isCtrl())
2138 continue;
2139 SUnit *PredSU = I->getSUnit();
2140 // NumSuccsLeft counts all deps. Don't compare it with NumSuccs which only
2141 // counts data deps.
2142 if (PredSU->NumSuccsLeft != PredSU->Succs.size())
2143 continue;
2144 const SDNode *PN = PredSU->getNode();
2145 if (!PN->isMachineOpcode()) {
2146 if (PN->getOpcode() == ISD::CopyFromReg) {
2147 MVT VT = PN->getSimpleValueType(0);
2148 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2149 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2150 }
2151 continue;
2152 }
2153 unsigned POpc = PN->getMachineOpcode();
2154 if (POpc == TargetOpcode::IMPLICIT_DEF)
2155 continue;
2156 if (POpc == TargetOpcode::EXTRACT_SUBREG ||
2157 POpc == TargetOpcode::INSERT_SUBREG ||
2158 POpc == TargetOpcode::SUBREG_TO_REG) {
2159 MVT VT = PN->getSimpleValueType(0);
2160 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2161 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2162 continue;
2163 }
2164 unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
2165 for (unsigned i = 0; i != NumDefs; ++i) {
2166 MVT VT = PN->getSimpleValueType(i);
2167 if (!PN->hasAnyUseOfValue(i))
2168 continue;
2169 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2170 if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
2171 // Register pressure tracking is imprecise. This can happen.
2172 RegPressure[RCId] = 0;
2173 else
2174 RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
2175 }
2176 }
2177
2178 // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
2179 // may transfer data dependencies to CopyToReg.
2180 if (SU->NumSuccs && N->isMachineOpcode()) {
2181 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
2182 for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
2183 MVT VT = N->getSimpleValueType(i);
2184 if (VT == MVT::Glue || VT == MVT::Other)
2185 continue;
2186 if (!N->hasAnyUseOfValue(i))
2187 continue;
2188 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2189 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2190 }
2191 }
2192
2193 dumpRegPressure();
2194 }
2195
2196 //===----------------------------------------------------------------------===//
2197 // Dynamic Node Priority for Register Pressure Reduction
2198 //===----------------------------------------------------------------------===//
2199
2200 /// closestSucc - Returns the scheduled cycle of the successor which is
2201 /// closest to the current cycle.
2202 static unsigned closestSucc(const SUnit *SU) {
2203 unsigned MaxHeight = 0;
2204 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
2205 I != E; ++I) {
2206 if (I->isCtrl()) continue; // ignore chain succs
2207 unsigned Height = I->getSUnit()->getHeight();
2208 // If there are a bunch of CopyToRegs stacked up, they should be considered
2209 // to be at the same position.
2210 if (I->getSUnit()->getNode() &&
2211 I->getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
2212 Height = closestSucc(I->getSUnit())+1;
2213 if (Height > MaxHeight)
2214 MaxHeight = Height;
2215 }
2216 return MaxHeight;
2217 }
2218
2219 /// calcMaxScratches - Returns a cost estimate of the worst-case requirement
2220 /// for scratch registers, i.e. number of data dependencies.
2221 static unsigned calcMaxScratches(const SUnit *SU) {
2222 unsigned Scratches = 0;
2223 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2224 I != E; ++I) {
2225 if (I->isCtrl()) continue; // ignore chain preds
2226 Scratches++;
2227 }
2228 return Scratches;
2229 }
2230
2231 /// hasOnlyLiveInOpers - Return true if SU has only value predecessors that are
2232 /// CopyFromReg from a virtual register.
2233 static bool hasOnlyLiveInOpers(const SUnit *SU) {
2234 bool RetVal = false;
2235 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2236 I != E; ++I) {
2237 if (I->isCtrl()) continue;
2238 const SUnit *PredSU = I->getSUnit();
2239 if (PredSU->getNode() &&
2240 PredSU->getNode()->getOpcode() == ISD::CopyFromReg) {
2241 unsigned Reg =
2242 cast<RegisterSDNode>(PredSU->getNode()->getOperand(1))->getReg();
2243 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
2244 RetVal = true;
2245 continue;
2246 }
2247 }
2248 return false;
2249 }
2250 return RetVal;
2251 }
2252
2253 /// hasOnlyLiveOutUses - Return true if SU has only value successors that are
2254 /// CopyToReg to a virtual register. This SU def is probably a liveout and
2255 /// it has no other use. It should be scheduled closer to the terminator.
2256 static bool hasOnlyLiveOutUses(const SUnit *SU) {
2257 bool RetVal = false;
2258 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
2259 I != E; ++I) {
2260 if (I->isCtrl()) continue;
2261 const SUnit *SuccSU = I->getSUnit();
2262 if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg) {
2263 unsigned Reg =
2264 cast<RegisterSDNode>(SuccSU->getNode()->getOperand(1))->getReg();
2265 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
2266 RetVal = true;
2267 continue;
2268 }
2269 }
2270 return false;
2271 }
2272 return RetVal;
2273 }
2274
2275 // Set isVRegCycle for a node with only live in opers and live out uses. Also
2276 // set isVRegCycle for its CopyFromReg operands.
2277 //
2278 // This is only relevant for single-block loops, in which case the VRegCycle
2279 // node is likely an induction variable in which the operand and target virtual
2280 // registers should be coalesced (e.g. pre/post increment values). Setting the
2281 // isVRegCycle flag helps the scheduler prioritize other uses of the same
2282 // CopyFromReg so that this node becomes the virtual register "kill". This
2283 // avoids interference between the values live in and out of the block and
2284 // eliminates a copy inside the loop.
2285 static void initVRegCycle(SUnit *SU) {
2286 if (DisableSchedVRegCycle)
2287 return;
2288
2289 if (!hasOnlyLiveInOpers(SU) || !hasOnlyLiveOutUses(SU))
2290 return;
2291
2292 DEBUG(dbgs() << "VRegCycle: SU(" << SU->NodeNum << ")\n");
2293
2294 SU->isVRegCycle = true;
2295
2296 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2297 I != E; ++I) {
2298 if (I->isCtrl()) continue;
2299 I->getSUnit()->isVRegCycle = true;
2300 }
2301 }
2302
2303 // After scheduling the definition of a VRegCycle, clear the isVRegCycle flag of
2304 // CopyFromReg operands. We should no longer penalize other uses of this VReg.
2305 static void resetVRegCycle(SUnit *SU) {
2306 if (!SU->isVRegCycle)
2307 return;
2308
2309 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
2310 I != E; ++I) {
2311 if (I->isCtrl()) continue; // ignore chain preds
2312 SUnit *PredSU = I->getSUnit();
2313 if (PredSU->isVRegCycle) {
2314 assert(PredSU->getNode()->getOpcode() == ISD::CopyFromReg &&
2315 "VRegCycle def must be CopyFromReg");
2316 PredSU->isVRegCycle = false;
2317 }
2318 }
2319 }
2320
2321 // Return true if this SUnit uses a CopyFromReg node marked as a VRegCycle. This
2322 // means a node that defines the VRegCycle has not been scheduled yet.
2323 static bool hasVRegCycleUse(const SUnit *SU) {
2324 // If this SU also defines the VReg, don't hoist it as a "use".
2325 if (SU->isVRegCycle)
2326 return false;
2327
2328 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
2329 I != E; ++I) {
2330 if (I->isCtrl()) continue; // ignore chain preds
2331 if (I->getSUnit()->isVRegCycle &&
2332 I->getSUnit()->getNode()->getOpcode() == ISD::CopyFromReg) {
2333 DEBUG(dbgs() << " VReg cycle use: SU (" << SU->NodeNum << ")\n");
2334 return true;
2335 }
2336 }
2337 return false;
2338 }
2339
2340 // Check for either a dependence (latency) or resource (hazard) stall.
2341 //
2342 // Note: The ScheduleHazardRecognizer interface requires a non-const SU.
2343 static bool BUHasStall(SUnit *SU, int Height, RegReductionPQBase *SPQ) {
2344 if ((int)SPQ->getCurCycle() < Height) return true;
2345 if (SPQ->getHazardRec()->getHazardType(SU, 0)
2346 != ScheduleHazardRecognizer::NoHazard)
2347 return true;
2348 return false;
2349 }
2350
2351 // Return -1 if left has higher priority, 1 if right has higher priority.
2352 // Return 0 if latency-based priority is equivalent.
2353 static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref,
2354 RegReductionPQBase *SPQ) {
2355 // Scheduling an instruction that uses a VReg whose postincrement has not yet
2356 // been scheduled will induce a copy. Model this as an extra cycle of latency.
2357 int LPenalty = hasVRegCycleUse(left) ? 1 : 0;
2358 int RPenalty = hasVRegCycleUse(right) ? 1 : 0;
2359 int LHeight = (int)left->getHeight() + LPenalty;
2360 int RHeight = (int)right->getHeight() + RPenalty;
2361
2362 bool LStall = (!checkPref || left->SchedulingPref == Sched::ILP) &&
2363 BUHasStall(left, LHeight, SPQ);
2364 bool RStall = (!checkPref || right->SchedulingPref == Sched::ILP) &&
2365 BUHasStall(right, RHeight, SPQ);
2366
2367 // If scheduling one of the nodes will cause a pipeline stall, delay it. If
2368 // scheduling both of the nodes will cause pipeline stalls, sort them
2369 // according to their height.
2370 if (LStall) {
2371 if (!RStall)
2372 return 1;
2373 if (LHeight != RHeight)
2374 return LHeight > RHeight ? 1 : -1;
2375 } else if (RStall)
2376 return -1;
2377
2378 // If either node is scheduling for latency, sort them by height/depth
2379 // and latency.
2380 if (!checkPref || (left->SchedulingPref == Sched::ILP ||
2381 right->SchedulingPref == Sched::ILP)) {
2382 // If neither instruction stalls (!LStall && !RStall) and HazardRecognizer
2383 // is enabled, grouping instructions by cycle, then its height is already
2384 // covered so only its depth matters. We also reach this point if both stall
2385 // but have the same height.
2386 if (!SPQ->getHazardRec()->isEnabled()) {
2387 if (LHeight != RHeight)
2388 return LHeight > RHeight ? 1 : -1;
2389 }
2390 int LDepth = left->getDepth() - LPenalty;
2391 int RDepth = right->getDepth() - RPenalty;
2392 if (LDepth != RDepth) {
2393 DEBUG(dbgs() << " Comparing latency of SU (" << left->NodeNum
2394 << ") depth " << LDepth << " vs SU (" << right->NodeNum
2395 << ") depth " << RDepth << "\n");
2396 return LDepth < RDepth ? 1 : -1;
2397 }
2398 if (left->Latency != right->Latency)
2399 return left->Latency > right->Latency ? 1 : -1;
2400 }
2401 return 0;
2402 }
2403
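// Summary of the tie-break chain in BURRSort below: physreg defs first, then
// Sethi-Ullman priority (adjusted for call operands), source order when calls
// are involved, distance to the closest successor, the scratch-register
// estimate, latency (or raw height/depth when latency comparison is
// unavailable), and finally the queue id, preferring the unit pushed earlier.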
2404 static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
2405 // Schedule physical register definitions close to their use. This is
2406 // motivated by microarchitectures that can fuse cmp+jump macro-ops. But as
2407 // long as shortening physreg live ranges is generally good, we can defer
2408 // creating a subtarget hook.
2409 if (!DisableSchedPhysRegJoin) {
2410 bool LHasPhysReg = left->hasPhysRegDefs;
2411 bool RHasPhysReg = right->hasPhysRegDefs;
2412 if (LHasPhysReg != RHasPhysReg) {
2413 #ifndef NDEBUG
2414 static const char *const PhysRegMsg[] = { " has no physreg",
2415 " defines a physreg" };
2416 #endif
2417 DEBUG(dbgs() << " SU (" << left->NodeNum << ") "
2418 << PhysRegMsg[LHasPhysReg] << " SU(" << right->NodeNum << ") "
2419 << PhysRegMsg[RHasPhysReg] << "\n");
2420 return LHasPhysReg < RHasPhysReg;
2421 }
2422 }
2423
2424 // Prioritize by Sethi-Ullman number and push CopyToReg nodes down.
2425 unsigned LPriority = SPQ->getNodePriority(left);
2426 unsigned RPriority = SPQ->getNodePriority(right);
2427
2428 // Be really careful about hoisting call operands above previous calls.
2429 // Only allow it if it would reduce register pressure.
2430 if (left->isCall && right->isCallOp) {
2431 unsigned RNumVals = right->getNode()->getNumValues();
2432 RPriority = (RPriority > RNumVals) ? (RPriority - RNumVals) : 0;
2433 }
2434 if (right->isCall && left->isCallOp) {
2435 unsigned LNumVals = left->getNode()->getNumValues();
2436 LPriority = (LPriority > LNumVals) ? (LPriority - LNumVals) : 0;
2437 }
2438
2439 if (LPriority != RPriority)
2440 return LPriority > RPriority;
2441
2442 // If one or both of the nodes are calls and their Sethi-Ullman numbers are
2443 // the same, then keep source order.
2444 if (left->isCall || right->isCall) {
2445 unsigned LOrder = SPQ->getNodeOrdering(left);
2446 unsigned ROrder = SPQ->getNodeOrdering(right);
2447
2448 // Prefer an ordering where the lower the non-zero order number, the higher
2449 // the preference.
2450 if ((LOrder || ROrder) && LOrder != ROrder)
2451 return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
2452 }
2453
2454 // Try to schedule def + use closer when Sethi-Ullman numbers are the same.
2455 // e.g.
2456 // t1 = op t2, c1
2457 // t3 = op t4, c2
2458 //
2459 // and the following instructions are both ready.
2460 // t2 = op c3
2461 // t4 = op c4
2462 //
2463 // Then schedule t2 = op first.
2464 // i.e.
2465 // t4 = op c4
2466 // t2 = op c3
2467 // t1 = op t2, c1
2468 // t3 = op t4, c2
2469 //
2470 // This creates more short live intervals.
2471 unsigned LDist = closestSucc(left);
2472 unsigned RDist = closestSucc(right);
2473 if (LDist != RDist)
2474 return LDist < RDist;
2475
2476 // How many registers become live when the node is scheduled.
2477 unsigned LScratch = calcMaxScratches(left);
2478 unsigned RScratch = calcMaxScratches(right);
2479 if (LScratch != RScratch)
2480 return LScratch > RScratch;
2481
2482 // Comparing latency against a call makes little sense unless the node
2483 // is register pressure-neutral.
2484 if ((left->isCall && RPriority > 0) || (right->isCall && LPriority > 0))
2485 return (left->NodeQueueId > right->NodeQueueId);
2486
2487 // Do not compare latencies when one or both of the nodes are calls.
2488 if (!DisableSchedCycles &&
2489 !(left->isCall || right->isCall)) {
2490 int result = BUCompareLatency(left, right, false /*checkPref*/, SPQ);
2491 if (result != 0)
2492 return result > 0;
2493 }
2494 else {
2495 if (left->getHeight() != right->getHeight())
2496 return left->getHeight() > right->getHeight();
2497
2498 if (left->getDepth() != right->getDepth())
2499 return left->getDepth() < right->getDepth();
2500 }
2501
2502 assert(left->NodeQueueId && right->NodeQueueId &&
2503 "NodeQueueId cannot be zero");
2504 return (left->NodeQueueId > right->NodeQueueId);
2505 }
2506
2507 // Bottom up
2508 bool bu_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2509 if (int res = checkSpecialNodes(left, right))
2510 return res > 0;
2511
2512 return BURRSort(left, right, SPQ);
2513 }
2514
2515 // Source order, otherwise bottom up.
2516 bool src_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2517 if (int res = checkSpecialNodes(left, right))
2518 return res > 0;
2519
2520 unsigned LOrder = SPQ->getNodeOrdering(left);
2521 unsigned ROrder = SPQ->getNodeOrdering(right);
2522
2523 // Prefer an ordering where the lower the non-zero order number, the higher
2524 // the preference.
2525 if ((LOrder || ROrder) && LOrder != ROrder)
2526 return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
2527
2528 return BURRSort(left, right, SPQ);
2529 }
2530
2531 // If the time between now and when the instruction will be ready can cover
2532 // the spill code, then avoid adding it to the ready queue. This gives long
2533 // stalls highest priority and allows hoisting across calls. It should also
2534 // speed up processing the available queue.
2535 bool hybrid_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
2536 static const unsigned ReadyDelay = 3;
2537
2538 if (SPQ->MayReduceRegPressure(SU)) return true;
2539
2540 if (SU->getHeight() > (CurCycle + ReadyDelay)) return false;
2541
2542 if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay)
2543 != ScheduleHazardRecognizer::NoHazard)
2544 return false;
2545
2546 return true;
2547 }
2548
2549 // Return true if right should be scheduled with higher priority than left.
2550 bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2551 if (int res = checkSpecialNodes(left, right))
2552 return res > 0;
2553
2554 if (left->isCall || right->isCall)
2555 // No way to compute latency of calls.
2556 return BURRSort(left, right, SPQ);
2557
2558 bool LHigh = SPQ->HighRegPressure(left);
2559 bool RHigh = SPQ->HighRegPressure(right);
2560 // Avoid causing spills. If register pressure is high, schedule for
2561 // register pressure reduction.
2562 if (LHigh && !RHigh) {
2563 DEBUG(dbgs() << " pressure SU(" << left->NodeNum << ") > SU("
2564 << right->NodeNum << ")\n");
2565 return true;
2566 }
2567 else if (!LHigh && RHigh) {
2568 DEBUG(dbgs() << " pressure SU(" << right->NodeNum << ") > SU("
2569 << left->NodeNum << ")\n");
2570 return false;
2571 }
2572 if (!LHigh && !RHigh) {
2573 int result = BUCompareLatency(left, right, true /*checkPref*/, SPQ);
2574 if (result != 0)
2575 return result > 0;
2576 }
2577 return BURRSort(left, right, SPQ);
2578 }
2579
2580 // Schedule as many instructions in each cycle as possible. So don't make an
2581 // instruction available unless it is ready in the current cycle.
2582 bool ilp_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
2583 if (SU->getHeight() > CurCycle) return false;
2584
2585 if (SPQ->getHazardRec()->getHazardType(SU, 0)
2586 != ScheduleHazardRecognizer::NoHazard)
2587 return false;
2588
2589 return true;
2590 }
2591
2592 static bool canEnableCoalescing(SUnit *SU) {
2593 unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
2594 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
2595 // CopyToReg should be close to its uses to facilitate coalescing and
2596 // avoid spilling.
2597 return true;
2598
2599 if (Opc == TargetOpcode::EXTRACT_SUBREG ||
2600 Opc == TargetOpcode::SUBREG_TO_REG ||
2601 Opc == TargetOpcode::INSERT_SUBREG)
2602 // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
2603 // close to their uses to facilitate coalescing.
2604 return true;
2605
2606 if (SU->NumPreds == 0 && SU->NumSuccs != 0)
2607 // If SU does not have a register def, schedule it close to its uses
2608 // because it does not lengthen any live ranges.
2609 return true;
2610
2611 return false;
2612 }
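// Note: these cases mirror the zero-priority opcodes in getNodePriority; when
// register pressure is high, the list-ilp comparator prefers a node that can
// enable coalescing over one that cannot.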
2613
2614 // list-ilp is currently an experimental scheduler that allows various
2615 // heuristics to be enabled prior to the normal register reduction logic.
2616 bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2617 if (int res = checkSpecialNodes(left, right))
2618 return res > 0;
2619
2620 if (left->isCall || right->isCall)
2621 // No way to compute latency of calls.
2622 return BURRSort(left, right, SPQ);
2623
2624 unsigned LLiveUses = 0, RLiveUses = 0;
2625 int LPDiff = 0, RPDiff = 0;
2626 if (!DisableSchedRegPressure || !DisableSchedLiveUses) {
2627 LPDiff = SPQ->RegPressureDiff(left, LLiveUses);
2628 RPDiff = SPQ->RegPressureDiff(right, RLiveUses);
2629 }
2630 if (!DisableSchedRegPressure && LPDiff != RPDiff) {
2631 DEBUG(dbgs() << "RegPressureDiff SU(" << left->NodeNum << "): " << LPDiff
2632 << " != SU(" << right->NodeNum << "): " << RPDiff << "\n");
2633 return LPDiff > RPDiff;
2634 }
2635
2636 if (!DisableSchedRegPressure && (LPDiff > 0 || RPDiff > 0)) {
2637 bool LReduce = canEnableCoalescing(left);
2638 bool RReduce = canEnableCoalescing(right);
2639 if (LReduce && !RReduce) return false;
2640 if (RReduce && !LReduce) return true;
2641 }
2642
2643 if (!DisableSchedLiveUses && (LLiveUses != RLiveUses)) {
2644 DEBUG(dbgs() << "Live uses SU(" << left->NodeNum << "): " << LLiveUses
2645 << " != SU(" << right->NodeNum << "): " << RLiveUses << "\n");
2646 return LLiveUses < RLiveUses;
2647 }
2648
2649 if (!DisableSchedStalls) {
2650 bool LStall = BUHasStall(left, left->getHeight(), SPQ);
2651 bool RStall = BUHasStall(right, right->getHeight(), SPQ);
2652 if (LStall != RStall)
2653 return left->getHeight() > right->getHeight();
2654 }
2655
2656 if (!DisableSchedCriticalPath) {
2657 int spread = (int)left->getDepth() - (int)right->getDepth();
2658 if (std::abs(spread) > MaxReorderWindow) {
2659 DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): "
2660 << left->getDepth() << " != SU(" << right->NodeNum << "): "
2661 << right->getDepth() << "\n");
2662 return left->getDepth() < right->getDepth();
2663 }
2664 }
2665
2666 if (!DisableSchedHeight && left->getHeight() != right->getHeight()) {
2667 int spread = (int)left->getHeight() - (int)right->getHeight();
2668 if (std::abs(spread) > MaxReorderWindow)
2669 return left->getHeight() > right->getHeight();
2670 }
2671
2672 return BURRSort(left, right, SPQ);
2673 }
2674
2675 void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) {
2676 SUnits = &sunits;
2677 // Add pseudo dependency edges for two-address nodes.
2678 if (!Disable2AddrHack)
2679 AddPseudoTwoAddrDeps();
2680 // Reroute edges to nodes with multiple uses.
2681 if (!TracksRegPressure && !SrcOrder)
2682 PrescheduleNodesWithMultipleUses();
2683 // Calculate node priorities.
2684 CalculateSethiUllmanNumbers();
2685
2686 // For single block loops, mark nodes that look like canonical IV increments.
2687 if (scheduleDAG->BB->isSuccessor(scheduleDAG->BB)) {
2688 for (unsigned i = 0, e = sunits.size(); i != e; ++i) {
2689 initVRegCycle(&sunits[i]);
2690 }
2691 }
2692 }
2693
2694 //===----------------------------------------------------------------------===//
2695 // Preschedule for Register Pressure
2696 //===----------------------------------------------------------------------===//
2697
2698 bool RegReductionPQBase::canClobber(const SUnit *SU, const SUnit *Op) {
2699 if (SU->isTwoAddress) {
2700 unsigned Opc = SU->getNode()->getMachineOpcode();
2701 const MCInstrDesc &MCID = TII->get(Opc);
2702 unsigned NumRes = MCID.getNumDefs();
2703 unsigned NumOps = MCID.getNumOperands() - NumRes;
2704 for (unsigned i = 0; i != NumOps; ++i) {
2705 if (MCID.getOperandConstraint(i+NumRes, MCOI::TIED_TO) != -1) {
2706 SDNode *DU = SU->getNode()->getOperand(i).getNode();
2707 if (DU->getNodeId() != -1 &&
2708 Op->OrigNode == &(*SUnits)[DU->getNodeId()])
2709 return true;
2710 }
2711 }
2712 }
2713 return false;
2714 }
2715
2716 /// canClobberReachingPhysRegUse - True if SU would clobber one of its
2717 /// successor's explicit physregs whose definition can reach DepSU.
2718 /// i.e. DepSU should not be scheduled above SU.
2719 static bool canClobberReachingPhysRegUse(const SUnit *DepSU, const SUnit *SU,
2720 ScheduleDAGRRList *scheduleDAG,
2721 const TargetInstrInfo *TII,
2722 const TargetRegisterInfo *TRI) {
2723 const MCPhysReg *ImpDefs
2724 = TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs();
2725 const uint32_t *RegMask = getNodeRegMask(SU->getNode());
2726 if (!ImpDefs && !RegMask)
2727 return false;
2728
2729 for (SUnit::const_succ_iterator SI = SU->Succs.begin(), SE = SU->Succs.end();
2730 SI != SE; ++SI) {
2731 SUnit *SuccSU = SI->getSUnit();
2732 for (SUnit::const_pred_iterator PI = SuccSU->Preds.begin(),
2733 PE = SuccSU->Preds.end(); PI != PE; ++PI) {
2734 if (!PI->isAssignedRegDep())
2735 continue;
2736
2737 if (RegMask && MachineOperand::clobbersPhysReg(RegMask, PI->getReg()) &&
2738 scheduleDAG->IsReachable(DepSU, PI->getSUnit()))
2739 return true;
2740
2741 if (ImpDefs)
2742 for (const MCPhysReg *ImpDef = ImpDefs; *ImpDef; ++ImpDef)
2743 // Return true if SU clobbers this physical register use and the
2744 // definition of the register reaches from DepSU. IsReachable queries
2745 // a topological forward sort of the DAG (following the successors).
2746 if (TRI->regsOverlap(*ImpDef, PI->getReg()) &&
2747 scheduleDAG->IsReachable(DepSU, PI->getSUnit()))
2748 return true;
2749 }
2750 }
2751 return false;
2752 }
2753
2754 /// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
2755 /// physical register defs.
2756 static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
2757 const TargetInstrInfo *TII,
2758 const TargetRegisterInfo *TRI) {
2759 SDNode *N = SuccSU->getNode();
2760 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
2761 const MCPhysReg *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
2762 assert(ImpDefs && "Caller should check hasPhysRegDefs");
2763 for (const SDNode *SUNode = SU->getNode(); SUNode;
2764 SUNode = SUNode->getGluedNode()) {
2765 if (!SUNode->isMachineOpcode())
2766 continue;
2767 const MCPhysReg *SUImpDefs =
2768 TII->get(SUNode->getMachineOpcode()).getImplicitDefs();
2769 const uint32_t *SURegMask = getNodeRegMask(SUNode);
2770 if (!SUImpDefs && !SURegMask)
2771 continue;
2772 for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
2773 MVT VT = N->getSimpleValueType(i);
2774 if (VT == MVT::Glue || VT == MVT::Other)
2775 continue;
2776 if (!N->hasAnyUseOfValue(i))
2777 continue;
2778 unsigned Reg = ImpDefs[i - NumDefs];
2779 if (SURegMask && MachineOperand::clobbersPhysReg(SURegMask, Reg))
2780 return true;
2781 if (!SUImpDefs)
2782 continue;
2783 for (;*SUImpDefs; ++SUImpDefs) {
2784 unsigned SUReg = *SUImpDefs;
2785 if (TRI->regsOverlap(Reg, SUReg))
2786 return true;
2787 }
2788 }
2789 }
2790 return false;
2791 }
2792
2793 /// PrescheduleNodesWithMultipleUses - Nodes with multiple uses
2794 /// are not handled well by the general register pressure reduction
2795 /// heuristics. When presented with code like this:
2796 ///
2797 /// N
2798 /// / |
2799 /// / |
2800 /// U store
2801 /// |
2802 /// ...
2803 ///
2804 /// the heuristics tend to push the store up, but since the
2805 /// operand of the store has another use (U), this would increase
2806 /// the length of that other use (the U->N edge).
2807 ///
2808 /// This function transforms code like the above to route U's
2809 /// dependence through the store when possible, like this:
2810 ///
2811 /// N
2812 /// ||
2813 /// ||
2814 /// store
2815 /// |
2816 /// U
2817 /// |
2818 /// ...
2819 ///
2820 /// This results in the store being scheduled immediately
2821 /// after N, which shortens the U->N live range, reducing
2822 /// register pressure.
2823 ///
2824 void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
2825 // Visit all the nodes in topological order, working top-down.
2826 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
2827 SUnit *SU = &(*SUnits)[i];
2828 // For now, only look at nodes with no data successors, such as stores.
2829 // These are especially important, due to the heuristics in
2830 // getNodePriority for nodes with no data successors.
2831 if (SU->NumSuccs != 0)
2832 continue;
2833 // For now, only look at nodes with exactly one data predecessor.
2834 if (SU->NumPreds != 1)
2835 continue;
2836 // Avoid prescheduling copies to virtual registers, which don't behave
2837 // like other nodes from the perspective of scheduling heuristics.
2838 if (SDNode *N = SU->getNode())
2839 if (N->getOpcode() == ISD::CopyToReg &&
2840 TargetRegisterInfo::isVirtualRegister
2841 (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
2842 continue;
2843
2844 // Locate the single data predecessor.
2845 SUnit *PredSU = nullptr;
2846 for (SUnit::const_pred_iterator II = SU->Preds.begin(),
2847 EE = SU->Preds.end(); II != EE; ++II)
2848 if (!II->isCtrl()) {
2849 PredSU = II->getSUnit();
2850 break;
2851 }
2852 assert(PredSU);
2853
2854 // Don't rewrite edges that carry physregs, because that requires additional
2855 // support infrastructure.
2856 if (PredSU->hasPhysRegDefs)
2857 continue;
2858 // Short-circuit the case where SU is PredSU's only data successor.
2859 if (PredSU->NumSuccs == 1)
2860 continue;
2861 // Avoid prescheduling to copies from virtual registers, which don't behave
2862 // like other nodes from the perspective of scheduling heuristics.
2863 if (SDNode *N = SU->getNode())
2864 if (N->getOpcode() == ISD::CopyFromReg &&
2865 TargetRegisterInfo::isVirtualRegister
2866 (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
2867 continue;
2868
2869 // Perform checks on the successors of PredSU.
2870 for (SUnit::const_succ_iterator II = PredSU->Succs.begin(),
2871 EE = PredSU->Succs.end(); II != EE; ++II) {
2872 SUnit *PredSuccSU = II->getSUnit();
2873 if (PredSuccSU == SU) continue;
2874 // If PredSU has another successor with no data successors, for
2875 // now don't attempt to choose either over the other.
2876 if (PredSuccSU->NumSuccs == 0)
2877 goto outer_loop_continue;
2878 // Don't break physical register dependencies.
2879 if (SU->hasPhysRegClobbers && PredSuccSU->hasPhysRegDefs)
2880 if (canClobberPhysRegDefs(PredSuccSU, SU, TII, TRI))
2881 goto outer_loop_continue;
2882 // Don't introduce graph cycles.
2883 if (scheduleDAG->IsReachable(SU, PredSuccSU))
2884 goto outer_loop_continue;
2885 }
2886
2887 // Ok, the transformation is safe and the heuristics suggest it is
2888 // profitable. Update the graph.
2889 DEBUG(dbgs() << " Prescheduling SU #" << SU->NodeNum
2890 << " next to PredSU #" << PredSU->NodeNum
2891 << " to guide scheduling in the presence of multiple uses\n");
2892 for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {
2893 SDep Edge = PredSU->Succs[i];
2894 assert(!Edge.isAssignedRegDep());
2895 SUnit *SuccSU = Edge.getSUnit();
2896 if (SuccSU != SU) {
2897 Edge.setSUnit(PredSU);
2898 scheduleDAG->RemovePred(SuccSU, Edge);
2899 scheduleDAG->AddPred(SU, Edge);
2900 Edge.setSUnit(SU);
2901 scheduleDAG->AddPred(SuccSU, Edge);
2902 --i;
2903 }
2904 }
2905 outer_loop_continue:;
2906 }
2907 }
2908
2909 /// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
2910 /// it as a def&use operand, add a pseudo control edge from it to the other
2911 /// node (if it won't create a cycle) so the two-address one will be scheduled
2912 /// first (lower in the schedule). If both nodes are two-address, favor the
2913 /// one that has a CopyToReg use (more likely to be a loop induction update).
2914 /// If both are two-address, but one is commutable while the other is not
2915 /// commutable, favor the one that's not commutable.
2916 void RegReductionPQBase::AddPseudoTwoAddrDeps() {
2917 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
2918 SUnit *SU = &(*SUnits)[i];
2919 if (!SU->isTwoAddress)
2920 continue;
2921
2922 SDNode *Node = SU->getNode();
2923 if (!Node || !Node->isMachineOpcode() || SU->getNode()->getGluedNode())
2924 continue;
2925
2926 bool isLiveOut = hasOnlyLiveOutUses(SU);
2927 unsigned Opc = Node->getMachineOpcode();
2928 const MCInstrDesc &MCID = TII->get(Opc);
2929 unsigned NumRes = MCID.getNumDefs();
2930 unsigned NumOps = MCID.getNumOperands() - NumRes;
2931 for (unsigned j = 0; j != NumOps; ++j) {
2932 if (MCID.getOperandConstraint(j+NumRes, MCOI::TIED_TO) == -1)
2933 continue;
2934 SDNode *DU = SU->getNode()->getOperand(j).getNode();
2935 if (DU->getNodeId() == -1)
2936 continue;
2937 const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
2938 if (!DUSU) continue;
2939 for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
2940 E = DUSU->Succs.end(); I != E; ++I) {
2941 if (I->isCtrl()) continue;
2942 SUnit *SuccSU = I->getSUnit();
2943 if (SuccSU == SU)
2944 continue;
2945 // Be conservative. Ignore if nodes aren't at roughly the same
2946 // depth and height.
2947 if (SuccSU->getHeight() < SU->getHeight() &&
2948 (SU->getHeight() - SuccSU->getHeight()) > 1)
2949 continue;
2950 // Skip past COPY_TO_REGCLASS nodes, so that the pseudo edge
2951 // constrains whatever is using the copy, instead of the copy
2952 // itself. In the case that the copy is coalesced, this
2953 // preserves the intent of the pseudo two-address heuristics.
2954 while (SuccSU->Succs.size() == 1 &&
2955 SuccSU->getNode()->isMachineOpcode() &&
2956 SuccSU->getNode()->getMachineOpcode() ==
2957 TargetOpcode::COPY_TO_REGCLASS)
2958 SuccSU = SuccSU->Succs.front().getSUnit();
2959 // Don't constrain non-instruction nodes.
2960 if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
2961 continue;
2962 // Don't constrain nodes with physical register defs if the
2963 // predecessor can clobber them.
2964 if (SuccSU->hasPhysRegDefs && SU->hasPhysRegClobbers) {
2965 if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
2966 continue;
2967 }
2968 // Don't constrain EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG;
2969 // these may be coalesced away. We want them close to their uses.
2970 unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
2971 if (SuccOpc == TargetOpcode::EXTRACT_SUBREG ||
2972 SuccOpc == TargetOpcode::INSERT_SUBREG ||
2973 SuccOpc == TargetOpcode::SUBREG_TO_REG)
2974 continue;
2975 if (!canClobberReachingPhysRegUse(SuccSU, SU, scheduleDAG, TII, TRI) &&
2976 (!canClobber(SuccSU, DUSU) ||
2977 (isLiveOut && !hasOnlyLiveOutUses(SuccSU)) ||
2978 (!SU->isCommutable && SuccSU->isCommutable)) &&
2979 !scheduleDAG->IsReachable(SuccSU, SU)) {
2980 DEBUG(dbgs() << " Adding a pseudo-two-addr edge from SU #"
2981 << SU->NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
2982 scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Artificial));
2983 }
2984 }
2985 }
2986 }
2987 }
2988
2989 //===----------------------------------------------------------------------===//
2990 // Public Constructor Functions
2991 //===----------------------------------------------------------------------===//
2992
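// Note: in the priority queue constructors below, the second argument enables
// register pressure tracking and the third requests source-order tie-breaking;
// the hybrid and ILP schedulers also construct ScheduleDAGRRList with latency
// tracking enabled (its second argument).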
2993 llvm::ScheduleDAGSDNodes *
2994 llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
2995 CodeGenOpt::Level OptLevel) {
2996 const TargetSubtargetInfo &STI = IS->MF->getSubtarget();
2997 const TargetInstrInfo *TII = STI.getInstrInfo();
2998 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
2999
3000 BURegReductionPriorityQueue *PQ =
3001 new BURegReductionPriorityQueue(*IS->MF, false, false, TII, TRI, nullptr);
3002 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
3003 PQ->setScheduleDAG(SD);
3004 return SD;
3005 }
3006
3007 llvm::ScheduleDAGSDNodes *
3008 llvm::createSourceListDAGScheduler(SelectionDAGISel *IS,
3009 CodeGenOpt::Level OptLevel) {
3010 const TargetSubtargetInfo &STI = IS->MF->getSubtarget();
3011 const TargetInstrInfo *TII = STI.getInstrInfo();
3012 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
3013
3014 SrcRegReductionPriorityQueue *PQ =
3015 new SrcRegReductionPriorityQueue(*IS->MF, false, true, TII, TRI, nullptr);
3016 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
3017 PQ->setScheduleDAG(SD);
3018 return SD;
3019 }
3020
3021 llvm::ScheduleDAGSDNodes *
3022 llvm::createHybridListDAGScheduler(SelectionDAGISel *IS,
3023 CodeGenOpt::Level OptLevel) {
3024 const TargetSubtargetInfo &STI = IS->MF->getSubtarget();
3025 const TargetInstrInfo *TII = STI.getInstrInfo();
3026 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
3027 const TargetLowering *TLI = IS->TLI;
3028
3029 HybridBURRPriorityQueue *PQ =
3030 new HybridBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);
3031
3032 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
3033 PQ->setScheduleDAG(SD);
3034 return SD;
3035 }
3036
3037 llvm::ScheduleDAGSDNodes *
3038 llvm::createILPListDAGScheduler(SelectionDAGISel *IS,
3039 CodeGenOpt::Level OptLevel) {
3040 const TargetSubtargetInfo &STI = IS->MF->getSubtarget();
3041 const TargetInstrInfo *TII = STI.getInstrInfo();
3042 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
3043 const TargetLowering *TLI = IS->TLI;
3044
3045 ILPBURRPriorityQueue *PQ =
3046 new ILPBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);
3047 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
3048 PQ->setScheduleDAG(SD);
3049 return SD;
3050 }
3051