1 //===-- SelectionDAGISel.cpp - Implement the SelectionDAGISel class -------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This implements the SelectionDAGISel class.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #define DEBUG_TYPE "isel"
15 #include "ScheduleDAGSDNodes.h"
16 #include "SelectionDAGBuilder.h"
17 #include "llvm/CodeGen/FunctionLoweringInfo.h"
18 #include "llvm/CodeGen/SelectionDAGISel.h"
19 #include "llvm/Analysis/AliasAnalysis.h"
20 #include "llvm/Analysis/BranchProbabilityInfo.h"
21 #include "llvm/Analysis/DebugInfo.h"
22 #include "llvm/Constants.h"
23 #include "llvm/Function.h"
24 #include "llvm/InlineAsm.h"
25 #include "llvm/Instructions.h"
26 #include "llvm/Intrinsics.h"
27 #include "llvm/IntrinsicInst.h"
28 #include "llvm/LLVMContext.h"
29 #include "llvm/Module.h"
30 #include "llvm/CodeGen/FastISel.h"
31 #include "llvm/CodeGen/GCStrategy.h"
32 #include "llvm/CodeGen/GCMetadata.h"
33 #include "llvm/CodeGen/MachineFrameInfo.h"
34 #include "llvm/CodeGen/MachineFunction.h"
35 #include "llvm/CodeGen/MachineInstrBuilder.h"
36 #include "llvm/CodeGen/MachineModuleInfo.h"
37 #include "llvm/CodeGen/MachineRegisterInfo.h"
38 #include "llvm/CodeGen/ScheduleHazardRecognizer.h"
39 #include "llvm/CodeGen/SchedulerRegistry.h"
40 #include "llvm/CodeGen/SelectionDAG.h"
41 #include "llvm/Target/TargetRegisterInfo.h"
42 #include "llvm/Target/TargetIntrinsicInfo.h"
43 #include "llvm/Target/TargetInstrInfo.h"
44 #include "llvm/Target/TargetLowering.h"
45 #include "llvm/Target/TargetMachine.h"
46 #include "llvm/Target/TargetOptions.h"
47 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
48 #include "llvm/Support/Compiler.h"
49 #include "llvm/Support/Debug.h"
50 #include "llvm/Support/ErrorHandling.h"
51 #include "llvm/Support/Timer.h"
52 #include "llvm/Support/raw_ostream.h"
53 #include "llvm/ADT/PostOrderIterator.h"
54 #include "llvm/ADT/Statistic.h"
55 #include <algorithm>
56 using namespace llvm;
57
58 STATISTIC(NumFastIselFailures, "Number of instructions fast isel failed on");
59 STATISTIC(NumFastIselSuccess, "Number of instructions fast isel selected");
60 STATISTIC(NumFastIselBlocks, "Number of blocks selected entirely by fast isel");
61 STATISTIC(NumDAGBlocks, "Number of blocks selected using DAG");
62 STATISTIC(NumDAGIselRetries,"Number of times dag isel has to try another path");
63
64 static cl::opt<bool>
65 EnableFastISelVerbose("fast-isel-verbose", cl::Hidden,
66 cl::desc("Enable verbose messages in the \"fast\" "
67 "instruction selector"));
68 static cl::opt<bool>
69 EnableFastISelAbort("fast-isel-abort", cl::Hidden,
70 cl::desc("Enable abort calls when \"fast\" instruction fails"));
71
72 static cl::opt<bool>
73 UseMBPI("use-mbpi",
74 cl::desc("use Machine Branch Probability Info"),
75 cl::init(true), cl::Hidden);
76
77 #ifndef NDEBUG
78 static cl::opt<bool>
79 ViewDAGCombine1("view-dag-combine1-dags", cl::Hidden,
80 cl::desc("Pop up a window to show dags before the first "
81 "dag combine pass"));
82 static cl::opt<bool>
83 ViewLegalizeTypesDAGs("view-legalize-types-dags", cl::Hidden,
84 cl::desc("Pop up a window to show dags before legalize types"));
85 static cl::opt<bool>
86 ViewLegalizeDAGs("view-legalize-dags", cl::Hidden,
87 cl::desc("Pop up a window to show dags before legalize"));
88 static cl::opt<bool>
89 ViewDAGCombine2("view-dag-combine2-dags", cl::Hidden,
90 cl::desc("Pop up a window to show dags before the second "
91 "dag combine pass"));
92 static cl::opt<bool>
93 ViewDAGCombineLT("view-dag-combine-lt-dags", cl::Hidden,
94 cl::desc("Pop up a window to show dags before the post legalize types"
95 " dag combine pass"));
96 static cl::opt<bool>
97 ViewISelDAGs("view-isel-dags", cl::Hidden,
98 cl::desc("Pop up a window to show isel dags as they are selected"));
99 static cl::opt<bool>
100 ViewSchedDAGs("view-sched-dags", cl::Hidden,
101 cl::desc("Pop up a window to show sched dags as they are processed"));
102 static cl::opt<bool>
103 ViewSUnitDAGs("view-sunit-dags", cl::Hidden,
104 cl::desc("Pop up a window to show SUnit dags after they are processed"));
105 #else
106 static const bool ViewDAGCombine1 = false,
107 ViewLegalizeTypesDAGs = false, ViewLegalizeDAGs = false,
108 ViewDAGCombine2 = false,
109 ViewDAGCombineLT = false,
110 ViewISelDAGs = false, ViewSchedDAGs = false,
111 ViewSUnitDAGs = false;
112 #endif
113
114 //===---------------------------------------------------------------------===//
115 ///
116 /// RegisterScheduler class - Track the registration of instruction schedulers.
117 ///
118 //===---------------------------------------------------------------------===//
119 MachinePassRegistry RegisterScheduler::Registry;
120
121 //===---------------------------------------------------------------------===//
122 ///
123 /// ISHeuristic command line option for instruction schedulers.
124 ///
125 //===---------------------------------------------------------------------===//
126 static cl::opt<RegisterScheduler::FunctionPassCtor, false,
127 RegisterPassParser<RegisterScheduler> >
128 ISHeuristic("pre-RA-sched",
129 cl::init(&createDefaultScheduler),
130 cl::desc("Instruction schedulers available (before register"
131 " allocation):"));
132
133 static RegisterScheduler
134 defaultListDAGScheduler("default", "Best scheduler for the target",
135 createDefaultScheduler);
136
137 namespace llvm {
138 //===--------------------------------------------------------------------===//
139 /// createDefaultScheduler - This creates an instruction scheduler appropriate
140 /// for the target.
141 ScheduleDAGSDNodes* createDefaultScheduler(SelectionDAGISel *IS,
142 CodeGenOpt::Level OptLevel) {
143 const TargetLowering &TLI = IS->getTargetLowering();
144
145 if (OptLevel == CodeGenOpt::None)
146 return createSourceListDAGScheduler(IS, OptLevel);
147 if (TLI.getSchedulingPreference() == Sched::Latency)
148 return createTDListDAGScheduler(IS, OptLevel);
149 if (TLI.getSchedulingPreference() == Sched::RegPressure)
150 return createBURRListDAGScheduler(IS, OptLevel);
151 if (TLI.getSchedulingPreference() == Sched::Hybrid)
152 return createHybridListDAGScheduler(IS, OptLevel);
153 assert(TLI.getSchedulingPreference() == Sched::ILP &&
154 "Unknown sched type!");
155 return createILPListDAGScheduler(IS, OptLevel);
156 }
157 }
158
159 // EmitInstrWithCustomInserter - This method should be implemented by targets
160 // that mark instructions with the 'usesCustomInserter' flag. These
161 // instructions are special in various ways, which require special support to
162 // insert. The specified MachineInstr is created but not inserted into any
163 // basic blocks, and this method is called to expand it into a sequence of
164 // instructions, potentially also creating new basic blocks and control flow.
165 // When new basic blocks are inserted and the edges from MBB to its successors
166 // are modified, the method should insert pairs of <OldSucc, NewSucc> into the
167 // DenseMap.
168 MachineBasicBlock *
169 TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
170 MachineBasicBlock *MBB) const {
171 #ifndef NDEBUG
172 dbgs() << "If a target marks an instruction with "
173 "'usesCustomInserter', it must implement "
174 "TargetLowering::EmitInstrWithCustomInserter!";
175 #endif
176 llvm_unreachable(0);
177 return 0;
178 }
179
180 void TargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
181 SDNode *Node) const {
182 assert(!MI->getDesc().hasPostISelHook() &&
183 "If a target marks an instruction with 'hasPostISelHook', "
184 "it must implement TargetLowering::AdjustInstrPostInstrSelection!");
185 }
186
187 //===----------------------------------------------------------------------===//
188 // SelectionDAGISel code
189 //===----------------------------------------------------------------------===//
190
191 SelectionDAGISel::SelectionDAGISel(const TargetMachine &tm,
192 CodeGenOpt::Level OL) :
193 MachineFunctionPass(ID), TM(tm), TLI(*tm.getTargetLowering()),
194 FuncInfo(new FunctionLoweringInfo(TLI)),
195 CurDAG(new SelectionDAG(tm)),
196 SDB(new SelectionDAGBuilder(*CurDAG, *FuncInfo, OL)),
197 GFI(),
198 OptLevel(OL),
199 DAGSize(0) {
200 initializeGCModuleInfoPass(*PassRegistry::getPassRegistry());
201 initializeAliasAnalysisAnalysisGroup(*PassRegistry::getPassRegistry());
202 initializeBranchProbabilityInfoPass(*PassRegistry::getPassRegistry());
203 }
204
205 SelectionDAGISel::~SelectionDAGISel() {
206 delete SDB;
207 delete CurDAG;
208 delete FuncInfo;
209 }
210
211 void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
212 AU.addRequired<AliasAnalysis>();
213 AU.addPreserved<AliasAnalysis>();
214 AU.addRequired<GCModuleInfo>();
215 AU.addPreserved<GCModuleInfo>();
216 if (UseMBPI && OptLevel != CodeGenOpt::None)
217 AU.addRequired<BranchProbabilityInfo>();
218 MachineFunctionPass::getAnalysisUsage(AU);
219 }
220
221 /// SplitCriticalSideEffectEdges - Look for critical edges with a PHI value that
222 /// may trap on it. In this case we have to split the edge so that the path
223 /// through the predecessor block that doesn't go to the phi block doesn't
224 /// execute the possibly trapping instruction.
225 ///
226 /// This is required for correctness, so it must be done at -O0.
227 ///
228 static void SplitCriticalSideEffectEdges(Function &Fn, Pass *SDISel) {
229 // Loop for blocks with phi nodes.
230 for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
231 PHINode *PN = dyn_cast<PHINode>(BB->begin());
232 if (PN == 0) continue;
233
234 ReprocessBlock:
235 // For each block with a PHI node, check to see if any of the input values
236 // are potentially trapping constant expressions. Constant expressions are
237 // the only potentially trapping value that can occur as the argument to a
238 // PHI.
239 for (BasicBlock::iterator I = BB->begin(); (PN = dyn_cast<PHINode>(I)); ++I)
240 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
241 ConstantExpr *CE = dyn_cast<ConstantExpr>(PN->getIncomingValue(i));
242 if (CE == 0 || !CE->canTrap()) continue;
243
244 // The only case we have to worry about is when the edge is critical.
245 // Since this block has a PHI Node, we assume it has multiple input
246 // edges: check to see if the pred has multiple successors.
247 BasicBlock *Pred = PN->getIncomingBlock(i);
248 if (Pred->getTerminator()->getNumSuccessors() == 1)
249 continue;
250
251 // Okay, we have to split this edge.
252 SplitCriticalEdge(Pred->getTerminator(),
253 GetSuccessorNumber(Pred, BB), SDISel, true);
254 goto ReprocessBlock;
255 }
256 }
257 }
258
259 bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
260 // Do some sanity-checking on the command-line options.
261 assert((!EnableFastISelVerbose || EnableFastISel) &&
262 "-fast-isel-verbose requires -fast-isel");
263 assert((!EnableFastISelAbort || EnableFastISel) &&
264 "-fast-isel-abort requires -fast-isel");
265
266 const Function &Fn = *mf.getFunction();
267 const TargetInstrInfo &TII = *TM.getInstrInfo();
268 const TargetRegisterInfo &TRI = *TM.getRegisterInfo();
269
270 MF = &mf;
271 RegInfo = &MF->getRegInfo();
272 AA = &getAnalysis<AliasAnalysis>();
273 GFI = Fn.hasGC() ? &getAnalysis<GCModuleInfo>().getFunctionInfo(Fn) : 0;
274
275 DEBUG(dbgs() << "\n\n\n=== " << Fn.getName() << "\n");
276
277 SplitCriticalSideEffectEdges(const_cast<Function&>(Fn), this);
278
279 CurDAG->init(*MF);
280 FuncInfo->set(Fn, *MF);
281
282 if (UseMBPI && OptLevel != CodeGenOpt::None)
283 FuncInfo->BPI = &getAnalysis<BranchProbabilityInfo>();
284 else
285 FuncInfo->BPI = 0;
286
287 SDB->init(GFI, *AA);
288
289 SelectAllBasicBlocks(Fn);
290
291 // If the first basic block in the function has live ins that need to be
292 // copied into vregs, emit the copies into the top of the block before
293 // emitting the code for the block.
294 MachineBasicBlock *EntryMBB = MF->begin();
295 RegInfo->EmitLiveInCopies(EntryMBB, TRI, TII);
296
297 DenseMap<unsigned, unsigned> LiveInMap;
298 if (!FuncInfo->ArgDbgValues.empty())
299 for (MachineRegisterInfo::livein_iterator LI = RegInfo->livein_begin(),
300 E = RegInfo->livein_end(); LI != E; ++LI)
301 if (LI->second)
302 LiveInMap.insert(std::make_pair(LI->first, LI->second));
303
304 // Insert DBG_VALUE instructions for function arguments to the entry block.
305 for (unsigned i = 0, e = FuncInfo->ArgDbgValues.size(); i != e; ++i) {
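// Note: ArgDbgValues is visited in reverse order here, so each DBG_VALUE
// inserted at the top of the entry block ends up in its original order.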
306 MachineInstr *MI = FuncInfo->ArgDbgValues[e-i-1];
307 unsigned Reg = MI->getOperand(0).getReg();
308 if (TargetRegisterInfo::isPhysicalRegister(Reg))
309 EntryMBB->insert(EntryMBB->begin(), MI);
310 else {
311 MachineInstr *Def = RegInfo->getVRegDef(Reg);
312 MachineBasicBlock::iterator InsertPos = Def;
313 // FIXME: VR def may not be in entry block.
314 Def->getParent()->insert(llvm::next(InsertPos), MI);
315 }
316
317 // If Reg is live-in then update debug info to track its copy in a vreg.
318 DenseMap<unsigned, unsigned>::iterator LDI = LiveInMap.find(Reg);
319 if (LDI != LiveInMap.end()) {
320 MachineInstr *Def = RegInfo->getVRegDef(LDI->second);
321 MachineBasicBlock::iterator InsertPos = Def;
322 const MDNode *Variable =
323 MI->getOperand(MI->getNumOperands()-1).getMetadata();
324 unsigned Offset = MI->getOperand(1).getImm();
325 // Def is never a terminator here, so it is ok to increment InsertPos.
326 BuildMI(*EntryMBB, ++InsertPos, MI->getDebugLoc(),
327 TII.get(TargetOpcode::DBG_VALUE))
328 .addReg(LDI->second, RegState::Debug)
329 .addImm(Offset).addMetadata(Variable);
330
331 // If this vreg is directly copied into an exported register then the
332 // COPY instruction also needs a DBG_VALUE, if it is the only user of
333 // LDI->second.
334 MachineInstr *CopyUseMI = NULL;
335 for (MachineRegisterInfo::use_iterator
336 UI = RegInfo->use_begin(LDI->second);
337 MachineInstr *UseMI = UI.skipInstruction();) {
338 if (UseMI->isDebugValue()) continue;
339 if (UseMI->isCopy() && !CopyUseMI && UseMI->getParent() == EntryMBB) {
340 CopyUseMI = UseMI; continue;
341 }
342 // Otherwise this is another use or second copy use.
343 CopyUseMI = NULL; break;
344 }
345 if (CopyUseMI) {
346 MachineInstr *NewMI =
347 BuildMI(*MF, CopyUseMI->getDebugLoc(),
348 TII.get(TargetOpcode::DBG_VALUE))
349 .addReg(CopyUseMI->getOperand(0).getReg(), RegState::Debug)
350 .addImm(Offset).addMetadata(Variable);
351 EntryMBB->insertAfter(CopyUseMI, NewMI);
352 }
353 }
354 }
355
356 // Determine if there are any calls in this machine function.
357 MachineFrameInfo *MFI = MF->getFrameInfo();
358 if (!MFI->hasCalls()) {
359 for (MachineFunction::const_iterator
360 I = MF->begin(), E = MF->end(); I != E; ++I) {
361 const MachineBasicBlock *MBB = I;
362 for (MachineBasicBlock::const_iterator
363 II = MBB->begin(), IE = MBB->end(); II != IE; ++II) {
364 const MCInstrDesc &MCID = TM.getInstrInfo()->get(II->getOpcode());
365
366 if ((MCID.isCall() && !MCID.isReturn()) ||
367 II->isStackAligningInlineAsm()) {
368 MFI->setHasCalls(true);
369 goto done;
370 }
371 }
372 }
373 done:;
374 }
375
376 // Determine if there is a call to setjmp in the machine function.
377 MF->setCallsSetJmp(Fn.callsFunctionThatReturnsTwice());
378
379 // Replace forward-declared registers with the registers containing
380 // the desired value.
381 MachineRegisterInfo &MRI = MF->getRegInfo();
382 for (DenseMap<unsigned, unsigned>::iterator
383 I = FuncInfo->RegFixups.begin(), E = FuncInfo->RegFixups.end();
384 I != E; ++I) {
385 unsigned From = I->first;
386 unsigned To = I->second;
387 // If To is also scheduled to be replaced, find what its ultimate
388 // replacement is.
389 for (;;) {
390 DenseMap<unsigned, unsigned>::iterator J =
391 FuncInfo->RegFixups.find(To);
392 if (J == E) break;
393 To = J->second;
394 }
395 // Replace it.
396 MRI.replaceRegWith(From, To);
397 }
398
399 // Release function-specific state. SDB and CurDAG are already cleared
400 // at this point.
401 FuncInfo->clear();
402
403 return true;
404 }
405
406 void SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin,
407 BasicBlock::const_iterator End,
408 bool &HadTailCall) {
409 // Lower all of the non-terminator instructions. If a call is emitted
410 // as a tail call, cease emitting nodes for this block. Terminators
411 // are handled below.
412 for (BasicBlock::const_iterator I = Begin; I != End && !SDB->HasTailCall; ++I)
413 SDB->visit(*I);
414
415 // Make sure the root of the DAG is up-to-date.
416 CurDAG->setRoot(SDB->getControlRoot());
417 HadTailCall = SDB->HasTailCall;
418 SDB->clear();
419
420 // Final step, emit the lowered DAG as machine code.
421 CodeGenAndEmitDAG();
422 }
423
424 void SelectionDAGISel::ComputeLiveOutVRegInfo() {
425 SmallPtrSet<SDNode*, 128> VisitedNodes;
426 SmallVector<SDNode*, 128> Worklist;
427
428 Worklist.push_back(CurDAG->getRoot().getNode());
429
430 APInt Mask;
431 APInt KnownZero;
432 APInt KnownOne;
433
434 do {
435 SDNode *N = Worklist.pop_back_val();
436
437 // If we've already seen this node, ignore it.
438 if (!VisitedNodes.insert(N))
439 continue;
440
441 // Otherwise, add all chain operands to the worklist.
442 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
443 if (N->getOperand(i).getValueType() == MVT::Other)
444 Worklist.push_back(N->getOperand(i).getNode());
445
446 // If this is a CopyToReg with a vreg dest, process it.
447 if (N->getOpcode() != ISD::CopyToReg)
448 continue;
449
450 unsigned DestReg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
451 if (!TargetRegisterInfo::isVirtualRegister(DestReg))
452 continue;
453
454 // Ignore non-scalar or non-integer values.
455 SDValue Src = N->getOperand(2);
456 EVT SrcVT = Src.getValueType();
457 if (!SrcVT.isInteger() || SrcVT.isVector())
458 continue;
459
460 unsigned NumSignBits = CurDAG->ComputeNumSignBits(Src);
461 Mask = APInt::getAllOnesValue(SrcVT.getSizeInBits());
462 CurDAG->ComputeMaskedBits(Src, Mask, KnownZero, KnownOne);
463 FuncInfo->AddLiveOutRegInfo(DestReg, NumSignBits, KnownZero, KnownOne);
464 } while (!Worklist.empty());
465 }
466
467 void SelectionDAGISel::CodeGenAndEmitDAG() {
468 std::string GroupName;
469 if (TimePassesIsEnabled)
470 GroupName = "Instruction Selection and Scheduling";
471 std::string BlockName;
472 int BlockNumber = -1;
473 (void)BlockNumber;
474 #ifdef NDEBUG
475 if (ViewDAGCombine1 || ViewLegalizeTypesDAGs || ViewLegalizeDAGs ||
476 ViewDAGCombine2 || ViewDAGCombineLT || ViewISelDAGs || ViewSchedDAGs ||
477 ViewSUnitDAGs)
478 #endif
479 {
480 BlockNumber = FuncInfo->MBB->getNumber();
481 BlockName = MF->getFunction()->getNameStr() + ":" +
482 FuncInfo->MBB->getBasicBlock()->getNameStr();
483 }
484 DEBUG(dbgs() << "Initial selection DAG: BB#" << BlockNumber
485 << " '" << BlockName << "'\n"; CurDAG->dump());
486
487 if (ViewDAGCombine1) CurDAG->viewGraph("dag-combine1 input for " + BlockName);
488
489 // Run the DAG combiner in pre-legalize mode.
490 {
491 NamedRegionTimer T("DAG Combining 1", GroupName, TimePassesIsEnabled);
492 CurDAG->Combine(Unrestricted, *AA, OptLevel);
493 }
494
495 DEBUG(dbgs() << "Optimized lowered selection DAG: BB#" << BlockNumber
496 << " '" << BlockName << "'\n"; CurDAG->dump());
497
498 // Second step, hack on the DAG until it only uses operations and types that
499 // the target supports.
500 if (ViewLegalizeTypesDAGs) CurDAG->viewGraph("legalize-types input for " +
501 BlockName);
502
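// LegalizeTypes reports whether it changed the DAG; only then is the
// post-type-legalization combine below worth running.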
503 bool Changed;
504 {
505 NamedRegionTimer T("Type Legalization", GroupName, TimePassesIsEnabled);
506 Changed = CurDAG->LegalizeTypes();
507 }
508
509 DEBUG(dbgs() << "Type-legalized selection DAG: BB#" << BlockNumber
510 << " '" << BlockName << "'\n"; CurDAG->dump());
511
512 if (Changed) {
513 if (ViewDAGCombineLT)
514 CurDAG->viewGraph("dag-combine-lt input for " + BlockName);
515
516 // Run the DAG combiner in post-type-legalize mode.
517 {
518 NamedRegionTimer T("DAG Combining after legalize types", GroupName,
519 TimePassesIsEnabled);
520 CurDAG->Combine(NoIllegalTypes, *AA, OptLevel);
521 }
522
523 DEBUG(dbgs() << "Optimized type-legalized selection DAG: BB#" << BlockNumber
524 << " '" << BlockName << "'\n"; CurDAG->dump());
525 }
526
527 {
528 NamedRegionTimer T("Vector Legalization", GroupName, TimePassesIsEnabled);
529 Changed = CurDAG->LegalizeVectors();
530 }
531
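// Vector legalization may create nodes with illegal types, so run the type
// legalizer again before combining.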
532 if (Changed) {
533 {
534 NamedRegionTimer T("Type Legalization 2", GroupName, TimePassesIsEnabled);
535 CurDAG->LegalizeTypes();
536 }
537
538 if (ViewDAGCombineLT)
539 CurDAG->viewGraph("dag-combine-lv input for " + BlockName);
540
541 // Run the DAG combiner in post-type-legalize mode.
542 {
543 NamedRegionTimer T("DAG Combining after legalize vectors", GroupName,
544 TimePassesIsEnabled);
545 CurDAG->Combine(NoIllegalOperations, *AA, OptLevel);
546 }
547
548 DEBUG(dbgs() << "Optimized vector-legalized selection DAG: BB#"
549 << BlockNumber << " '" << BlockName << "'\n"; CurDAG->dump());
550 }
551
552 if (ViewLegalizeDAGs) CurDAG->viewGraph("legalize input for " + BlockName);
553
554 {
555 NamedRegionTimer T("DAG Legalization", GroupName, TimePassesIsEnabled);
556 CurDAG->Legalize();
557 }
558
559 DEBUG(dbgs() << "Legalized selection DAG: BB#" << BlockNumber
560 << " '" << BlockName << "'\n"; CurDAG->dump());
561
562 if (ViewDAGCombine2) CurDAG->viewGraph("dag-combine2 input for " + BlockName);
563
564 // Run the DAG combiner in post-legalize mode.
565 {
566 NamedRegionTimer T("DAG Combining 2", GroupName, TimePassesIsEnabled);
567 CurDAG->Combine(NoIllegalOperations, *AA, OptLevel);
568 }
569
570 DEBUG(dbgs() << "Optimized legalized selection DAG: BB#" << BlockNumber
571 << " '" << BlockName << "'\n"; CurDAG->dump());
572
573 if (OptLevel != CodeGenOpt::None)
574 ComputeLiveOutVRegInfo();
575
576 if (ViewISelDAGs) CurDAG->viewGraph("isel input for " + BlockName);
577
578 // Third, instruction select all of the operations to machine code, adding the
579 // code to the MachineBasicBlock.
580 {
581 NamedRegionTimer T("Instruction Selection", GroupName, TimePassesIsEnabled);
582 DoInstructionSelection();
583 }
584
585 DEBUG(dbgs() << "Selected selection DAG: BB#" << BlockNumber
586 << " '" << BlockName << "'\n"; CurDAG->dump());
587
588 if (ViewSchedDAGs) CurDAG->viewGraph("scheduler input for " + BlockName);
589
590 // Schedule machine code.
591 ScheduleDAGSDNodes *Scheduler = CreateScheduler();
592 {
593 NamedRegionTimer T("Instruction Scheduling", GroupName,
594 TimePassesIsEnabled);
595 Scheduler->Run(CurDAG, FuncInfo->MBB, FuncInfo->InsertPt);
596 }
597
598 if (ViewSUnitDAGs) Scheduler->viewGraph();
599
600 // Emit machine code to BB. This can change 'BB' to the last block being
601 // inserted into.
602 MachineBasicBlock *FirstMBB = FuncInfo->MBB, *LastMBB;
603 {
604 NamedRegionTimer T("Instruction Creation", GroupName, TimePassesIsEnabled);
605
606 LastMBB = FuncInfo->MBB = Scheduler->EmitSchedule();
607 FuncInfo->InsertPt = Scheduler->InsertPos;
608 }
609
610 // If the block was split, make sure we update any references that are used to
611 // update PHI nodes later on.
612 if (FirstMBB != LastMBB)
613 SDB->UpdateSplitBlock(FirstMBB, LastMBB);
614
615 // Free the scheduler state.
616 {
617 NamedRegionTimer T("Instruction Scheduling Cleanup", GroupName,
618 TimePassesIsEnabled);
619 delete Scheduler;
620 }
621
622 // Free the SelectionDAG state, now that we're finished with it.
623 CurDAG->clear();
624 }
625
626 void SelectionDAGISel::DoInstructionSelection() {
627 DEBUG(errs() << "===== Instruction selection begins: BB#"
628 << FuncInfo->MBB->getNumber()
629 << " '" << FuncInfo->MBB->getName() << "'\n");
630
631 PreprocessISelDAG();
632
633 // Select target instructions for the DAG.
634 {
635 // Number all nodes with a topological order and set DAGSize.
636 DAGSize = CurDAG->AssignTopologicalOrder();
637
638 // Create a dummy node (which is not added to allnodes), that adds
639 // a reference to the root node, preventing it from being deleted,
640 // and tracking any changes of the root.
641 HandleSDNode Dummy(CurDAG->getRoot());
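// Start ISelPosition just past the root node so the loop below selects the
// root first and then walks backward toward the entry node.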
642 ISelPosition = SelectionDAG::allnodes_iterator(CurDAG->getRoot().getNode());
643 ++ISelPosition;
644
645 // The AllNodes list is now topological-sorted. Visit the
646 // nodes by starting at the end of the list (the root of the
647 // graph) and proceeding back toward the beginning (the entry
648 // node).
649 while (ISelPosition != CurDAG->allnodes_begin()) {
650 SDNode *Node = --ISelPosition;
651 // Skip dead nodes. DAGCombiner is expected to eliminate all dead nodes,
652 // but there are currently some corner cases that it misses. Also, this
653 // makes it theoretically possible to disable the DAGCombiner.
654 if (Node->use_empty())
655 continue;
656
657 SDNode *ResNode = Select(Node);
658
659 // FIXME: This is pretty gross. 'Select' should be changed to not return
660 // anything at all and this code should be nuked with a tactical strike.
661
662 // If node should not be replaced, continue with the next one.
663 if (ResNode == Node || Node->getOpcode() == ISD::DELETED_NODE)
664 continue;
665 // Replace node.
666 if (ResNode)
667 ReplaceUses(Node, ResNode);
668
669 // If after the replacement this node is not used any more,
670 // remove this dead node.
671 if (Node->use_empty()) { // Don't delete EntryToken, etc.
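// The ISelUpdater keeps ISelPosition valid if RemoveDeadNode happens to
// delete the node the iterator currently points at.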
672 ISelUpdater ISU(ISelPosition);
673 CurDAG->RemoveDeadNode(Node, &ISU);
674 }
675 }
676
677 CurDAG->setRoot(Dummy.getValue());
678 }
679
680 DEBUG(errs() << "===== Instruction selection ends:\n");
681
682 PostprocessISelDAG();
683 }
684
685 /// PrepareEHLandingPad - Emit an EH_LABEL, set up live-in registers, and
686 /// do other setup for EH landing-pad blocks.
687 void SelectionDAGISel::PrepareEHLandingPad() {
688 MachineBasicBlock *MBB = FuncInfo->MBB;
689
690 // Add a label to mark the beginning of the landing pad. Deletion of the
691 // landing pad can thus be detected via the MachineModuleInfo.
692 MCSymbol *Label = MF->getMMI().addLandingPad(MBB);
693
694 // Assign the call site to the landing pad's begin label.
695 MF->getMMI().setCallSiteLandingPad(Label, SDB->LPadToCallSiteMap[MBB]);
696
697 const MCInstrDesc &II = TM.getInstrInfo()->get(TargetOpcode::EH_LABEL);
698 BuildMI(*MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(), II)
699 .addSym(Label);
700
701 // Mark exception register as live in.
702 unsigned Reg = TLI.getExceptionAddressRegister();
703 if (Reg) MBB->addLiveIn(Reg);
704
705 // Mark exception selector register as live in.
706 Reg = TLI.getExceptionSelectorRegister();
707 if (Reg) MBB->addLiveIn(Reg);
708
709 // FIXME: Hack around an exception handling flaw (PR1508): the personality
710 // function and list of typeids logically belong to the invoke (or, if you
711 // like, the basic block containing the invoke), and need to be associated
712 // with it in the dwarf exception handling tables. Currently however the
713 // information is provided by an intrinsic (eh.selector) that can be moved
714 // to unexpected places by the optimizers: if the unwind edge is critical,
715 // then breaking it can result in the intrinsics being in the successor of
716 // the landing pad, not the landing pad itself. This results
717 // in exceptions not being caught because no typeids are associated with
718 // the invoke. This may not be the only way things can go wrong, but it
719 // is the only way we try to work around for the moment.
720 const BasicBlock *LLVMBB = MBB->getBasicBlock();
721 const BranchInst *Br = dyn_cast<BranchInst>(LLVMBB->getTerminator());
722
723 if (Br && Br->isUnconditional()) { // Critical edge?
724 BasicBlock::const_iterator I, E;
725 for (I = LLVMBB->begin(), E = --LLVMBB->end(); I != E; ++I)
726 if (isa<EHSelectorInst>(I))
727 break;
728
729 if (I == E)
730 // No catch info found - try to extract some from the successor.
731 CopyCatchInfo(Br->getSuccessor(0), LLVMBB, &MF->getMMI(), *FuncInfo);
732 }
733 }
734
735 /// TryToFoldFastISelLoad - We're checking to see if we can fold the specified
736 /// load into the specified FoldInst. Note that we could have a sequence where
737 /// multiple LLVM IR instructions are folded into the same MachineInstr. For
738 /// example we could have:
739 /// A: x = load i32 *P
740 /// B: y = icmp A, 42
741 /// C: br y, ...
742 ///
743 /// In this scenario, LI is "A", and FoldInst is "C". We know about "B" (and
744 /// any other folded instructions) because it is between A and C.
745 ///
746 /// If we succeed in folding the load into the operation, return true.
747 ///
748 bool SelectionDAGISel::TryToFoldFastISelLoad(const LoadInst *LI,
749 const Instruction *FoldInst,
750 FastISel *FastIS) {
751 // We know that the load has a single use, but don't know what it is. If it
752 // isn't one of the folded instructions, then we can't succeed here. Handle
753 // this by scanning the single-use users of the load until we get to FoldInst.
754 unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.
755
756 const Instruction *TheUser = LI->use_back();
757 while (TheUser != FoldInst && // Scan up until we find FoldInst.
758 // Stay in the right block.
759 TheUser->getParent() == FoldInst->getParent() &&
760 --MaxUsers) { // Don't scan too far.
761 // If there are multiple or no uses of this instruction, then bail out.
762 if (!TheUser->hasOneUse())
763 return false;
764
765 TheUser = TheUser->use_back();
766 }
767
768 // If we didn't find the fold instruction, then we failed to collapse the
769 // sequence.
770 if (TheUser != FoldInst)
771 return false;
772
773 // Don't try to fold volatile loads. Target has to deal with alignment
774 // constraints.
775 if (LI->isVolatile()) return false;
776
777 // Figure out which vreg this is going into. If there is no assigned vreg yet
778 // then there actually was no reference to it. Perhaps the load is referenced
779 // by a dead instruction.
780 unsigned LoadReg = FastIS->getRegForValue(LI);
781 if (LoadReg == 0)
782 return false;
783
784 // Check to see what the uses of this vreg are. If it has no uses, or more
785 // than one use (at the machine instr level) then we can't fold it.
786 MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(LoadReg);
787 if (RI == RegInfo->reg_end())
788 return false;
789
790 // See if there is exactly one use of the vreg. If there are multiple uses,
791 // then the instruction got lowered to multiple machine instructions or the
792 // use of the loaded value ended up being multiple operands of the result, in
793 // either case, we can't fold this.
794 MachineRegisterInfo::reg_iterator PostRI = RI; ++PostRI;
795 if (PostRI != RegInfo->reg_end())
796 return false;
797
798 assert(RI.getOperand().isUse() &&
799 "The only use of the vreg must be a use, we haven't emitted the def!");
800
801 MachineInstr *User = &*RI;
802
803 // Set the insertion point properly. Folding the load can cause generation of
804 // other random instructions (like sign extends) for addressing modes; make
805 // sure they get inserted in a logical place before the new instruction.
806 FuncInfo->InsertPt = User;
807 FuncInfo->MBB = User->getParent();
808
809 // Ask the target to try folding the load.
810 return FastIS->TryToFoldLoad(User, RI.getOperandNo(), LI);
811 }
812
813 /// isFoldedOrDeadInstruction - Return true if the specified instruction is
814 /// side-effect free and is either dead or folded into a generated instruction.
815 /// Return false if it needs to be emitted.
816 static bool isFoldedOrDeadInstruction(const Instruction *I,
817 FunctionLoweringInfo *FuncInfo) {
818 return !I->mayWriteToMemory() && // Side-effecting instructions aren't folded.
819 !isa<TerminatorInst>(I) && // Terminators aren't folded.
820 !isa<DbgInfoIntrinsic>(I) && // Debug instructions aren't folded.
821 !isa<LandingPadInst>(I) && // Landingpad instructions aren't folded.
822 !FuncInfo->isExportedInst(I); // Exported instrs must be computed.
823 }
824
825 void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
826 // Initialize the Fast-ISel state, if needed.
827 FastISel *FastIS = 0;
828 if (EnableFastISel)
829 FastIS = TLI.createFastISel(*FuncInfo);
830
831 // Iterate over all basic blocks in the function.
832 ReversePostOrderTraversal<const Function*> RPOT(&Fn);
833 for (ReversePostOrderTraversal<const Function*>::rpo_iterator
834 I = RPOT.begin(), E = RPOT.end(); I != E; ++I) {
835 const BasicBlock *LLVMBB = *I;
836
837 if (OptLevel != CodeGenOpt::None) {
838 bool AllPredsVisited = true;
839 for (const_pred_iterator PI = pred_begin(LLVMBB), PE = pred_end(LLVMBB);
840 PI != PE; ++PI) {
841 if (!FuncInfo->VisitedBBs.count(*PI)) {
842 AllPredsVisited = false;
843 break;
844 }
845 }
846
847 if (AllPredsVisited) {
848 for (BasicBlock::const_iterator I = LLVMBB->begin();
849 isa<PHINode>(I); ++I)
850 FuncInfo->ComputePHILiveOutRegInfo(cast<PHINode>(I));
851 } else {
852 for (BasicBlock::const_iterator I = LLVMBB->begin();
853 isa<PHINode>(I); ++I)
854 FuncInfo->InvalidatePHILiveOutRegInfo(cast<PHINode>(I));
855 }
856
857 FuncInfo->VisitedBBs.insert(LLVMBB);
858 }
859
860 FuncInfo->MBB = FuncInfo->MBBMap[LLVMBB];
861 FuncInfo->InsertPt = FuncInfo->MBB->getFirstNonPHI();
862
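// FastISel selects instructions bottom-up, so BI starts at the end of the
// block and is walked back toward Begin.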
863 BasicBlock::const_iterator const Begin = LLVMBB->getFirstNonPHI();
864 BasicBlock::const_iterator const End = LLVMBB->end();
865 BasicBlock::const_iterator BI = End;
866
867 FuncInfo->InsertPt = FuncInfo->MBB->getFirstNonPHI();
868
869 // Setup an EH landing-pad block.
870 if (FuncInfo->MBB->isLandingPad())
871 PrepareEHLandingPad();
872
873 // Lower any arguments needed in this block if this is the entry block.
874 if (LLVMBB == &Fn.getEntryBlock())
875 LowerArguments(LLVMBB);
876
877 // Before doing SelectionDAG ISel, see if FastISel has been requested.
878 if (FastIS) {
879 FastIS->startNewBlock();
880
881 // Emit code for any incoming arguments. This must happen before
882 // beginning FastISel on the entry block.
883 if (LLVMBB == &Fn.getEntryBlock()) {
884 CurDAG->setRoot(SDB->getControlRoot());
885 SDB->clear();
886 CodeGenAndEmitDAG();
887
888 // If we inserted any instructions at the beginning, make a note of
889 // where they are, so we can be sure to emit subsequent instructions
890 // after them.
891 if (FuncInfo->InsertPt != FuncInfo->MBB->begin())
892 FastIS->setLastLocalValue(llvm::prior(FuncInfo->InsertPt));
893 else
894 FastIS->setLastLocalValue(0);
895 }
896
897 // Do FastISel on as many instructions as possible.
898 for (; BI != Begin; --BI) {
899 const Instruction *Inst = llvm::prior(BI);
900
901 // If we no longer require this instruction, skip it.
902 if (isFoldedOrDeadInstruction(Inst, FuncInfo))
903 continue;
904
905 // Bottom-up: reset the insert pos at the top, after any local-value
906 // instructions.
907 FastIS->recomputeInsertPt();
908
909 // Try to select the instruction with FastISel.
910 if (FastIS->SelectInstruction(Inst)) {
911 ++NumFastIselSuccess;
912 // If fast isel succeeded, skip over all the folded instructions, and
913 // then see if there is a load right before the selected instructions.
914 // Try to fold the load if so.
915 const Instruction *BeforeInst = Inst;
916 while (BeforeInst != Begin) {
917 BeforeInst = llvm::prior(BasicBlock::const_iterator(BeforeInst));
918 if (!isFoldedOrDeadInstruction(BeforeInst, FuncInfo))
919 break;
920 }
921 if (BeforeInst != Inst && isa<LoadInst>(BeforeInst) &&
922 BeforeInst->hasOneUse() &&
923 TryToFoldFastISelLoad(cast<LoadInst>(BeforeInst), Inst, FastIS))
924 // If we succeeded, don't re-select the load.
925 BI = llvm::next(BasicBlock::const_iterator(BeforeInst));
926 continue;
927 }
928
929 // Then handle certain instructions as single-LLVM-Instruction blocks.
930 if (isa<CallInst>(Inst)) {
931 ++NumFastIselFailures;
932 if (EnableFastISelVerbose || EnableFastISelAbort) {
933 dbgs() << "FastISel missed call: ";
934 Inst->dump();
935 }
936
937 if (!Inst->getType()->isVoidTy() && !Inst->use_empty()) {
938 unsigned &R = FuncInfo->ValueMap[Inst];
939 if (!R)
940 R = FuncInfo->CreateRegs(Inst->getType());
941 }
942
943 bool HadTailCall = false;
944 SelectBasicBlock(Inst, BI, HadTailCall);
945
946 // If the call was emitted as a tail call, we're done with the block.
947 if (HadTailCall) {
948 --BI;
949 break;
950 }
951
952 continue;
953 }
954
955 if (isa<TerminatorInst>(Inst) && !isa<BranchInst>(Inst)) {
956 // Don't abort, and use a different message for terminator misses.
957 ++NumFastIselFailures;
958 if (EnableFastISelVerbose || EnableFastISelAbort) {
959 dbgs() << "FastISel missed terminator: ";
960 Inst->dump();
961 }
962 } else {
963 ++NumFastIselFailures;
964 if (EnableFastISelVerbose || EnableFastISelAbort) {
965 dbgs() << "FastISel miss: ";
966 Inst->dump();
967 }
968 if (EnableFastISelAbort)
969 // The "fast" selector couldn't handle something and bailed.
970 // For the purpose of debugging, just abort.
971 llvm_unreachable("FastISel didn't select the entire block");
972 }
973 break;
974 }
975
976 FastIS->recomputeInsertPt();
977 }
978
979 if (Begin != BI)
980 ++NumDAGBlocks;
981 else
982 ++NumFastIselBlocks;
983
984 if (Begin != BI) {
985 // Run SelectionDAG instruction selection on the remainder of the block
986 // not handled by FastISel. If FastISel is not run, this is the entire
987 // block.
988 bool HadTailCall;
989 SelectBasicBlock(Begin, BI, HadTailCall);
990 }
991
992 FinishBasicBlock();
993 FuncInfo->PHINodesToUpdate.clear();
994 }
995
996 delete FastIS;
997 SDB->clearDanglingDebugInfo();
998 }
999
1000 void
1001 SelectionDAGISel::FinishBasicBlock() {
1002
1003 DEBUG(dbgs() << "Total amount of phi nodes to update: "
1004 << FuncInfo->PHINodesToUpdate.size() << "\n";
1005 for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i)
1006 dbgs() << "Node " << i << " : ("
1007 << FuncInfo->PHINodesToUpdate[i].first
1008 << ", " << FuncInfo->PHINodesToUpdate[i].second << ")\n");
1009
1010 // Next, now that we know what the last MBB the LLVM BB expanded is, update
1011 // PHI nodes in successors.
1012 if (SDB->SwitchCases.empty() &&
1013 SDB->JTCases.empty() &&
1014 SDB->BitTestCases.empty()) {
1015 for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i) {
1016 MachineInstr *PHI = FuncInfo->PHINodesToUpdate[i].first;
1017 assert(PHI->isPHI() &&
1018 "This is not a machine PHI node that we are updating!");
1019 if (!FuncInfo->MBB->isSuccessor(PHI->getParent()))
1020 continue;
1021 PHI->addOperand(
1022 MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[i].second, false));
1023 PHI->addOperand(MachineOperand::CreateMBB(FuncInfo->MBB));
1024 }
1025 return;
1026 }
1027
1028 for (unsigned i = 0, e = SDB->BitTestCases.size(); i != e; ++i) {
1029 // Lower header first, if it wasn't already lowered
1030 if (!SDB->BitTestCases[i].Emitted) {
1031 // Set the current basic block to the mbb we wish to insert the code into
1032 FuncInfo->MBB = SDB->BitTestCases[i].Parent;
1033 FuncInfo->InsertPt = FuncInfo->MBB->end();
1034 // Emit the code
1035 SDB->visitBitTestHeader(SDB->BitTestCases[i], FuncInfo->MBB);
1036 CurDAG->setRoot(SDB->getRoot());
1037 SDB->clear();
1038 CodeGenAndEmitDAG();
1039 }
1040
1041 for (unsigned j = 0, ej = SDB->BitTestCases[i].Cases.size(); j != ej; ++j) {
1042 // Set the current basic block to the mbb we wish to insert the code into
1043 FuncInfo->MBB = SDB->BitTestCases[i].Cases[j].ThisBB;
1044 FuncInfo->InsertPt = FuncInfo->MBB->end();
1045 // Emit the code
1046 if (j+1 != ej)
1047 SDB->visitBitTestCase(SDB->BitTestCases[i],
1048 SDB->BitTestCases[i].Cases[j+1].ThisBB,
1049 SDB->BitTestCases[i].Reg,
1050 SDB->BitTestCases[i].Cases[j],
1051 FuncInfo->MBB);
1052 else
1053 SDB->visitBitTestCase(SDB->BitTestCases[i],
1054 SDB->BitTestCases[i].Default,
1055 SDB->BitTestCases[i].Reg,
1056 SDB->BitTestCases[i].Cases[j],
1057 FuncInfo->MBB);
1058
1059
1060 CurDAG->setRoot(SDB->getRoot());
1061 SDB->clear();
1062 CodeGenAndEmitDAG();
1063 }
1064
1065 // Update PHI Nodes
1066 for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
1067 pi != pe; ++pi) {
1068 MachineInstr *PHI = FuncInfo->PHINodesToUpdate[pi].first;
1069 MachineBasicBlock *PHIBB = PHI->getParent();
1070 assert(PHI->isPHI() &&
1071 "This is not a machine PHI node that we are updating!");
1072 // This is "default" BB. We have two jumps to it. From "header" BB and
1073 // from last "case" BB.
1074 if (PHIBB == SDB->BitTestCases[i].Default) {
1075 PHI->addOperand(MachineOperand::
1076 CreateReg(FuncInfo->PHINodesToUpdate[pi].second,
1077 false));
1078 PHI->addOperand(MachineOperand::CreateMBB(SDB->BitTestCases[i].Parent));
1079 PHI->addOperand(MachineOperand::
1080 CreateReg(FuncInfo->PHINodesToUpdate[pi].second,
1081 false));
1082 PHI->addOperand(MachineOperand::CreateMBB(SDB->BitTestCases[i].Cases.
1083 back().ThisBB));
1084 }
1085 // One of "cases" BB.
1086 for (unsigned j = 0, ej = SDB->BitTestCases[i].Cases.size();
1087 j != ej; ++j) {
1088 MachineBasicBlock* cBB = SDB->BitTestCases[i].Cases[j].ThisBB;
1089 if (cBB->isSuccessor(PHIBB)) {
1090 PHI->addOperand(MachineOperand::
1091 CreateReg(FuncInfo->PHINodesToUpdate[pi].second,
1092 false));
1093 PHI->addOperand(MachineOperand::CreateMBB(cBB));
1094 }
1095 }
1096 }
1097 }
1098 SDB->BitTestCases.clear();
1099
1100 // If the JumpTable record is filled in, then we need to emit a jump table.
1101 // Updating the PHI nodes is tricky in this case, since we need to determine
1102 // whether the PHI is a successor of the range check MBB or the jump table MBB
1103 for (unsigned i = 0, e = SDB->JTCases.size(); i != e; ++i) {
1104 // Lower header first, if it wasn't already lowered
1105 if (!SDB->JTCases[i].first.Emitted) {
1106 // Set the current basic block to the mbb we wish to insert the code into
1107 FuncInfo->MBB = SDB->JTCases[i].first.HeaderBB;
1108 FuncInfo->InsertPt = FuncInfo->MBB->end();
1109 // Emit the code
1110 SDB->visitJumpTableHeader(SDB->JTCases[i].second, SDB->JTCases[i].first,
1111 FuncInfo->MBB);
1112 CurDAG->setRoot(SDB->getRoot());
1113 SDB->clear();
1114 CodeGenAndEmitDAG();
1115 }
1116
1117 // Set the current basic block to the mbb we wish to insert the code into
1118 FuncInfo->MBB = SDB->JTCases[i].second.MBB;
1119 FuncInfo->InsertPt = FuncInfo->MBB->end();
1120 // Emit the code
1121 SDB->visitJumpTable(SDB->JTCases[i].second);
1122 CurDAG->setRoot(SDB->getRoot());
1123 SDB->clear();
1124 CodeGenAndEmitDAG();
1125
1126 // Update PHI Nodes
1127 for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
1128 pi != pe; ++pi) {
1129 MachineInstr *PHI = FuncInfo->PHINodesToUpdate[pi].first;
1130 MachineBasicBlock *PHIBB = PHI->getParent();
1131 assert(PHI->isPHI() &&
1132 "This is not a machine PHI node that we are updating!");
1133 // "default" BB. We can go there only from header BB.
1134 if (PHIBB == SDB->JTCases[i].second.Default) {
1135 PHI->addOperand
1136 (MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[pi].second,
1137 false));
1138 PHI->addOperand
1139 (MachineOperand::CreateMBB(SDB->JTCases[i].first.HeaderBB));
1140 }
1141 // JT BB. Just iterate over successors here
1142 if (FuncInfo->MBB->isSuccessor(PHIBB)) {
1143 PHI->addOperand
1144 (MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[pi].second,
1145 false));
1146 PHI->addOperand(MachineOperand::CreateMBB(FuncInfo->MBB));
1147 }
1148 }
1149 }
1150 SDB->JTCases.clear();
1151
1152 // If the switch block involved a branch to one of the actual successors, we
1153 // need to update PHI nodes in that block.
1154 for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i) {
1155 MachineInstr *PHI = FuncInfo->PHINodesToUpdate[i].first;
1156 assert(PHI->isPHI() &&
1157 "This is not a machine PHI node that we are updating!");
1158 if (FuncInfo->MBB->isSuccessor(PHI->getParent())) {
1159 PHI->addOperand(
1160 MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[i].second, false));
1161 PHI->addOperand(MachineOperand::CreateMBB(FuncInfo->MBB));
1162 }
1163 }
1164
1165 // If we generated any switch lowering information, build and codegen any
1166 // additional DAGs necessary.
1167 for (unsigned i = 0, e = SDB->SwitchCases.size(); i != e; ++i) {
1168 // Set the current basic block to the mbb we wish to insert the code into
1169 FuncInfo->MBB = SDB->SwitchCases[i].ThisBB;
1170 FuncInfo->InsertPt = FuncInfo->MBB->end();
1171
1172 // Determine the unique successors.
1173 SmallVector<MachineBasicBlock *, 2> Succs;
1174 Succs.push_back(SDB->SwitchCases[i].TrueBB);
1175 if (SDB->SwitchCases[i].TrueBB != SDB->SwitchCases[i].FalseBB)
1176 Succs.push_back(SDB->SwitchCases[i].FalseBB);
1177
1178 // Emit the code. Note that this could result in FuncInfo->MBB being split.
1179 SDB->visitSwitchCase(SDB->SwitchCases[i], FuncInfo->MBB);
1180 CurDAG->setRoot(SDB->getRoot());
1181 SDB->clear();
1182 CodeGenAndEmitDAG();
1183
1184 // Remember the last block, now that any splitting is done, for use in
1185 // populating PHI nodes in successors.
1186 MachineBasicBlock *ThisBB = FuncInfo->MBB;
1187
1188 // Handle any PHI nodes in successors of this chunk, as if we were coming
1189 // from the original BB before switch expansion. Note that PHI nodes can
1190 // occur multiple times in PHINodesToUpdate. We have to be very careful to
1191 // handle them the right number of times.
1192 for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
1193 FuncInfo->MBB = Succs[i];
1194 FuncInfo->InsertPt = FuncInfo->MBB->end();
1195 // FuncInfo->MBB may have been removed from the CFG if a branch was
1196 // constant folded.
1197 if (ThisBB->isSuccessor(FuncInfo->MBB)) {
1198 for (MachineBasicBlock::iterator Phi = FuncInfo->MBB->begin();
1199 Phi != FuncInfo->MBB->end() && Phi->isPHI();
1200 ++Phi) {
1201 // This value for this PHI node is recorded in PHINodesToUpdate.
1202 for (unsigned pn = 0; ; ++pn) {
1203 assert(pn != FuncInfo->PHINodesToUpdate.size() &&
1204 "Didn't find PHI entry!");
1205 if (FuncInfo->PHINodesToUpdate[pn].first == Phi) {
1206 Phi->addOperand(MachineOperand::
1207 CreateReg(FuncInfo->PHINodesToUpdate[pn].second,
1208 false));
1209 Phi->addOperand(MachineOperand::CreateMBB(ThisBB));
1210 break;
1211 }
1212 }
1213 }
1214 }
1215 }
1216 }
1217 SDB->SwitchCases.clear();
1218 }
1219
1220
1221 /// Create the scheduler. If a specific scheduler was specified
1222 /// via the SchedulerRegistry, use it, otherwise select the
1223 /// one preferred by the target.
1224 ///
1225 ScheduleDAGSDNodes *SelectionDAGISel::CreateScheduler() {
1226 RegisterScheduler::FunctionPassCtor Ctor = RegisterScheduler::getDefault();
1227
1228 if (!Ctor) {
1229 Ctor = ISHeuristic;
1230 RegisterScheduler::setDefault(Ctor);
1231 }
1232
1233 return Ctor(this, OptLevel);
1234 }
1235
1236 //===----------------------------------------------------------------------===//
1237 // Helper functions used by the generated instruction selector.
1238 //===----------------------------------------------------------------------===//
1239 // Calls to these methods are generated by tblgen.
1240
1241 /// CheckAndMask - The isel is trying to match something like (and X, 255). If
1242 /// the dag combiner simplified the 255, we still want to match. RHS is the
1243 /// actual value in the DAG on the RHS of an AND, and DesiredMaskS is the value
1244 /// specified in the .td file (e.g. 255).
1245 bool SelectionDAGISel::CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
1246 int64_t DesiredMaskS) const {
1247 const APInt &ActualMask = RHS->getAPIntValue();
1248 const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
1249
1250 // If the actual mask exactly matches, success!
1251 if (ActualMask == DesiredMask)
1252 return true;
1253
1254 // If the actual AND mask is allowing unallowed bits, this doesn't match.
1255 if (ActualMask.intersects(~DesiredMask))
1256 return false;
1257
1258 // Otherwise, the DAG Combiner may have proven that the value coming in is
1259 // either already zero or is not demanded. Check for known zero input bits.
1260 APInt NeededMask = DesiredMask & ~ActualMask;
1261 if (CurDAG->MaskedValueIsZero(LHS, NeededMask))
1262 return true;
1263
1264 // TODO: check to see if missing bits are just not demanded.
1265
1266 // Otherwise, this pattern doesn't match.
1267 return false;
1268 }
1269
1270 /// CheckOrMask - The isel is trying to match something like (or X, 255). If
1271 /// the dag combiner simplified the 255, we still want to match. RHS is the
1272 /// actual value in the DAG on the RHS of an OR, and DesiredMaskS is the value
1273 /// specified in the .td file (e.g. 255).
1274 bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
1275 int64_t DesiredMaskS) const {
1276 const APInt &ActualMask = RHS->getAPIntValue();
1277 const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
1278
1279 // If the actual mask exactly matches, success!
1280 if (ActualMask == DesiredMask)
1281 return true;
1282
1283 // If the actual AND mask is allowing unallowed bits, this doesn't match.
1284 if (ActualMask.intersects(~DesiredMask))
1285 return false;
1286
1287 // Otherwise, the DAG Combiner may have proven that the value coming in is
1288 // either already zero or is not demanded. Check for known zero input bits.
1289 APInt NeededMask = DesiredMask & ~ActualMask;
1290
1291 APInt KnownZero, KnownOne;
1292 CurDAG->ComputeMaskedBits(LHS, NeededMask, KnownZero, KnownOne);
1293
1294 // If all the missing bits in the or are already known to be set, match!
1295 if ((NeededMask & KnownOne) == NeededMask)
1296 return true;
1297
1298 // TODO: check to see if missing bits are just not demanded.
1299
1300 // Otherwise, this pattern doesn't match.
1301 return false;
1302 }
1303
1304
1305 /// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
1306 /// by tblgen. Others should not call it.
1307 void SelectionDAGISel::
1308 SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops) {
1309 std::vector<SDValue> InOps;
1310 std::swap(InOps, Ops);
1311
1312 Ops.push_back(InOps[InlineAsm::Op_InputChain]); // 0
1313 Ops.push_back(InOps[InlineAsm::Op_AsmString]); // 1
1314 Ops.push_back(InOps[InlineAsm::Op_MDNode]); // 2, !srcloc
1315 Ops.push_back(InOps[InlineAsm::Op_ExtraInfo]); // 3 (SideEffect, AlignStack)
1316
1317 unsigned i = InlineAsm::Op_FirstOperand, e = InOps.size();
1318 if (InOps[e-1].getValueType() == MVT::Glue)
1319 --e; // Don't process a glue operand if it is here.
1320
1321 while (i != e) {
1322 unsigned Flags = cast<ConstantSDNode>(InOps[i])->getZExtValue();
1323 if (!InlineAsm::isMemKind(Flags)) {
1324 // Just skip over this operand, copying the operands verbatim.
1325 Ops.insert(Ops.end(), InOps.begin()+i,
1326 InOps.begin()+i+InlineAsm::getNumOperandRegisters(Flags) + 1);
1327 i += InlineAsm::getNumOperandRegisters(Flags) + 1;
1328 } else {
1329 assert(InlineAsm::getNumOperandRegisters(Flags) == 1 &&
1330 "Memory operand with multiple values?");
1331 // Otherwise, this is a memory operand. Ask the target to select it.
1332 std::vector<SDValue> SelOps;
1333 if (SelectInlineAsmMemoryOperand(InOps[i+1], 'm', SelOps))
1334 report_fatal_error("Could not match memory address. Inline asm"
1335 " failure!");
1336
1337 // Add this to the output node.
1338 unsigned NewFlags =
1339 InlineAsm::getFlagWord(InlineAsm::Kind_Mem, SelOps.size());
1340 Ops.push_back(CurDAG->getTargetConstant(NewFlags, MVT::i32));
1341 Ops.insert(Ops.end(), SelOps.begin(), SelOps.end());
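// Skip the flag word and the single memory operand in the input list.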
1342 i += 2;
1343 }
1344 }
1345
1346 // Add the glue input back if present.
1347 if (e != InOps.size())
1348 Ops.push_back(InOps.back());
1349 }
1350
1351 /// findGlueUse - Return use of MVT::Glue value produced by the specified
1352 /// SDNode.
1353 ///
1354 static SDNode *findGlueUse(SDNode *N) {
1355 unsigned FlagResNo = N->getNumValues()-1;
1356 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
1357 SDUse &Use = I.getUse();
1358 if (Use.getResNo() == FlagResNo)
1359 return Use.getUser();
1360 }
1361 return NULL;
1362 }
1363
1364 /// findNonImmUse - Return true if "Use" is a non-immediate use of "Def".
1365 /// This function recursively traverses up the operand chain, ignoring
1366 /// certain nodes.
1367 static bool findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
1368 SDNode *Root, SmallPtrSet<SDNode*, 16> &Visited,
1369 bool IgnoreChains) {
1370 // Node IDs are assigned such that a node's ID is guaranteed to be
1371 // greater than all of its (recursive) operands. If we scan to a point where
1372 // 'use' is smaller than the node we're scanning for, then we know we will
1373 // never find it.
1374 //
1375 // The Use's ID may be -1 (unassigned) if it is a newly allocated node. This can
1376 // happen because we scan down to newly selected nodes in the case of glue
1377 // uses.
1378 if ((Use->getNodeId() < Def->getNodeId() && Use->getNodeId() != -1))
1379 return false;
1380
1381 // Don't revisit nodes if we already scanned it and didn't fail, we know we
1382 // won't fail if we scan it again.
1383 if (!Visited.insert(Use))
1384 return false;
1385
1386 for (unsigned i = 0, e = Use->getNumOperands(); i != e; ++i) {
1387 // Ignore chain uses, they are validated by HandleMergeInputChains.
1388 if (Use->getOperand(i).getValueType() == MVT::Other && IgnoreChains)
1389 continue;
1390
1391 SDNode *N = Use->getOperand(i).getNode();
1392 if (N == Def) {
1393 if (Use == ImmedUse || Use == Root)
1394 continue; // We are not looking for immediate use.
1395 assert(N != Root);
1396 return true;
1397 }
1398
1399 // Traverse up the operand chain.
1400 if (findNonImmUse(N, Def, ImmedUse, Root, Visited, IgnoreChains))
1401 return true;
1402 }
1403 return false;
1404 }
1405
1406 /// IsProfitableToFold - Returns true if it's profitable to fold the specific
1407 /// operand node N of U during instruction selection that starts at Root.
1408 bool SelectionDAGISel::IsProfitableToFold(SDValue N, SDNode *U,
1409 SDNode *Root) const {
1410 if (OptLevel == CodeGenOpt::None) return false;
1411 return N.hasOneUse();
1412 }
1413
1414 /// IsLegalToFold - Returns true if the specific operand node N of
1415 /// U can be folded during instruction selection that starts at Root.
1416 bool SelectionDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
1417 CodeGenOpt::Level OptLevel,
1418 bool IgnoreChains) {
1419 if (OptLevel == CodeGenOpt::None) return false;
1420
1421 // If the Root use can somehow reach N through a path that doesn't contain
1422 // U, then folding N would create a cycle. e.g. In the following
1423 // diagram, Root can reach N through X. If N is folded into Root, then
1424 // X is both a predecessor and a successor of U.
1425 //
1426 // [N*] //
1427 // ^ ^ //
1428 // / \ //
1429 // [U*] [X]? //
1430 // ^ ^ //
1431 // \ / //
1432 // \ / //
1433 // [Root*] //
1434 //
1435 // * indicates nodes to be folded together.
1436 //
1437 // If Root produces glue, then it gets (even more) interesting. Since it
1438 // will be "glued" together with its glue use in the scheduler, we need to
1439 // check if it might reach N.
1440 //
1441 // [N*] //
1442 // ^ ^ //
1443 // / \ //
1444 // [U*] [X]? //
1445 // ^ ^ //
1446 // \ \ //
1447 // \ | //
1448 // [Root*] | //
1449 // ^ | //
1450 // f | //
1451 // | / //
1452 // [Y] / //
1453 // ^ / //
1454 // f / //
1455 // | / //
1456 // [GU] //
1457 //
1458 // If GU (glue use) indirectly reaches N (the load), and Root folds N
1459 // (call it Fold), then X is a predecessor of GU and a successor of
1460 // Fold. But since Fold and GU are glued together, this will create
1461 // a cycle in the scheduling graph.
1462
1463 // If the node has glue, walk down the graph to the "lowest" node in the
1464   // glued set.
1465 EVT VT = Root->getValueType(Root->getNumValues()-1);
1466 while (VT == MVT::Glue) {
1467 SDNode *GU = findGlueUse(Root);
1468 if (GU == NULL)
1469 break;
1470 Root = GU;
1471 VT = Root->getValueType(Root->getNumValues()-1);
1472
1473 // If our query node has a glue result with a use, we've walked up it. If
1474 // the user (which has already been selected) has a chain or indirectly uses
1475 // the chain, our WalkChainUsers predicate will not consider it. Because of
1476 // this, we cannot ignore chains in this predicate.
1477 IgnoreChains = false;
1478 }
1479
1480
1481 SmallPtrSet<SDNode*, 16> Visited;
1482 return !findNonImmUse(Root, N.getNode(), U, Root, Visited, IgnoreChains);
1483 }
1484
1485 SDNode *SelectionDAGISel::Select_INLINEASM(SDNode *N) {
1486 std::vector<SDValue> Ops(N->op_begin(), N->op_end());
1487 SelectInlineAsmMemoryOperands(Ops);
1488
1489 std::vector<EVT> VTs;
1490 VTs.push_back(MVT::Other);
1491 VTs.push_back(MVT::Glue);
1492 SDValue New = CurDAG->getNode(ISD::INLINEASM, N->getDebugLoc(),
1493 VTs, &Ops[0], Ops.size());
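  // A node ID of -1 marks the new node as already selected.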
1494 New->setNodeId(-1);
1495 return New.getNode();
1496 }
1497
1498 SDNode *SelectionDAGISel::Select_UNDEF(SDNode *N) {
1499 return CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF,N->getValueType(0));
1500 }
1501
1502 /// GetVBR - decode a vbr encoding whose top bit is set.
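/// Each byte stores 7 bits of payload; bit 7 set on a byte means another byte
/// follows, with less-significant chunks appearing first. For example, the
/// value 300 is emitted as the bytes 0xAC, 0x02: the caller reads 0xAC, sees
/// bit 7 set, and calls GetVBR to fold in the rest, giving
/// (0xAC & 127) | (0x02 << 7) == 300.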
1503 LLVM_ATTRIBUTE_ALWAYS_INLINE static uint64_t
1504 GetVBR(uint64_t Val, const unsigned char *MatcherTable, unsigned &Idx) {
1505 assert(Val >= 128 && "Not a VBR");
1506 Val &= 127; // Remove first vbr bit.
1507
1508 unsigned Shift = 7;
1509 uint64_t NextBits;
1510 do {
1511 NextBits = MatcherTable[Idx++];
1512 Val |= (NextBits&127) << Shift;
1513 Shift += 7;
1514 } while (NextBits & 128);
1515
1516 return Val;
1517 }
1518
1519
1520 /// UpdateChainsAndGlue - When a match is complete, this method updates uses of
1521 /// interior glue and chain results to use the new glue and chain results.
1522 void SelectionDAGISel::
1523 UpdateChainsAndGlue(SDNode *NodeToMatch, SDValue InputChain,
1524 const SmallVectorImpl<SDNode*> &ChainNodesMatched,
1525 SDValue InputGlue,
1526 const SmallVectorImpl<SDNode*> &GlueResultNodesMatched,
1527 bool isMorphNodeTo) {
1528 SmallVector<SDNode*, 4> NowDeadNodes;
1529
1530 ISelUpdater ISU(ISelPosition);
1531
1532 // Now that all the normal results are replaced, we replace the chain and
1533 // glue results if present.
1534 if (!ChainNodesMatched.empty()) {
1535 assert(InputChain.getNode() != 0 &&
1536 "Matched input chains but didn't produce a chain");
1537 // Loop over all of the nodes we matched that produced a chain result.
1538 // Replace all the chain results with the final chain we ended up with.
1539 for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
1540 SDNode *ChainNode = ChainNodesMatched[i];
1541
1542 // If this node was already deleted, don't look at it.
1543 if (ChainNode->getOpcode() == ISD::DELETED_NODE)
1544 continue;
1545
1546 // Don't replace the results of the root node if we're doing a
1547 // MorphNodeTo.
1548 if (ChainNode == NodeToMatch && isMorphNodeTo)
1549 continue;
1550
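      // The chain result is the node's last value, unless the node also
      // produces glue, in which case the chain is the second-to-last value.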
1551 SDValue ChainVal = SDValue(ChainNode, ChainNode->getNumValues()-1);
1552 if (ChainVal.getValueType() == MVT::Glue)
1553 ChainVal = ChainVal.getValue(ChainVal->getNumValues()-2);
1554 assert(ChainVal.getValueType() == MVT::Other && "Not a chain?");
1555 CurDAG->ReplaceAllUsesOfValueWith(ChainVal, InputChain, &ISU);
1556
1557 // If the node became dead and we haven't already seen it, delete it.
1558 if (ChainNode->use_empty() &&
1559 !std::count(NowDeadNodes.begin(), NowDeadNodes.end(), ChainNode))
1560 NowDeadNodes.push_back(ChainNode);
1561 }
1562 }
1563
1564 // If the result produces glue, update any glue results in the matched
1565 // pattern with the glue result.
1566 if (InputGlue.getNode() != 0) {
1567 // Handle any interior nodes explicitly marked.
1568 for (unsigned i = 0, e = GlueResultNodesMatched.size(); i != e; ++i) {
1569 SDNode *FRN = GlueResultNodesMatched[i];
1570
1571 // If this node was already deleted, don't look at it.
1572 if (FRN->getOpcode() == ISD::DELETED_NODE)
1573 continue;
1574
1575 assert(FRN->getValueType(FRN->getNumValues()-1) == MVT::Glue &&
1576 "Doesn't have a glue result");
1577 CurDAG->ReplaceAllUsesOfValueWith(SDValue(FRN, FRN->getNumValues()-1),
1578 InputGlue, &ISU);
1579
1580 // If the node became dead and we haven't already seen it, delete it.
1581 if (FRN->use_empty() &&
1582 !std::count(NowDeadNodes.begin(), NowDeadNodes.end(), FRN))
1583 NowDeadNodes.push_back(FRN);
1584 }
1585 }
1586
1587 if (!NowDeadNodes.empty())
1588 CurDAG->RemoveDeadNodes(NowDeadNodes, &ISU);
1589
1590 DEBUG(errs() << "ISEL: Match complete!\n");
1591 }
1592
1593 enum ChainResult {
1594 CR_Simple,
1595 CR_InducesCycle,
1596 CR_LeadsToInteriorNode
1597 };
1598
1599 /// WalkChainUsers - Walk down the users of the specified chained node that is
1600 /// part of the pattern we're matching, looking at all of the users we find.
1601 /// This determines whether something is an interior node, whether we have a
1602 /// non-pattern node in between two pattern nodes (which prevents folding because
1603 /// it would induce a cycle) and whether we have a TokenFactor node sandwiched
1604 /// between pattern nodes (in which case the TF becomes part of the pattern).
1605 ///
1606 /// The walk we do here is guaranteed to be small because we quickly get down to
1607 /// already selected nodes "below" us.
1608 static ChainResult
1609 WalkChainUsers(SDNode *ChainedNode,
1610 SmallVectorImpl<SDNode*> &ChainedNodesInPattern,
1611 SmallVectorImpl<SDNode*> &InteriorChainedNodes) {
1612 ChainResult Result = CR_Simple;
1613
1614 for (SDNode::use_iterator UI = ChainedNode->use_begin(),
1615 E = ChainedNode->use_end(); UI != E; ++UI) {
1616 // Make sure the use is of the chain, not some other value we produce.
1617 if (UI.getUse().getValueType() != MVT::Other) continue;
1618
1619 SDNode *User = *UI;
1620
1621 // If we see an already-selected machine node, then we've gone beyond the
1622 // pattern that we're selecting down into the already selected chunk of the
1623 // DAG.
1624 if (User->isMachineOpcode() ||
1625 User->getOpcode() == ISD::HANDLENODE) // Root of the graph.
1626 continue;
1627
1628 if (User->getOpcode() == ISD::CopyToReg ||
1629 User->getOpcode() == ISD::CopyFromReg ||
1630 User->getOpcode() == ISD::INLINEASM ||
1631 User->getOpcode() == ISD::EH_LABEL) {
1632 // If their node ID got reset to -1 then they've already been selected.
1633 // Treat them like a MachineOpcode.
1634 if (User->getNodeId() == -1)
1635 continue;
1636 }
1637
1638 // If we have a TokenFactor, we handle it specially.
1639 if (User->getOpcode() != ISD::TokenFactor) {
1640 // If the node isn't a token factor and isn't part of our pattern, then it
1641 // must be a random chained node in between two nodes we're selecting.
1642 // This happens when we have something like:
1643 // x = load ptr
1644 // call
1645 // y = x+4
1646 // store y -> ptr
1647 // Because we structurally match the load/store as a read/modify/write,
1648 // but the call is chained between them. We cannot fold in this case
1649 // because it would induce a cycle in the graph.
1650 if (!std::count(ChainedNodesInPattern.begin(),
1651 ChainedNodesInPattern.end(), User))
1652 return CR_InducesCycle;
1653
1654 // Otherwise we found a node that is part of our pattern. For example in:
1655 // x = load ptr
1656 // y = x+4
1657 // store y -> ptr
1658 // This would happen when we're scanning down from the load and see the
1659 // store as a user. Record that there is a use of ChainedNode that is
1660 // part of the pattern and keep scanning uses.
1661 Result = CR_LeadsToInteriorNode;
1662 InteriorChainedNodes.push_back(User);
1663 continue;
1664 }
1665
1666 // If we found a TokenFactor, there are two cases to consider: first if the
1667 // TokenFactor is just hanging "below" the pattern we're matching (i.e. no
1668 // uses of the TF are in our pattern) we just want to ignore it. Second,
1669 // the TokenFactor can be sandwiched in between two chained nodes, like so:
1670 // [Load chain]
1671 // ^
1672 // |
1673 // [Load]
1674 // ^ ^
1675 // | \ DAG's like cheese
1676 // / \ do you?
1677 // / |
1678 // [TokenFactor] [Op]
1679 // ^ ^
1680 // | |
1681 // \ /
1682 // \ /
1683 // [Store]
1684 //
1685 // In this case, the TokenFactor becomes part of our match and we rewrite it
1686 // as a new TokenFactor.
1687 //
1688 // To distinguish these two cases, do a recursive walk down the uses.
1689 switch (WalkChainUsers(User, ChainedNodesInPattern, InteriorChainedNodes)) {
1690 case CR_Simple:
1691 // If the uses of the TokenFactor are just already-selected nodes, ignore
1692 // it, it is "below" our pattern.
1693 continue;
1694 case CR_InducesCycle:
1695 // If the uses of the TokenFactor lead to nodes that are not part of our
1696 // pattern that are not selected, folding would turn this into a cycle,
1697 // bail out now.
1698 return CR_InducesCycle;
1699 case CR_LeadsToInteriorNode:
1700 break; // Otherwise, keep processing.
1701 }
1702
1703 // Okay, we know we're in the interesting interior case. The TokenFactor
1704 // is now going to be considered part of the pattern so that we rewrite its
1705 // uses (it may have uses that are not part of the pattern) with the
1706 // ultimate chain result of the generated code. We will also add its chain
1707 // inputs as inputs to the ultimate TokenFactor we create.
1708 Result = CR_LeadsToInteriorNode;
1709 ChainedNodesInPattern.push_back(User);
1710 InteriorChainedNodes.push_back(User);
1711 continue;
1712 }
1713
1714 return Result;
1715 }
1716
1717 /// HandleMergeInputChains - This implements the OPC_EmitMergeInputChains
1718 /// operation for when the pattern matched at least one node with a chain. The
1719 /// input vector contains a list of all of the chained nodes that we match. We
1720 /// must determine if this is a valid thing to cover (i.e. matching it won't
1721 /// induce cycles in the DAG) and, if so, create a TokenFactor node that will
1722 /// be used as the input node chain for the generated nodes.
1723 static SDValue
1724 HandleMergeInputChains(SmallVectorImpl<SDNode*> &ChainNodesMatched,
1725 SelectionDAG *CurDAG) {
1726 // Walk all of the chained nodes we've matched, recursively scanning down the
1727 // users of the chain result. This adds any TokenFactor nodes that are caught
1728 // in between chained nodes to the chained and interior nodes list.
1729 SmallVector<SDNode*, 3> InteriorChainedNodes;
1730 for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
1731 if (WalkChainUsers(ChainNodesMatched[i], ChainNodesMatched,
1732 InteriorChainedNodes) == CR_InducesCycle)
1733 return SDValue(); // Would induce a cycle.
1734 }
1735
1736 // Okay, we have walked all the matched nodes and collected TokenFactor nodes
1737 // that we are interested in. Form our input TokenFactor node.
1738 SmallVector<SDValue, 3> InputChains;
1739 for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
1740 // Add the input chain of this node to the InputChains list (which will be
1741 // the operands of the generated TokenFactor) if it's not an interior node.
1742 SDNode *N = ChainNodesMatched[i];
1743 if (N->getOpcode() != ISD::TokenFactor) {
1744 if (std::count(InteriorChainedNodes.begin(),InteriorChainedNodes.end(),N))
1745 continue;
1746
1747 // Otherwise, add the input chain.
1748 SDValue InChain = ChainNodesMatched[i]->getOperand(0);
1749 assert(InChain.getValueType() == MVT::Other && "Not a chain");
1750 InputChains.push_back(InChain);
1751 continue;
1752 }
1753
1754 // If we have a token factor, we want to add all inputs of the token factor
1755 // that are not part of the pattern we're matching.
1756 for (unsigned op = 0, e = N->getNumOperands(); op != e; ++op) {
1757 if (!std::count(ChainNodesMatched.begin(), ChainNodesMatched.end(),
1758 N->getOperand(op).getNode()))
1759 InputChains.push_back(N->getOperand(op));
1760 }
1761 }
1762
1763 SDValue Res;
1764 if (InputChains.size() == 1)
1765 return InputChains[0];
1766 return CurDAG->getNode(ISD::TokenFactor, ChainNodesMatched[0]->getDebugLoc(),
1767 MVT::Other, &InputChains[0], InputChains.size());
1768 }
1769
1770 /// MorphNode - Handle morphing a node in place for the selector.
1771 SDNode *SelectionDAGISel::
1772 MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
1773 const SDValue *Ops, unsigned NumOps, unsigned EmitNodeInfo) {
1774 // It is possible we're using MorphNodeTo to replace a node with no
1775 // normal results with one that has a normal result (or we could be
1776 // adding a chain) and the input could have glue and chains as well.
1777 // In this case we need to shift the operands down.
1778 // FIXME: This is a horrible hack and broken in obscure cases, no worse
1779 // than the old isel though.
1780 int OldGlueResultNo = -1, OldChainResultNo = -1;
1781
1782 unsigned NTMNumResults = Node->getNumValues();
1783 if (Node->getValueType(NTMNumResults-1) == MVT::Glue) {
1784 OldGlueResultNo = NTMNumResults-1;
1785 if (NTMNumResults != 1 &&
1786 Node->getValueType(NTMNumResults-2) == MVT::Other)
1787 OldChainResultNo = NTMNumResults-2;
1788 } else if (Node->getValueType(NTMNumResults-1) == MVT::Other)
1789 OldChainResultNo = NTMNumResults-1;
1790
1791 // Call the underlying SelectionDAG routine to do the transmogrification. Note
1792 // that this deletes operands of the old node that become dead.
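  // Machine opcodes are stored in SDNodes as the one's complement of the
  // target opcode, which is why ~TargetOpc is passed here.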
1793 SDNode *Res = CurDAG->MorphNodeTo(Node, ~TargetOpc, VTList, Ops, NumOps);
1794
1795 // MorphNodeTo can operate in two ways: if an existing node with the
1796 // specified operands exists, it can just return it. Otherwise, it
1797 // updates the node in place to have the requested operands.
1798 if (Res == Node) {
1799 // If we updated the node in place, reset the node ID. To the isel,
1800 // this should be just like a newly allocated machine node.
1801 Res->setNodeId(-1);
1802 }
1803
1804 unsigned ResNumResults = Res->getNumValues();
1805 // Move the glue if needed.
1806 if ((EmitNodeInfo & OPFL_GlueOutput) && OldGlueResultNo != -1 &&
1807 (unsigned)OldGlueResultNo != ResNumResults-1)
1808 CurDAG->ReplaceAllUsesOfValueWith(SDValue(Node, OldGlueResultNo),
1809 SDValue(Res, ResNumResults-1));
1810
1811 if ((EmitNodeInfo & OPFL_GlueOutput) != 0)
1812 --ResNumResults;
1813
1814 // Move the chain reference if needed.
1815 if ((EmitNodeInfo & OPFL_Chain) && OldChainResultNo != -1 &&
1816 (unsigned)OldChainResultNo != ResNumResults-1)
1817 CurDAG->ReplaceAllUsesOfValueWith(SDValue(Node, OldChainResultNo),
1818 SDValue(Res, ResNumResults-1));
1819
1820 // Otherwise, no replacement happened because the node already exists. Replace
1821 // Uses of the old node with the new one.
1822 if (Res != Node)
1823 CurDAG->ReplaceAllUsesWith(Node, Res);
1824
1825 return Res;
1826 }
1827
1828 /// CheckSame - Implements OP_CheckSame.
1829 LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
1830 CheckSame(const unsigned char *MatcherTable, unsigned &MatcherIndex,
1831 SDValue N,
1832 const SmallVectorImpl<std::pair<SDValue, SDNode*> > &RecordedNodes) {
1833 // Accept if it is exactly the same as a previously recorded node.
1834 unsigned RecNo = MatcherTable[MatcherIndex++];
1835 assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
1836 return N == RecordedNodes[RecNo].first;
1837 }
1838
1839 /// CheckPatternPredicate - Implements OP_CheckPatternPredicate.
1840 LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
1841 CheckPatternPredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
1842 SelectionDAGISel &SDISel) {
1843 return SDISel.CheckPatternPredicate(MatcherTable[MatcherIndex++]);
1844 }
1845
1846 /// CheckNodePredicate - Implements OP_CheckNodePredicate.
1847 LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
1848 CheckNodePredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
1849 SelectionDAGISel &SDISel, SDNode *N) {
1850 return SDISel.CheckNodePredicate(N, MatcherTable[MatcherIndex++]);
1851 }
1852
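/// CheckOpcode - Implements OP_CheckOpcode. The expected opcode is stored as
/// two bytes in the matcher table, low byte first.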
1853 LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
1854 CheckOpcode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
1855 SDNode *N) {
1856 uint16_t Opc = MatcherTable[MatcherIndex++];
1857 Opc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
1858 return N->getOpcode() == Opc;
1859 }
1860
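/// CheckType - Implements OP_CheckType. An iPTR entry in the matcher table
/// matches the target's pointer type.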
1861 LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
1862 CheckType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
1863 SDValue N, const TargetLowering &TLI) {
1864 MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
1865 if (N.getValueType() == VT) return true;
1866
1867 // Handle the case when VT is iPTR.
1868 return VT == MVT::iPTR && N.getValueType() == TLI.getPointerTy();
1869 }
1870
1871 LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
1872 CheckChildType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
1873 SDValue N, const TargetLowering &TLI,
1874 unsigned ChildNo) {
1875 if (ChildNo >= N.getNumOperands())
1876 return false; // Match fails if out of range child #.
1877 return ::CheckType(MatcherTable, MatcherIndex, N.getOperand(ChildNo), TLI);
1878 }
1879
1880
1881 LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
1882 CheckCondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
1883 SDValue N) {
1884 return cast<CondCodeSDNode>(N)->get() ==
1885 (ISD::CondCode)MatcherTable[MatcherIndex++];
1886 }
1887
1888 LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
1889 CheckValueType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
1890 SDValue N, const TargetLowering &TLI) {
1891 MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
1892 if (cast<VTSDNode>(N)->getVT() == VT)
1893 return true;
1894
1895 // Handle the case when VT is iPTR.
1896 return VT == MVT::iPTR && cast<VTSDNode>(N)->getVT() == TLI.getPointerTy();
1897 }
1898
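/// CheckInteger - Implements OP_CheckInteger. The immediate to compare against
/// is read from the matcher table, decoding a VBR value if the first byte has
/// its top bit set.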
1899 LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
1900 CheckInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
1901 SDValue N) {
1902 int64_t Val = MatcherTable[MatcherIndex++];
1903 if (Val & 128)
1904 Val = GetVBR(Val, MatcherTable, MatcherIndex);
1905
1906 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
1907 return C != 0 && C->getSExtValue() == Val;
1908 }
1909
1910 LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
1911 CheckAndImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
1912 SDValue N, SelectionDAGISel &SDISel) {
1913 int64_t Val = MatcherTable[MatcherIndex++];
1914 if (Val & 128)
1915 Val = GetVBR(Val, MatcherTable, MatcherIndex);
1916
1917 if (N->getOpcode() != ISD::AND) return false;
1918
1919 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
1920 return C != 0 && SDISel.CheckAndMask(N.getOperand(0), C, Val);
1921 }
1922
1923 LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
1924 CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
1925 SDValue N, SelectionDAGISel &SDISel) {
1926 int64_t Val = MatcherTable[MatcherIndex++];
1927 if (Val & 128)
1928 Val = GetVBR(Val, MatcherTable, MatcherIndex);
1929
1930 if (N->getOpcode() != ISD::OR) return false;
1931
1932 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
1933 return C != 0 && SDISel.CheckOrMask(N.getOperand(0), C, Val);
1934 }
1935
1936 /// IsPredicateKnownToFail - If we know how and can do so without pushing a
1937 /// scope, evaluate the current node. If the current predicate is known to
1938 /// fail, set Result=true and return anything. If the current predicate is
1939 /// known to pass, set Result=false and return the MatcherIndex to continue
1940 /// with. If the current predicate is unknown, set Result=false and return the
1941 /// MatcherIndex to continue with.
1942 static unsigned IsPredicateKnownToFail(const unsigned char *Table,
1943 unsigned Index, SDValue N,
1944 bool &Result, SelectionDAGISel &SDISel,
1945 SmallVectorImpl<std::pair<SDValue, SDNode*> > &RecordedNodes) {
1946 switch (Table[Index++]) {
1947 default:
1948 Result = false;
1949 return Index-1; // Could not evaluate this predicate.
1950 case SelectionDAGISel::OPC_CheckSame:
1951 Result = !::CheckSame(Table, Index, N, RecordedNodes);
1952 return Index;
1953 case SelectionDAGISel::OPC_CheckPatternPredicate:
1954 Result = !::CheckPatternPredicate(Table, Index, SDISel);
1955 return Index;
1956 case SelectionDAGISel::OPC_CheckPredicate:
1957 Result = !::CheckNodePredicate(Table, Index, SDISel, N.getNode());
1958 return Index;
1959 case SelectionDAGISel::OPC_CheckOpcode:
1960 Result = !::CheckOpcode(Table, Index, N.getNode());
1961 return Index;
1962 case SelectionDAGISel::OPC_CheckType:
1963 Result = !::CheckType(Table, Index, N, SDISel.TLI);
1964 return Index;
1965 case SelectionDAGISel::OPC_CheckChild0Type:
1966 case SelectionDAGISel::OPC_CheckChild1Type:
1967 case SelectionDAGISel::OPC_CheckChild2Type:
1968 case SelectionDAGISel::OPC_CheckChild3Type:
1969 case SelectionDAGISel::OPC_CheckChild4Type:
1970 case SelectionDAGISel::OPC_CheckChild5Type:
1971 case SelectionDAGISel::OPC_CheckChild6Type:
1972 case SelectionDAGISel::OPC_CheckChild7Type:
1973 Result = !::CheckChildType(Table, Index, N, SDISel.TLI,
1974 Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Type);
1975 return Index;
1976 case SelectionDAGISel::OPC_CheckCondCode:
1977 Result = !::CheckCondCode(Table, Index, N);
1978 return Index;
1979 case SelectionDAGISel::OPC_CheckValueType:
1980 Result = !::CheckValueType(Table, Index, N, SDISel.TLI);
1981 return Index;
1982 case SelectionDAGISel::OPC_CheckInteger:
1983 Result = !::CheckInteger(Table, Index, N);
1984 return Index;
1985 case SelectionDAGISel::OPC_CheckAndImm:
1986 Result = !::CheckAndImm(Table, Index, N, SDISel);
1987 return Index;
1988 case SelectionDAGISel::OPC_CheckOrImm:
1989 Result = !::CheckOrImm(Table, Index, N, SDISel);
1990 return Index;
1991 }
1992 }
1993
1994 namespace {
1995
1996 struct MatchScope {
1997 /// FailIndex - If this match fails, this is the index to continue with.
1998 unsigned FailIndex;
1999
2000 /// NodeStack - The node stack when the scope was formed.
2001 SmallVector<SDValue, 4> NodeStack;
2002
2003 /// NumRecordedNodes - The number of recorded nodes when the scope was formed.
2004 unsigned NumRecordedNodes;
2005
2006 /// NumMatchedMemRefs - The number of matched memref entries.
2007 unsigned NumMatchedMemRefs;
2008
2009 /// InputChain/InputGlue - The current chain/glue
2010 SDValue InputChain, InputGlue;
2011
2012 /// HasChainNodesMatched - True if the ChainNodesMatched list is non-empty.
2013 bool HasChainNodesMatched, HasGlueResultNodesMatched;
2014 };
2015
2016 }
2017
2018 SDNode *SelectionDAGISel::
2019 SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
2020 unsigned TableSize) {
2021 // FIXME: Should these even be selected? Handle these cases in the caller?
2022 switch (NodeToMatch->getOpcode()) {
2023 default:
2024 break;
2025 case ISD::EntryToken: // These nodes remain the same.
2026 case ISD::BasicBlock:
2027 case ISD::Register:
2028 //case ISD::VALUETYPE:
2029 //case ISD::CONDCODE:
2030 case ISD::HANDLENODE:
2031 case ISD::MDNODE_SDNODE:
2032 case ISD::TargetConstant:
2033 case ISD::TargetConstantFP:
2034 case ISD::TargetConstantPool:
2035 case ISD::TargetFrameIndex:
2036 case ISD::TargetExternalSymbol:
2037 case ISD::TargetBlockAddress:
2038 case ISD::TargetJumpTable:
2039 case ISD::TargetGlobalTLSAddress:
2040 case ISD::TargetGlobalAddress:
2041 case ISD::TokenFactor:
2042 case ISD::CopyFromReg:
2043 case ISD::CopyToReg:
2044 case ISD::EH_LABEL:
2045 NodeToMatch->setNodeId(-1); // Mark selected.
2046 return 0;
2047 case ISD::AssertSext:
2048 case ISD::AssertZext:
2049 CurDAG->ReplaceAllUsesOfValueWith(SDValue(NodeToMatch, 0),
2050 NodeToMatch->getOperand(0));
2051 return 0;
2052 case ISD::INLINEASM: return Select_INLINEASM(NodeToMatch);
2053 case ISD::UNDEF: return Select_UNDEF(NodeToMatch);
2054 }
2055
2056 assert(!NodeToMatch->isMachineOpcode() && "Node already selected!");
2057
2058 // Set up the node stack with NodeToMatch as the only node on the stack.
2059 SmallVector<SDValue, 8> NodeStack;
2060 SDValue N = SDValue(NodeToMatch, 0);
2061 NodeStack.push_back(N);
2062
2063   // MatchScopes - Scopes used when matching; if a match failure happens, this
2064 // indicates where to continue checking.
2065 SmallVector<MatchScope, 8> MatchScopes;
2066
2067 // RecordedNodes - This is the set of nodes that have been recorded by the
2068 // state machine. The second value is the parent of the node, or null if the
2069 // root is recorded.
2070 SmallVector<std::pair<SDValue, SDNode*>, 8> RecordedNodes;
2071
2072 // MatchedMemRefs - This is the set of MemRef's we've seen in the input
2073 // pattern.
2074 SmallVector<MachineMemOperand*, 2> MatchedMemRefs;
2075
2076 // These are the current input chain and glue for use when generating nodes.
2077 // Various Emit operations change these. For example, emitting a copytoreg
2078 // uses and updates these.
2079 SDValue InputChain, InputGlue;
2080
2081 // ChainNodesMatched - If a pattern matches nodes that have input/output
2082 // chains, the OPC_EmitMergeInputChains operation is emitted which indicates
2083 // which ones they are. The result is captured into this list so that we can
2084 // update the chain results when the pattern is complete.
2085 SmallVector<SDNode*, 3> ChainNodesMatched;
2086 SmallVector<SDNode*, 3> GlueResultNodesMatched;
2087
2088 DEBUG(errs() << "ISEL: Starting pattern match on root node: ";
2089 NodeToMatch->dump(CurDAG);
2090 errs() << '\n');
2091
2092 // Determine where to start the interpreter. Normally we start at opcode #0,
2093 // but if the state machine starts with an OPC_SwitchOpcode, then we
2094 // accelerate the first lookup (which is guaranteed to be hot) with the
2095 // OpcodeOffset table.
2096 unsigned MatcherIndex = 0;
2097
2098 if (!OpcodeOffset.empty()) {
2099 // Already computed the OpcodeOffset table, just index into it.
2100 if (N.getOpcode() < OpcodeOffset.size())
2101 MatcherIndex = OpcodeOffset[N.getOpcode()];
2102 DEBUG(errs() << " Initial Opcode index to " << MatcherIndex << "\n");
2103
2104 } else if (MatcherTable[0] == OPC_SwitchOpcode) {
2105 // Otherwise, the table isn't computed, but the state machine does start
2106 // with an OPC_SwitchOpcode instruction. Populate the table now, since this
2107 // is the first time we're selecting an instruction.
2108 unsigned Idx = 1;
2109 while (1) {
2110 // Get the size of this case.
2111 unsigned CaseSize = MatcherTable[Idx++];
2112 if (CaseSize & 128)
2113 CaseSize = GetVBR(CaseSize, MatcherTable, Idx);
2114 if (CaseSize == 0) break;
2115
2116 // Get the opcode, add the index to the table.
2117 uint16_t Opc = MatcherTable[Idx++];
2118 Opc |= (unsigned short)MatcherTable[Idx++] << 8;
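      // Grow the offset table if needed; over-allocating (to twice the
      // required size) cuts down on reallocations as larger opcodes appear.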
2119 if (Opc >= OpcodeOffset.size())
2120 OpcodeOffset.resize((Opc+1)*2);
2121 OpcodeOffset[Opc] = Idx;
2122 Idx += CaseSize;
2123 }
2124
2125 // Okay, do the lookup for the first opcode.
2126 if (N.getOpcode() < OpcodeOffset.size())
2127 MatcherIndex = OpcodeOffset[N.getOpcode()];
2128 }
2129
2130 while (1) {
2131 assert(MatcherIndex < TableSize && "Invalid index");
2132 #ifndef NDEBUG
2133 unsigned CurrentOpcodeIndex = MatcherIndex;
2134 #endif
2135 BuiltinOpcodes Opcode = (BuiltinOpcodes)MatcherTable[MatcherIndex++];
2136 switch (Opcode) {
2137 case OPC_Scope: {
2138 // Okay, the semantics of this operation are that we should push a scope
2139 // then evaluate the first child. However, pushing a scope only to have
2140 // the first check fail (which then pops it) is inefficient. If we can
2141 // determine immediately that the first check (or first several) will
2142 // immediately fail, don't even bother pushing a scope for them.
2143 unsigned FailIndex;
2144
2145 while (1) {
2146 unsigned NumToSkip = MatcherTable[MatcherIndex++];
2147 if (NumToSkip & 128)
2148 NumToSkip = GetVBR(NumToSkip, MatcherTable, MatcherIndex);
2149 // Found the end of the scope with no match.
2150 if (NumToSkip == 0) {
2151 FailIndex = 0;
2152 break;
2153 }
2154
2155 FailIndex = MatcherIndex+NumToSkip;
2156
2157 unsigned MatcherIndexOfPredicate = MatcherIndex;
2158 (void)MatcherIndexOfPredicate; // silence warning.
2159
2160 // If we can't evaluate this predicate without pushing a scope (e.g. if
2161 // it is a 'MoveParent') or if the predicate succeeds on this node, we
2162 // push the scope and evaluate the full predicate chain.
2163 bool Result;
2164 MatcherIndex = IsPredicateKnownToFail(MatcherTable, MatcherIndex, N,
2165 Result, *this, RecordedNodes);
2166 if (!Result)
2167 break;
2168
2169 DEBUG(errs() << " Skipped scope entry (due to false predicate) at "
2170 << "index " << MatcherIndexOfPredicate
2171 << ", continuing at " << FailIndex << "\n");
2172 ++NumDAGIselRetries;
2173
2174         // Otherwise, we know that this case of the Scope is guaranteed to fail, so
2175         // move on to the next case.
2176 MatcherIndex = FailIndex;
2177 }
2178
2179 // If the whole scope failed to match, bail.
2180 if (FailIndex == 0) break;
2181
2182 // Push a MatchScope which indicates where to go if the first child fails
2183 // to match.
2184 MatchScope NewEntry;
2185 NewEntry.FailIndex = FailIndex;
2186 NewEntry.NodeStack.append(NodeStack.begin(), NodeStack.end());
2187 NewEntry.NumRecordedNodes = RecordedNodes.size();
2188 NewEntry.NumMatchedMemRefs = MatchedMemRefs.size();
2189 NewEntry.InputChain = InputChain;
2190 NewEntry.InputGlue = InputGlue;
2191 NewEntry.HasChainNodesMatched = !ChainNodesMatched.empty();
2192 NewEntry.HasGlueResultNodesMatched = !GlueResultNodesMatched.empty();
2193 MatchScopes.push_back(NewEntry);
2194 continue;
2195 }
2196 case OPC_RecordNode: {
2197 // Remember this node, it may end up being an operand in the pattern.
2198 SDNode *Parent = 0;
2199 if (NodeStack.size() > 1)
2200 Parent = NodeStack[NodeStack.size()-2].getNode();
2201 RecordedNodes.push_back(std::make_pair(N, Parent));
2202 continue;
2203 }
2204
2205 case OPC_RecordChild0: case OPC_RecordChild1:
2206 case OPC_RecordChild2: case OPC_RecordChild3:
2207 case OPC_RecordChild4: case OPC_RecordChild5:
2208 case OPC_RecordChild6: case OPC_RecordChild7: {
2209 unsigned ChildNo = Opcode-OPC_RecordChild0;
2210 if (ChildNo >= N.getNumOperands())
2211 break; // Match fails if out of range child #.
2212
2213 RecordedNodes.push_back(std::make_pair(N->getOperand(ChildNo),
2214 N.getNode()));
2215 continue;
2216 }
2217 case OPC_RecordMemRef:
2218 MatchedMemRefs.push_back(cast<MemSDNode>(N)->getMemOperand());
2219 continue;
2220
2221 case OPC_CaptureGlueInput:
2222 // If the current node has an input glue, capture it in InputGlue.
2223 if (N->getNumOperands() != 0 &&
2224 N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue)
2225 InputGlue = N->getOperand(N->getNumOperands()-1);
2226 continue;
2227
2228 case OPC_MoveChild: {
2229 unsigned ChildNo = MatcherTable[MatcherIndex++];
2230 if (ChildNo >= N.getNumOperands())
2231 break; // Match fails if out of range child #.
2232 N = N.getOperand(ChildNo);
2233 NodeStack.push_back(N);
2234 continue;
2235 }
2236
2237 case OPC_MoveParent:
2238 // Pop the current node off the NodeStack.
2239 NodeStack.pop_back();
2240 assert(!NodeStack.empty() && "Node stack imbalance!");
2241 N = NodeStack.back();
2242 continue;
2243
2244 case OPC_CheckSame:
2245 if (!::CheckSame(MatcherTable, MatcherIndex, N, RecordedNodes)) break;
2246 continue;
2247 case OPC_CheckPatternPredicate:
2248 if (!::CheckPatternPredicate(MatcherTable, MatcherIndex, *this)) break;
2249 continue;
2250 case OPC_CheckPredicate:
2251 if (!::CheckNodePredicate(MatcherTable, MatcherIndex, *this,
2252 N.getNode()))
2253 break;
2254 continue;
2255 case OPC_CheckComplexPat: {
2256 unsigned CPNum = MatcherTable[MatcherIndex++];
2257 unsigned RecNo = MatcherTable[MatcherIndex++];
2258 assert(RecNo < RecordedNodes.size() && "Invalid CheckComplexPat");
2259 if (!CheckComplexPattern(NodeToMatch, RecordedNodes[RecNo].second,
2260 RecordedNodes[RecNo].first, CPNum,
2261 RecordedNodes))
2262 break;
2263 continue;
2264 }
2265 case OPC_CheckOpcode:
2266 if (!::CheckOpcode(MatcherTable, MatcherIndex, N.getNode())) break;
2267 continue;
2268
2269 case OPC_CheckType:
2270 if (!::CheckType(MatcherTable, MatcherIndex, N, TLI)) break;
2271 continue;
2272
2273 case OPC_SwitchOpcode: {
2274 unsigned CurNodeOpcode = N.getOpcode();
2275 unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
2276 unsigned CaseSize;
2277 while (1) {
2278 // Get the size of this case.
2279 CaseSize = MatcherTable[MatcherIndex++];
2280 if (CaseSize & 128)
2281 CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
2282 if (CaseSize == 0) break;
2283
2284 uint16_t Opc = MatcherTable[MatcherIndex++];
2285 Opc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
2286
2287 // If the opcode matches, then we will execute this case.
2288 if (CurNodeOpcode == Opc)
2289 break;
2290
2291 // Otherwise, skip over this case.
2292 MatcherIndex += CaseSize;
2293 }
2294
2295 // If no cases matched, bail out.
2296 if (CaseSize == 0) break;
2297
2298 // Otherwise, execute the case we found.
2299 DEBUG(errs() << " OpcodeSwitch from " << SwitchStart
2300 << " to " << MatcherIndex << "\n");
2301 continue;
2302 }
2303
2304 case OPC_SwitchType: {
2305 MVT CurNodeVT = N.getValueType().getSimpleVT();
2306 unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
2307 unsigned CaseSize;
2308 while (1) {
2309 // Get the size of this case.
2310 CaseSize = MatcherTable[MatcherIndex++];
2311 if (CaseSize & 128)
2312 CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
2313 if (CaseSize == 0) break;
2314
2315 MVT CaseVT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
2316 if (CaseVT == MVT::iPTR)
2317 CaseVT = TLI.getPointerTy();
2318
2319 // If the VT matches, then we will execute this case.
2320 if (CurNodeVT == CaseVT)
2321 break;
2322
2323 // Otherwise, skip over this case.
2324 MatcherIndex += CaseSize;
2325 }
2326
2327 // If no cases matched, bail out.
2328 if (CaseSize == 0) break;
2329
2330 // Otherwise, execute the case we found.
2331 DEBUG(errs() << " TypeSwitch[" << EVT(CurNodeVT).getEVTString()
2332 << "] from " << SwitchStart << " to " << MatcherIndex<<'\n');
2333 continue;
2334 }
2335 case OPC_CheckChild0Type: case OPC_CheckChild1Type:
2336 case OPC_CheckChild2Type: case OPC_CheckChild3Type:
2337 case OPC_CheckChild4Type: case OPC_CheckChild5Type:
2338 case OPC_CheckChild6Type: case OPC_CheckChild7Type:
2339 if (!::CheckChildType(MatcherTable, MatcherIndex, N, TLI,
2340 Opcode-OPC_CheckChild0Type))
2341 break;
2342 continue;
2343 case OPC_CheckCondCode:
2344 if (!::CheckCondCode(MatcherTable, MatcherIndex, N)) break;
2345 continue;
2346 case OPC_CheckValueType:
2347 if (!::CheckValueType(MatcherTable, MatcherIndex, N, TLI)) break;
2348 continue;
2349 case OPC_CheckInteger:
2350 if (!::CheckInteger(MatcherTable, MatcherIndex, N)) break;
2351 continue;
2352 case OPC_CheckAndImm:
2353 if (!::CheckAndImm(MatcherTable, MatcherIndex, N, *this)) break;
2354 continue;
2355 case OPC_CheckOrImm:
2356 if (!::CheckOrImm(MatcherTable, MatcherIndex, N, *this)) break;
2357 continue;
2358
2359 case OPC_CheckFoldableChainNode: {
2360 assert(NodeStack.size() != 1 && "No parent node");
2361 // Verify that all intermediate nodes between the root and this one have
2362 // a single use.
2363 bool HasMultipleUses = false;
2364 for (unsigned i = 1, e = NodeStack.size()-1; i != e; ++i)
2365 if (!NodeStack[i].hasOneUse()) {
2366 HasMultipleUses = true;
2367 break;
2368 }
2369 if (HasMultipleUses) break;
2370
2371 // Check to see that the target thinks this is profitable to fold and that
2372 // we can fold it without inducing cycles in the graph.
2373 if (!IsProfitableToFold(N, NodeStack[NodeStack.size()-2].getNode(),
2374 NodeToMatch) ||
2375 !IsLegalToFold(N, NodeStack[NodeStack.size()-2].getNode(),
2376 NodeToMatch, OptLevel,
2377 true/*We validate our own chains*/))
2378 break;
2379
2380 continue;
2381 }
2382 case OPC_EmitInteger: {
2383 MVT::SimpleValueType VT =
2384 (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
2385 int64_t Val = MatcherTable[MatcherIndex++];
2386 if (Val & 128)
2387 Val = GetVBR(Val, MatcherTable, MatcherIndex);
2388 RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
2389 CurDAG->getTargetConstant(Val, VT), (SDNode*)0));
2390 continue;
2391 }
2392 case OPC_EmitRegister: {
2393 MVT::SimpleValueType VT =
2394 (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
2395 unsigned RegNo = MatcherTable[MatcherIndex++];
2396 RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
2397 CurDAG->getRegister(RegNo, VT), (SDNode*)0));
2398 continue;
2399 }
2400 case OPC_EmitRegister2: {
2401 // For targets w/ more than 256 register names, the register enum
2402 // values are stored in two bytes in the matcher table (just like
2403 // opcodes).
2404 MVT::SimpleValueType VT =
2405 (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
2406 unsigned RegNo = MatcherTable[MatcherIndex++];
2407 RegNo |= MatcherTable[MatcherIndex++] << 8;
2408 RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
2409 CurDAG->getRegister(RegNo, VT), (SDNode*)0));
2410 continue;
2411 }
2412
2413 case OPC_EmitConvertToTarget: {
2414 // Convert from IMM/FPIMM to target version.
2415 unsigned RecNo = MatcherTable[MatcherIndex++];
2416       assert(RecNo < RecordedNodes.size() && "Invalid EmitConvertToTarget");
2417 SDValue Imm = RecordedNodes[RecNo].first;
2418
2419 if (Imm->getOpcode() == ISD::Constant) {
2420 int64_t Val = cast<ConstantSDNode>(Imm)->getZExtValue();
2421 Imm = CurDAG->getTargetConstant(Val, Imm.getValueType());
2422 } else if (Imm->getOpcode() == ISD::ConstantFP) {
2423 const ConstantFP *Val=cast<ConstantFPSDNode>(Imm)->getConstantFPValue();
2424 Imm = CurDAG->getTargetConstantFP(*Val, Imm.getValueType());
2425 }
2426
2427 RecordedNodes.push_back(std::make_pair(Imm, RecordedNodes[RecNo].second));
2428 continue;
2429 }
2430
2431 case OPC_EmitMergeInputChains1_0: // OPC_EmitMergeInputChains, 1, 0
2432 case OPC_EmitMergeInputChains1_1: { // OPC_EmitMergeInputChains, 1, 1
2433 // These are space-optimized forms of OPC_EmitMergeInputChains.
2434 assert(InputChain.getNode() == 0 &&
2435 "EmitMergeInputChains should be the first chain producing node");
2436 assert(ChainNodesMatched.empty() &&
2437 "Should only have one EmitMergeInputChains per match");
2438
2439 // Read all of the chained nodes.
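      // The recorded-node index is implied by the opcode: 0 for
      // OPC_EmitMergeInputChains1_0 and 1 for OPC_EmitMergeInputChains1_1.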
2440 unsigned RecNo = Opcode == OPC_EmitMergeInputChains1_1;
2441       assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
2442 ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
2443
2444 // FIXME: What if other value results of the node have uses not matched
2445 // by this pattern?
2446 if (ChainNodesMatched.back() != NodeToMatch &&
2447 !RecordedNodes[RecNo].first.hasOneUse()) {
2448 ChainNodesMatched.clear();
2449 break;
2450 }
2451
2452 // Merge the input chains if they are not intra-pattern references.
2453 InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
2454
2455 if (InputChain.getNode() == 0)
2456 break; // Failed to merge.
2457 continue;
2458 }
2459
2460 case OPC_EmitMergeInputChains: {
2461 assert(InputChain.getNode() == 0 &&
2462 "EmitMergeInputChains should be the first chain producing node");
2463 // This node gets a list of nodes we matched in the input that have
2464 // chains. We want to token factor all of the input chains to these nodes
2465 // together. However, if any of the input chains is actually one of the
2466 // nodes matched in this pattern, then we have an intra-match reference.
2467 // Ignore these because the newly token factored chain should not refer to
2468 // the old nodes.
2469 unsigned NumChains = MatcherTable[MatcherIndex++];
2470 assert(NumChains != 0 && "Can't TF zero chains");
2471
2472 assert(ChainNodesMatched.empty() &&
2473 "Should only have one EmitMergeInputChains per match");
2474
2475 // Read all of the chained nodes.
2476 for (unsigned i = 0; i != NumChains; ++i) {
2477 unsigned RecNo = MatcherTable[MatcherIndex++];
2478         assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
2479 ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
2480
2481 // FIXME: What if other value results of the node have uses not matched
2482 // by this pattern?
2483 if (ChainNodesMatched.back() != NodeToMatch &&
2484 !RecordedNodes[RecNo].first.hasOneUse()) {
2485 ChainNodesMatched.clear();
2486 break;
2487 }
2488 }
2489
2490 // If the inner loop broke out, the match fails.
2491 if (ChainNodesMatched.empty())
2492 break;
2493
2494 // Merge the input chains if they are not intra-pattern references.
2495 InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
2496
2497 if (InputChain.getNode() == 0)
2498 break; // Failed to merge.
2499
2500 continue;
2501 }
2502
2503 case OPC_EmitCopyToReg: {
2504 unsigned RecNo = MatcherTable[MatcherIndex++];
2505       assert(RecNo < RecordedNodes.size() && "Invalid EmitCopyToReg");
2506 unsigned DestPhysReg = MatcherTable[MatcherIndex++];
2507
2508 if (InputChain.getNode() == 0)
2509 InputChain = CurDAG->getEntryNode();
2510
2511 InputChain = CurDAG->getCopyToReg(InputChain, NodeToMatch->getDebugLoc(),
2512 DestPhysReg, RecordedNodes[RecNo].first,
2513 InputGlue);
2514
2515 InputGlue = InputChain.getValue(1);
2516 continue;
2517 }
2518
2519 case OPC_EmitNodeXForm: {
2520 unsigned XFormNo = MatcherTable[MatcherIndex++];
2521 unsigned RecNo = MatcherTable[MatcherIndex++];
2522       assert(RecNo < RecordedNodes.size() && "Invalid EmitNodeXForm");
2523 SDValue Res = RunSDNodeXForm(RecordedNodes[RecNo].first, XFormNo);
2524 RecordedNodes.push_back(std::pair<SDValue,SDNode*>(Res, (SDNode*) 0));
2525 continue;
2526 }
2527
2528 case OPC_EmitNode:
2529 case OPC_MorphNodeTo: {
2530 uint16_t TargetOpc = MatcherTable[MatcherIndex++];
2531 TargetOpc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
2532 unsigned EmitNodeInfo = MatcherTable[MatcherIndex++];
2533 // Get the result VT list.
2534 unsigned NumVTs = MatcherTable[MatcherIndex++];
2535 SmallVector<EVT, 4> VTs;
2536 for (unsigned i = 0; i != NumVTs; ++i) {
2537 MVT::SimpleValueType VT =
2538 (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
2539 if (VT == MVT::iPTR) VT = TLI.getPointerTy().SimpleTy;
2540 VTs.push_back(VT);
2541 }
2542
2543 if (EmitNodeInfo & OPFL_Chain)
2544 VTs.push_back(MVT::Other);
2545 if (EmitNodeInfo & OPFL_GlueOutput)
2546 VTs.push_back(MVT::Glue);
2547
2548 // This is hot code, so optimize the two most common cases of 1 and 2
2549 // results.
2550 SDVTList VTList;
2551 if (VTs.size() == 1)
2552 VTList = CurDAG->getVTList(VTs[0]);
2553 else if (VTs.size() == 2)
2554 VTList = CurDAG->getVTList(VTs[0], VTs[1]);
2555 else
2556 VTList = CurDAG->getVTList(VTs.data(), VTs.size());
2557
2558 // Get the operand list.
2559 unsigned NumOps = MatcherTable[MatcherIndex++];
2560 SmallVector<SDValue, 8> Ops;
2561 for (unsigned i = 0; i != NumOps; ++i) {
2562 unsigned RecNo = MatcherTable[MatcherIndex++];
2563 if (RecNo & 128)
2564 RecNo = GetVBR(RecNo, MatcherTable, MatcherIndex);
2565
2566 assert(RecNo < RecordedNodes.size() && "Invalid EmitNode");
2567 Ops.push_back(RecordedNodes[RecNo].first);
2568 }
2569
2570 // If there are variadic operands to add, handle them now.
2571 if (EmitNodeInfo & OPFL_VariadicInfo) {
2572 // Determine the start index to copy from.
2573 unsigned FirstOpToCopy = getNumFixedFromVariadicInfo(EmitNodeInfo);
2574 FirstOpToCopy += (EmitNodeInfo & OPFL_Chain) ? 1 : 0;
2575 assert(NodeToMatch->getNumOperands() >= FirstOpToCopy &&
2576 "Invalid variadic node");
2577 // Copy all of the variadic operands, not including a potential glue
2578 // input.
2579 for (unsigned i = FirstOpToCopy, e = NodeToMatch->getNumOperands();
2580 i != e; ++i) {
2581 SDValue V = NodeToMatch->getOperand(i);
2582 if (V.getValueType() == MVT::Glue) break;
2583 Ops.push_back(V);
2584 }
2585 }
2586
2587 // If this has chain/glue inputs, add them.
2588 if (EmitNodeInfo & OPFL_Chain)
2589 Ops.push_back(InputChain);
2590 if ((EmitNodeInfo & OPFL_GlueInput) && InputGlue.getNode() != 0)
2591 Ops.push_back(InputGlue);
2592
2593 // Create the node.
2594 SDNode *Res = 0;
2595 if (Opcode != OPC_MorphNodeTo) {
2596 // If this is a normal EmitNode command, just create the new node and
2597 // add the results to the RecordedNodes list.
2598 Res = CurDAG->getMachineNode(TargetOpc, NodeToMatch->getDebugLoc(),
2599 VTList, Ops.data(), Ops.size());
2600
2601 // Add all the non-glue/non-chain results to the RecordedNodes list.
2602 for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
2603 if (VTs[i] == MVT::Other || VTs[i] == MVT::Glue) break;
2604 RecordedNodes.push_back(std::pair<SDValue,SDNode*>(SDValue(Res, i),
2605 (SDNode*) 0));
2606 }
2607
2608 } else {
2609 Res = MorphNode(NodeToMatch, TargetOpc, VTList, Ops.data(), Ops.size(),
2610 EmitNodeInfo);
2611 }
2612
2613 // If the node had chain/glue results, update our notion of the current
2614 // chain and glue.
2615 if (EmitNodeInfo & OPFL_GlueOutput) {
2616 InputGlue = SDValue(Res, VTs.size()-1);
2617 if (EmitNodeInfo & OPFL_Chain)
2618 InputChain = SDValue(Res, VTs.size()-2);
2619 } else if (EmitNodeInfo & OPFL_Chain)
2620 InputChain = SDValue(Res, VTs.size()-1);
2621
2622 // If the OPFL_MemRefs glue is set on this node, slap all of the
2623 // accumulated memrefs onto it.
2624 //
2625       // FIXME: This is vastly incorrect for patterns with multiple output
2626 // instructions that access memory and for ComplexPatterns that match
2627 // loads.
2628 if (EmitNodeInfo & OPFL_MemRefs) {
2629 // Only attach load or store memory operands if the generated
2630 // instruction may load or store.
2631 const MCInstrDesc &MCID = TM.getInstrInfo()->get(TargetOpc);
2632 bool mayLoad = MCID.mayLoad();
2633 bool mayStore = MCID.mayStore();
2634
2635 unsigned NumMemRefs = 0;
2636 for (SmallVector<MachineMemOperand*, 2>::const_iterator I =
2637 MatchedMemRefs.begin(), E = MatchedMemRefs.end(); I != E; ++I) {
2638 if ((*I)->isLoad()) {
2639 if (mayLoad)
2640 ++NumMemRefs;
2641 } else if ((*I)->isStore()) {
2642 if (mayStore)
2643 ++NumMemRefs;
2644 } else {
2645 ++NumMemRefs;
2646 }
2647 }
2648
2649 MachineSDNode::mmo_iterator MemRefs =
2650 MF->allocateMemRefsArray(NumMemRefs);
2651
2652 MachineSDNode::mmo_iterator MemRefsPos = MemRefs;
2653 for (SmallVector<MachineMemOperand*, 2>::const_iterator I =
2654 MatchedMemRefs.begin(), E = MatchedMemRefs.end(); I != E; ++I) {
2655 if ((*I)->isLoad()) {
2656 if (mayLoad)
2657 *MemRefsPos++ = *I;
2658 } else if ((*I)->isStore()) {
2659 if (mayStore)
2660 *MemRefsPos++ = *I;
2661 } else {
2662 *MemRefsPos++ = *I;
2663 }
2664 }
2665
2666 cast<MachineSDNode>(Res)
2667 ->setMemRefs(MemRefs, MemRefs + NumMemRefs);
2668 }
2669
2670 DEBUG(errs() << " "
2671 << (Opcode == OPC_MorphNodeTo ? "Morphed" : "Created")
2672 << " node: "; Res->dump(CurDAG); errs() << "\n");
2673
2674 // If this was a MorphNodeTo then we're completely done!
2675 if (Opcode == OPC_MorphNodeTo) {
2676 // Update chain and glue uses.
2677 UpdateChainsAndGlue(NodeToMatch, InputChain, ChainNodesMatched,
2678 InputGlue, GlueResultNodesMatched, true);
2679 return Res;
2680 }
2681
2682 continue;
2683 }
2684
2685 case OPC_MarkGlueResults: {
2686 unsigned NumNodes = MatcherTable[MatcherIndex++];
2687
2688 // Read and remember all the glue-result nodes.
2689 for (unsigned i = 0; i != NumNodes; ++i) {
2690 unsigned RecNo = MatcherTable[MatcherIndex++];
2691 if (RecNo & 128)
2692 RecNo = GetVBR(RecNo, MatcherTable, MatcherIndex);
2693
2694         assert(RecNo < RecordedNodes.size() && "Invalid MarkGlueResults");
2695 GlueResultNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
2696 }
2697 continue;
2698 }
2699
2700 case OPC_CompleteMatch: {
2701 // The match has been completed, and any new nodes (if any) have been
2702 // created. Patch up references to the matched dag to use the newly
2703 // created nodes.
2704 unsigned NumResults = MatcherTable[MatcherIndex++];
2705
2706 for (unsigned i = 0; i != NumResults; ++i) {
2707 unsigned ResSlot = MatcherTable[MatcherIndex++];
2708 if (ResSlot & 128)
2709 ResSlot = GetVBR(ResSlot, MatcherTable, MatcherIndex);
2710
2711         assert(ResSlot < RecordedNodes.size() && "Invalid CompleteMatch");
2712 SDValue Res = RecordedNodes[ResSlot].first;
2713
2714 assert(i < NodeToMatch->getNumValues() &&
2715 NodeToMatch->getValueType(i) != MVT::Other &&
2716 NodeToMatch->getValueType(i) != MVT::Glue &&
2717 "Invalid number of results to complete!");
2718 assert((NodeToMatch->getValueType(i) == Res.getValueType() ||
2719 NodeToMatch->getValueType(i) == MVT::iPTR ||
2720 Res.getValueType() == MVT::iPTR ||
2721 NodeToMatch->getValueType(i).getSizeInBits() ==
2722 Res.getValueType().getSizeInBits()) &&
2723 "invalid replacement");
2724 CurDAG->ReplaceAllUsesOfValueWith(SDValue(NodeToMatch, i), Res);
2725 }
2726
2727 // If the root node defines glue, add it to the glue nodes to update list.
2728 if (NodeToMatch->getValueType(NodeToMatch->getNumValues()-1) == MVT::Glue)
2729 GlueResultNodesMatched.push_back(NodeToMatch);
2730
2731 // Update chain and glue uses.
2732 UpdateChainsAndGlue(NodeToMatch, InputChain, ChainNodesMatched,
2733 InputGlue, GlueResultNodesMatched, false);
2734
2735 assert(NodeToMatch->use_empty() &&
2736 "Didn't replace all uses of the node?");
2737
2738 // FIXME: We just return here, which interacts correctly with SelectRoot
2739 // above. We should fix this to not return an SDNode* anymore.
2740 return 0;
2741 }
2742 }
2743
2744 // If the code reached this point, then the match failed. See if there is
2745     // another child to try in the current 'Scope'; otherwise pop scopes until we
2746 // find a case to check.
2747 DEBUG(errs() << " Match failed at index " << CurrentOpcodeIndex << "\n");
2748 ++NumDAGIselRetries;
2749 while (1) {
2750 if (MatchScopes.empty()) {
2751 CannotYetSelect(NodeToMatch);
2752 return 0;
2753 }
2754
2755 // Restore the interpreter state back to the point where the scope was
2756 // formed.
2757 MatchScope &LastScope = MatchScopes.back();
2758 RecordedNodes.resize(LastScope.NumRecordedNodes);
2759 NodeStack.clear();
2760 NodeStack.append(LastScope.NodeStack.begin(), LastScope.NodeStack.end());
2761 N = NodeStack.back();
2762
2763 if (LastScope.NumMatchedMemRefs != MatchedMemRefs.size())
2764 MatchedMemRefs.resize(LastScope.NumMatchedMemRefs);
2765 MatcherIndex = LastScope.FailIndex;
2766
2767 DEBUG(errs() << " Continuing at " << MatcherIndex << "\n");
2768
2769 InputChain = LastScope.InputChain;
2770 InputGlue = LastScope.InputGlue;
2771 if (!LastScope.HasChainNodesMatched)
2772 ChainNodesMatched.clear();
2773 if (!LastScope.HasGlueResultNodesMatched)
2774 GlueResultNodesMatched.clear();
2775
2776 // Check to see what the offset is at the new MatcherIndex. If it is zero
2777 // we have reached the end of this scope, otherwise we have another child
2778 // in the current scope to try.
2779 unsigned NumToSkip = MatcherTable[MatcherIndex++];
2780 if (NumToSkip & 128)
2781 NumToSkip = GetVBR(NumToSkip, MatcherTable, MatcherIndex);
2782
2783 // If we have another child in this scope to match, update FailIndex and
2784 // try it.
2785 if (NumToSkip != 0) {
2786 LastScope.FailIndex = MatcherIndex+NumToSkip;
2787 break;
2788 }
2789
2790 // End of this scope, pop it and try the next child in the containing
2791 // scope.
2792 MatchScopes.pop_back();
2793 }
2794 }
2795 }
2796
2797
2798
2799 void SelectionDAGISel::CannotYetSelect(SDNode *N) {
2800 std::string msg;
2801 raw_string_ostream Msg(msg);
2802 Msg << "Cannot select: ";
2803
2804 if (N->getOpcode() != ISD::INTRINSIC_W_CHAIN &&
2805 N->getOpcode() != ISD::INTRINSIC_WO_CHAIN &&
2806 N->getOpcode() != ISD::INTRINSIC_VOID) {
2807 N->printrFull(Msg, CurDAG);
2808 } else {
2809 bool HasInputChain = N->getOperand(0).getValueType() == MVT::Other;
2810 unsigned iid =
2811 cast<ConstantSDNode>(N->getOperand(HasInputChain))->getZExtValue();
2812 if (iid < Intrinsic::num_intrinsics)
2813 Msg << "intrinsic %" << Intrinsic::getName((Intrinsic::ID)iid);
2814 else if (const TargetIntrinsicInfo *TII = TM.getIntrinsicInfo())
2815 Msg << "target intrinsic %" << TII->getName(iid);
2816 else
2817 Msg << "unknown intrinsic #" << iid;
2818 }
2819 report_fatal_error(Msg.str());
2820 }
2821
2822 char SelectionDAGISel::ID = 0;
2823