1 //===-- MachineFunction.cpp -----------------------------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // Collect native machine code information for a function.  This allows
11 // target-specific information about the generated code to be stored with each
12 // function.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "llvm/CodeGen/MachineFunction.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SmallString.h"
19 #include "llvm/Analysis/ConstantFolding.h"
20 #include "llvm/Analysis/EHPersonalities.h"
21 #include "llvm/CodeGen/MachineConstantPool.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/CodeGen/MachineFunctionInitializer.h"
24 #include "llvm/CodeGen/MachineFunctionPass.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineJumpTableInfo.h"
27 #include "llvm/CodeGen/MachineModuleInfo.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/CodeGen/Passes.h"
30 #include "llvm/CodeGen/PseudoSourceValue.h"
31 #include "llvm/CodeGen/WinEHFuncInfo.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/DebugInfo.h"
34 #include "llvm/IR/Function.h"
35 #include "llvm/IR/Module.h"
36 #include "llvm/IR/ModuleSlotTracker.h"
37 #include "llvm/MC/MCAsmInfo.h"
38 #include "llvm/MC/MCContext.h"
39 #include "llvm/Support/Debug.h"
40 #include "llvm/Support/GraphWriter.h"
41 #include "llvm/Support/raw_ostream.h"
42 #include "llvm/Target/TargetFrameLowering.h"
43 #include "llvm/Target/TargetLowering.h"
44 #include "llvm/Target/TargetMachine.h"
45 #include "llvm/Target/TargetSubtargetInfo.h"
46 using namespace llvm;
47 
48 #define DEBUG_TYPE "codegen"
49 
50 void MachineFunctionInitializer::anchor() {}
51 
52 //===----------------------------------------------------------------------===//
53 // MachineFunction implementation
54 //===----------------------------------------------------------------------===//
55 
56 // Out-of-line virtual method.
57 MachineFunctionInfo::~MachineFunctionInfo() {}
58 
59 void ilist_traits<MachineBasicBlock>::deleteNode(MachineBasicBlock *MBB) {
60   MBB->getParent()->DeleteMachineBasicBlock(MBB);
61 }
62 
63 MachineFunction::MachineFunction(const Function *F, const TargetMachine &TM,
64                                  unsigned FunctionNum, MachineModuleInfo &mmi)
65     : Fn(F), Target(TM), STI(TM.getSubtargetImpl(*F)), Ctx(mmi.getContext()),
66       MMI(mmi) {
67   if (STI->getRegisterInfo())
68     RegInfo = new (Allocator) MachineRegisterInfo(this);
69   else
70     RegInfo = nullptr;
71 
72   MFInfo = nullptr;
73   FrameInfo = new (Allocator)
74       MachineFrameInfo(STI->getFrameLowering()->getStackAlignment(),
75                        STI->getFrameLowering()->isStackRealignable(),
76                        !F->hasFnAttribute("no-realign-stack"));
77 
78   if (Fn->hasFnAttribute(Attribute::StackAlignment))
79     FrameInfo->ensureMaxAlignment(Fn->getFnStackAlignment());
80 
81   ConstantPool = new (Allocator) MachineConstantPool(getDataLayout());
82   Alignment = STI->getTargetLowering()->getMinFunctionAlignment();
83 
84   // FIXME: Shouldn't use pref alignment if explicit alignment is set on Fn.
85   // FIXME: Use Function::optForSize().
86   if (!Fn->hasFnAttribute(Attribute::OptimizeForSize))
87     Alignment = std::max(Alignment,
88                          STI->getTargetLowering()->getPrefFunctionAlignment());
89 
90   FunctionNumber = FunctionNum;
91   JumpTableInfo = nullptr;
92 
93   if (isFuncletEHPersonality(classifyEHPersonality(
94           F->hasPersonalityFn() ? F->getPersonalityFn() : nullptr))) {
95     WinEHInfo = new (Allocator) WinEHFuncInfo();
96   }
97 
98   assert(TM.isCompatibleDataLayout(getDataLayout()) &&
99          "Can't create a MachineFunction using a Module with a "
100          "Target-incompatible DataLayout attached\n");
101 
102   PSVManager = llvm::make_unique<PseudoSourceValueManager>();
103 }
104 
105 MachineFunction::~MachineFunction() {
106   // Don't call destructors on MachineInstr and MachineOperand. All of their
107   // memory comes from the BumpPtrAllocator which is about to be purged.
108   //
109   // Do call MachineBasicBlock destructors; they contain std::vectors.
110   for (iterator I = begin(), E = end(); I != E; I = BasicBlocks.erase(I))
111     I->Insts.clearAndLeakNodesUnsafely();
112 
113   InstructionRecycler.clear(Allocator);
114   OperandRecycler.clear(Allocator);
115   BasicBlockRecycler.clear(Allocator);
116   if (RegInfo) {
117     RegInfo->~MachineRegisterInfo();
118     Allocator.Deallocate(RegInfo);
119   }
120   if (MFInfo) {
121     MFInfo->~MachineFunctionInfo();
122     Allocator.Deallocate(MFInfo);
123   }
124 
125   FrameInfo->~MachineFrameInfo();
126   Allocator.Deallocate(FrameInfo);
127 
128   ConstantPool->~MachineConstantPool();
129   Allocator.Deallocate(ConstantPool);
130 
131   if (JumpTableInfo) {
132     JumpTableInfo->~MachineJumpTableInfo();
133     Allocator.Deallocate(JumpTableInfo);
134   }
135 
136   if (WinEHInfo) {
137     WinEHInfo->~WinEHFuncInfo();
138     Allocator.Deallocate(WinEHInfo);
139   }
140 }
141 
142 const DataLayout &MachineFunction::getDataLayout() const {
143   return Fn->getParent()->getDataLayout();
144 }
145 
146 /// Get the JumpTableInfo for this function.
147 /// If it does not already exist, allocate one.
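/// A typical use (illustrative, not the only path) is during switch lowering:
///   MF.getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_LabelDifference32)
/// where the entry kind normally comes from TargetLowering::getJumpTableEncoding().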
148 MachineJumpTableInfo *MachineFunction::
149 getOrCreateJumpTableInfo(unsigned EntryKind) {
150   if (JumpTableInfo) return JumpTableInfo;
151 
152   JumpTableInfo = new (Allocator)
153     MachineJumpTableInfo((MachineJumpTableInfo::JTEntryKind)EntryKind);
154   return JumpTableInfo;
155 }
156 
157 /// Should we emit segmented-stack code for this function?
158 bool MachineFunction::shouldSplitStack() {
159   return getFunction()->hasFnAttribute("split-stack");
160 }
161 
162 /// This discards all of the MachineBasicBlock numbers and recomputes them.
163 /// This guarantees that the MBB numbers are sequential, dense, and match the
164 /// ordering of the blocks within the function.  If a specific MachineBasicBlock
165 /// is specified, only that block and those after it are renumbered.
166 void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
167   if (empty()) { MBBNumbering.clear(); return; }
168   MachineFunction::iterator MBBI, E = end();
169   if (MBB == nullptr)
170     MBBI = begin();
171   else
172     MBBI = MBB->getIterator();
173 
174   // Figure out the block number this should have.
175   unsigned BlockNo = 0;
176   if (MBBI != begin())
177     BlockNo = std::prev(MBBI)->getNumber() + 1;
178 
179   for (; MBBI != E; ++MBBI, ++BlockNo) {
180     if (MBBI->getNumber() != (int)BlockNo) {
181       // Remove use of the old number.
182       if (MBBI->getNumber() != -1) {
183         assert(MBBNumbering[MBBI->getNumber()] == &*MBBI &&
184                "MBB number mismatch!");
185         MBBNumbering[MBBI->getNumber()] = nullptr;
186       }
187 
188       // If BlockNo is already taken, set that block's number to -1.
189       if (MBBNumbering[BlockNo])
190         MBBNumbering[BlockNo]->setNumber(-1);
191 
192       MBBNumbering[BlockNo] = &*MBBI;
193       MBBI->setNumber(BlockNo);
194     }
195   }
196 
197   // Okay, all the blocks are renumbered.  If we have compactified the block
198   // numbering, shrink MBBNumbering now.
199   assert(BlockNo <= MBBNumbering.size() && "Mismatch!");
200   MBBNumbering.resize(BlockNo);
201 }
202 
203 /// Allocate a new MachineInstr. Use this instead of `new MachineInstr'.
204 MachineInstr *
205 MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
206                                     DebugLoc DL, bool NoImp) {
207   return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
208     MachineInstr(*this, MCID, DL, NoImp);
209 }
210 
211 /// Create a new MachineInstr which is a copy of the 'Orig' instruction,
212 /// identical in all ways except the instruction has no parent, prev, or next.
213 MachineInstr *
214 MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
215   return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
216              MachineInstr(*this, *Orig);
217 }
218 
219 /// Delete the given MachineInstr.
220 ///
221 /// This function also serves as the MachineInstr destructor - the real
222 /// ~MachineInstr() destructor must be empty.
223 void
224 MachineFunction::DeleteMachineInstr(MachineInstr *MI) {
225   // Strip it for parts. The operand array and the MI object itself are
226   // independently recyclable.
227   if (MI->Operands)
228     deallocateOperandArray(MI->CapOperands, MI->Operands);
229   // Don't call ~MachineInstr() which must be trivial anyway because
230   // ~MachineFunction drops whole lists of MachineInstrs without calling their
231   // destructors.
232   InstructionRecycler.Deallocate(Allocator, MI);
233 }
234 
235 /// Allocate a new MachineBasicBlock. Use this instead of
236 /// `new MachineBasicBlock'.
237 MachineBasicBlock *
238 MachineFunction::CreateMachineBasicBlock(const BasicBlock *bb) {
239   return new (BasicBlockRecycler.Allocate<MachineBasicBlock>(Allocator))
240              MachineBasicBlock(*this, bb);
241 }
242 
243 /// Delete the given MachineBasicBlock.
244 void
245 MachineFunction::DeleteMachineBasicBlock(MachineBasicBlock *MBB) {
246   assert(MBB->getParent() == this && "MBB parent mismatch!");
247   MBB->~MachineBasicBlock();
248   BasicBlockRecycler.Deallocate(Allocator, MBB);
249 }
250 
251 MachineMemOperand *
252 MachineFunction::getMachineMemOperand(MachinePointerInfo PtrInfo, unsigned f,
253                                       uint64_t s, unsigned base_alignment,
254                                       const AAMDNodes &AAInfo,
255                                       const MDNode *Ranges) {
256   return new (Allocator) MachineMemOperand(PtrInfo, f, s, base_alignment,
257                                            AAInfo, Ranges);
258 }
259 
260 MachineMemOperand *
261 MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
262                                       int64_t Offset, uint64_t Size) {
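  // Keep whichever kind of pointer the original MMO carried (IR Value or
  // PseudoSourceValue) and fold the extra Offset into the new
  // MachinePointerInfo.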
263   if (MMO->getValue())
264     return new (Allocator)
265                MachineMemOperand(MachinePointerInfo(MMO->getValue(),
266                                                     MMO->getOffset()+Offset),
267                                  MMO->getFlags(), Size,
268                                  MMO->getBaseAlignment());
269   return new (Allocator)
270              MachineMemOperand(MachinePointerInfo(MMO->getPseudoValue(),
271                                                   MMO->getOffset()+Offset),
272                                MMO->getFlags(), Size,
273                                MMO->getBaseAlignment());
274 }
275 
276 MachineInstr::mmo_iterator
277 MachineFunction::allocateMemRefsArray(unsigned long Num) {
278   return Allocator.Allocate<MachineMemOperand *>(Num);
279 }
280 
281 std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator>
282 MachineFunction::extractLoadMemRefs(MachineInstr::mmo_iterator Begin,
283                                     MachineInstr::mmo_iterator End) {
284   // Count the number of load mem refs.
285   unsigned Num = 0;
286   for (MachineInstr::mmo_iterator I = Begin; I != End; ++I)
287     if ((*I)->isLoad())
288       ++Num;
289 
290   // Allocate a new array and populate it with the load information.
291   MachineInstr::mmo_iterator Result = allocateMemRefsArray(Num);
292   unsigned Index = 0;
293   for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) {
294     if ((*I)->isLoad()) {
295       if (!(*I)->isStore())
296         // Reuse the MMO.
297         Result[Index] = *I;
298       else {
299         // Clone the MMO and unset the store flag.
300         MachineMemOperand *JustLoad =
301           getMachineMemOperand((*I)->getPointerInfo(),
302                                (*I)->getFlags() & ~MachineMemOperand::MOStore,
303                                (*I)->getSize(), (*I)->getBaseAlignment(),
304                                (*I)->getAAInfo());
305         Result[Index] = JustLoad;
306       }
307       ++Index;
308     }
309   }
310   return std::make_pair(Result, Result + Num);
311 }
312 
313 std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator>
314 MachineFunction::extractStoreMemRefs(MachineInstr::mmo_iterator Begin,
315                                      MachineInstr::mmo_iterator End) {
316   // Count the number of store mem refs.
317   unsigned Num = 0;
318   for (MachineInstr::mmo_iterator I = Begin; I != End; ++I)
319     if ((*I)->isStore())
320       ++Num;
321 
322   // Allocate a new array and populate it with the store information.
323   MachineInstr::mmo_iterator Result = allocateMemRefsArray(Num);
324   unsigned Index = 0;
325   for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) {
326     if ((*I)->isStore()) {
327       if (!(*I)->isLoad())
328         // Reuse the MMO.
329         Result[Index] = *I;
330       else {
331         // Clone the MMO and unset the load flag.
332         MachineMemOperand *JustStore =
333           getMachineMemOperand((*I)->getPointerInfo(),
334                                (*I)->getFlags() & ~MachineMemOperand::MOLoad,
335                                (*I)->getSize(), (*I)->getBaseAlignment(),
336                                (*I)->getAAInfo());
337         Result[Index] = JustStore;
338       }
339       ++Index;
340     }
341   }
342   return std::make_pair(Result, Result + Num);
343 }
344 
345 const char *MachineFunction::createExternalSymbolName(StringRef Name) {
346   char *Dest = Allocator.Allocate<char>(Name.size() + 1);
347   std::copy(Name.begin(), Name.end(), Dest);
348   Dest[Name.size()] = 0;
349   return Dest;
350 }
351 
352 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
353 void MachineFunction::dump() const {
354   print(dbgs());
355 }
356 #endif
357 
358 StringRef MachineFunction::getName() const {
359   assert(getFunction() && "No function!");
360   return getFunction()->getName();
361 }
362 
363 void MachineFunction::print(raw_ostream &OS, SlotIndexes *Indexes) const {
364   OS << "# Machine code for function " << getName() << ": ";
365   if (RegInfo) {
366     OS << (RegInfo->isSSA() ? "SSA" : "Post SSA");
367     if (!RegInfo->tracksLiveness())
368       OS << ", not tracking liveness";
369   }
370   OS << '\n';
371 
372   // Print Frame Information
373   FrameInfo->print(*this, OS);
374 
375   // Print JumpTable Information
376   if (JumpTableInfo)
377     JumpTableInfo->print(OS);
378 
379   // Print Constant Pool
380   ConstantPool->print(OS);
381 
382   const TargetRegisterInfo *TRI = getSubtarget().getRegisterInfo();
383 
384   if (RegInfo && !RegInfo->livein_empty()) {
385     OS << "Function Live Ins: ";
386     for (MachineRegisterInfo::livein_iterator
387          I = RegInfo->livein_begin(), E = RegInfo->livein_end(); I != E; ++I) {
388       OS << PrintReg(I->first, TRI);
389       if (I->second)
390         OS << " in " << PrintReg(I->second, TRI);
391       if (std::next(I) != E)
392         OS << ", ";
393     }
394     OS << '\n';
395   }
396 
397   ModuleSlotTracker MST(getFunction()->getParent());
398   MST.incorporateFunction(*getFunction());
399   for (const auto &BB : *this) {
400     OS << '\n';
401     BB.print(OS, MST, Indexes);
402   }
403 
404   OS << "\n# End machine code for function " << getName() << ".\n\n";
405 }
406 
407 namespace llvm {
408   template<>
409   struct DOTGraphTraits<const MachineFunction*> : public DefaultDOTGraphTraits {
410 
411   DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
412 
413     static std::string getGraphName(const MachineFunction *F) {
414       return ("CFG for '" + F->getName() + "' function").str();
415     }
416 
417     std::string getNodeLabel(const MachineBasicBlock *Node,
418                              const MachineFunction *Graph) {
419       std::string OutStr;
420       {
421         raw_string_ostream OSS(OutStr);
422 
423         if (isSimple()) {
424           OSS << "BB#" << Node->getNumber();
425           if (const BasicBlock *BB = Node->getBasicBlock())
426             OSS << ": " << BB->getName();
427         } else
428           Node->print(OSS);
429       }
430 
431       if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());
432 
433       // Process string output to make it nicer...
434       for (unsigned i = 0; i != OutStr.length(); ++i)
435         if (OutStr[i] == '\n') {                            // Left justify
436           OutStr[i] = '\\';
437           OutStr.insert(OutStr.begin()+i+1, 'l');
438         }
439       return OutStr;
440     }
441   };
442 }
443 
444 void MachineFunction::viewCFG() const
445 {
446 #ifndef NDEBUG
447   ViewGraph(this, "mf" + getName());
448 #else
449   errs() << "MachineFunction::viewCFG is only available in debug builds on "
450          << "systems with Graphviz or gv!\n";
451 #endif // NDEBUG
452 }
453 
454 void MachineFunction::viewCFGOnly() const
455 {
456 #ifndef NDEBUG
457   ViewGraph(this, "mf" + getName(), true);
458 #else
459   errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
460          << "systems with Graphviz or gv!\n";
461 #endif // NDEBUG
462 }
463 
464 /// Add the specified physical register as a live-in value and
465 /// create a corresponding virtual register for it.
466 unsigned MachineFunction::addLiveIn(unsigned PReg,
467                                     const TargetRegisterClass *RC) {
468   MachineRegisterInfo &MRI = getRegInfo();
469   unsigned VReg = MRI.getLiveInVirtReg(PReg);
470   if (VReg) {
471     const TargetRegisterClass *VRegRC = MRI.getRegClass(VReg);
472     (void)VRegRC;
473     // A physical register can be added several times.
474     // Between two calls, the register class of the related virtual register
475     // may have been constrained to match some operation constraints.
476     // In that case, check that the current register class includes the
477     // physical register and is a sub class of the specified RC.
478     assert((VRegRC == RC || (VRegRC->contains(PReg) &&
479                              RC->hasSubClassEq(VRegRC))) &&
480             "Register class mismatch!");
481     return VReg;
482   }
483   VReg = MRI.createVirtualRegister(RC);
484   MRI.addLiveIn(PReg, VReg);
485   return VReg;
486 }
487 
488 /// Return the MCSymbol for the specified non-empty jump table.
489 /// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
490 /// normal 'L' label is returned.
491 MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
492                                         bool isLinkerPrivate) const {
493   const DataLayout &DL = getDataLayout();
494   assert(JumpTableInfo && "No jump tables");
495   assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");
496 
497   const char *Prefix = isLinkerPrivate ? DL.getLinkerPrivateGlobalPrefix()
498                                        : DL.getPrivateGlobalPrefix();
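  // The generated name has the form "<prefix>JTI<function#>_<JTI>", e.g.
  // ".LJTI7_3" with a typical ELF private prefix (illustrative values only).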
499   SmallString<60> Name;
500   raw_svector_ostream(Name)
501     << Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
502   return Ctx.getOrCreateSymbol(Name);
503 }
504 
505 /// Return a function-local symbol to represent the PIC base.
506 MCSymbol *MachineFunction::getPICBaseSymbol() const {
507   const DataLayout &DL = getDataLayout();
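  // Yields e.g. ".L7$pb" for function number 7 (illustrative; the exact prefix
  // comes from the DataLayout's private global prefix).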
508   return Ctx.getOrCreateSymbol(Twine(DL.getPrivateGlobalPrefix()) +
509                                Twine(getFunctionNumber()) + "$pb");
510 }
511 
512 //===----------------------------------------------------------------------===//
513 //  MachineFrameInfo implementation
514 //===----------------------------------------------------------------------===//
515 
516 /// Make sure the function is at least Align bytes aligned.
517 void MachineFrameInfo::ensureMaxAlignment(unsigned Align) {
518   if (!StackRealignable || !RealignOption)
519     assert(Align <= StackAlignment &&
520            "For targets without stack realignment, Align is out of limit!");
521   if (MaxAlignment < Align) MaxAlignment = Align;
522 }
523 
524 /// Clamp the alignment if requested and emit a warning.
525 static inline unsigned clampStackAlignment(bool ShouldClamp, unsigned Align,
526                                            unsigned StackAlign) {
527   if (!ShouldClamp || Align <= StackAlign)
528     return Align;
529   DEBUG(dbgs() << "Warning: requested alignment " << Align
530                << " exceeds the stack alignment " << StackAlign
531                << " when stack realignment is off" << '\n');
532   return StackAlign;
533 }
534 
535 /// Create a new statically sized stack object, returning a nonnegative
536 /// identifier to represent it.
537 int MachineFrameInfo::CreateStackObject(uint64_t Size, unsigned Alignment,
538                       bool isSS, const AllocaInst *Alloca) {
539   assert(Size != 0 && "Cannot allocate zero size stack objects!");
540   Alignment = clampStackAlignment(!StackRealignable || !RealignOption,
541                                   Alignment, StackAlignment);
542   Objects.push_back(StackObject(Size, Alignment, 0, false, isSS, Alloca,
543                                 !isSS));
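  // Fixed objects sit at the front of Objects and map to negative frame
  // indices, so they are excluded when computing this object's index.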
544   int Index = (int)Objects.size() - NumFixedObjects - 1;
545   assert(Index >= 0 && "Bad frame index!");
546   ensureMaxAlignment(Alignment);
547   return Index;
548 }
549 
550 /// Create a new statically sized stack object that represents a spill slot,
551 /// returning a nonnegative identifier to represent it.
552 int MachineFrameInfo::CreateSpillStackObject(uint64_t Size,
553                                              unsigned Alignment) {
554   Alignment = clampStackAlignment(!StackRealignable || !RealignOption,
555                                   Alignment, StackAlignment);
556   CreateStackObject(Size, Alignment, true);
557   int Index = (int)Objects.size() - NumFixedObjects - 1;
558   ensureMaxAlignment(Alignment);
559   return Index;
560 }
561 
562 /// Notify the MachineFrameInfo object that a variable sized object has been
563 /// created. This must be called whenever a variable sized object is created,
564 /// whether or not the index returned is actually used.
565 int MachineFrameInfo::CreateVariableSizedObject(unsigned Alignment,
566                                                 const AllocaInst *Alloca) {
567   HasVarSizedObjects = true;
568   Alignment = clampStackAlignment(!StackRealignable || !RealignOption,
569                                   Alignment, StackAlignment);
570   Objects.push_back(StackObject(0, Alignment, 0, false, false, Alloca, true));
571   ensureMaxAlignment(Alignment);
572   return (int)Objects.size()-NumFixedObjects-1;
573 }
574 
575 /// Create a new object at a fixed location on the stack.
576 /// All fixed objects should be created before other objects are created for
577 /// efficiency. By default, fixed objects are immutable. This returns an
578 /// index with a negative value.
579 int MachineFrameInfo::CreateFixedObject(uint64_t Size, int64_t SPOffset,
580                                         bool Immutable, bool isAliased) {
581   assert(Size != 0 && "Cannot allocate zero size fixed stack objects!");
582   // The alignment of the frame index can be determined from its offset from
583   // the incoming frame position.  If the frame object is at offset 32 and
584   // the stack is guaranteed to be 16-byte aligned, then we know that the
585   // object is 16-byte aligned.
586   unsigned Align = MinAlign(SPOffset, StackAlignment);
587   Align = clampStackAlignment(!StackRealignable || !RealignOption, Align,
588                               StackAlignment);
589   Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset, Immutable,
590                                               /*isSS*/   false,
591                                               /*Alloca*/ nullptr, isAliased));
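  // Fixed objects are addressed with negative frame indices: the first is -1,
  // the next -2, and so on.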
592   return -++NumFixedObjects;
593 }
594 
595 /// Create a spill slot at a fixed location on the stack.
596 /// Returns an index with a negative value.
597 int MachineFrameInfo::CreateFixedSpillStackObject(uint64_t Size,
598                                                   int64_t SPOffset) {
599   unsigned Align = MinAlign(SPOffset, StackAlignment);
600   Align = clampStackAlignment(!StackRealignable || !RealignOption, Align,
601                               StackAlignment);
602   Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset,
603                                               /*Immutable*/ true,
604                                               /*isSS*/ true,
605                                               /*Alloca*/ nullptr,
606                                               /*isAliased*/ false));
607   return -++NumFixedObjects;
608 }
609 
610 BitVector MachineFrameInfo::getPristineRegs(const MachineFunction &MF) const {
611   const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
612   BitVector BV(TRI->getNumRegs());
613 
614   // Before CSI is calculated, no registers are considered pristine. They can be
615   // freely used and PEI will make sure they are saved.
616   if (!isCalleeSavedInfoValid())
617     return BV;
618 
619   for (const MCPhysReg *CSR = TRI->getCalleeSavedRegs(&MF); CSR && *CSR; ++CSR)
620     BV.set(*CSR);
621 
622   // Saved CSRs are not pristine.
623   for (auto &I : getCalleeSavedInfo())
624     for (MCSubRegIterator S(I.getReg(), TRI, true); S.isValid(); ++S)
625       BV.reset(*S);
626 
627   return BV;
628 }
629 
630 unsigned MachineFrameInfo::estimateStackSize(const MachineFunction &MF) const {
631   const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
632   const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
633   unsigned MaxAlign = getMaxAlignment();
634   int Offset = 0;
635 
636   // This code is very, very similar to PEI::calculateFrameObjectOffsets().
637   // It really should be refactored to share code. Until then, changes
638   // should keep in mind that there's tight coupling between the two.
639 
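  // Fixed objects below the incoming stack pointer have negative offsets; the
  // deepest such object determines the starting Offset.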
640   for (int i = getObjectIndexBegin(); i != 0; ++i) {
641     int FixedOff = -getObjectOffset(i);
642     if (FixedOff > Offset) Offset = FixedOff;
643   }
644   for (unsigned i = 0, e = getObjectIndexEnd(); i != e; ++i) {
645     if (isDeadObjectIndex(i))
646       continue;
647     Offset += getObjectSize(i);
648     unsigned Align = getObjectAlignment(i);
649     // Adjust to alignment boundary
650     Offset = (Offset+Align-1)/Align*Align;
651 
652     MaxAlign = std::max(Align, MaxAlign);
653   }
654 
655   if (adjustsStack() && TFI->hasReservedCallFrame(MF))
656     Offset += getMaxCallFrameSize();
657 
658   // Round up the size to a multiple of the alignment.  If the function has
659   // any calls or alloca's, align to the target's StackAlignment value to
660   // ensure that the callee's frame or the alloca data is suitably aligned;
661   // otherwise, for leaf functions, align to the TransientStackAlignment
662   // value.
663   unsigned StackAlign;
664   if (adjustsStack() || hasVarSizedObjects() ||
665       (RegInfo->needsStackRealignment(MF) && getObjectIndexEnd() != 0))
666     StackAlign = TFI->getStackAlignment();
667   else
668     StackAlign = TFI->getTransientStackAlignment();
669 
670   // If the frame pointer is eliminated, all frame offsets will be relative to
671   // SP not FP. Align to MaxAlign so this works.
672   StackAlign = std::max(StackAlign, MaxAlign);
673   unsigned AlignMask = StackAlign - 1;
674   Offset = (Offset + AlignMask) & ~uint64_t(AlignMask);
675 
676   return (unsigned)Offset;
677 }
678 
679 void MachineFrameInfo::print(const MachineFunction &MF, raw_ostream &OS) const{
680   if (Objects.empty()) return;
681 
682   const TargetFrameLowering *FI = MF.getSubtarget().getFrameLowering();
683   int ValOffset = (FI ? FI->getOffsetOfLocalArea() : 0);
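  // Object offsets are printed relative to SP, adjusted by the target's
  // local-area offset.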
684 
685   OS << "Frame Objects:\n";
686 
687   for (unsigned i = 0, e = Objects.size(); i != e; ++i) {
688     const StackObject &SO = Objects[i];
689     OS << "  fi#" << (int)(i-NumFixedObjects) << ": ";
690     if (SO.Size == ~0ULL) {
691       OS << "dead\n";
692       continue;
693     }
694     if (SO.Size == 0)
695       OS << "variable sized";
696     else
697       OS << "size=" << SO.Size;
698     OS << ", align=" << SO.Alignment;
699 
700     if (i < NumFixedObjects)
701       OS << ", fixed";
702     if (i < NumFixedObjects || SO.SPOffset != -1) {
703       int64_t Off = SO.SPOffset - ValOffset;
704       OS << ", at location [SP";
705       if (Off > 0)
706         OS << "+" << Off;
707       else if (Off < 0)
708         OS << Off;
709       OS << "]";
710     }
711     OS << "\n";
712   }
713 }
714 
715 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
716 void MachineFrameInfo::dump(const MachineFunction &MF) const {
717   print(MF, dbgs());
718 }
719 #endif
720 
721 //===----------------------------------------------------------------------===//
722 //  MachineJumpTableInfo implementation
723 //===----------------------------------------------------------------------===//
724 
725 /// Return the size of each entry in the jump table.
726 unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
727   // The size of a jump table entry is 4 bytes unless the entry is just the
728   // address of a block, in which case it is the pointer size.
729   switch (getEntryKind()) {
730   case MachineJumpTableInfo::EK_BlockAddress:
731     return TD.getPointerSize();
732   case MachineJumpTableInfo::EK_GPRel64BlockAddress:
733     return 8;
734   case MachineJumpTableInfo::EK_GPRel32BlockAddress:
735   case MachineJumpTableInfo::EK_LabelDifference32:
736   case MachineJumpTableInfo::EK_Custom32:
737     return 4;
738   case MachineJumpTableInfo::EK_Inline:
739     return 0;
740   }
741   llvm_unreachable("Unknown jump table encoding!");
742 }
743 
744 /// Return the alignment of each entry in the jump table.
745 unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
746   // The alignment of a jump table entry is the alignment of int32 unless the
747   // entry is just the address of a block, in which case it is the pointer
748   // alignment.
749   switch (getEntryKind()) {
750   case MachineJumpTableInfo::EK_BlockAddress:
751     return TD.getPointerABIAlignment();
752   case MachineJumpTableInfo::EK_GPRel64BlockAddress:
753     return TD.getABIIntegerTypeAlignment(64);
754   case MachineJumpTableInfo::EK_GPRel32BlockAddress:
755   case MachineJumpTableInfo::EK_LabelDifference32:
756   case MachineJumpTableInfo::EK_Custom32:
757     return TD.getABIIntegerTypeAlignment(32);
758   case MachineJumpTableInfo::EK_Inline:
759     return 1;
760   }
761   llvm_unreachable("Unknown jump table encoding!");
762 }
763 
764 /// Create a new jump table entry in the jump table info.
765 unsigned MachineJumpTableInfo::createJumpTableIndex(
766                                const std::vector<MachineBasicBlock*> &DestBBs) {
767   assert(!DestBBs.empty() && "Cannot create an empty jump table!");
768   JumpTables.push_back(MachineJumpTableEntry(DestBBs));
769   return JumpTables.size()-1;
770 }
771 
772 /// If Old is the target of any jump tables, update the jump tables to branch
773 /// to New instead.
774 bool MachineJumpTableInfo::ReplaceMBBInJumpTables(MachineBasicBlock *Old,
775                                                   MachineBasicBlock *New) {
776   assert(Old != New && "Not making a change?");
777   bool MadeChange = false;
778   for (size_t i = 0, e = JumpTables.size(); i != e; ++i)
779     MadeChange |= ReplaceMBBInJumpTable(i, Old, New);
780   return MadeChange;
781 }
782 
783 /// If Old is a target of the jump tables, update the jump table to branch to
784 /// New instead.
785 bool MachineJumpTableInfo::ReplaceMBBInJumpTable(unsigned Idx,
786                                                  MachineBasicBlock *Old,
787                                                  MachineBasicBlock *New) {
788   assert(Old != New && "Not making a change?");
789   bool MadeChange = false;
790   MachineJumpTableEntry &JTE = JumpTables[Idx];
791   for (size_t j = 0, e = JTE.MBBs.size(); j != e; ++j)
792     if (JTE.MBBs[j] == Old) {
793       JTE.MBBs[j] = New;
794       MadeChange = true;
795     }
796   return MadeChange;
797 }
798 
799 void MachineJumpTableInfo::print(raw_ostream &OS) const {
800   if (JumpTables.empty()) return;
801 
802   OS << "Jump Tables:\n";
803 
804   for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) {
805     OS << "  jt#" << i << ": ";
806     for (unsigned j = 0, f = JumpTables[i].MBBs.size(); j != f; ++j)
807       OS << " BB#" << JumpTables[i].MBBs[j]->getNumber();
808   }
809 
810   OS << '\n';
811 }
812 
813 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
814 void MachineJumpTableInfo::dump() const { print(dbgs()); }
815 #endif
816 
817 
818 //===----------------------------------------------------------------------===//
819 //  MachineConstantPool implementation
820 //===----------------------------------------------------------------------===//
821 
822 void MachineConstantPoolValue::anchor() { }
823 
824 Type *MachineConstantPoolEntry::getType() const {
825   if (isMachineConstantPoolEntry())
826     return Val.MachineCPVal->getType();
827   return Val.ConstVal->getType();
828 }
829 
830 bool MachineConstantPoolEntry::needsRelocation() const {
831   if (isMachineConstantPoolEntry())
832     return true;
833   return Val.ConstVal->needsRelocation();
834 }
835 
836 SectionKind
837 MachineConstantPoolEntry::getSectionKind(const DataLayout *DL) const {
838   if (needsRelocation())
839     return SectionKind::getReadOnlyWithRel();
840   switch (DL->getTypeAllocSize(getType())) {
841   case 4:
842     return SectionKind::getMergeableConst4();
843   case 8:
844     return SectionKind::getMergeableConst8();
845   case 16:
846     return SectionKind::getMergeableConst16();
847   default:
848     return SectionKind::getReadOnly();
849   }
850 }
851 
852 MachineConstantPool::~MachineConstantPool() {
853   for (unsigned i = 0, e = Constants.size(); i != e; ++i)
854     if (Constants[i].isMachineConstantPoolEntry())
855       delete Constants[i].Val.MachineCPVal;
856   for (DenseSet<MachineConstantPoolValue*>::iterator I =
857        MachineCPVsSharingEntries.begin(), E = MachineCPVsSharingEntries.end();
858        I != E; ++I)
859     delete *I;
860 }
861 
862 /// Test whether the given two constants can be allocated the same constant pool
863 /// entry.
864 static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
865                                       const DataLayout &DL) {
866   // Handle the trivial case quickly.
867   if (A == B) return true;
868 
869   // If they have the same type but weren't the same constant, quickly
870   // reject them.
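  // (LLVM uniques Constants, so two distinct Constant pointers of the same
  // type cannot denote the same value.)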
871   if (A->getType() == B->getType()) return false;
872 
873   // We can't handle structs or arrays.
874   if (isa<StructType>(A->getType()) || isa<ArrayType>(A->getType()) ||
875       isa<StructType>(B->getType()) || isa<ArrayType>(B->getType()))
876     return false;
877 
878   // For now, only support constants with the same size.
879   uint64_t StoreSize = DL.getTypeStoreSize(A->getType());
880   if (StoreSize != DL.getTypeStoreSize(B->getType()) || StoreSize > 128)
881     return false;
882 
883   Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);
884 
885   // Try constant folding a bitcast of both instructions to an integer.  If we
886   // get two identical ConstantInt's, then we are good to share them.  We use
887   // the constant folding APIs to do this so that we get the benefit of
888   // DataLayout.
889   if (isa<PointerType>(A->getType()))
890     A = ConstantFoldInstOperands(Instruction::PtrToInt, IntTy,
891                                  const_cast<Constant *>(A), DL);
892   else if (A->getType() != IntTy)
893     A = ConstantFoldInstOperands(Instruction::BitCast, IntTy,
894                                  const_cast<Constant *>(A), DL);
895   if (isa<PointerType>(B->getType()))
896     B = ConstantFoldInstOperands(Instruction::PtrToInt, IntTy,
897                                  const_cast<Constant *>(B), DL);
898   else if (B->getType() != IntTy)
899     B = ConstantFoldInstOperands(Instruction::BitCast, IntTy,
900                                  const_cast<Constant *>(B), DL);
901 
902   return A == B;
903 }
904 
905 /// Create a new entry in the constant pool or return an existing one.
906 /// The user must specify the minimum required alignment (in bytes) for the object.
907 unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
908                                                    unsigned Alignment) {
909   assert(Alignment && "Alignment must be specified!");
910   if (Alignment > PoolAlignment) PoolAlignment = Alignment;
911 
912   // Check to see if we already have this constant.
913   //
914   // FIXME, this could be made much more efficient for large constant pools.
915   for (unsigned i = 0, e = Constants.size(); i != e; ++i)
916     if (!Constants[i].isMachineConstantPoolEntry() &&
917         CanShareConstantPoolEntry(Constants[i].Val.ConstVal, C, DL)) {
918       if ((unsigned)Constants[i].getAlignment() < Alignment)
919         Constants[i].Alignment = Alignment;
920       return i;
921     }
922 
923   Constants.push_back(MachineConstantPoolEntry(C, Alignment));
924   return Constants.size()-1;
925 }
926 
927 unsigned MachineConstantPool::getConstantPoolIndex(MachineConstantPoolValue *V,
928                                                    unsigned Alignment) {
929   assert(Alignment && "Alignment must be specified!");
930   if (Alignment > PoolAlignment) PoolAlignment = Alignment;
931 
932   // Check to see if we already have this constant.
933   //
934   // FIXME, this could be made much more efficient for large constant pools.
935   int Idx = V->getExistingMachineCPValue(this, Alignment);
936   if (Idx != -1) {
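    // Remember V so ~MachineConstantPool deletes it even though it does not
    // own a constant pool slot of its own.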
937     MachineCPVsSharingEntries.insert(V);
938     return (unsigned)Idx;
939   }
940 
941   Constants.push_back(MachineConstantPoolEntry(V, Alignment));
942   return Constants.size()-1;
943 }
944 
945 void MachineConstantPool::print(raw_ostream &OS) const {
946   if (Constants.empty()) return;
947 
948   OS << "Constant Pool:\n";
949   for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
950     OS << "  cp#" << i << ": ";
951     if (Constants[i].isMachineConstantPoolEntry())
952       Constants[i].Val.MachineCPVal->print(OS);
953     else
954       Constants[i].Val.ConstVal->printAsOperand(OS, /*PrintType=*/false);
955     OS << ", align=" << Constants[i].getAlignment();
956     OS << "\n";
957   }
958 }
959 
960 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
961 void MachineConstantPool::dump() const { print(dbgs()); }
962 #endif
963