//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/InlineCost.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "inline-cost"

STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");

namespace {

class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
  typedef InstVisitor<CallAnalyzer, bool> Base;
  friend class InstVisitor<CallAnalyzer, bool>;

  /// The TargetTransformInfo available for this compilation.
  const TargetTransformInfo &TTI;

  /// The cache of @llvm.assume intrinsics.
  AssumptionCacheTracker *ACT;

  /// The called function.
  Function &F;

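  /// The candidate threshold and the running cost for this call site. The
  /// analysis aborts as soon as Cost exceeds Threshold (plus any earned
  /// vector bonus).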
  int Threshold;
  int Cost;

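  // Flags recording constructs which either abort the analysis outright or
  // otherwise penalize inlining; analyzeBlock checks them after every
  // instruction visited.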
  bool IsCallerRecursive;
  bool IsRecursiveCall;
  bool ExposesReturnsTwice;
  bool HasDynamicAlloca;
  bool ContainsNoDuplicateCall;
  bool HasReturn;
  bool HasIndirectBr;
  bool HasFrameEscape;

  /// Number of bytes allocated statically by the callee.
  uint64_t AllocatedSize;
  unsigned NumInstructions, NumVectorInstructions;
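  // Candidate threshold bonuses for vector-dense callees; VectorBonus holds
  // whichever of the two (or zero) the running vector-instruction density
  // currently earns. See analyzeBlock.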
  int FiftyPercentVectorBonus, TenPercentVectorBonus;
  int VectorBonus;

  // While we walk the potentially-inlined instructions, we build up and
  // maintain a mapping of simplified values specific to this callsite. The
  // idea is to propagate any special information we have about arguments to
  // this call through the inlinable section of the function, and account for
  // likely simplifications post-inlining. The most important aspect we track
  // is CFG altering simplifications -- when we prove a basic block dead, that
  // can cause dramatic shifts in the cost of inlining a function.
  DenseMap<Value *, Constant *> SimplifiedValues;

  // Keep track of the values which map back (through function arguments) to
  // allocas on the caller stack which could be simplified through SROA.
  DenseMap<Value *, Value *> SROAArgValues;

  // The mapping of caller Alloca values to their accumulated cost savings. If
  // we have to disable SROA for one of the allocas, this tells us how much
  // cost must be added.
  DenseMap<Value *, int> SROAArgCosts;

  // Keep track of values which map to a pointer base and constant offset.
  DenseMap<Value *, std::pair<Value *, APInt> > ConstantOffsetPtrs;

  // Custom simplification helper routines.
  bool isAllocaDerivedArg(Value *V);
  bool lookupSROAArgAndCost(Value *V, Value *&Arg,
                            DenseMap<Value *, int>::iterator &CostIt);
  void disableSROA(DenseMap<Value *, int>::iterator CostIt);
  void disableSROA(Value *V);
  void accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                          int InstructionCost);
  bool isGEPOffsetConstant(GetElementPtrInst &GEP);
  bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
  bool simplifyCallSite(Function *F, CallSite CS);
  ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);

  // Custom analysis routines.
  bool analyzeBlock(BasicBlock *BB, SmallPtrSetImpl<const Value *> &EphValues);

  // Disable several entry points to the visitor so we don't accidentally use
  // them by declaring but not defining them here.
  void visit(Module *);     void visit(Module &);
  void visit(Function *);   void visit(Function &);
  void visit(BasicBlock *); void visit(BasicBlock &);

  // Provide base case for our instruction visit.
  bool visitInstruction(Instruction &I);

  // Our visit overrides.
  bool visitAlloca(AllocaInst &I);
  bool visitPHI(PHINode &I);
  bool visitGetElementPtr(GetElementPtrInst &I);
  bool visitBitCast(BitCastInst &I);
  bool visitPtrToInt(PtrToIntInst &I);
  bool visitIntToPtr(IntToPtrInst &I);
  bool visitCastInst(CastInst &I);
  bool visitUnaryInstruction(UnaryInstruction &I);
  bool visitCmpInst(CmpInst &I);
  bool visitSub(BinaryOperator &I);
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitLoad(LoadInst &I);
  bool visitStore(StoreInst &I);
  bool visitExtractValue(ExtractValueInst &I);
  bool visitInsertValue(InsertValueInst &I);
  bool visitCallSite(CallSite CS);
  bool visitReturnInst(ReturnInst &RI);
  bool visitBranchInst(BranchInst &BI);
  bool visitSwitchInst(SwitchInst &SI);
  bool visitIndirectBrInst(IndirectBrInst &IBI);
  bool visitResumeInst(ResumeInst &RI);
  bool visitUnreachableInst(UnreachableInst &I);

public:
  CallAnalyzer(const TargetTransformInfo &TTI, AssumptionCacheTracker *ACT,
               Function &Callee, int Threshold)
      : TTI(TTI), ACT(ACT), F(Callee), Threshold(Threshold), Cost(0),
        IsCallerRecursive(false), IsRecursiveCall(false),
        ExposesReturnsTwice(false), HasDynamicAlloca(false),
        ContainsNoDuplicateCall(false), HasReturn(false), HasIndirectBr(false),
        HasFrameEscape(false), AllocatedSize(0), NumInstructions(0),
        NumVectorInstructions(0), FiftyPercentVectorBonus(0),
        TenPercentVectorBonus(0), VectorBonus(0), NumConstantArgs(0),
        NumConstantOffsetPtrArgs(0), NumAllocaArgs(0), NumConstantPtrCmps(0),
        NumConstantPtrDiffs(0), NumInstructionsSimplified(0),
        SROACostSavings(0), SROACostSavingsLost(0) {}

  bool analyzeCall(CallSite CS);

  int getThreshold() { return Threshold; }
  int getCost() { return Cost; }

  // Keep a bunch of stats about the cost savings found so we can print them
  // out when debugging.
  unsigned NumConstantArgs;
  unsigned NumConstantOffsetPtrArgs;
  unsigned NumAllocaArgs;
  unsigned NumConstantPtrCmps;
  unsigned NumConstantPtrDiffs;
  unsigned NumInstructionsSimplified;
  unsigned SROACostSavings;
  unsigned SROACostSavingsLost;

  void dump();
};

} // namespace

/// \brief Test whether the given value is an Alloca-derived function argument.
bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
  return SROAArgValues.count(V);
}

/// \brief Look up the SROA-candidate argument and cost iterator which V maps
/// to. Returns false if V does not map to a SROA candidate.
bool CallAnalyzer::lookupSROAArgAndCost(
    Value *V, Value *&Arg, DenseMap<Value *, int>::iterator &CostIt) {
  if (SROAArgValues.empty() || SROAArgCosts.empty())
    return false;

  DenseMap<Value *, Value *>::iterator ArgIt = SROAArgValues.find(V);
  if (ArgIt == SROAArgValues.end())
    return false;

  Arg = ArgIt->second;
  CostIt = SROAArgCosts.find(Arg);
  return CostIt != SROAArgCosts.end();
}

/// \brief Disable SROA for the candidate marked by this cost iterator.
///
/// This marks the candidate as no longer viable for SROA, and adds the cost
/// savings associated with it back into the inline cost measurement.
void CallAnalyzer::disableSROA(DenseMap<Value *, int>::iterator CostIt) {
  // If we're no longer able to perform SROA we need to undo its cost savings
  // and prevent subsequent analysis.
  Cost += CostIt->second;
  SROACostSavings -= CostIt->second;
  SROACostSavingsLost += CostIt->second;
  SROAArgCosts.erase(CostIt);
}

/// \brief If 'V' maps to a SROA candidate, disable SROA for it.
void CallAnalyzer::disableSROA(Value *V) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(V, SROAArg, CostIt))
    disableSROA(CostIt);
}

/// \brief Accumulate the given cost for a particular SROA candidate.
void CallAnalyzer::accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                                      int InstructionCost) {
  CostIt->second += InstructionCost;
  SROACostSavings += InstructionCost;
}

/// \brief Check whether a GEP's indices are all constant.
///
/// Respects any simplified values known during the analysis of this callsite.
bool CallAnalyzer::isGEPOffsetConstant(GetElementPtrInst &GEP) {
  for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
    if (!isa<Constant>(*I) && !SimplifiedValues.lookup(*I))
      return false;

  return true;
}

/// \brief Accumulate a constant GEP offset into an APInt if possible.
///
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
  const DataLayout &DL = F.getParent()->getDataLayout();
  unsigned IntPtrWidth = DL.getPointerSizeInBits();
  assert(IntPtrWidth == Offset.getBitWidth());

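  // Walk the indexed types: struct fields contribute their layout offset
  // directly, while array/vector indices are scaled by the allocation size
  // of the indexed type.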
  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
        OpC = dyn_cast<ConstantInt>(SimpleOp);
    if (!OpC)
      return false;
    if (OpC->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = DL.getStructLayout(STy);
      Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
      continue;
    }

    APInt TypeSize(IntPtrWidth, DL.getTypeAllocSize(GTI.getIndexedType()));
    Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
  }
  return true;
}

bool CallAnalyzer::visitAlloca(AllocaInst &I) {
  const DataLayout &DL = F.getParent()->getDataLayout();

  // Check whether inlining will turn a dynamic alloca into a static
  // alloca, and handle that case.
  if (I.isArrayAllocation()) {
    if (Constant *Size = SimplifiedValues.lookup(I.getArraySize())) {
      ConstantInt *AllocSize = dyn_cast<ConstantInt>(Size);
      assert(AllocSize && "Allocation size not a constant int?");
      Type *Ty = I.getAllocatedType();
      // AllocatedSize is tracked in bytes, so scale by the DataLayout
      // allocation size rather than the primitive size in bits.
      AllocatedSize += DL.getTypeAllocSize(Ty) * AllocSize->getZExtValue();
      return Base::visitAlloca(I);
    }
  }

  // Accumulate the allocated size.
  if (I.isStaticAlloca()) {
    Type *Ty = I.getAllocatedType();
    AllocatedSize += DL.getTypeAllocSize(Ty);
  }

  // We will happily inline static alloca instructions.
  if (I.isStaticAlloca())
    return Base::visitAlloca(I);

  // FIXME: This is overly conservative. Dynamic allocas are inefficient for
  // a variety of reasons, and so we would like to not inline them into
  // functions which don't currently have a dynamic alloca. This simply
  // disables inlining altogether in the presence of a dynamic alloca.
  HasDynamicAlloca = true;
  return false;
}

bool CallAnalyzer::visitPHI(PHINode &I) {
  // FIXME: We should potentially be tracking values through phi nodes,
  // especially when they collapse to a single value due to deleted CFG edges
  // during inlining.

  // FIXME: We need to propagate SROA *disabling* through phi nodes, even
  // though we don't want to propagate its bonuses. The idea is to disable
  // SROA if it *might* be used in an inappropriate manner.

  // Phi nodes are always zero-cost.
  return true;
}

bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  bool SROACandidate = lookupSROAArgAndCost(I.getPointerOperand(),
                                            SROAArg, CostIt);

  // Try to fold GEPs of constant-offset call site argument pointers. This
  // requires target data and inbounds GEPs.
  if (I.isInBounds()) {
    // Check if we have a base + offset for the pointer.
    Value *Ptr = I.getPointerOperand();
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Ptr);
    if (BaseAndOffset.first) {
      // Check if the offset of this GEP is constant, and if so accumulate it
      // into Offset.
      if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second)) {
        // Non-constant GEPs aren't folded, and they disable SROA.
        if (SROACandidate)
          disableSROA(CostIt);
        return false;
      }

      // Add the result as a new mapping to Base + Offset.
      ConstantOffsetPtrs[&I] = BaseAndOffset;

      // Also handle SROA candidates here, as we already know that the GEP is
      // all-constant indexed.
      if (SROACandidate)
        SROAArgValues[&I] = SROAArg;

      return true;
    }
  }

  if (isGEPOffsetConstant(I)) {
    if (SROACandidate)
      SROAArgValues[&I] = SROAArg;

    // Constant GEPs are modeled as free.
    return true;
  }

  // Variable GEPs will require math and will disable SROA.
  if (SROACandidate)
    disableSROA(CostIt);
  return false;
}

bool CallAnalyzer::visitBitCast(BitCastInst &I) {
  // Propagate constants through bitcasts.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getBitCast(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offsets through casts.
  std::pair<Value *, APInt> BaseAndOffset
    = ConstantOffsetPtrs.lookup(I.getOperand(0));
  // Casts don't change the offset, just wrap it up.
  if (BaseAndOffset.first)
    ConstantOffsetPtrs[&I] = BaseAndOffset;

  // Also look for SROA candidates here.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  // Bitcasts are always zero cost.
  return true;
}

bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
  // Propagate constants through ptrtoint.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getPtrToInt(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when converted to a plain integer provided the
  // integer is large enough to represent the pointer.
  unsigned IntegerSize = I.getType()->getScalarSizeInBits();
  const DataLayout &DL = F.getParent()->getDataLayout();
  if (IntegerSize >= DL.getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset
      = ConstantOffsetPtrs.lookup(I.getOperand(0));
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // This is really weird. Technically, ptrtoint will disable SROA. However,
  // unless that ptrtoint is *used* somewhere in the live basic blocks after
  // inlining, it will be nuked, and SROA should proceed. All of the uses which
  // would block SROA would also block SROA if applied directly to a pointer,
  // and so we can just add the integer in here. The only places where SROA is
  // preserved either cannot fire on an integer, or won't in-and-of themselves
  // disable SROA (ext) w/o some later use that we would see and disable.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

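  // The cast is modeled as consumed (free) only when the target reports a
  // free user cost for it; otherwise its base cost counts against inlining.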
  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}

bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
  // Propagate constants through inttoptr.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getIntToPtr(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when round-tripped through a pointer without
  // modifications provided the integer is not too large.
  Value *Op = I.getOperand(0);
  unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
  const DataLayout &DL = F.getParent()->getDataLayout();
  if (IntegerSize <= DL.getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // "Propagate" SROA here in the same manner as we do for ptrtoint above.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(Op, SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}

bool CallAnalyzer::visitCastInst(CastInst &I) {
  // Propagate constants through casts.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getCast(I.getOpcode(), COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Disable SROA in the face of arbitrary casts we don't whitelist elsewhere.
  disableSROA(I.getOperand(0));

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}

bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
  Value *Operand = I.getOperand(0);
  Constant *COp = dyn_cast<Constant>(Operand);
  if (!COp)
    COp = SimplifiedValues.lookup(Operand);
  if (COp) {
    const DataLayout &DL = F.getParent()->getDataLayout();
    if (Constant *C = ConstantFoldInstOperands(I.getOpcode(), I.getType(),
                                               COp, DL)) {
      SimplifiedValues[&I] = C;
      return true;
    }
  }

  // Disable any SROA on the argument to arbitrary unary operators.
  disableSROA(Operand);

  return false;
}

bool CallAnalyzer::visitCmpInst(CmpInst &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  // First try to handle simplified comparisons.
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      if (Constant *C = ConstantExpr::getCompare(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        return true;
      }
  }

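  // The pointer-offset folding below builds integer icmp constants, so it
  // does not apply to floating point comparisons.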
  if (I.getOpcode() == Instruction::FCmp)
    return false;

  // Otherwise look for a comparison between constant offset pointers with
  // a common base.
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the icmp to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrCmps;
        return true;
      }
    }
  }

  // If the comparison is an equality comparison with null, we can simplify it
  // for any alloca-derived argument.
  if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)))
    if (isAllocaDerivedArg(I.getOperand(0))) {
      // We can actually predict the result of comparisons between an
      // alloca-derived value and null. Note that this fires regardless of
      // SROA firing.
      bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
      SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
                                        : ConstantInt::getFalse(I.getType());
      return true;
    }

  // Finally check for SROA candidates in comparisons.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (isa<ConstantPointerNull>(I.getOperand(1))) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitSub(BinaryOperator &I) {
  // Try to handle a special case: we can fold computing the difference of two
  // constant-related pointers.
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the subtract to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrDiffs;
        return true;
      }
    }
  }

  // Otherwise, fall back to the generic logic for simplifying and handling
  // instructions.
  return Base::visitSub(I);
}

bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  const DataLayout &DL = F.getParent()->getDataLayout();
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
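  // Floating point operators carry fast-math flags which the simplifier must
  // respect, so dispatch those to the FP-aware entry point.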
  Value *SimpleV = nullptr;
  if (auto FI = dyn_cast<FPMathOperator>(&I))
    SimpleV =
        SimplifyFPBinOp(I.getOpcode(), LHS, RHS, FI->getFastMathFlags(), DL);
  else
    SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, DL);

  if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) {
    SimplifiedValues[&I] = C;
    return true;
  }

  // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
  disableSROA(LHS);
  disableSROA(RHS);

  return false;
}

bool CallAnalyzer::visitLoad(LoadInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getPointerOperand(), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitStore(StoreInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getPointerOperand(), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitExtractValue(ExtractValueInst &I) {
  // Constant folding for extract value is trivial.
  Constant *C = dyn_cast<Constant>(I.getAggregateOperand());
  if (!C)
    C = SimplifiedValues.lookup(I.getAggregateOperand());
  if (C) {
    SimplifiedValues[&I] = ConstantExpr::getExtractValue(C, I.getIndices());
    return true;
  }

  // SROA can look through these but give them a cost.
  return false;
}

bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
  // Constant folding for insert value is trivial.
  Constant *AggC = dyn_cast<Constant>(I.getAggregateOperand());
  if (!AggC)
    AggC = SimplifiedValues.lookup(I.getAggregateOperand());
  Constant *InsertedC = dyn_cast<Constant>(I.getInsertedValueOperand());
  if (!InsertedC)
    InsertedC = SimplifiedValues.lookup(I.getInsertedValueOperand());
  if (AggC && InsertedC) {
    SimplifiedValues[&I] = ConstantExpr::getInsertValue(AggC, InsertedC,
                                                        I.getIndices());
    return true;
  }

  // SROA can look through these but give them a cost.
  return false;
}

/// \brief Try to simplify a call site.
///
/// Takes a concrete function and callsite and tries to actually simplify it by
/// analyzing the arguments and call itself with instsimplify. Returns true if
/// it has simplified the callsite to some other entity (a constant), making it
/// free.
bool CallAnalyzer::simplifyCallSite(Function *F, CallSite CS) {
  // FIXME: Using the instsimplify logic directly for this is inefficient
  // because we have to continually rebuild the argument list even when no
  // simplifications can be performed. Until that is fixed with remapping
  // inside of instsimplify, directly constant fold calls here.
  if (!canConstantFoldCallTo(F))
    return false;

  // Try to re-map the arguments to constants.
  SmallVector<Constant *, 4> ConstantArgs;
  ConstantArgs.reserve(CS.arg_size());
  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I) {
    Constant *C = dyn_cast<Constant>(*I);
    if (!C)
      C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(*I));
    if (!C)
      return false; // This argument doesn't map to a constant.

    ConstantArgs.push_back(C);
  }
  if (Constant *C = ConstantFoldCall(F, ConstantArgs)) {
    SimplifiedValues[CS.getInstruction()] = C;
    return true;
  }

  return false;
}

bool CallAnalyzer::visitCallSite(CallSite CS) {
  if (CS.hasFnAttr(Attribute::ReturnsTwice) &&
      !F.hasFnAttribute(Attribute::ReturnsTwice)) {
    // This aborts the entire analysis.
    ExposesReturnsTwice = true;
    return false;
  }
  if (CS.isCall() &&
      cast<CallInst>(CS.getInstruction())->cannotDuplicate())
    ContainsNoDuplicateCall = true;

  if (Function *F = CS.getCalledFunction()) {
    // When we have a concrete function, first try to simplify it directly.
    if (simplifyCallSite(F, CS))
      return true;

    // Next check if it is an intrinsic we know about.
    // FIXME: Lift this into part of the InstVisitor.
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
      switch (II->getIntrinsicID()) {
      default:
        return Base::visitCallSite(CS);

      case Intrinsic::memset:
      case Intrinsic::memcpy:
      case Intrinsic::memmove:
        // SROA can usually chew through these intrinsics, but they aren't free.
        return false;
      case Intrinsic::frameescape:
        HasFrameEscape = true;
        return false;
      }
    }

    if (F == CS.getInstruction()->getParent()->getParent()) {
      // This flag will fully abort the analysis, so don't bother with anything
      // else.
      IsRecursiveCall = true;
      return false;
    }

    if (TTI.isLoweredToCall(F)) {
      // We account for the average 1 instruction per call argument setup
      // here.
      Cost += CS.arg_size() * InlineConstants::InstrCost;

      // Everything other than inline ASM will also have a significant cost
      // merely from making the call.
      if (!isa<InlineAsm>(CS.getCalledValue()))
        Cost += InlineConstants::CallPenalty;
    }

    return Base::visitCallSite(CS);
  }

  // Otherwise we're in a very special case -- an indirect function call. See
  // if we can be particularly clever about this.
  Value *Callee = CS.getCalledValue();

  // First, pay the price of the argument setup. We account for the average
  // 1 instruction per call argument setup here.
  Cost += CS.arg_size() * InlineConstants::InstrCost;

  // Next, check if this happens to be an indirect function call to a known
  // function in this inline context. If not, we've done all we can.
  Function *F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
  if (!F)
    return Base::visitCallSite(CS);

  // If we have a constant that we are calling as a function, we can peer
  // through it and see the function target. This happens not infrequently
  // during devirtualization and so we want to give it a hefty bonus for
  // inlining, but cap that bonus in the event that inlining wouldn't pan
  // out. Pretend to inline the function, with a custom threshold.
  CallAnalyzer CA(TTI, ACT, *F, InlineConstants::IndirectCallThreshold);
  if (CA.analyzeCall(CS)) {
    // We were able to inline the indirect call! Subtract the cost from the
    // bonus we want to apply, but don't go below zero.
    Cost -= std::max(0, InlineConstants::IndirectCallThreshold - CA.getCost());
  }

  return Base::visitCallSite(CS);
}

bool CallAnalyzer::visitReturnInst(ReturnInst &RI) {
  // At least one return instruction will be free after inlining.
  bool Free = !HasReturn;
  HasReturn = true;
  return Free;
}

bool CallAnalyzer::visitBranchInst(BranchInst &BI) {
  // We model unconditional branches as essentially free -- they really
  // shouldn't exist at all, but handling them makes the behavior of the
  // inliner more regular and predictable. Interestingly, conditional branches
  // which will fold away are also free.
  return BI.isUnconditional() || isa<ConstantInt>(BI.getCondition()) ||
         dyn_cast_or_null<ConstantInt>(
             SimplifiedValues.lookup(BI.getCondition()));
}

bool CallAnalyzer::visitSwitchInst(SwitchInst &SI) {
  // We model unconditional switches as free, see the comments on handling
  // branches.
  if (isa<ConstantInt>(SI.getCondition()))
    return true;
  if (Value *V = SimplifiedValues.lookup(SI.getCondition()))
    if (isa<ConstantInt>(V))
      return true;

  // Otherwise, we need to accumulate a cost proportional to the number of
  // distinct successor blocks. This fan-out in the CFG cannot be represented
  // for free even if we can represent the core switch as a jumptable that
  // takes a single instruction.
  //
  // NB: We convert large switches which are just used to initialize large phi
  // nodes to lookup tables instead in simplify-cfg, so this shouldn't prevent
  // inlining those. It will prevent inlining in cases where the optimization
  // does not (yet) fire.
  SmallPtrSet<BasicBlock *, 8> SuccessorBlocks;
  SuccessorBlocks.insert(SI.getDefaultDest());
  for (auto I = SI.case_begin(), E = SI.case_end(); I != E; ++I)
    SuccessorBlocks.insert(I.getCaseSuccessor());
  // Add cost corresponding to the number of distinct destinations. The first
  // we model as free because of fallthrough.
  Cost += (SuccessorBlocks.size() - 1) * InlineConstants::InstrCost;
  return false;
}

bool CallAnalyzer::visitIndirectBrInst(IndirectBrInst &IBI) {
  // We never want to inline functions that contain an indirectbr.  This is
  // incorrect because all the blockaddresses (in static global initializers
  // for example) would be referring to the original function, and this
  // indirect jump would jump from the inlined copy of the function into the
  // original function which is extremely undefined behavior.
  // FIXME: This logic isn't really right; we can safely inline functions with
  // indirectbr's as long as no other function or global references the
  // blockaddress of a block within the current function.
  HasIndirectBr = true;
  return false;
}

bool CallAnalyzer::visitResumeInst(ResumeInst &RI) {
  // FIXME: It's not clear that a single instruction is an accurate model for
  // the inline cost of a resume instruction.
  return false;
}

bool CallAnalyzer::visitUnreachableInst(UnreachableInst &I) {
  // FIXME: It might be reasonable to discount the cost of instructions leading
  // to unreachable as they have the lowest possible impact on both runtime and
  // code size.
  return true; // No actual code is needed for unreachable.
}

bool CallAnalyzer::visitInstruction(Instruction &I) {
  // Some instructions are free. All of the free intrinsics can also be
  // handled by SROA, etc.
  if (TargetTransformInfo::TCC_Free == TTI.getUserCost(&I))
    return true;

  // We found something we don't understand or can't handle. Mark any SROA-able
  // values in the operand list as no longer viable.
  for (User::op_iterator OI = I.op_begin(), OE = I.op_end(); OI != OE; ++OI)
    disableSROA(*OI);

  return false;
}

/// \brief Analyze a basic block for its contribution to the inline cost.
///
/// This method walks the analyzer over every instruction in the given basic
/// block and accounts for their cost during inlining at this callsite. It
/// aborts early if the threshold has been exceeded or an impossible-to-inline
/// construct has been detected. It returns false if inlining is no longer
/// viable, and true if inlining remains viable.
bool CallAnalyzer::analyzeBlock(BasicBlock *BB,
                                SmallPtrSetImpl<const Value *> &EphValues) {
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
    // FIXME: Currently the vector bonus heuristic counts every instruction in
    // the function, regardless of our ability to simplify it to a constant or
    // dead code during inlining. As long as that's true, we have to special
    // case debug intrinsics here to prevent differences in inlining due to
    // debug symbols. Eventually, the number of unsimplified instructions
    // shouldn't factor into the cost computation, but until then, hack around
    // it here.
    if (isa<DbgInfoIntrinsic>(I))
      continue;

    // Skip ephemeral values.
    if (EphValues.count(I))
      continue;

    ++NumInstructions;
    if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy())
      ++NumVectorInstructions;

    // If the instruction is floating point, and the target says this operation
    // is expensive or the function has the "use-soft-float" attribute, this
    // may eventually become a library call. Treat the cost as such.
    if (I->getType()->isFloatingPointTy()) {
      bool hasSoftFloatAttr = false;

      // If the function has the "use-soft-float" attribute, mark it as
      // expensive.
      if (F.hasFnAttribute("use-soft-float")) {
        Attribute Attr = F.getFnAttribute("use-soft-float");
        StringRef Val = Attr.getValueAsString();
        if (Val == "true")
          hasSoftFloatAttr = true;
      }

      if (TTI.getFPOpCost(I->getType()) == TargetTransformInfo::TCC_Expensive ||
          hasSoftFloatAttr)
        Cost += InlineConstants::CallPenalty;
    }

    // If the instruction simplified to a constant, there is no cost to this
    // instruction. Visit the instructions using our InstVisitor to account for
    // all of the per-instruction logic. The visit tree returns true if we
    // consumed the instruction in any way, and false if the instruction's base
    // cost should count against inlining.
    if (Base::visit(I))
      ++NumInstructionsSimplified;
    else
      Cost += InlineConstants::InstrCost;

    // If visiting this instruction detected an uninlinable pattern, abort.
    if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca ||
        HasIndirectBr || HasFrameEscape)
      return false;

    // If the caller is a recursive function then we don't want to inline
    // functions which allocate a lot of stack space because it would increase
    // the caller stack usage dramatically.
    if (IsCallerRecursive &&
        AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
      return false;

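    // Recompute the vector bonus from the running counts; the bonus only
    // survives to the end of the analysis if the vector density holds up
    // across the whole callee.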
    if (NumVectorInstructions > NumInstructions/2)
      VectorBonus = FiftyPercentVectorBonus;
    else if (NumVectorInstructions > NumInstructions/10)
      VectorBonus = TenPercentVectorBonus;
    else
      VectorBonus = 0;

    // Check if we've passed the threshold so we don't spin in huge basic
    // blocks that will never inline.
    if (Cost > (Threshold + VectorBonus))
      return false;
  }

  return true;
}

/// \brief Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. It
/// returns null if V is not a pointer, and returns the constant '0' if there
/// are no constant offsets applied.
ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
  if (!V->getType()->isPointerTy())
    return nullptr;

  const DataLayout &DL = F.getParent()->getDataLayout();
  unsigned IntPtrWidth = DL.getPointerSizeInBits();
  APInt Offset = APInt::getNullValue(IntPtrWidth);

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(V);
  do {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
        return nullptr;
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->mayBeOverridden())
        break;
      V = GA->getAliasee();
    } else {
      break;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(V).second);

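  // IntPtrTy is a scalar integer type, so ConstantInt::get is guaranteed to
  // produce a ConstantInt and the cast below cannot fail.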
  Type *IntPtrTy = DL.getIntPtrType(V->getContext());
  return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
}

/// \brief Analyze a call site for potential inlining.
///
/// Returns true if inlining this call is viable, and false if it is not
/// viable. It computes the cost and adjusts the threshold based on numerous
/// factors and heuristics. If this method returns false but the computed cost
/// is below the computed threshold, then inlining was forcibly disabled by
/// some artifact of the routine.
bool CallAnalyzer::analyzeCall(CallSite CS) {
  ++NumCallsAnalyzed;

  // Track whether the post-inlining function would have more than one basic
  // block. A single basic block is often intended for inlining. Balloon the
  // threshold by 50% until we pass the single-BB phase.
  bool SingleBB = true;
  int SingleBBBonus = Threshold / 2;
  Threshold += SingleBBBonus;

  // Perform some tweaks to the cost and threshold based on the direct
  // callsite information.

  // We want to more aggressively inline vector-dense kernels, so up the
  // threshold, and we'll lower it if the % of vector instructions gets too
  // low.
  assert(NumInstructions == 0);
  assert(NumVectorInstructions == 0);
  FiftyPercentVectorBonus = Threshold;
  TenPercentVectorBonus = Threshold / 2;
  const DataLayout &DL = F.getParent()->getDataLayout();

  // Give out bonuses per argument, as the instructions setting them up will
  // be gone after inlining.
  for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) {
    if (CS.isByValArgument(I)) {
      // We approximate the number of loads and stores needed by dividing the
      // size of the byval type by the target's pointer size.
      PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
      unsigned TypeSize = DL.getTypeSizeInBits(PTy->getElementType());
      unsigned PointerSize = DL.getPointerSizeInBits();
      // Ceiling division.
      unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;

      // If it generates more than 8 stores it is likely to be expanded as an
      // inline memcpy so we take that as an upper bound. Otherwise we assume
      // one load and one store per word copied.
      // FIXME: The maxStoresPerMemcpy setting from the target should be used
      // here instead of a magic number of 8, but it's not available via
      // DataLayout.
      NumStores = std::min(NumStores, 8U);

      Cost -= 2 * NumStores * InlineConstants::InstrCost;
    } else {
      // For non-byval arguments subtract off one instruction per call
      // argument.
      Cost -= InlineConstants::InstrCost;
    }
  }

  // If there is only one call of the function, and it has internal linkage,
  // the cost of inlining it drops dramatically.
  bool OnlyOneCallAndLocalLinkage = F.hasLocalLinkage() && F.hasOneUse() &&
    &F == CS.getCalledFunction();
  if (OnlyOneCallAndLocalLinkage)
    Cost += InlineConstants::LastCallToStaticBonus;

  // If the instruction after the call, or the normal destination of the
  // invoke, is an unreachable instruction, the function is noreturn. As such,
  // there is little point in inlining this unless there is literally zero
  // cost.
  Instruction *Instr = CS.getInstruction();
  if (InvokeInst *II = dyn_cast<InvokeInst>(Instr)) {
    if (isa<UnreachableInst>(II->getNormalDest()->begin()))
      Threshold = 1;
  } else if (isa<UnreachableInst>(++BasicBlock::iterator(Instr)))
    Threshold = 1;

  // If this function uses the coldcc calling convention, prefer not to inline
  // it.
  if (F.getCallingConv() == CallingConv::Cold)
    Cost += InlineConstants::ColdccPenalty;

  // Check if we're done. This can happen due to bonuses and penalties.
  if (Cost > Threshold)
    return false;

  if (F.empty())
    return true;

  Function *Caller = CS.getInstruction()->getParent()->getParent();
  // Check if the caller function is recursive itself.
  for (User *U : Caller->users()) {
    CallSite Site(U);
    if (!Site)
      continue;
    Instruction *I = Site.getInstruction();
    if (I->getParent()->getParent() == Caller) {
      IsCallerRecursive = true;
      break;
    }
  }

  // Populate our simplified values by mapping from function arguments to call
  // arguments with known important simplifications.
  CallSite::arg_iterator CAI = CS.arg_begin();
  for (Function::arg_iterator FAI = F.arg_begin(), FAE = F.arg_end();
       FAI != FAE; ++FAI, ++CAI) {
    assert(CAI != CS.arg_end());
    if (Constant *C = dyn_cast<Constant>(CAI))
      SimplifiedValues[FAI] = C;

    Value *PtrArg = *CAI;
    if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
      ConstantOffsetPtrs[FAI] = std::make_pair(PtrArg, C->getValue());

      // We can SROA any pointer arguments derived from alloca instructions.
      if (isa<AllocaInst>(PtrArg)) {
        SROAArgValues[FAI] = PtrArg;
        SROAArgCosts[PtrArg] = 0;
      }
    }
  }
  NumConstantArgs = SimplifiedValues.size();
  NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
  NumAllocaArgs = SROAArgValues.size();

  // FIXME: If a caller has multiple calls to a callee, we end up recomputing
  // the ephemeral values multiple times (and they're completely determined by
  // the callee, so this is purely duplicate work).
  SmallPtrSet<const Value *, 32> EphValues;
  CodeMetrics::collectEphemeralValues(&F, &ACT->getAssumptionCache(F),
                                      EphValues);

  // The worklist of live basic blocks in the callee *after* inlining. We avoid
  // adding basic blocks of the callee which can be proven to be dead for this
  // particular call site in order to get more accurate cost estimates. This
  // requires a somewhat heavyweight iteration pattern: we need to walk the
  // basic blocks in a breadth-first order as we insert live successors. To
  // accomplish this we use a small-size-optimized SetVector, favoring small
  // iteration counts because we exit the analysis after crossing our
  // threshold.
  typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
                                  SmallPtrSet<BasicBlock *, 16> > BBSetVector;
  BBSetVector BBWorklist;
  BBWorklist.insert(&F.getEntryBlock());
  // Note that we *must not* cache the size, as this loop grows the worklist.
  for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
    // Bail out the moment we cross the threshold. This means we'll under-count
    // the cost, but only when undercounting doesn't matter.
    if (Cost > (Threshold + VectorBonus))
      break;

    BasicBlock *BB = BBWorklist[Idx];
    if (BB->empty())
      continue;

    // Disallow inlining a blockaddress. A blockaddress only has defined
    // behavior for an indirect branch in the same function, and we do not
    // currently support inlining indirect branches. But, the inliner may not
    // see an indirect branch that ends up being dead code at a particular call
    // site. If the blockaddress escapes the function, e.g., via a global
    // variable, inlining may lead to an invalid cross-function reference.
    if (BB->hasAddressTaken())
      return false;

    // Analyze the cost of this block. If we blow through the threshold, this
    // returns false, and we can bail out.
    if (!analyzeBlock(BB, EphValues)) {
      if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca ||
          HasIndirectBr || HasFrameEscape)
        return false;

      // If the caller is a recursive function then we don't want to inline
      // functions which allocate a lot of stack space because it would increase
      // the caller stack usage dramatically.
      if (IsCallerRecursive &&
          AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
        return false;

      break;
    }

    TerminatorInst *TI = BB->getTerminator();

    // Add in the live successors by first checking whether we have a
    // terminator that may be simplified based on the values simplified by
    // this call.
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional()) {
        Value *Cond = BI->getCondition();
        if (ConstantInt *SimpleCond
              = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
          BBWorklist.insert(BI->getSuccessor(SimpleCond->isZero() ? 1 : 0));
          continue;
        }
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      Value *Cond = SI->getCondition();
      if (ConstantInt *SimpleCond
            = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
        BBWorklist.insert(SI->findCaseValue(SimpleCond).getCaseSuccessor());
        continue;
      }
    }

    // If we're unable to select a particular successor, just count all of
    // them.
    for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
         ++TIdx)
      BBWorklist.insert(TI->getSuccessor(TIdx));

    // If we had any successors at this point, then post-inlining is likely to
    // have them as well. Note that we assume any basic blocks which existed
    // due to branches or switches which folded above will also fold after
    // inlining.
    if (SingleBB && TI->getNumSuccessors() > 1) {
      // Take off the bonus we applied to the threshold.
      Threshold -= SingleBBBonus;
      SingleBB = false;
    }
  }

  // If this is a noduplicate call, we can still inline as long as
  // inlining this would cause the removal of the caller (so the instruction
  // is not actually duplicated, just moved).
  if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
    return false;

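  // Apply whatever vector bonus the callee earned to the final comparison.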
  Threshold += VectorBonus;

  return Cost < Threshold;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// \brief Dump stats about this call's analysis.
void CallAnalyzer::dump() {
#define DEBUG_PRINT_STAT(x) dbgs() << "      " #x ": " << x << "\n"
  DEBUG_PRINT_STAT(NumConstantArgs);
  DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
  DEBUG_PRINT_STAT(NumAllocaArgs);
  DEBUG_PRINT_STAT(NumConstantPtrCmps);
  DEBUG_PRINT_STAT(NumConstantPtrDiffs);
  DEBUG_PRINT_STAT(NumInstructionsSimplified);
  DEBUG_PRINT_STAT(SROACostSavings);
  DEBUG_PRINT_STAT(SROACostSavingsLost);
  DEBUG_PRINT_STAT(ContainsNoDuplicateCall);
  DEBUG_PRINT_STAT(Cost);
  DEBUG_PRINT_STAT(Threshold);
  DEBUG_PRINT_STAT(VectorBonus);
#undef DEBUG_PRINT_STAT
}
#endif

INITIALIZE_PASS_BEGIN(InlineCostAnalysis, "inline-cost", "Inline Cost Analysis",
                      true, true)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_END(InlineCostAnalysis, "inline-cost", "Inline Cost Analysis",
                    true, true)

char InlineCostAnalysis::ID = 0;

InlineCostAnalysis::InlineCostAnalysis() : CallGraphSCCPass(ID) {}

InlineCostAnalysis::~InlineCostAnalysis() {}

void InlineCostAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<TargetTransformInfoWrapperPass>();
  CallGraphSCCPass::getAnalysisUsage(AU);
}

bool InlineCostAnalysis::runOnSCC(CallGraphSCC &SCC) {
  TTIWP = &getAnalysis<TargetTransformInfoWrapperPass>();
  ACT = &getAnalysis<AssumptionCacheTracker>();
  return false;
}

InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, int Threshold) {
  return getInlineCost(CS, CS.getCalledFunction(), Threshold);
}

/// \brief Test whether two functions either both have or both lack the given
///        attribute.
template<typename AttrKind>
static bool attributeMatches(Function *F1, Function *F2, AttrKind Attr) {
  return F1->getFnAttribute(Attr) == F2->getFnAttribute(Attr);
}

/// \brief Test that there are no attribute conflicts between Caller and Callee
///        that prevent inlining.
static bool functionsHaveCompatibleAttributes(Function *Caller,
                                              Function *Callee) {
  return attributeMatches(Caller, Callee, "target-cpu") &&
         attributeMatches(Caller, Callee, "target-features") &&
         attributeMatches(Caller, Callee, Attribute::SanitizeAddress) &&
         attributeMatches(Caller, Callee, Attribute::SanitizeMemory) &&
         attributeMatches(Caller, Callee, Attribute::SanitizeThread);
}

InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, Function *Callee,
                                             int Threshold) {
  // Cannot inline indirect calls.
  if (!Callee)
    return llvm::InlineCost::getNever();

  // Calls to functions with always-inline attributes should be inlined
  // whenever possible.
  if (CS.hasFnAttr(Attribute::AlwaysInline)) {
    if (isInlineViable(*Callee))
      return llvm::InlineCost::getAlways();
    return llvm::InlineCost::getNever();
  }

  // Never inline functions with conflicting attributes (unless callee has
  // always-inline attribute).
  if (!functionsHaveCompatibleAttributes(CS.getCaller(), Callee))
    return llvm::InlineCost::getNever();

  // Don't inline this call if the caller has the optnone attribute.
  if (CS.getCaller()->hasFnAttribute(Attribute::OptimizeNone))
    return llvm::InlineCost::getNever();

  // Don't inline functions which can be redefined at link-time to mean
  // something else.  Don't inline functions marked noinline or call sites
  // marked noinline.
  if (Callee->mayBeOverridden() ||
      Callee->hasFnAttribute(Attribute::NoInline) || CS.isNoInline())
    return llvm::InlineCost::getNever();

  DEBUG(llvm::dbgs() << "      Analyzing call of " << Callee->getName()
        << "...\n");

  CallAnalyzer CA(TTIWP->getTTI(*Callee), ACT, *Callee, Threshold);
  bool ShouldInline = CA.analyzeCall(CS);

  DEBUG(CA.dump());

  // Check if there was a reason to force inlining or no inlining.
  if (!ShouldInline && CA.getCost() < CA.getThreshold())
    return InlineCost::getNever();
  if (ShouldInline && CA.getCost() >= CA.getThreshold())
    return InlineCost::getAlways();

  return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
}

bool InlineCostAnalysis::isInlineViable(Function &F) {
  bool ReturnsTwice = F.hasFnAttribute(Attribute::ReturnsTwice);
  for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
    // Disallow inlining of functions which contain indirect branches or
    // blockaddresses.
    if (isa<IndirectBrInst>(BI->getTerminator()) || BI->hasAddressTaken())
      return false;

    for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;
         ++II) {
      CallSite CS(II);
      if (!CS)
        continue;

      // Disallow recursive calls.
      if (&F == CS.getCalledFunction())
        return false;

      // Disallow calls which expose returns-twice to a function not previously
      // attributed as such.
      if (!ReturnsTwice && CS.isCall() &&
          cast<CallInst>(CS.getInstruction())->canReturnTwice())
        return false;

      // Disallow inlining functions that call @llvm.frameescape. Doing this
      // correctly would require major changes to the inliner.
      if (CS.getCalledFunction() &&
          CS.getCalledFunction()->getIntrinsicID() ==
              llvm::Intrinsic::frameescape)
        return false;
    }
  }

  return true;
}