//===- InstCombineInternal.h - InstCombine pass internals -------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file provides internal interfaces used to implement the InstCombine.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
#define LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"

#define DEBUG_TYPE "instcombine"

namespace llvm {
class CallSite;
class DataLayout;
class DominatorTree;
class TargetLibraryInfo;
class DbgDeclareInst;
class MemIntrinsic;
class MemSetInst;

/// \brief Assign a complexity or rank value to LLVM Values.
///
/// This routine maps IR values to various complexity ranks:
///   0 -> undef
///   1 -> Constants
///   2 -> Other non-instructions
///   3 -> Arguments
///   3 -> Unary operations
///   4 -> Other instructions
static inline unsigned getComplexity(Value *V) {
  if (isa<Instruction>(V)) {
    if (BinaryOperator::isNeg(V) || BinaryOperator::isFNeg(V) ||
        BinaryOperator::isNot(V))
      return 3;
    return 4;
  }
  if (isa<Argument>(V))
    return 3;
  return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;
}
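
// A minimal usage sketch (the helper below is hypothetical, not declared in
// this header): the ranking is typically used to canonicalize commutative
// operands so that simpler values (constants, undef) end up on the right:
//
//   static void canonicalizeOperandOrder(Value *&LHS, Value *&RHS) {
//     if (getComplexity(LHS) < getComplexity(RHS))
//       std::swap(LHS, RHS); // more complex value goes to the left
//   }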

/// \brief Add one to a Constant
static inline Constant *AddOne(Constant *C) {
  return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
}
/// \brief Subtract one from a Constant
static inline Constant *SubOne(Constant *C) {
  return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1));
}

/// \brief Return true if the specified value is free to invert (apply ~ to).
/// This happens in cases where the ~ can be eliminated. If WillInvertAllUses
/// is true, work under the assumption that the caller intends to remove all
/// uses of V and only keep uses of ~V.
static inline bool IsFreeToInvert(Value *V, bool WillInvertAllUses) {
  // ~(~(X)) -> X.
  if (BinaryOperator::isNot(V))
    return true;

  // Constants can be considered to be not'ed values.
  if (isa<ConstantInt>(V))
    return true;

  // Compares can be inverted if all of their uses are being modified to use
  // ~V.
  if (isa<CmpInst>(V))
    return WillInvertAllUses;

  // If `V` is of the form `A + Constant` then `-1 - V` can be folded into
  // `(-1 - Constant) - A` if we are willing to invert all of the uses.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V))
    if (BO->getOpcode() == Instruction::Add ||
        BO->getOpcode() == Instruction::Sub)
      if (isa<Constant>(BO->getOperand(0)) || isa<Constant>(BO->getOperand(1)))
        return WillInvertAllUses;

  return false;
}
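
// Worked example (an illustrative IR sketch; the value names are made up):
//   %v   = add i32 %a, 7
//   %not = xor i32 %v, -1        ; ~(%a + 7)
// can become, provided every use of %v is rewritten to the inverted value:
//   %not = sub i32 -8, %a        ; since ~(%a + 7) == -1 - (%a + 7) == -8 - %a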


/// \brief Specific patterns of overflow check idioms that we match.
enum OverflowCheckFlavor {
  OCF_UNSIGNED_ADD,
  OCF_SIGNED_ADD,
  OCF_UNSIGNED_SUB,
  OCF_SIGNED_SUB,
  OCF_UNSIGNED_MUL,
  OCF_SIGNED_MUL,

  OCF_INVALID
};

/// \brief Returns the OverflowCheckFlavor corresponding to an
/// overflow-checking intrinsic such as llvm.uadd.with.overflow.
static inline OverflowCheckFlavor
IntrinsicIDToOverflowCheckFlavor(unsigned ID) {
  switch (ID) {
  default:
    return OCF_INVALID;
  case Intrinsic::uadd_with_overflow:
    return OCF_UNSIGNED_ADD;
  case Intrinsic::sadd_with_overflow:
    return OCF_SIGNED_ADD;
  case Intrinsic::usub_with_overflow:
    return OCF_UNSIGNED_SUB;
  case Intrinsic::ssub_with_overflow:
    return OCF_SIGNED_SUB;
  case Intrinsic::umul_with_overflow:
    return OCF_UNSIGNED_MUL;
  case Intrinsic::smul_with_overflow:
    return OCF_SIGNED_MUL;
  }
}

/// \brief An IRBuilder inserter that adds new instructions to the instcombine
/// worklist.
class LLVM_LIBRARY_VISIBILITY InstCombineIRInserter
    : public IRBuilderDefaultInserter<true> {
  InstCombineWorklist &Worklist;
  AssumptionCache *AC;

public:
  InstCombineIRInserter(InstCombineWorklist &WL, AssumptionCache *AC)
      : Worklist(WL), AC(AC) {}

  void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB,
                    BasicBlock::iterator InsertPt) const {
    IRBuilderDefaultInserter<true>::InsertHelper(I, Name, BB, InsertPt);
    Worklist.Add(I);

    using namespace llvm::PatternMatch;
    if (match(I, m_Intrinsic<Intrinsic::assume>()))
      AC->registerAssumption(cast<CallInst>(I));
  }
};
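
// A minimal usage sketch (ExampleBuilder and emitMaskedByte are illustrative
// names, not declared anywhere): an IRBuilder configured with this inserter
// queues every instruction it creates onto the worklist, so combines do not
// need an explicit Worklist.Add for values they materialize:
//
//   typedef IRBuilder<true, TargetFolder, InstCombineIRInserter> ExampleBuilder;
//   static Value *emitMaskedByte(ExampleBuilder &B, Value *V) {
//     // The new 'and' is routed through InstCombineIRInserter::InsertHelper.
//     return B.CreateAnd(V, ConstantInt::get(V->getType(), 255));
//   }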

/// \brief The core instruction combiner logic.
///
/// This class provides both the logic to recursively visit instructions and
/// combine them, as well as the pass infrastructure for running this as part
/// of the LLVM pass pipeline.
class LLVM_LIBRARY_VISIBILITY InstCombiner
    : public InstVisitor<InstCombiner, Instruction *> {
  // FIXME: These members shouldn't be public.
public:
  /// \brief A worklist of the instructions that need to be simplified.
  InstCombineWorklist &Worklist;

  /// \brief An IRBuilder that automatically inserts new instructions into the
  /// worklist.
  typedef IRBuilder<true, TargetFolder, InstCombineIRInserter> BuilderTy;
  BuilderTy *Builder;

private:
  // Mode in which we are running the combiner.
  const bool MinimizeSize;

  AliasAnalysis *AA;

  // Required analyses.
  // FIXME: These can never be null and should be references.
  AssumptionCache *AC;
  TargetLibraryInfo *TLI;
  DominatorTree *DT;
  const DataLayout &DL;

  // Optional analyses. When non-null, this can be used to do better combining
  // and will be updated to reflect any changes.
  LoopInfo *LI;

  bool MadeIRChange;

public:
  InstCombiner(InstCombineWorklist &Worklist, BuilderTy *Builder,
               bool MinimizeSize, AliasAnalysis *AA,
               AssumptionCache *AC, TargetLibraryInfo *TLI,
               DominatorTree *DT, const DataLayout &DL, LoopInfo *LI)
      : Worklist(Worklist), Builder(Builder), MinimizeSize(MinimizeSize),
        AA(AA), AC(AC), TLI(TLI), DT(DT), DL(DL), LI(LI), MadeIRChange(false) {}

  /// \brief Run the combiner over the entire worklist until it is empty.
  ///
  /// \returns true if the IR is changed.
  bool run();

  AssumptionCache *getAssumptionCache() const { return AC; }

  const DataLayout &getDataLayout() const { return DL; }

  DominatorTree *getDominatorTree() const { return DT; }

  LoopInfo *getLoopInfo() const { return LI; }

  TargetLibraryInfo *getTargetLibraryInfo() const { return TLI; }

  // Visitation implementation - Implement instruction combining for different
  // instruction types. The semantics are as follows:
  //   Return Value:
  //     null      - No change was made.
  //     I         - Change was made; I is still valid, but it may be dead.
  //     otherwise - Change was made; replace I with the returned instruction.
  //
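  // For example, a visitor typically has the following shape (an illustrative
  // sketch; visitFoo and simplifyFoo are hypothetical, not members of this
  // class):
  //
  //   Instruction *visitFoo(BinaryOperator &I) {
  //     if (Value *V = simplifyFoo(I))      // found an existing equivalent
  //       return ReplaceInstUsesWith(I, V); // change made; I is now dead
  //     return nullptr;                     // no change was made
  //   }
  //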
  Instruction *visitAdd(BinaryOperator &I);
  Instruction *visitFAdd(BinaryOperator &I);
  Value *OptimizePointerDifference(Value *LHS, Value *RHS, Type *Ty);
  Instruction *visitSub(BinaryOperator &I);
  Instruction *visitFSub(BinaryOperator &I);
  Instruction *visitMul(BinaryOperator &I);
  Value *foldFMulConst(Instruction *FMulOrDiv, Constant *C,
                       Instruction *InsertBefore);
  Instruction *visitFMul(BinaryOperator &I);
  Instruction *visitURem(BinaryOperator &I);
  Instruction *visitSRem(BinaryOperator &I);
  Instruction *visitFRem(BinaryOperator &I);
  bool SimplifyDivRemOfSelect(BinaryOperator &I);
  Instruction *commonRemTransforms(BinaryOperator &I);
  Instruction *commonIRemTransforms(BinaryOperator &I);
  Instruction *commonDivTransforms(BinaryOperator &I);
  Instruction *commonIDivTransforms(BinaryOperator &I);
  Instruction *visitUDiv(BinaryOperator &I);
  Instruction *visitSDiv(BinaryOperator &I);
  Instruction *visitFDiv(BinaryOperator &I);
  Value *simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1, bool Inverted);
  Value *FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS);
  Value *FoldAndOfFCmps(FCmpInst *LHS, FCmpInst *RHS);
  Instruction *visitAnd(BinaryOperator &I);
  Value *FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS, Instruction *CxtI);
  Value *FoldOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS);
  Instruction *FoldOrWithConstants(BinaryOperator &I, Value *Op, Value *A,
                                   Value *B, Value *C);
  Instruction *FoldXorWithConstants(BinaryOperator &I, Value *Op, Value *A,
                                    Value *B, Value *C);
  Instruction *visitOr(BinaryOperator &I);
  Instruction *visitXor(BinaryOperator &I);
  Instruction *visitShl(BinaryOperator &I);
  Instruction *visitAShr(BinaryOperator &I);
  Instruction *visitLShr(BinaryOperator &I);
  Instruction *commonShiftTransforms(BinaryOperator &I);
  Instruction *FoldFCmp_IntToFP_Cst(FCmpInst &I, Instruction *LHSI,
                                    Constant *RHSC);
  Instruction *FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
                                            GlobalVariable *GV, CmpInst &ICI,
                                            ConstantInt *AndCst = nullptr);
  Instruction *visitFCmpInst(FCmpInst &I);
  Instruction *visitICmpInst(ICmpInst &I);
  Instruction *visitICmpInstWithCastAndCast(ICmpInst &ICI);
  Instruction *visitICmpInstWithInstAndIntCst(ICmpInst &ICI, Instruction *LHS,
                                              ConstantInt *RHS);
  Instruction *FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
                              ConstantInt *DivRHS);
  Instruction *FoldICmpShrCst(ICmpInst &ICI, BinaryOperator *DivI,
                              ConstantInt *DivRHS);
  Instruction *FoldICmpCstShrCst(ICmpInst &I, Value *Op, Value *A,
                                 ConstantInt *CI1, ConstantInt *CI2);
  Instruction *FoldICmpCstShlCst(ICmpInst &I, Value *Op, Value *A,
                                 ConstantInt *CI1, ConstantInt *CI2);
  Instruction *FoldICmpAddOpCst(Instruction &ICI, Value *X, ConstantInt *CI,
                                ICmpInst::Predicate Pred);
  Instruction *FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                           ICmpInst::Predicate Cond, Instruction &I);
  Instruction *FoldAllocaCmp(ICmpInst &ICI, AllocaInst *Alloca, Value *Other);
  Instruction *FoldShiftByConstant(Value *Op0, Constant *Op1,
                                   BinaryOperator &I);
  Instruction *commonCastTransforms(CastInst &CI);
  Instruction *commonPointerCastTransforms(CastInst &CI);
  Instruction *visitTrunc(TruncInst &CI);
  Instruction *visitZExt(ZExtInst &CI);
  Instruction *visitSExt(SExtInst &CI);
  Instruction *visitFPTrunc(FPTruncInst &CI);
  Instruction *visitFPExt(CastInst &CI);
  Instruction *visitFPToUI(FPToUIInst &FI);
  Instruction *visitFPToSI(FPToSIInst &FI);
  Instruction *visitUIToFP(CastInst &CI);
  Instruction *visitSIToFP(CastInst &CI);
  Instruction *visitPtrToInt(PtrToIntInst &CI);
  Instruction *visitIntToPtr(IntToPtrInst &CI);
  Instruction *visitBitCast(BitCastInst &CI);
  Instruction *visitAddrSpaceCast(AddrSpaceCastInst &CI);
  Instruction *FoldSelectOpOp(SelectInst &SI, Instruction *TI, Instruction *FI);
  Instruction *FoldSelectIntoOp(SelectInst &SI, Value *, Value *);
  Instruction *FoldSPFofSPF(Instruction *Inner, SelectPatternFlavor SPF1,
                            Value *A, Value *B, Instruction &Outer,
                            SelectPatternFlavor SPF2, Value *C);
  Instruction *FoldItoFPtoI(Instruction &FI);
  Instruction *visitSelectInst(SelectInst &SI);
  Instruction *visitSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI);
  Instruction *visitCallInst(CallInst &CI);
  Instruction *visitInvokeInst(InvokeInst &II);

  Instruction *SliceUpIllegalIntegerPHI(PHINode &PN);
  Instruction *visitPHINode(PHINode &PN);
  Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
  Instruction *visitAllocaInst(AllocaInst &AI);
  Instruction *visitAllocSite(Instruction &FI);
  Instruction *visitFree(CallInst &FI);
  Instruction *visitLoadInst(LoadInst &LI);
  Instruction *visitStoreInst(StoreInst &SI);
  Instruction *visitBranchInst(BranchInst &BI);
  Instruction *visitSwitchInst(SwitchInst &SI);
  Instruction *visitReturnInst(ReturnInst &RI);
  Instruction *visitInsertValueInst(InsertValueInst &IV);
  Instruction *visitInsertElementInst(InsertElementInst &IE);
  Instruction *visitExtractElementInst(ExtractElementInst &EI);
  Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);
  Instruction *visitExtractValueInst(ExtractValueInst &EV);
  Instruction *visitLandingPadInst(LandingPadInst &LI);

  // visitInstruction - Specify what to return for unhandled instructions...
  Instruction *visitInstruction(Instruction &I) { return nullptr; }

  // True when DB dominates all uses of DI except UI.
  // UI must be in the same block as DI.
  // The routine checks that the DI parent and DB are different.
  bool dominatesAllUses(const Instruction *DI, const Instruction *UI,
                        const BasicBlock *DB) const;

  // Replace select with select operand SIOpd in SI-ICmp sequence when possible.
  bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp,
                                 const unsigned SIOpd);

private:
  bool ShouldChangeType(unsigned FromBitWidth, unsigned ToBitWidth) const;
  bool ShouldChangeType(Type *From, Type *To) const;
  Value *dyn_castNegVal(Value *V) const;
  Value *dyn_castFNegVal(Value *V, bool NoSignedZero = false) const;
  Type *FindElementAtOffset(PointerType *PtrTy, int64_t Offset,
                            SmallVectorImpl<Value *> &NewIndices);
  Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI);

  /// \brief Classify whether a cast is worth optimizing.
  ///
  /// Returns true if the cast from "V to Ty" actually results in any code
  /// being generated and is interesting to optimize out. If the cast can be
  /// eliminated by some other simple transformation, we prefer to do the
  /// simplification first.
  bool ShouldOptimizeCast(Instruction::CastOps opcode, const Value *V,
                          Type *Ty);

  /// \brief Try to optimize a sequence of instructions checking if an
  /// operation on LHS and RHS overflows.
  ///
  /// If this overflow check is done via one of the overflow check intrinsics,
  /// then CtxI has to be the call instruction calling that intrinsic. If this
  /// overflow check is done by arithmetic followed by a compare, then CtxI has
  /// to be the arithmetic instruction.
  ///
  /// If a simplification is possible, stores the simplified result of the
  /// operation in OperationResult and the result of the overflow check in
  /// OverflowResult, and returns true. If no simplification is possible,
  /// returns false.
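  ///
  /// The two shapes this targets look roughly like the following IR (an
  /// illustrative sketch; the value names are made up):
  /// \code
  ///   ; intrinsic form: CtxI is the call itself
  ///   %res = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  ///
  ///   ; arithmetic-plus-compare form: CtxI is the add
  ///   %sum = add i32 %a, %b
  ///   %ovf = icmp ult i32 %sum, %a   ; unsigned add overflowed iff sum < a
  /// \endcode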
  bool OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS, Value *RHS,
                             Instruction &CtxI, Value *&OperationResult,
                             Constant *&OverflowResult);

  Instruction *visitCallSite(CallSite CS);
  Instruction *tryOptimizeCall(CallInst *CI);
  bool transformConstExprCastCall(CallSite CS);
  Instruction *transformCallThroughTrampoline(CallSite CS,
                                              IntrinsicInst *Tramp);
  Instruction *transformZExtICmp(ICmpInst *ICI, Instruction &CI,
                                 bool DoXform = true);
  Instruction *transformSExtICmp(ICmpInst *ICI, Instruction &CI);
  bool WillNotOverflowSignedAdd(Value *LHS, Value *RHS, Instruction &CxtI);
  bool WillNotOverflowSignedSub(Value *LHS, Value *RHS, Instruction &CxtI);
  bool WillNotOverflowUnsignedSub(Value *LHS, Value *RHS, Instruction &CxtI);
  bool WillNotOverflowSignedMul(Value *LHS, Value *RHS, Instruction &CxtI);
  Value *EmitGEPOffset(User *GEP);
  Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
  Value *EvaluateInDifferentElementOrder(Value *V, ArrayRef<int> Mask);

public:
  /// \brief Inserts an instruction \p New before instruction \p Old.
  ///
  /// Also adds the new instruction to the worklist and returns \p New so that
  /// it is suitable for use as the return from the visitation patterns.
  Instruction *InsertNewInstBefore(Instruction *New, Instruction &Old) {
    assert(New && !New->getParent() &&
           "New instruction already inserted into a basic block!");
    BasicBlock *BB = Old.getParent();
    BB->getInstList().insert(Old.getIterator(), New); // Insert inst
    Worklist.Add(New);
    return New;
  }

  /// \brief Same as InsertNewInstBefore, but also sets the debug loc.
  Instruction *InsertNewInstWith(Instruction *New, Instruction &Old) {
    New->setDebugLoc(Old.getDebugLoc());
    return InsertNewInstBefore(New, Old);
  }

  /// \brief A combiner-aware RAUW-like routine.
  ///
  /// This method is to be used when an instruction is found to be dead,
  /// replaceable with another preexisting expression. Here we add all uses of
  /// I to the worklist, replace all uses of I with the new value, then return
  /// I, so that the inst combiner will know that I was modified.
  Instruction *ReplaceInstUsesWith(Instruction &I, Value *V) {
    // If there are no uses to replace, then we return nullptr to indicate that
    // no changes were made to the program.
    if (I.use_empty()) return nullptr;

    Worklist.AddUsersToWorkList(I); // Add all modified instrs to worklist.

    // If we are replacing the instruction with itself, this must be in a
    // segment of unreachable code, so just clobber the instruction.
    if (&I == V)
      V = UndefValue::get(I.getType());

    DEBUG(dbgs() << "IC: Replacing " << I << "\n"
                 << "    with " << *V << '\n');

    I.replaceAllUsesWith(V);
    return &I;
  }

  /// Creates a result tuple for an overflow intrinsic \p II with a given
  /// \p Result and a constant \p Overflow value.
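  ///
  /// For an i32 overflow intrinsic the result has roughly this shape
  /// (an illustrative sketch; %sum stands in for \p Result):
  /// \code
  ///   %tuple = insertvalue { i32, i1 } { i32 undef, i1 false }, i32 %sum, 0
  /// \endcode
  /// i.e. the overflow flag is baked in as the constant \p Overflow and only
  /// the arithmetic result occupies a non-constant slot.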
  Instruction *CreateOverflowTuple(IntrinsicInst *II, Value *Result,
                                   Constant *Overflow) {
    Constant *V[] = {UndefValue::get(Result->getType()), Overflow};
    StructType *ST = cast<StructType>(II->getType());
    Constant *Struct = ConstantStruct::get(ST, V);
    return InsertValueInst::Create(Struct, Result, 0);
  }

  /// \brief Combiner aware instruction erasure.
  ///
  /// When dealing with an instruction that has side effects or produces a void
  /// value, we can't rely on DCE to delete the instruction. Instead, visit
  /// methods should return the value returned by this function.
  Instruction *EraseInstFromFunction(Instruction &I) {
    DEBUG(dbgs() << "IC: ERASE " << I << '\n');

    assert(I.use_empty() && "Cannot erase instruction that is used!");
    // Make sure that we reprocess all operands now that we reduced their
    // use counts.
    if (I.getNumOperands() < 8) {
      for (User::op_iterator i = I.op_begin(), e = I.op_end(); i != e; ++i)
        if (Instruction *Op = dyn_cast<Instruction>(*i))
          Worklist.Add(Op);
    }
    Worklist.Remove(&I);
    I.eraseFromParent();
    MadeIRChange = true;
    return nullptr; // Don't do anything further with I; it has been erased.
  }

  void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
                        unsigned Depth, Instruction *CxtI) const {
    return llvm::computeKnownBits(V, KnownZero, KnownOne, DL, Depth, AC, CxtI,
                                  DT);
  }

  bool MaskedValueIsZero(Value *V, const APInt &Mask, unsigned Depth = 0,
                         Instruction *CxtI = nullptr) const {
    return llvm::MaskedValueIsZero(V, Mask, DL, Depth, AC, CxtI, DT);
  }
  unsigned ComputeNumSignBits(Value *Op, unsigned Depth = 0,
                              Instruction *CxtI = nullptr) const {
    return llvm::ComputeNumSignBits(Op, DL, Depth, AC, CxtI, DT);
  }
  void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
                      unsigned Depth = 0, Instruction *CxtI = nullptr) const {
    return llvm::ComputeSignBit(V, KnownZero, KnownOne, DL, Depth, AC, CxtI,
                                DT);
  }
  OverflowResult computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
                                               const Instruction *CxtI) {
    return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, AC, CxtI, DT);
  }
  OverflowResult computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
                                               const Instruction *CxtI) {
    return llvm::computeOverflowForUnsignedAdd(LHS, RHS, DL, AC, CxtI, DT);
  }

private:
  /// \brief Performs a few simplifications for operators which are associative
  /// or commutative.
  bool SimplifyAssociativeOrCommutative(BinaryOperator &I);

  /// \brief Tries to simplify binary operations which some other binary
  /// operation distributes over.
  ///
  /// It does this either by factorizing out common terms (e.g. "(A*B)+(A*C)"
  /// -> "A*(B+C)") or by expanding out if this results in simplifications
  /// (e.g. "A & (B | C) -> (A&B) | (A&C)" if this is a win). Returns the
  /// simplified value, or null if it didn't simplify.
  Value *SimplifyUsingDistributiveLaws(BinaryOperator &I);

  /// \brief Attempts to replace V with a simpler value based on the demanded
  /// bits.
  Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask, APInt &KnownZero,
                                 APInt &KnownOne, unsigned Depth,
                                 Instruction *CxtI);
  bool SimplifyDemandedBits(Use &U, APInt DemandedMask, APInt &KnownZero,
                            APInt &KnownOne, unsigned Depth = 0);
  /// Helper routine of SimplifyDemandedUseBits. It tries to simplify the
  /// demanded bits for the "r1 = shr x, c1; r2 = shl r1, c2" instruction
  /// sequence.
  Value *SimplifyShrShlDemandedBits(Instruction *Lsr, Instruction *Sftl,
                                    APInt DemandedMask, APInt &KnownZero,
                                    APInt &KnownOne);

  /// \brief Tries to simplify operands to an integer instruction based on its
  /// demanded bits.
  bool SimplifyDemandedInstructionBits(Instruction &Inst);

  Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
                                    APInt &UndefElts, unsigned Depth = 0);

  Value *SimplifyVectorOp(BinaryOperator &Inst);
  Value *SimplifyBSwap(BinaryOperator &Inst);

  // FoldOpIntoPhi - Given a binary operator, cast instruction, or select
  // which has a PHI node as operand #0, see if we can fold the instruction
  // into the PHI (which is only possible if all operands to the PHI are
  // constants).
  //
  Instruction *FoldOpIntoPhi(Instruction &I);

  /// \brief Try to rotate an operation below a PHI node, using PHI nodes for
  /// its operands.
  Instruction *FoldPHIArgOpIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgBinOpIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgGEPIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgLoadIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgZextsIntoPHI(PHINode &PN);

  Instruction *OptAndOp(Instruction *Op, ConstantInt *OpRHS,
                        ConstantInt *AndRHS, BinaryOperator &TheAnd);

  Value *FoldLogicalPlusAnd(Value *LHS, Value *RHS, ConstantInt *Mask,
                            bool isSub, Instruction &I);
  Value *InsertRangeTest(Value *V, Constant *Lo, Constant *Hi, bool isSigned,
                         bool Inside);
  Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocaInst &AI);
  Instruction *MatchBSwapOrBitReverse(BinaryOperator &I);
  bool SimplifyStoreAtEndOfBlock(StoreInst &SI);
  Instruction *SimplifyMemTransfer(MemIntrinsic *MI);
  Instruction *SimplifyMemSet(MemSetInst *MI);

  Value *EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned);

  /// \brief Returns a value X such that Val = X * Scale, or null if none.
  ///
  /// If the multiplication is known not to overflow then NoSignedWrap is set.
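  ///
  /// For example (illustrative only), descaling `%v = mul nsw i32 %a, 12` by a
  /// Scale of 4 can yield `mul nsw i32 %a, 3`, since (%a * 3) * 4 == %a * 12.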
  Value *Descale(Value *Val, APInt Scale, bool &NoSignedWrap);
};

} // end namespace llvm

#undef DEBUG_TYPE

#endif