1 //===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the implementation of the scalar evolution analysis
11 // engine, which is used primarily to analyze expressions involving induction
12 // variables in loops.
13 //
14 // There are several aspects to this library. First is the representation of
15 // scalar expressions, which are represented as subclasses of the SCEV class.
16 // These classes are used to represent certain types of subexpressions that we
17 // can handle. We only create one SCEV of a particular shape, so
18 // pointer-comparisons for equality are legal.
19 //
20 // One important aspect of the SCEV objects is that they are never cyclic, even
21 // if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
22 // the PHI node is one of the idioms that we can represent (e.g., a polynomial
23 // recurrence) then we represent it directly as a recurrence node, otherwise we
24 // represent it as a SCEVUnknown node.
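//
// For example (illustrative note, not from the original header): the loop
//
//   for (int i = 0; i != n; ++i) { ... }
//
// has an induction-variable PHI node whose SCEV is the affine add recurrence
// {0,+,1}<%loop>, i.e. a value that starts at 0 and increases by 1 each time
// the backedge is taken; this is the notation printed by SCEVAddRecExpr below.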
25 //
26 // In addition to being able to represent expressions of various types, we also
27 // have folders that are used to build the *canonical* representation for a
28 // particular expression. These folders are capable of using a variety of
29 // rewrite rules to simplify the expressions.
30 //
31 // Once the folders are defined, we can implement the more interesting
32 // higher-level code, such as the code that recognizes PHI nodes of various
33 // types, computes the execution count of a loop, etc.
34 //
35 // TODO: We should use these routines and value representations to implement
36 // dependence analysis!
37 //
38 //===----------------------------------------------------------------------===//
39 //
40 // There are several good references for the techniques used in this analysis.
41 //
42 // Chains of recurrences -- a method to expedite the evaluation
43 // of closed-form functions
44 // Olaf Bachmann, Paul S. Wang, Eugene V. Zima
45 //
46 // On computational properties of chains of recurrences
47 // Eugene V. Zima
48 //
49 // Symbolic Evaluation of Chains of Recurrences for Loop Optimization
50 // Robert A. van Engelen
51 //
52 // Efficient Symbolic Analysis for Optimizing Compilers
53 // Robert A. van Engelen
54 //
55 // Using the chains of recurrences algebra for data dependence testing and
56 // induction variable substitution
57 // MS Thesis, Johnie Birch
58 //
59 //===----------------------------------------------------------------------===//
60
61 #include "llvm/Analysis/ScalarEvolution.h"
62 #include "llvm/ADT/Optional.h"
63 #include "llvm/ADT/STLExtras.h"
64 #include "llvm/ADT/SmallPtrSet.h"
65 #include "llvm/ADT/Statistic.h"
66 #include "llvm/Analysis/AssumptionCache.h"
67 #include "llvm/Analysis/ConstantFolding.h"
68 #include "llvm/Analysis/InstructionSimplify.h"
69 #include "llvm/Analysis/LoopInfo.h"
70 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
71 #include "llvm/Analysis/TargetLibraryInfo.h"
72 #include "llvm/Analysis/ValueTracking.h"
73 #include "llvm/IR/ConstantRange.h"
74 #include "llvm/IR/Constants.h"
75 #include "llvm/IR/DataLayout.h"
76 #include "llvm/IR/DerivedTypes.h"
77 #include "llvm/IR/Dominators.h"
78 #include "llvm/IR/GetElementPtrTypeIterator.h"
79 #include "llvm/IR/GlobalAlias.h"
80 #include "llvm/IR/GlobalVariable.h"
81 #include "llvm/IR/InstIterator.h"
82 #include "llvm/IR/Instructions.h"
83 #include "llvm/IR/LLVMContext.h"
84 #include "llvm/IR/Metadata.h"
85 #include "llvm/IR/Operator.h"
86 #include "llvm/Support/CommandLine.h"
87 #include "llvm/Support/Debug.h"
88 #include "llvm/Support/ErrorHandling.h"
89 #include "llvm/Support/MathExtras.h"
90 #include "llvm/Support/raw_ostream.h"
91 #include <algorithm>
92 using namespace llvm;
93
94 #define DEBUG_TYPE "scalar-evolution"
95
96 STATISTIC(NumArrayLenItCounts,
97 "Number of trip counts computed with array length");
98 STATISTIC(NumTripCountsComputed,
99 "Number of loops with predictable loop counts");
100 STATISTIC(NumTripCountsNotComputed,
101 "Number of loops without predictable loop counts");
102 STATISTIC(NumBruteForceTripCountsComputed,
103 "Number of loops with trip counts computed by force");
104
105 static cl::opt<unsigned>
106 MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
107 cl::desc("Maximum number of iterations SCEV will "
108 "symbolically execute a constant "
109 "derived loop"),
110 cl::init(100));
111
112 // FIXME: Enable this with XDEBUG when the test suite is clean.
113 static cl::opt<bool>
114 VerifySCEV("verify-scev",
115 cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
116
117 INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution",
118 "Scalar Evolution Analysis", false, true)
119 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
120 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
121 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
122 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
123 INITIALIZE_PASS_END(ScalarEvolution, "scalar-evolution",
124 "Scalar Evolution Analysis", false, true)
125 char ScalarEvolution::ID = 0;
126
127 //===----------------------------------------------------------------------===//
128 // SCEV class definitions
129 //===----------------------------------------------------------------------===//
130
131 //===----------------------------------------------------------------------===//
132 // Implementation of the SCEV class.
133 //
134
135 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
136 void SCEV::dump() const {
137 print(dbgs());
138 dbgs() << '\n';
139 }
140 #endif
141
142 void SCEV::print(raw_ostream &OS) const {
143 switch (static_cast<SCEVTypes>(getSCEVType())) {
144 case scConstant:
145 cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
146 return;
147 case scTruncate: {
148 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
149 const SCEV *Op = Trunc->getOperand();
150 OS << "(trunc " << *Op->getType() << " " << *Op << " to "
151 << *Trunc->getType() << ")";
152 return;
153 }
154 case scZeroExtend: {
155 const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
156 const SCEV *Op = ZExt->getOperand();
157 OS << "(zext " << *Op->getType() << " " << *Op << " to "
158 << *ZExt->getType() << ")";
159 return;
160 }
161 case scSignExtend: {
162 const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
163 const SCEV *Op = SExt->getOperand();
164 OS << "(sext " << *Op->getType() << " " << *Op << " to "
165 << *SExt->getType() << ")";
166 return;
167 }
168 case scAddRecExpr: {
169 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
170 OS << "{" << *AR->getOperand(0);
171 for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
172 OS << ",+," << *AR->getOperand(i);
173 OS << "}<";
174 if (AR->getNoWrapFlags(FlagNUW))
175 OS << "nuw><";
176 if (AR->getNoWrapFlags(FlagNSW))
177 OS << "nsw><";
178 if (AR->getNoWrapFlags(FlagNW) &&
179 !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
180 OS << "nw><";
181 AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
182 OS << ">";
183 return;
184 }
185 case scAddExpr:
186 case scMulExpr:
187 case scUMaxExpr:
188 case scSMaxExpr: {
189 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
190 const char *OpStr = nullptr;
191 switch (NAry->getSCEVType()) {
192 case scAddExpr: OpStr = " + "; break;
193 case scMulExpr: OpStr = " * "; break;
194 case scUMaxExpr: OpStr = " umax "; break;
195 case scSMaxExpr: OpStr = " smax "; break;
196 }
197 OS << "(";
198 for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
199 I != E; ++I) {
200 OS << **I;
201 if (std::next(I) != E)
202 OS << OpStr;
203 }
204 OS << ")";
205 switch (NAry->getSCEVType()) {
206 case scAddExpr:
207 case scMulExpr:
208 if (NAry->getNoWrapFlags(FlagNUW))
209 OS << "<nuw>";
210 if (NAry->getNoWrapFlags(FlagNSW))
211 OS << "<nsw>";
212 }
213 return;
214 }
215 case scUDivExpr: {
216 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
217 OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
218 return;
219 }
220 case scUnknown: {
221 const SCEVUnknown *U = cast<SCEVUnknown>(this);
222 Type *AllocTy;
223 if (U->isSizeOf(AllocTy)) {
224 OS << "sizeof(" << *AllocTy << ")";
225 return;
226 }
227 if (U->isAlignOf(AllocTy)) {
228 OS << "alignof(" << *AllocTy << ")";
229 return;
230 }
231
232 Type *CTy;
233 Constant *FieldNo;
234 if (U->isOffsetOf(CTy, FieldNo)) {
235 OS << "offsetof(" << *CTy << ", ";
236 FieldNo->printAsOperand(OS, false);
237 OS << ")";
238 return;
239 }
240
241 // Otherwise just print it normally.
242 U->getValue()->printAsOperand(OS, false);
243 return;
244 }
245 case scCouldNotCompute:
246 OS << "***COULDNOTCOMPUTE***";
247 return;
248 }
249 llvm_unreachable("Unknown SCEV kind!");
250 }
251
252 Type *SCEV::getType() const {
253 switch (static_cast<SCEVTypes>(getSCEVType())) {
254 case scConstant:
255 return cast<SCEVConstant>(this)->getType();
256 case scTruncate:
257 case scZeroExtend:
258 case scSignExtend:
259 return cast<SCEVCastExpr>(this)->getType();
260 case scAddRecExpr:
261 case scMulExpr:
262 case scUMaxExpr:
263 case scSMaxExpr:
264 return cast<SCEVNAryExpr>(this)->getType();
265 case scAddExpr:
266 return cast<SCEVAddExpr>(this)->getType();
267 case scUDivExpr:
268 return cast<SCEVUDivExpr>(this)->getType();
269 case scUnknown:
270 return cast<SCEVUnknown>(this)->getType();
271 case scCouldNotCompute:
272 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
273 }
274 llvm_unreachable("Unknown SCEV kind!");
275 }
276
277 bool SCEV::isZero() const {
278 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
279 return SC->getValue()->isZero();
280 return false;
281 }
282
283 bool SCEV::isOne() const {
284 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
285 return SC->getValue()->isOne();
286 return false;
287 }
288
289 bool SCEV::isAllOnesValue() const {
290 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
291 return SC->getValue()->isAllOnesValue();
292 return false;
293 }
294
295 /// isNonConstantNegative - Return true if the specified scev is a negated
296 /// non-constant value, i.e. a multiply with a leading negative constant factor.
297 bool SCEV::isNonConstantNegative() const {
298 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
299 if (!Mul) return false;
300
301 // If there is a constant factor, it will be first.
302 const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
303 if (!SC) return false;
304
305 // Return true if the value is negative, this matches things like (-42 * V).
306 return SC->getValue()->getValue().isNegative();
307 }
308
309 SCEVCouldNotCompute::SCEVCouldNotCompute() :
310 SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}
311
312 bool SCEVCouldNotCompute::classof(const SCEV *S) {
313 return S->getSCEVType() == scCouldNotCompute;
314 }
315
316 const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
317 FoldingSetNodeID ID;
318 ID.AddInteger(scConstant);
319 ID.AddPointer(V);
320 void *IP = nullptr;
321 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
322 SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
323 UniqueSCEVs.InsertNode(S, IP);
324 return S;
325 }
326
327 const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
328 return getConstant(ConstantInt::get(getContext(), Val));
329 }
330
331 const SCEV *
332 ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
333 IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
334 return getConstant(ConstantInt::get(ITy, V, isSigned));
335 }
336
337 SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
338 unsigned SCEVTy, const SCEV *op, Type *ty)
339 : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}
340
341 SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
342 const SCEV *op, Type *ty)
343 : SCEVCastExpr(ID, scTruncate, op, ty) {
344 assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
345 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
346 "Cannot truncate non-integer value!");
347 }
348
349 SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
350 const SCEV *op, Type *ty)
351 : SCEVCastExpr(ID, scZeroExtend, op, ty) {
352 assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
353 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
354 "Cannot zero extend non-integer value!");
355 }
356
357 SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
358 const SCEV *op, Type *ty)
359 : SCEVCastExpr(ID, scSignExtend, op, ty) {
360 assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
361 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
362 "Cannot sign extend non-integer value!");
363 }
364
365 void SCEVUnknown::deleted() {
366 // Clear this SCEVUnknown from various maps.
367 SE->forgetMemoizedResults(this);
368
369 // Remove this SCEVUnknown from the uniquing map.
370 SE->UniqueSCEVs.RemoveNode(this);
371
372 // Release the value.
373 setValPtr(nullptr);
374 }
375
376 void SCEVUnknown::allUsesReplacedWith(Value *New) {
377 // Clear this SCEVUnknown from various maps.
378 SE->forgetMemoizedResults(this);
379
380 // Remove this SCEVUnknown from the uniquing map.
381 SE->UniqueSCEVs.RemoveNode(this);
382
383 // Update this SCEVUnknown to point to the new value. This is needed
384 // because there may still be outstanding SCEVs which still point to
385 // this SCEVUnknown.
386 setValPtr(New);
387 }
388
389 bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
390 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
391 if (VCE->getOpcode() == Instruction::PtrToInt)
392 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
393 if (CE->getOpcode() == Instruction::GetElementPtr &&
394 CE->getOperand(0)->isNullValue() &&
395 CE->getNumOperands() == 2)
396 if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
397 if (CI->isOne()) {
398 AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
399 ->getElementType();
400 return true;
401 }
402
403 return false;
404 }
405
406 bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
407 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
408 if (VCE->getOpcode() == Instruction::PtrToInt)
409 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
410 if (CE->getOpcode() == Instruction::GetElementPtr &&
411 CE->getOperand(0)->isNullValue()) {
412 Type *Ty =
413 cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
414 if (StructType *STy = dyn_cast<StructType>(Ty))
415 if (!STy->isPacked() &&
416 CE->getNumOperands() == 3 &&
417 CE->getOperand(1)->isNullValue()) {
418 if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
419 if (CI->isOne() &&
420 STy->getNumElements() == 2 &&
421 STy->getElementType(0)->isIntegerTy(1)) {
422 AllocTy = STy->getElementType(1);
423 return true;
424 }
425 }
426 }
427
428 return false;
429 }
430
431 bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
432 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
433 if (VCE->getOpcode() == Instruction::PtrToInt)
434 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
435 if (CE->getOpcode() == Instruction::GetElementPtr &&
436 CE->getNumOperands() == 3 &&
437 CE->getOperand(0)->isNullValue() &&
438 CE->getOperand(1)->isNullValue()) {
439 Type *Ty =
440 cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
441 // Ignore vector types here so that ScalarEvolutionExpander doesn't
442 // emit getelementptrs that index into vectors.
443 if (Ty->isStructTy() || Ty->isArrayTy()) {
444 CTy = Ty;
445 FieldNo = CE->getOperand(2);
446 return true;
447 }
448 }
449
450 return false;
451 }
452
453 //===----------------------------------------------------------------------===//
454 // SCEV Utilities
455 //===----------------------------------------------------------------------===//
456
457 namespace {
458 /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
459 /// than the complexity of the RHS. This comparator is used to canonicalize
460 /// expressions.
461 class SCEVComplexityCompare {
462 const LoopInfo *const LI;
463 public:
464 explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {}
465
466 // Return true if LHS is less than RHS; return false if LHS is at least RHS.
467 bool operator()(const SCEV *LHS, const SCEV *RHS) const {
468 return compare(LHS, RHS) < 0;
469 }
470
471 // Return negative, zero, or positive, if LHS is less than, equal to, or
472 // greater than RHS, respectively. A three-way result allows recursive
473 // comparisons to be more efficient.
474 int compare(const SCEV *LHS, const SCEV *RHS) const {
475 // Fast-path: SCEVs are uniqued so we can do a quick equality check.
476 if (LHS == RHS)
477 return 0;
478
479 // Primarily, sort the SCEVs by their getSCEVType().
480 unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
481 if (LType != RType)
482 return (int)LType - (int)RType;
483
484 // Aside from the getSCEVType() ordering, the particular ordering
485 // isn't very important except that it's beneficial to be consistent,
486 // so that (a + b) and (b + a) don't end up as different expressions.
487 switch (static_cast<SCEVTypes>(LType)) {
488 case scUnknown: {
489 const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
490 const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);
491
492 // Sort SCEVUnknown values with some loose heuristics. TODO: This is
493 // not as complete as it could be.
494 const Value *LV = LU->getValue(), *RV = RU->getValue();
495
496 // Order pointer values after integer values. This helps SCEVExpander
497 // form GEPs.
498 bool LIsPointer = LV->getType()->isPointerTy(),
499 RIsPointer = RV->getType()->isPointerTy();
500 if (LIsPointer != RIsPointer)
501 return (int)LIsPointer - (int)RIsPointer;
502
503 // Compare getValueID values.
504 unsigned LID = LV->getValueID(),
505 RID = RV->getValueID();
506 if (LID != RID)
507 return (int)LID - (int)RID;
508
509 // Sort arguments by their position.
510 if (const Argument *LA = dyn_cast<Argument>(LV)) {
511 const Argument *RA = cast<Argument>(RV);
512 unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
513 return (int)LArgNo - (int)RArgNo;
514 }
515
516 // For instructions, compare their loop depth, and their operand
517 // count. This is pretty loose.
518 if (const Instruction *LInst = dyn_cast<Instruction>(LV)) {
519 const Instruction *RInst = cast<Instruction>(RV);
520
521 // Compare loop depths.
522 const BasicBlock *LParent = LInst->getParent(),
523 *RParent = RInst->getParent();
524 if (LParent != RParent) {
525 unsigned LDepth = LI->getLoopDepth(LParent),
526 RDepth = LI->getLoopDepth(RParent);
527 if (LDepth != RDepth)
528 return (int)LDepth - (int)RDepth;
529 }
530
531 // Compare the number of operands.
532 unsigned LNumOps = LInst->getNumOperands(),
533 RNumOps = RInst->getNumOperands();
534 return (int)LNumOps - (int)RNumOps;
535 }
536
537 return 0;
538 }
539
540 case scConstant: {
541 const SCEVConstant *LC = cast<SCEVConstant>(LHS);
542 const SCEVConstant *RC = cast<SCEVConstant>(RHS);
543
544 // Compare constant values.
545 const APInt &LA = LC->getValue()->getValue();
546 const APInt &RA = RC->getValue()->getValue();
547 unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
548 if (LBitWidth != RBitWidth)
549 return (int)LBitWidth - (int)RBitWidth;
550 return LA.ult(RA) ? -1 : 1;
551 }
552
553 case scAddRecExpr: {
554 const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
555 const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
556
557 // Compare addrec loop depths.
558 const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
559 if (LLoop != RLoop) {
560 unsigned LDepth = LLoop->getLoopDepth(),
561 RDepth = RLoop->getLoopDepth();
562 if (LDepth != RDepth)
563 return (int)LDepth - (int)RDepth;
564 }
565
566 // Addrec complexity grows with operand count.
567 unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
568 if (LNumOps != RNumOps)
569 return (int)LNumOps - (int)RNumOps;
570
571 // Lexicographically compare.
572 for (unsigned i = 0; i != LNumOps; ++i) {
573 long X = compare(LA->getOperand(i), RA->getOperand(i));
574 if (X != 0)
575 return X;
576 }
577
578 return 0;
579 }
580
581 case scAddExpr:
582 case scMulExpr:
583 case scSMaxExpr:
584 case scUMaxExpr: {
585 const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
586 const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
587
588 // Lexicographically compare n-ary expressions.
589 unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
590 if (LNumOps != RNumOps)
591 return (int)LNumOps - (int)RNumOps;
592
593 for (unsigned i = 0; i != LNumOps; ++i) {
594 if (i >= RNumOps)
595 return 1;
596 long X = compare(LC->getOperand(i), RC->getOperand(i));
597 if (X != 0)
598 return X;
599 }
600 return (int)LNumOps - (int)RNumOps;
601 }
602
603 case scUDivExpr: {
604 const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
605 const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
606
607 // Lexicographically compare udiv expressions.
608 long X = compare(LC->getLHS(), RC->getLHS());
609 if (X != 0)
610 return X;
611 return compare(LC->getRHS(), RC->getRHS());
612 }
613
614 case scTruncate:
615 case scZeroExtend:
616 case scSignExtend: {
617 const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
618 const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
619
620 // Compare cast expressions by operand.
621 return compare(LC->getOperand(), RC->getOperand());
622 }
623
624 case scCouldNotCompute:
625 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
626 }
627 llvm_unreachable("Unknown SCEV kind!");
628 }
629 };
630 }
631
632 /// GroupByComplexity - Given a list of SCEV objects, order them by their
633 /// complexity, and group objects of the same complexity together by value.
634 /// When this routine is finished, we know that any duplicates in the vector are
635 /// consecutive and that complexity is monotonically increasing.
636 ///
637 /// Note that we take special precautions to ensure that we get deterministic
638 /// results from this routine. In other words, we don't want the results of
639 /// this to depend on where the addresses of various SCEV objects happened to
640 /// land in memory.
641 ///
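/// For example (illustrative): given the operand list (%x + 2 + %y + %x),
/// the sorted-and-grouped order is (2 + %x + %x + %y); constants order
/// before SCEVUnknowns because scConstant precedes scUnknown in SCEVTypes,
/// and the duplicate %x operands end up adjacent so callers can fold them.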
642 static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
643 LoopInfo *LI) {
644 if (Ops.size() < 2) return; // Noop
645 if (Ops.size() == 2) {
646 // This is the common case, which also happens to be trivially simple.
647 // Special case it.
648 const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
649 if (SCEVComplexityCompare(LI)(RHS, LHS))
650 std::swap(LHS, RHS);
651 return;
652 }
653
654 // Do the rough sort by complexity.
655 std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));
656
657 // Now that we are sorted by complexity, group elements of the same
658 // complexity. Note that this is, at worst, N^2, but the vector is likely to
659 // be extremely short in practice. Note that we take this approach because we
660 // do not want to depend on the addresses of the objects we are grouping.
661 for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
662 const SCEV *S = Ops[i];
663 unsigned Complexity = S->getSCEVType();
664
665 // If there are any objects of the same complexity and same value as this
666 // one, group them.
667 for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
668 if (Ops[j] == S) { // Found a duplicate.
669 // Move it to immediately after i'th element.
670 std::swap(Ops[i+1], Ops[j]);
671 ++i; // no need to rescan it.
672 if (i == e-2) return; // Done!
673 }
674 }
675 }
676 }
677
678 namespace {
679 struct FindSCEVSize {
680 int Size;
681 FindSCEVSize() : Size(0) {}
682
683 bool follow(const SCEV *S) {
684 ++Size;
685 // Keep looking at all operands of S.
686 return true;
687 }
688 bool isDone() const {
689 return false;
690 }
691 };
692 }
693
694 // Returns the size of the SCEV S.
695 static inline int sizeOfSCEV(const SCEV *S) {
696 FindSCEVSize F;
697 SCEVTraversal<FindSCEVSize> ST(F);
698 ST.visitAll(S);
699 return F.Size;
700 }
701
702 namespace {
703
704 struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
705 public:
706 // Computes the Quotient and Remainder of the division of Numerator by
707 // Denominator.
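//
// Illustrative example (added): dividing the product SCEV (4 * %n * %m) by
// (2 * %n) splits the denominator into its factors 2 and %n and divides by
// each in turn, giving Quotient == (2 * %m) and Remainder == 0. If some
// factor does not evenly divide the numerator, the division gives up with
// Quotient == 0 and Remainder == Numerator.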
708 static void divide(ScalarEvolution &SE, const SCEV *Numerator,
709 const SCEV *Denominator, const SCEV **Quotient,
710 const SCEV **Remainder) {
711 assert(Numerator && Denominator && "Uninitialized SCEV");
712
713 SCEVDivision D(SE, Numerator, Denominator);
714
715 // Check for the trivial case here to avoid having to check for it in the
716 // rest of the code.
717 if (Numerator == Denominator) {
718 *Quotient = D.One;
719 *Remainder = D.Zero;
720 return;
721 }
722
723 if (Numerator->isZero()) {
724 *Quotient = D.Zero;
725 *Remainder = D.Zero;
726 return;
727 }
728
729 // Split the Denominator when it is a product.
730 if (const SCEVMulExpr *T = dyn_cast<const SCEVMulExpr>(Denominator)) {
731 const SCEV *Q, *R;
732 *Quotient = Numerator;
733 for (const SCEV *Op : T->operands()) {
734 divide(SE, *Quotient, Op, &Q, &R);
735 *Quotient = Q;
736
737 // Bail out when the Numerator is not divisible by one of the terms of
738 // the Denominator.
739 if (!R->isZero()) {
740 *Quotient = D.Zero;
741 *Remainder = Numerator;
742 return;
743 }
744 }
745 *Remainder = D.Zero;
746 return;
747 }
748
749 D.visit(Numerator);
750 *Quotient = D.Quotient;
751 *Remainder = D.Remainder;
752 }
753
754 // Except in the trivial case described above, we do not know how to divide
755 // Expr by Denominator for the following functions with empty implementation.
756 void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
757 void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
758 void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
759 void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
760 void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
761 void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
762 void visitUnknown(const SCEVUnknown *Numerator) {}
763 void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}
764
765 void visitConstant(const SCEVConstant *Numerator) {
766 if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
767 APInt NumeratorVal = Numerator->getValue()->getValue();
768 APInt DenominatorVal = D->getValue()->getValue();
769 uint32_t NumeratorBW = NumeratorVal.getBitWidth();
770 uint32_t DenominatorBW = DenominatorVal.getBitWidth();
771
772 if (NumeratorBW > DenominatorBW)
773 DenominatorVal = DenominatorVal.sext(NumeratorBW);
774 else if (NumeratorBW < DenominatorBW)
775 NumeratorVal = NumeratorVal.sext(DenominatorBW);
776
777 APInt QuotientVal(NumeratorVal.getBitWidth(), 0);
778 APInt RemainderVal(NumeratorVal.getBitWidth(), 0);
779 APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
780 Quotient = SE.getConstant(QuotientVal);
781 Remainder = SE.getConstant(RemainderVal);
782 return;
783 }
784 }
785
786 void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
787 const SCEV *StartQ, *StartR, *StepQ, *StepR;
788 assert(Numerator->isAffine() && "Numerator should be affine");
789 divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
790 divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
791 Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
792 Numerator->getNoWrapFlags());
793 Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
794 Numerator->getNoWrapFlags());
795 }
796
797 void visitAddExpr(const SCEVAddExpr *Numerator) {
798 SmallVector<const SCEV *, 2> Qs, Rs;
799 Type *Ty = Denominator->getType();
800
801 for (const SCEV *Op : Numerator->operands()) {
802 const SCEV *Q, *R;
803 divide(SE, Op, Denominator, &Q, &R);
804
805 // Bail out if types do not match.
806 if (Ty != Q->getType() || Ty != R->getType()) {
807 Quotient = Zero;
808 Remainder = Numerator;
809 return;
810 }
811
812 Qs.push_back(Q);
813 Rs.push_back(R);
814 }
815
816 if (Qs.size() == 1) {
817 Quotient = Qs[0];
818 Remainder = Rs[0];
819 return;
820 }
821
822 Quotient = SE.getAddExpr(Qs);
823 Remainder = SE.getAddExpr(Rs);
824 }
825
826 void visitMulExpr(const SCEVMulExpr *Numerator) {
827 SmallVector<const SCEV *, 2> Qs;
828 Type *Ty = Denominator->getType();
829
830 bool FoundDenominatorTerm = false;
831 for (const SCEV *Op : Numerator->operands()) {
832 // Bail out if types do not match.
833 if (Ty != Op->getType()) {
834 Quotient = Zero;
835 Remainder = Numerator;
836 return;
837 }
838
839 if (FoundDenominatorTerm) {
840 Qs.push_back(Op);
841 continue;
842 }
843
844 // Check whether Denominator divides one of the product operands.
845 const SCEV *Q, *R;
846 divide(SE, Op, Denominator, &Q, &R);
847 if (!R->isZero()) {
848 Qs.push_back(Op);
849 continue;
850 }
851
852 // Bail out if types do not match.
853 if (Ty != Q->getType()) {
854 Quotient = Zero;
855 Remainder = Numerator;
856 return;
857 }
858
859 FoundDenominatorTerm = true;
860 Qs.push_back(Q);
861 }
862
863 if (FoundDenominatorTerm) {
864 Remainder = Zero;
865 if (Qs.size() == 1)
866 Quotient = Qs[0];
867 else
868 Quotient = SE.getMulExpr(Qs);
869 return;
870 }
871
872 if (!isa<SCEVUnknown>(Denominator)) {
873 Quotient = Zero;
874 Remainder = Numerator;
875 return;
876 }
877
878 // The Remainder is obtained by replacing Denominator by 0 in Numerator.
879 ValueToValueMap RewriteMap;
880 RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
881 cast<SCEVConstant>(Zero)->getValue();
882 Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
883
884 if (Remainder->isZero()) {
885 // The Quotient is obtained by replacing Denominator by 1 in Numerator.
886 RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
887 cast<SCEVConstant>(One)->getValue();
888 Quotient =
889 SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
890 return;
891 }
892
893 // Quotient is (Numerator - Remainder) divided by Denominator.
894 const SCEV *Q, *R;
895 const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
896 if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator)) {
897 // This SCEV does not seem to simplify: fail the division here.
898 Quotient = Zero;
899 Remainder = Numerator;
900 return;
901 }
902 divide(SE, Diff, Denominator, &Q, &R);
903 assert(R == Zero &&
904 "(Numerator - Remainder) should evenly divide Denominator");
905 Quotient = Q;
906 }
907
908 private:
909 SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
910 const SCEV *Denominator)
911 : SE(S), Denominator(Denominator) {
912 Zero = SE.getConstant(Denominator->getType(), 0);
913 One = SE.getConstant(Denominator->getType(), 1);
914
915 // By default, we don't know how to divide Expr by Denominator.
916 // Providing the default here simplifies the rest of the code.
917 Quotient = Zero;
918 Remainder = Numerator;
919 }
920
921 ScalarEvolution &SE;
922 const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
923 };
924
925 }
926
927 //===----------------------------------------------------------------------===//
928 // Simple SCEV method implementations
929 //===----------------------------------------------------------------------===//
930
931 /// BinomialCoefficient - Compute BC(It, K). The result has width W.
932 /// Assumes K > 0.
933 static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
934 ScalarEvolution &SE,
935 Type *ResultTy) {
936 // Handle the simplest case efficiently.
937 if (K == 1)
938 return SE.getTruncateOrZeroExtend(It, ResultTy);
939
940 // We are using the following formula for BC(It, K):
941 //
942 // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
943 //
944 // Suppose W is the bitwidth of the return value. We must be prepared for
945 // overflow. Hence, we must assure that the result of our computation is
946 // equal to the accurate one modulo 2^W. Unfortunately, division isn't
947 // safe in modular arithmetic.
948 //
949 // However, this code doesn't use exactly that formula; the formula it uses
950 // is something like the following, where T is the number of factors of 2 in
951 // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
952 // exponentiation:
953 //
954 // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
955 //
956 // This formula is trivially equivalent to the previous formula. However,
957 // this formula can be implemented much more efficiently. The trick is that
958 // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
959 // arithmetic. To do exact division in modular arithmetic, all we have
960 // to do is multiply by the inverse. Therefore, this step can be done at
961 // width W.
962 //
963 // The next issue is how to safely do the division by 2^T. The way this
964 // is done is by doing the multiplication step at a width of at least W + T
965 // bits. This way, the bottom W+T bits of the product are accurate. Then,
966 // when we perform the division by 2^T (which is equivalent to a right shift
967 // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
968 // truncated out after the division by 2^T.
969 //
970 // In comparison to just directly using the first formula, this technique
971 // is much more efficient; using the first formula requires W * K bits,
972 // but this formula requires less than W + K bits. Also, the first formula requires
973 // a division step, whereas this formula only requires multiplies and shifts.
974 //
975 // It doesn't matter whether the subtraction step is done in the calculation
976 // width or the input iteration count's width; if the subtraction overflows,
977 // the result must be zero anyway. We prefer here to do it in the width of
978 // the induction variable because it helps a lot for certain cases; CodeGen
979 // isn't smart enough to ignore the overflow, which leads to much less
980 // efficient code if the width of the subtraction is wider than the native
981 // register width.
982 //
983 // (It's possible to not widen at all by pulling out factors of 2 before
984 // the multiplication; for example, K=2 can be calculated as
985 // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
986 // extra arithmetic, so it's not an obvious win, and it gets
987 // much more complicated for K > 3.)
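//
// Worked example (added for clarity, following the code below): for K == 2 we
// have K! == 2, T == 1 and K!/2^T == 1, so MultiplyFactor == 1. The dividend
// It*(It-1) is computed at width W+1, divided by 2^T == 2, and truncated back
// to W bits, giving BC(It, 2) == trunc_W((It * (It - 1)) /u 2).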
988
989 // Protection from insane SCEVs; this bound is conservative,
990 // but it probably doesn't matter.
991 if (K > 1000)
992 return SE.getCouldNotCompute();
993
994 unsigned W = SE.getTypeSizeInBits(ResultTy);
995
996 // Calculate K! / 2^T and T; we divide out the factors of two before
997 // multiplying for calculating K! / 2^T to avoid overflow.
998 // Other overflow doesn't matter because we only care about the bottom
999 // W bits of the result.
1000 APInt OddFactorial(W, 1);
1001 unsigned T = 1;
1002 for (unsigned i = 3; i <= K; ++i) {
1003 APInt Mult(W, i);
1004 unsigned TwoFactors = Mult.countTrailingZeros();
1005 T += TwoFactors;
1006 Mult = Mult.lshr(TwoFactors);
1007 OddFactorial *= Mult;
1008 }
1009
1010 // We need at least W + T bits for the multiplication step
1011 unsigned CalculationBits = W + T;
1012
1013 // Calculate 2^T, at width T+W.
1014 APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);
1015
1016 // Calculate the multiplicative inverse of K! / 2^T;
1017 // this multiplication factor will perform the exact division by
1018 // K! / 2^T.
1019 APInt Mod = APInt::getSignedMinValue(W+1);
1020 APInt MultiplyFactor = OddFactorial.zext(W+1);
1021 MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
1022 MultiplyFactor = MultiplyFactor.trunc(W);
1023
1024 // Calculate the product, at width T+W
1025 IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
1026 CalculationBits);
1027 const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
1028 for (unsigned i = 1; i != K; ++i) {
1029 const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
1030 Dividend = SE.getMulExpr(Dividend,
1031 SE.getTruncateOrZeroExtend(S, CalculationTy));
1032 }
1033
1034 // Divide by 2^T
1035 const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));
1036
1037 // Truncate the result, and divide by K! / 2^T.
1038
1039 return SE.getMulExpr(SE.getConstant(MultiplyFactor),
1040 SE.getTruncateOrZeroExtend(DivResult, ResultTy));
1041 }
1042
1043 /// evaluateAtIteration - Return the value of this chain of recurrences at
1044 /// the specified iteration number. We can evaluate this recurrence by
1045 /// multiplying each element in the chain by the binomial coefficient
1046 /// corresponding to it. In other words, we can evaluate {A,+,B,+,C,+,D} as:
1047 ///
1048 /// A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
1049 ///
1050 /// where BC(It, k) stands for binomial coefficient.
1051 ///
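/// For instance (illustrative), the affine recurrence {A,+,B} evaluates at
/// iteration It to A*BC(It,0) + B*BC(It,1) == A + B*It, and {A,+,B,+,C}
/// evaluates to A + B*It + C*(It*(It-1)/2).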
1052 const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
1053 ScalarEvolution &SE) const {
1054 const SCEV *Result = getStart();
1055 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1056 // The computation is correct in the face of overflow provided that the
1057 // multiplication is performed _after_ the evaluation of the binomial
1058 // coefficient.
1059 const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
1060 if (isa<SCEVCouldNotCompute>(Coeff))
1061 return Coeff;
1062
1063 Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
1064 }
1065 return Result;
1066 }
1067
1068 //===----------------------------------------------------------------------===//
1069 // SCEV Expression folder implementations
1070 //===----------------------------------------------------------------------===//
1071
1072 const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
1073 Type *Ty) {
1074 assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
1075 "This is not a truncating conversion!");
1076 assert(isSCEVable(Ty) &&
1077 "This is not a conversion to a SCEVable type!");
1078 Ty = getEffectiveSCEVType(Ty);
1079
1080 FoldingSetNodeID ID;
1081 ID.AddInteger(scTruncate);
1082 ID.AddPointer(Op);
1083 ID.AddPointer(Ty);
1084 void *IP = nullptr;
1085 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1086
1087 // Fold if the operand is constant.
1088 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1089 return getConstant(
1090 cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));
1091
1092 // trunc(trunc(x)) --> trunc(x)
1093 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
1094 return getTruncateExpr(ST->getOperand(), Ty);
1095
1096 // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
1097 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
1098 return getTruncateOrSignExtend(SS->getOperand(), Ty);
1099
1100 // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
1101 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
1102 return getTruncateOrZeroExtend(SZ->getOperand(), Ty);
1103
1104 // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
1105 // eliminate all the truncates, or we replace other casts with truncates.
1106 if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
1107 SmallVector<const SCEV *, 4> Operands;
1108 bool hasTrunc = false;
1109 for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
1110 const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
1111 if (!isa<SCEVCastExpr>(SA->getOperand(i)))
1112 hasTrunc = isa<SCEVTruncateExpr>(S);
1113 Operands.push_back(S);
1114 }
1115 if (!hasTrunc)
1116 return getAddExpr(Operands);
1117 UniqueSCEVs.FindNodeOrInsertPos(ID, IP); // Mutates IP, returns NULL.
1118 }
1119
1120 // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
1121 // eliminate all the truncates, or we replace other casts with truncates.
1122 if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
1123 SmallVector<const SCEV *, 4> Operands;
1124 bool hasTrunc = false;
1125 for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
1126 const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
1127 if (!isa<SCEVCastExpr>(SM->getOperand(i)))
1128 hasTrunc = isa<SCEVTruncateExpr>(S);
1129 Operands.push_back(S);
1130 }
1131 if (!hasTrunc)
1132 return getMulExpr(Operands);
1133 UniqueSCEVs.FindNodeOrInsertPos(ID, IP); // Mutates IP, returns NULL.
1134 }
1135
1136 // If the input value is a chrec scev, truncate the chrec's operands.
1137 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
1138 SmallVector<const SCEV *, 4> Operands;
1139 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
1140 Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
1141 return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
1142 }
1143
1144 // The cast wasn't folded; create an explicit cast node. We can reuse
1145 // the existing insert position since if we get here, we won't have
1146 // made any changes which would invalidate it.
1147 SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
1148 Op, Ty);
1149 UniqueSCEVs.InsertNode(S, IP);
1150 return S;
1151 }
1152
1153 // Get the limit of a recurrence such that incrementing by Step cannot cause
1154 // signed overflow as long as the value of the recurrence within the
1155 // loop does not exceed this limit before incrementing.
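// For example (illustrative): for an i8 recurrence whose step is known to be
// positive and at most 3, this returns the constant -128 - 3 == 125 (mod 256)
// together with Pred == ICMP_SLT; while the recurrence stays signed-less-than
// 125 the next increment can reach at most 127 and so cannot overflow.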
1156 static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
1157 ICmpInst::Predicate *Pred,
1158 ScalarEvolution *SE) {
1159 unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
1160 if (SE->isKnownPositive(Step)) {
1161 *Pred = ICmpInst::ICMP_SLT;
1162 return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
1163 SE->getSignedRange(Step).getSignedMax());
1164 }
1165 if (SE->isKnownNegative(Step)) {
1166 *Pred = ICmpInst::ICMP_SGT;
1167 return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
1168 SE->getSignedRange(Step).getSignedMin());
1169 }
1170 return nullptr;
1171 }
1172
1173 // Get the limit of a recurrence such that incrementing by Step cannot cause
1174 // unsigned overflow as long as the value of the recurrence within the loop does
1175 // not exceed this limit before incrementing.
1176 static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
1177 ICmpInst::Predicate *Pred,
1178 ScalarEvolution *SE) {
1179 unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
1180 *Pred = ICmpInst::ICMP_ULT;
1181
1182 return SE->getConstant(APInt::getMinValue(BitWidth) -
1183 SE->getUnsignedRange(Step).getUnsignedMax());
1184 }
1185
1186 namespace {
1187
1188 struct ExtendOpTraitsBase {
1189 typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *);
1190 };
1191
1192 // Used to make code generic over signed and unsigned overflow.
1193 template <typename ExtendOp> struct ExtendOpTraits {
1194 // Members present:
1195 //
1196 // static const SCEV::NoWrapFlags WrapType;
1197 //
1198 // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
1199 //
1200 // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
1201 // ICmpInst::Predicate *Pred,
1202 // ScalarEvolution *SE);
1203 };
1204
1205 template <>
1206 struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
1207 static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;
1208
1209 static const GetExtendExprTy GetExtendExpr;
1210
1211 static const SCEV *getOverflowLimitForStep(const SCEV *Step,
1212 ICmpInst::Predicate *Pred,
1213 ScalarEvolution *SE) {
1214 return getSignedOverflowLimitForStep(Step, Pred, SE);
1215 }
1216 };
1217
1218 const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1219 SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;
1220
1221 template <>
1222 struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
1223 static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;
1224
1225 static const GetExtendExprTy GetExtendExpr;
1226
1227 static const SCEV *getOverflowLimitForStep(const SCEV *Step,
1228 ICmpInst::Predicate *Pred,
1229 ScalarEvolution *SE) {
1230 return getUnsignedOverflowLimitForStep(Step, Pred, SE);
1231 }
1232 };
1233
1234 const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1235 SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;
1236 }
1237
1238 // The recurrence AR has been shown to have no signed/unsigned wrap or something
1239 // close to it. Typically, if we can prove NSW/NUW for AR, then we can just as
1240 // easily prove NSW/NUW for its preincrement or postincrement sibling. This
1241 // allows normalizing a sign/zero extended AddRec as such:
1242 //   {sext/zext(Step + Start),+,Step} => {(Step + sext/zext(Start)),+,Step}
1243 // As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
1244 // "sext/zext(PostIncAR)".
1245 template <typename ExtendOpTy>
1246 static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
1247 ScalarEvolution *SE) {
1248 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1249 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1250
1251 const Loop *L = AR->getLoop();
1252 const SCEV *Start = AR->getStart();
1253 const SCEV *Step = AR->getStepRecurrence(*SE);
1254
1255 // Check for a simple looking step prior to loop entry.
1256 const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
1257 if (!SA)
1258 return nullptr;
1259
1260 // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
1261 // subtraction is expensive. For this purpose, perform a quick and dirty
1262 // difference, by checking for Step in the operand list.
1263 SmallVector<const SCEV *, 4> DiffOps;
1264 for (const SCEV *Op : SA->operands())
1265 if (Op != Step)
1266 DiffOps.push_back(Op);
1267
1268 if (DiffOps.size() == SA->getNumOperands())
1269 return nullptr;
1270
1271 // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
1272 // `Step`:
1273
1274 // 1. NSW/NUW flags on the step increment.
1275 const SCEV *PreStart = SE->getAddExpr(DiffOps, SA->getNoWrapFlags());
1276 const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
1277 SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));
1278
1279 // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
1280 // "S+X does not sign/unsign-overflow".
1281 //
1282
1283 const SCEV *BECount = SE->getBackedgeTakenCount(L);
1284 if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
1285 !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
1286 return PreStart;
1287
1288 // 2. Direct overflow check on the step operation's expression.
1289 unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
1290 Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
1291 const SCEV *OperandExtendedStart =
1292 SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy),
1293 (SE->*GetExtendExpr)(Step, WideTy));
1294 if ((SE->*GetExtendExpr)(Start, WideTy) == OperandExtendedStart) {
1295 if (PreAR && AR->getNoWrapFlags(WrapType)) {
1296 // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
1297 // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
1298 // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
1299 const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
1300 }
1301 return PreStart;
1302 }
1303
1304 // 3. Loop precondition.
1305 ICmpInst::Predicate Pred;
1306 const SCEV *OverflowLimit =
1307 ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);
1308
1309 if (OverflowLimit &&
1310 SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit)) {
1311 return PreStart;
1312 }
1313 return nullptr;
1314 }
1315
1316 // Get the normalized zero or sign extended expression for this AddRec's Start.
1317 template <typename ExtendOpTy>
1318 static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
1319 ScalarEvolution *SE) {
1320 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1321
1322 const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE);
1323 if (!PreStart)
1324 return (SE->*GetExtendExpr)(AR->getStart(), Ty);
1325
1326 return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty),
1327 (SE->*GetExtendExpr)(PreStart, Ty));
1328 }
1329
1330 // Try to prove away overflow by looking at "nearby" add recurrences. A
1331 // motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
1332 // does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
1333 //
1334 // Formally:
1335 //
1336 // {S,+,X} == {S-T,+,X} + T
1337 // => Ext({S,+,X}) == Ext({S-T,+,X} + T)
1338 //
1339 // If ({S-T,+,X} + T) does not overflow ... (1)
1340 //
1341 // RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
1342 //
1343 // If {S-T,+,X} does not overflow ... (2)
1344 //
1345 // RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
1346 // == {Ext(S-T)+Ext(T),+,Ext(X)}
1347 //
1348 // If (S-T)+T does not overflow ... (3)
1349 //
1350 // RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
1351 // == {Ext(S),+,Ext(X)} == LHS
1352 //
1353 // Thus, if (1), (2) and (3) are true for some T, then
1354 // Ext({S,+,X}) == {Ext(S),+,Ext(X)}
1355 //
1356 // (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
1357 // does not overflow" restricted to the 0th iteration. Therefore we only need
1358 // to check for (1) and (2).
1359 //
1360 // In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
1361 // is `Delta` (defined below).
1362 //
1363 template <typename ExtendOpTy>
1364 bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
1365 const SCEV *Step,
1366 const Loop *L) {
1367 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1368
1369 // We restrict `Start` to a constant to prevent SCEV from spending too much
1370 // time here. It is correct (but more expensive) to continue with a
1371 // non-constant `Start` and do a general SCEV subtraction to compute
1372 // `PreStart` below.
1373 //
1374 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
1375 if (!StartC)
1376 return false;
1377
1378 APInt StartAI = StartC->getValue()->getValue();
1379
1380 for (unsigned Delta : {-2, -1, 1, 2}) {
1381 const SCEV *PreStart = getConstant(StartAI - Delta);
1382
1383 // Give up if we don't already have the add recurrence we need because
1384 // actually constructing an add recurrence is relatively expensive.
1385 const SCEVAddRecExpr *PreAR = [&]() {
1386 FoldingSetNodeID ID;
1387 ID.AddInteger(scAddRecExpr);
1388 ID.AddPointer(PreStart);
1389 ID.AddPointer(Step);
1390 ID.AddPointer(L);
1391 void *IP = nullptr;
1392 return static_cast<SCEVAddRecExpr *>(
1393 this->UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
1394 }();
1395
1396 if (PreAR && PreAR->getNoWrapFlags(WrapType)) { // proves (2)
1397 const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
1398 ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
1399 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
1400 DeltaS, &Pred, this);
1401 if (Limit && isKnownPredicate(Pred, PreAR, Limit)) // proves (1)
1402 return true;
1403 }
1404 }
1405
1406 return false;
1407 }
1408
1409 const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
1410 Type *Ty) {
1411 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1412 "This is not an extending conversion!");
1413 assert(isSCEVable(Ty) &&
1414 "This is not a conversion to a SCEVable type!");
1415 Ty = getEffectiveSCEVType(Ty);
1416
1417 // Fold if the operand is constant.
1418 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1419 return getConstant(
1420 cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));
1421
1422 // zext(zext(x)) --> zext(x)
1423 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
1424 return getZeroExtendExpr(SZ->getOperand(), Ty);
1425
1426 // Before doing any expensive analysis, check to see if we've already
1427 // computed a SCEV for this Op and Ty.
1428 FoldingSetNodeID ID;
1429 ID.AddInteger(scZeroExtend);
1430 ID.AddPointer(Op);
1431 ID.AddPointer(Ty);
1432 void *IP = nullptr;
1433 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1434
1435 // zext(trunc(x)) --> zext(x) or x or trunc(x)
1436 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
1437 // It's possible the bits taken off by the truncate were all zero bits. If
1438 // so, we should be able to simplify this further.
1439 const SCEV *X = ST->getOperand();
1440 ConstantRange CR = getUnsignedRange(X);
1441 unsigned TruncBits = getTypeSizeInBits(ST->getType());
1442 unsigned NewBits = getTypeSizeInBits(Ty);
1443 if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
1444 CR.zextOrTrunc(NewBits)))
1445 return getTruncateOrZeroExtend(X, Ty);
1446 }
1447
1448 // If the input value is a chrec scev, and we can prove that the value
1449 // did not overflow the old, smaller, value, we can zero extend all of the
1450 // operands (often constants). This allows analysis of something like
1451 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
1452 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1453 if (AR->isAffine()) {
1454 const SCEV *Start = AR->getStart();
1455 const SCEV *Step = AR->getStepRecurrence(*this);
1456 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1457 const Loop *L = AR->getLoop();
1458
1459 // If we have special knowledge that this addrec won't overflow,
1460 // we don't need to do any further analysis.
1461 if (AR->getNoWrapFlags(SCEV::FlagNUW))
1462 return getAddRecExpr(
1463 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
1464 getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1465
1466 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1467 // Note that this serves two purposes: It filters out loops that are
1468 // simply not analyzable, and it covers the case where this code is
1469 // being called from within backedge-taken count analysis, such that
1470 // attempting to ask for the backedge-taken count would likely result
1471 // in infinite recursion. In the latter case, the analysis code will
1472 // cope with a conservative value, and it will take care to purge
1473 // that value once it has finished.
1474 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
1475 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1476 // Manually compute the final value for AR, checking for
1477 // overflow.
1478
1479         // Check whether the backedge-taken count can be losslessly cast to
1480 // the addrec's type. The count is always unsigned.
1481 const SCEV *CastedMaxBECount =
1482 getTruncateOrZeroExtend(MaxBECount, Start->getType());
1483 const SCEV *RecastedMaxBECount =
1484 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
1485 if (MaxBECount == RecastedMaxBECount) {
1486 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1487 // Check whether Start+Step*MaxBECount has no unsigned overflow.
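          // (The check below works by widening: if zext(Start + Step*MaxBECount)
          // computed in the narrow type equals the same sum formed from
          // zero-extended operands in a type twice as wide, the narrow
          // computation cannot have wrapped.)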
1488 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
1489 const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul), WideTy);
1490 const SCEV *WideStart = getZeroExtendExpr(Start, WideTy);
1491 const SCEV *WideMaxBECount =
1492 getZeroExtendExpr(CastedMaxBECount, WideTy);
1493 const SCEV *OperandExtendedAdd =
1494 getAddExpr(WideStart,
1495 getMulExpr(WideMaxBECount,
1496 getZeroExtendExpr(Step, WideTy)));
1497 if (ZAdd == OperandExtendedAdd) {
1498 // Cache knowledge of AR NUW, which is propagated to this AddRec.
1499 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
1500 // Return the expression with the addrec on the outside.
1501 return getAddRecExpr(
1502 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
1503 getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1504 }
1505 // Similar to above, only this time treat the step value as signed.
1506 // This covers loops that count down.
1507 OperandExtendedAdd =
1508 getAddExpr(WideStart,
1509 getMulExpr(WideMaxBECount,
1510 getSignExtendExpr(Step, WideTy)));
1511 if (ZAdd == OperandExtendedAdd) {
1512 // Cache knowledge of AR NW, which is propagated to this AddRec.
1513 // Negative step causes unsigned wrap, but it still can't self-wrap.
1514 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
1515 // Return the expression with the addrec on the outside.
1516 return getAddRecExpr(
1517 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
1518 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1519 }
1520 }
1521
1522 // If the backedge is guarded by a comparison with the pre-inc value
1523 // the addrec is safe. Also, if the entry is guarded by a comparison
1524 // with the start value and the backedge is guarded by a comparison
1525 // with the post-inc value, the addrec is safe.
1526 if (isKnownPositive(Step)) {
1527 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
1528 getUnsignedRange(Step).getUnsignedMax());
1529 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
1530 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
1531 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
1532 AR->getPostIncExpr(*this), N))) {
1533 // Cache knowledge of AR NUW, which is propagated to this AddRec.
1534 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
1535 // Return the expression with the addrec on the outside.
1536 return getAddRecExpr(
1537 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
1538 getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1539 }
1540 } else if (isKnownNegative(Step)) {
1541 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
1542 getSignedRange(Step).getSignedMin());
1543 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
1544 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
1545 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
1546 AR->getPostIncExpr(*this), N))) {
1547 // Cache knowledge of AR NW, which is propagated to this AddRec.
1548 // Negative step causes unsigned wrap, but it still can't self-wrap.
1549 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
1550 // Return the expression with the addrec on the outside.
1551 return getAddRecExpr(
1552 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
1553 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1554 }
1555 }
1556 }
1557
1558 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1559 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
1560 return getAddRecExpr(
1561 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
1562 getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1563 }
1564 }
1565
1566 // The cast wasn't folded; create an explicit cast node.
1567 // Recompute the insert position, as it may have been invalidated.
1568 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1569 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
1570 Op, Ty);
1571 UniqueSCEVs.InsertNode(S, IP);
1572 return S;
1573 }
1574
1575 const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
1576 Type *Ty) {
1577 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1578 "This is not an extending conversion!");
1579 assert(isSCEVable(Ty) &&
1580 "This is not a conversion to a SCEVable type!");
1581 Ty = getEffectiveSCEVType(Ty);
1582
1583 // Fold if the operand is constant.
1584 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1585 return getConstant(
1586 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));
1587
1588 // sext(sext(x)) --> sext(x)
1589 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
1590 return getSignExtendExpr(SS->getOperand(), Ty);
1591
1592 // sext(zext(x)) --> zext(x)
1593 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
1594 return getZeroExtendExpr(SZ->getOperand(), Ty);
1595
1596 // Before doing any expensive analysis, check to see if we've already
1597 // computed a SCEV for this Op and Ty.
1598 FoldingSetNodeID ID;
1599 ID.AddInteger(scSignExtend);
1600 ID.AddPointer(Op);
1601 ID.AddPointer(Ty);
1602 void *IP = nullptr;
1603 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1604
1605 // If the input value is provably positive, build a zext instead.
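  // (For a value known to be non-negative, sign and zero extension produce the
  // same result, and the zext form is simpler to reason about.)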
1606 if (isKnownNonNegative(Op))
1607 return getZeroExtendExpr(Op, Ty);
1608
1609 // sext(trunc(x)) --> sext(x) or x or trunc(x)
1610 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
1611 // It's possible the bits taken off by the truncate were all sign bits. If
1612 // so, we should be able to simplify this further.
1613 const SCEV *X = ST->getOperand();
1614 ConstantRange CR = getSignedRange(X);
1615 unsigned TruncBits = getTypeSizeInBits(ST->getType());
1616 unsigned NewBits = getTypeSizeInBits(Ty);
1617 if (CR.truncate(TruncBits).signExtend(NewBits).contains(
1618 CR.sextOrTrunc(NewBits)))
1619 return getTruncateOrSignExtend(X, Ty);
1620 }
1621
1622 // sext(C1 + (C2 * x)) --> C1 + sext(C2 * x) if C1 < C2
1623 if (auto SA = dyn_cast<SCEVAddExpr>(Op)) {
1624 if (SA->getNumOperands() == 2) {
1625 auto SC1 = dyn_cast<SCEVConstant>(SA->getOperand(0));
1626 auto SMul = dyn_cast<SCEVMulExpr>(SA->getOperand(1));
1627 if (SMul && SC1) {
1628 if (auto SC2 = dyn_cast<SCEVConstant>(SMul->getOperand(0))) {
1629 const APInt &C1 = SC1->getValue()->getValue();
1630 const APInt &C2 = SC2->getValue()->getValue();
1631 if (C1.isStrictlyPositive() && C2.isStrictlyPositive() &&
1632 C2.ugt(C1) && C2.isPowerOf2())
1633 return getAddExpr(getSignExtendExpr(SC1, Ty),
1634 getSignExtendExpr(SMul, Ty));
1635 }
1636 }
1637 }
1638 }
1639   // If the input value is a chrec scev, and we can prove that the value
1640   // did not overflow in the old, smaller type, we can sign extend all of the
1641 // operands (often constants). This allows analysis of something like
1642 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
1643 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1644 if (AR->isAffine()) {
1645 const SCEV *Start = AR->getStart();
1646 const SCEV *Step = AR->getStepRecurrence(*this);
1647 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1648 const Loop *L = AR->getLoop();
1649
1650 // If we have special knowledge that this addrec won't overflow,
1651 // we don't need to do any further analysis.
1652 if (AR->getNoWrapFlags(SCEV::FlagNSW))
1653 return getAddRecExpr(
1654 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
1655 getSignExtendExpr(Step, Ty), L, SCEV::FlagNSW);
1656
1657       // Check whether the backedge-taken count is SCEVCouldNotCompute.
1658       // Note that this serves two purposes: It filters out loops that are
1659       // simply not analyzable, and it covers the case where this code is
1660       // being called from within backedge-taken count analysis, such that
1661       // attempting to ask for the backedge-taken count would likely result
1662       // in infinite recursion. In the latter case, the analysis code will
1663       // cope with a conservative value, and it will take care to purge
1664       // that value once it has finished.
1665 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
1666 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1667 // Manually compute the final value for AR, checking for
1668 // overflow.
1669
1670         // Check whether the backedge-taken count can be losslessly cast to
1671 // the addrec's type. The count is always unsigned.
1672 const SCEV *CastedMaxBECount =
1673 getTruncateOrZeroExtend(MaxBECount, Start->getType());
1674 const SCEV *RecastedMaxBECount =
1675 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
1676 if (MaxBECount == RecastedMaxBECount) {
1677 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1678 // Check whether Start+Step*MaxBECount has no signed overflow.
1679 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
1680 const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul), WideTy);
1681 const SCEV *WideStart = getSignExtendExpr(Start, WideTy);
1682 const SCEV *WideMaxBECount =
1683 getZeroExtendExpr(CastedMaxBECount, WideTy);
1684 const SCEV *OperandExtendedAdd =
1685 getAddExpr(WideStart,
1686 getMulExpr(WideMaxBECount,
1687 getSignExtendExpr(Step, WideTy)));
1688 if (SAdd == OperandExtendedAdd) {
1689 // Cache knowledge of AR NSW, which is propagated to this AddRec.
1690 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1691 // Return the expression with the addrec on the outside.
1692 return getAddRecExpr(
1693 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
1694 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1695 }
1696 // Similar to above, only this time treat the step value as unsigned.
1697 // This covers loops that count up with an unsigned step.
1698 OperandExtendedAdd =
1699 getAddExpr(WideStart,
1700 getMulExpr(WideMaxBECount,
1701 getZeroExtendExpr(Step, WideTy)));
1702 if (SAdd == OperandExtendedAdd) {
1703 // If AR wraps around then
1704 //
1705 // abs(Step) * MaxBECount > unsigned-max(AR->getType())
1706 // => SAdd != OperandExtendedAdd
1707 //
1708 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
1709 // (SAdd == OperandExtendedAdd => AR is NW)
1710
1711 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
1712
1713 // Return the expression with the addrec on the outside.
1714 return getAddRecExpr(
1715 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
1716 getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1717 }
1718 }
1719
1720 // If the backedge is guarded by a comparison with the pre-inc value
1721 // the addrec is safe. Also, if the entry is guarded by a comparison
1722 // with the start value and the backedge is guarded by a comparison
1723 // with the post-inc value, the addrec is safe.
1724 ICmpInst::Predicate Pred;
1725 const SCEV *OverflowLimit =
1726 getSignedOverflowLimitForStep(Step, &Pred, this);
1727 if (OverflowLimit &&
1728 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
1729 (isLoopEntryGuardedByCond(L, Pred, Start, OverflowLimit) &&
1730 isLoopBackedgeGuardedByCond(L, Pred, AR->getPostIncExpr(*this),
1731 OverflowLimit)))) {
1732 // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
1733 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1734 return getAddRecExpr(
1735 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
1736 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1737 }
1738 }
1739 // If Start and Step are constants, check if we can apply this
1740 // transformation:
1741 // sext{C1,+,C2} --> C1 + sext{0,+,C2} if C1 < C2
1742 auto SC1 = dyn_cast<SCEVConstant>(Start);
1743 auto SC2 = dyn_cast<SCEVConstant>(Step);
1744 if (SC1 && SC2) {
1745 const APInt &C1 = SC1->getValue()->getValue();
1746 const APInt &C2 = SC2->getValue()->getValue();
1747 if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) &&
1748 C2.isPowerOf2()) {
1749 Start = getSignExtendExpr(Start, Ty);
1750 const SCEV *NewAR = getAddRecExpr(getConstant(AR->getType(), 0), Step,
1751 L, AR->getNoWrapFlags());
1752 return getAddExpr(Start, getSignExtendExpr(NewAR, Ty));
1753 }
1754 }
1755
1756 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
1757 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1758 return getAddRecExpr(
1759 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
1760 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1761 }
1762 }
1763
1764 // The cast wasn't folded; create an explicit cast node.
1765 // Recompute the insert position, as it may have been invalidated.
1766 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1767 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
1768 Op, Ty);
1769 UniqueSCEVs.InsertNode(S, IP);
1770 return S;
1771 }
1772
1773 /// getAnyExtendExpr - Return a SCEV for the given operand extended with
1774 /// unspecified bits out to the given type.
1775 ///
1776 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
1777 Type *Ty) {
1778 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1779 "This is not an extending conversion!");
1780 assert(isSCEVable(Ty) &&
1781 "This is not a conversion to a SCEVable type!");
1782 Ty = getEffectiveSCEVType(Ty);
1783
1784 // Sign-extend negative constants.
1785 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1786 if (SC->getValue()->getValue().isNegative())
1787 return getSignExtendExpr(Op, Ty);
1788
1789 // Peel off a truncate cast.
1790 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
1791 const SCEV *NewOp = T->getOperand();
1792 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
1793 return getAnyExtendExpr(NewOp, Ty);
1794 return getTruncateOrNoop(NewOp, Ty);
1795 }
1796
1797 // Next try a zext cast. If the cast is folded, use it.
1798 const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
1799 if (!isa<SCEVZeroExtendExpr>(ZExt))
1800 return ZExt;
1801
1802 // Next try a sext cast. If the cast is folded, use it.
1803 const SCEV *SExt = getSignExtendExpr(Op, Ty);
1804 if (!isa<SCEVSignExtendExpr>(SExt))
1805 return SExt;
1806
1807 // Force the cast to be folded into the operands of an addrec.
1808 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
1809 SmallVector<const SCEV *, 4> Ops;
1810 for (const SCEV *Op : AR->operands())
1811 Ops.push_back(getAnyExtendExpr(Op, Ty));
1812 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
1813 }
1814
1815 // If the expression is obviously signed, use the sext cast value.
1816 if (isa<SCEVSMaxExpr>(Op))
1817 return SExt;
1818
1819 // Absent any other information, use the zext cast value.
1820 return ZExt;
1821 }
1822
1823 /// CollectAddOperandsWithScales - Process the given Ops list, which is
1824 /// a list of operands to be added under the given scale, update the given
1825 /// map. This is a helper function for getAddRecExpr. As an example of
1826 /// what it does, given a sequence of operands that would form an add
1827 /// expression like this:
1828 ///
1829 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
1830 ///
1831 /// where A and B are constants, update the map with these values:
1832 ///
1833 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
1834 ///
1835 /// and add 13 + A*B*29 to AccumulatedConstant.
1836 /// This will allow getAddRecExpr to produce this:
1837 ///
1838 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
1839 ///
1840 /// This form often exposes folding opportunities that are hidden in
1841 /// the original operand list.
1842 ///
1843 /// Return true iff it appears that any interesting folding opportunities
1844 /// may be exposed. This helps getAddRecExpr short-circuit extra work in
1845 /// the common case where no interesting opportunities are present, and
1846 /// is also used as a check to avoid infinite recursion.
1847 ///
1848 static bool
1849 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
1850 SmallVectorImpl<const SCEV *> &NewOps,
1851 APInt &AccumulatedConstant,
1852 const SCEV *const *Ops, size_t NumOperands,
1853 const APInt &Scale,
1854 ScalarEvolution &SE) {
1855 bool Interesting = false;
1856
1857 // Iterate over the add operands. They are sorted, with constants first.
1858 unsigned i = 0;
1859 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
1860 ++i;
1861 // Pull a buried constant out to the outside.
1862 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
1863 Interesting = true;
1864 AccumulatedConstant += Scale * C->getValue()->getValue();
1865 }
1866
1867 // Next comes everything else. We're especially interested in multiplies
1868 // here, but they're in the middle, so just visit the rest with one loop.
1869 for (; i != NumOperands; ++i) {
1870 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
1871 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
1872 APInt NewScale =
1873 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
1874 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
1875 // A multiplication of a constant with another add; recurse.
1876 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
1877 Interesting |=
1878 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
1879 Add->op_begin(), Add->getNumOperands(),
1880 NewScale, SE);
1881 } else {
1882 // A multiplication of a constant with some other value. Update
1883 // the map.
1884 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
1885 const SCEV *Key = SE.getMulExpr(MulOps);
1886 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
1887 M.insert(std::make_pair(Key, NewScale));
1888 if (Pair.second) {
1889 NewOps.push_back(Pair.first->first);
1890 } else {
1891 Pair.first->second += NewScale;
1892 // The map already had an entry for this value, which may indicate
1893 // a folding opportunity.
1894 Interesting = true;
1895 }
1896 }
1897 } else {
1898 // An ordinary operand. Update the map.
1899 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
1900 M.insert(std::make_pair(Ops[i], Scale));
1901 if (Pair.second) {
1902 NewOps.push_back(Pair.first->first);
1903 } else {
1904 Pair.first->second += Scale;
1905 // The map already had an entry for this value, which may indicate
1906 // a folding opportunity.
1907 Interesting = true;
1908 }
1909 }
1910 }
1911
1912 return Interesting;
1913 }
1914
1915 namespace {
1916 struct APIntCompare {
1917   bool operator()(const APInt &LHS, const APInt &RHS) const {
1918 return LHS.ult(RHS);
1919 }
1920 };
1921 }
1922
1923 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and
1924 // `OldFlags' as can't-wrap behavior. Infer a more aggressive set of
1925 // can't-overflow flags for the operation if possible.
1926 static SCEV::NoWrapFlags
1927 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
1928 const SmallVectorImpl<const SCEV *> &Ops,
1929 SCEV::NoWrapFlags OldFlags) {
1930 using namespace std::placeholders;
1931
1932 bool CanAnalyze =
1933 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
1934 (void)CanAnalyze;
1935 assert(CanAnalyze && "don't call from other places!");
1936
1937 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
1938 SCEV::NoWrapFlags SignOrUnsignWrap =
1939 ScalarEvolution::maskFlags(OldFlags, SignOrUnsignMask);
1940
1941 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
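  // (When every operand is non-negative, the signed and unsigned
  // interpretations of the result coincide, so proving no signed wrap also
  // rules out unsigned wrap.)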
1942 auto IsKnownNonNegative =
1943 std::bind(std::mem_fn(&ScalarEvolution::isKnownNonNegative), SE, _1);
1944
1945 if (SignOrUnsignWrap == SCEV::FlagNSW &&
1946 std::all_of(Ops.begin(), Ops.end(), IsKnownNonNegative))
1947 return ScalarEvolution::setFlags(OldFlags,
1948 (SCEV::NoWrapFlags)SignOrUnsignMask);
1949
1950 return OldFlags;
1951 }
1952
1953 /// getAddExpr - Get a canonical add expression, or something simpler if
1954 /// possible.
1955 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
1956 SCEV::NoWrapFlags Flags) {
1957 assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
1958 "only nuw or nsw allowed");
1959 assert(!Ops.empty() && "Cannot get empty add!");
1960 if (Ops.size() == 1) return Ops[0];
1961 #ifndef NDEBUG
1962 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
1963 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1964 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
1965 "SCEVAddExpr operand types don't match!");
1966 #endif
1967
1968 Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags);
1969
1970 // Sort by complexity, this groups all similar expression types together.
1971 GroupByComplexity(Ops, LI);
1972
1973 // If there are any constants, fold them together.
1974 unsigned Idx = 0;
1975 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1976 ++Idx;
1977 assert(Idx < Ops.size());
1978 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1979 // We found two constants, fold them together!
1980 Ops[0] = getConstant(LHSC->getValue()->getValue() +
1981 RHSC->getValue()->getValue());
1982 if (Ops.size() == 2) return Ops[0];
1983 Ops.erase(Ops.begin()+1); // Erase the folded element
1984 LHSC = cast<SCEVConstant>(Ops[0]);
1985 }
1986
1987 // If we are left with a constant zero being added, strip it off.
1988 if (LHSC->getValue()->isZero()) {
1989 Ops.erase(Ops.begin());
1990 --Idx;
1991 }
1992
1993 if (Ops.size() == 1) return Ops[0];
1994 }
1995
1996 // Okay, check to see if the same value occurs in the operand list more than
1997   // once. If so, merge them together into a multiply expression. Since we
1998 // sorted the list, these values are required to be adjacent.
1999 Type *Ty = Ops[0]->getType();
2000 bool FoundMatch = false;
2001 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
2002 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
2003 // Scan ahead to count how many equal operands there are.
2004 unsigned Count = 2;
2005 while (i+Count != e && Ops[i+Count] == Ops[i])
2006 ++Count;
2007 // Merge the values into a multiply.
2008 const SCEV *Scale = getConstant(Ty, Count);
2009 const SCEV *Mul = getMulExpr(Scale, Ops[i]);
2010 if (Ops.size() == Count)
2011 return Mul;
2012 Ops[i] = Mul;
2013 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
2014 --i; e -= Count - 1;
2015 FoundMatch = true;
2016 }
2017 if (FoundMatch)
2018 return getAddExpr(Ops, Flags);
2019
2020 // Check for truncates. If all the operands are truncated from the same
2021 // type, see if factoring out the truncate would permit the result to be
2022   // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
2023 // if the contents of the resulting outer trunc fold to something simple.
2024 for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
2025 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
2026 Type *DstType = Trunc->getType();
2027 Type *SrcType = Trunc->getOperand()->getType();
2028 SmallVector<const SCEV *, 8> LargeOps;
2029 bool Ok = true;
2030 // Check all the operands to see if they can be represented in the
2031 // source type of the truncate.
2032 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
2033 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
2034 if (T->getOperand()->getType() != SrcType) {
2035 Ok = false;
2036 break;
2037 }
2038 LargeOps.push_back(T->getOperand());
2039 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2040 LargeOps.push_back(getAnyExtendExpr(C, SrcType));
2041 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
2042 SmallVector<const SCEV *, 8> LargeMulOps;
2043 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2044 if (const SCEVTruncateExpr *T =
2045 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
2046 if (T->getOperand()->getType() != SrcType) {
2047 Ok = false;
2048 break;
2049 }
2050 LargeMulOps.push_back(T->getOperand());
2051 } else if (const SCEVConstant *C =
2052 dyn_cast<SCEVConstant>(M->getOperand(j))) {
2053 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
2054 } else {
2055 Ok = false;
2056 break;
2057 }
2058 }
2059 if (Ok)
2060 LargeOps.push_back(getMulExpr(LargeMulOps));
2061 } else {
2062 Ok = false;
2063 break;
2064 }
2065 }
2066 if (Ok) {
2067 // Evaluate the expression in the larger type.
2068 const SCEV *Fold = getAddExpr(LargeOps, Flags);
2069 // If it folds to something simple, use it. Otherwise, don't.
2070 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
2071 return getTruncateExpr(Fold, DstType);
2072 }
2073 }
2074
2075 // Skip past any other cast SCEVs.
2076 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
2077 ++Idx;
2078
2079 // If there are add operands they would be next.
2080 if (Idx < Ops.size()) {
2081 bool DeletedAdd = false;
2082 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
2083 // If we have an add, expand the add operands onto the end of the operands
2084 // list.
2085 Ops.erase(Ops.begin()+Idx);
2086 Ops.append(Add->op_begin(), Add->op_end());
2087 DeletedAdd = true;
2088 }
2089
2090 // If we deleted at least one add, we added operands to the end of the list,
2091 // and they are not necessarily sorted. Recurse to resort and resimplify
2092 // any operands we just acquired.
2093 if (DeletedAdd)
2094 return getAddExpr(Ops);
2095 }
2096
2097 // Skip over the add expression until we get to a multiply.
2098 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2099 ++Idx;
2100
2101 // Check to see if there are any folding opportunities present with
2102 // operands multiplied by constant values.
2103 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
2104 uint64_t BitWidth = getTypeSizeInBits(Ty);
2105 DenseMap<const SCEV *, APInt> M;
2106 SmallVector<const SCEV *, 8> NewOps;
2107 APInt AccumulatedConstant(BitWidth, 0);
2108 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2109 Ops.data(), Ops.size(),
2110 APInt(BitWidth, 1), *this)) {
2111       // Some interesting folding opportunity is present, so it's worthwhile to
2112 // re-generate the operands list. Group the operands by constant scale,
2113 // to avoid multiplying by the same constant scale multiple times.
2114 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2115 for (SmallVectorImpl<const SCEV *>::const_iterator I = NewOps.begin(),
2116 E = NewOps.end(); I != E; ++I)
2117 MulOpLists[M.find(*I)->second].push_back(*I);
2118 // Re-generate the operands list.
2119 Ops.clear();
2120 if (AccumulatedConstant != 0)
2121 Ops.push_back(getConstant(AccumulatedConstant));
2122 for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
2123 I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
2124 if (I->first != 0)
2125 Ops.push_back(getMulExpr(getConstant(I->first),
2126 getAddExpr(I->second)));
2127 if (Ops.empty())
2128 return getConstant(Ty, 0);
2129 if (Ops.size() == 1)
2130 return Ops[0];
2131 return getAddExpr(Ops);
2132 }
2133 }
2134
2135 // If we are adding something to a multiply expression, make sure the
2136 // something is not already an operand of the multiply. If so, merge it into
2137 // the multiply.
2138 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
2139 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
2140 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
2141 const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
2142 if (isa<SCEVConstant>(MulOpSCEV))
2143 continue;
2144 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
2145 if (MulOpSCEV == Ops[AddOp]) {
2146 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
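          // Note that Mul->getOperand(MulOp == 0) below selects "the other"
          // operand of a two-operand multiply: index 1 when MulOp is 0,
          // index 0 otherwise.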
2147 const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
2148 if (Mul->getNumOperands() != 2) {
2149 // If the multiply has more than two operands, we must get the
2150 // Y*Z term.
2151 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
2152 Mul->op_begin()+MulOp);
2153 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2154 InnerMul = getMulExpr(MulOps);
2155 }
2156 const SCEV *One = getConstant(Ty, 1);
2157 const SCEV *AddOne = getAddExpr(One, InnerMul);
2158 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV);
2159 if (Ops.size() == 2) return OuterMul;
2160 if (AddOp < Idx) {
2161 Ops.erase(Ops.begin()+AddOp);
2162 Ops.erase(Ops.begin()+Idx-1);
2163 } else {
2164 Ops.erase(Ops.begin()+Idx);
2165 Ops.erase(Ops.begin()+AddOp-1);
2166 }
2167 Ops.push_back(OuterMul);
2168 return getAddExpr(Ops);
2169 }
2170
2171 // Check this multiply against other multiplies being added together.
2172 for (unsigned OtherMulIdx = Idx+1;
2173 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
2174 ++OtherMulIdx) {
2175 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
2176 // If MulOp occurs in OtherMul, we can fold the two multiplies
2177 // together.
2178 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
2179 OMulOp != e; ++OMulOp)
2180 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
2181 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
2182 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
2183 if (Mul->getNumOperands() != 2) {
2184 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
2185 Mul->op_begin()+MulOp);
2186 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2187 InnerMul1 = getMulExpr(MulOps);
2188 }
2189 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
2190 if (OtherMul->getNumOperands() != 2) {
2191 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
2192 OtherMul->op_begin()+OMulOp);
2193 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
2194 InnerMul2 = getMulExpr(MulOps);
2195 }
2196 const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
2197 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
2198 if (Ops.size() == 2) return OuterMul;
2199 Ops.erase(Ops.begin()+Idx);
2200 Ops.erase(Ops.begin()+OtherMulIdx-1);
2201 Ops.push_back(OuterMul);
2202 return getAddExpr(Ops);
2203 }
2204 }
2205 }
2206 }
2207
2208 // If there are any add recurrences in the operands list, see if any other
2209 // added values are loop invariant. If so, we can fold them into the
2210 // recurrence.
2211 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
2212 ++Idx;
2213
2214 // Scan over all recurrences, trying to fold loop invariants into them.
2215 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2216 // Scan all of the other operands to this add and add them to the vector if
2217 // they are loop invariant w.r.t. the recurrence.
2218 SmallVector<const SCEV *, 8> LIOps;
2219 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
2220 const Loop *AddRecLoop = AddRec->getLoop();
2221 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2222 if (isLoopInvariant(Ops[i], AddRecLoop)) {
2223 LIOps.push_back(Ops[i]);
2224 Ops.erase(Ops.begin()+i);
2225 --i; --e;
2226 }
2227
2228 // If we found some loop invariants, fold them into the recurrence.
2229 if (!LIOps.empty()) {
2230 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
2231 LIOps.push_back(AddRec->getStart());
2232
2233 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
2234 AddRec->op_end());
2235 AddRecOps[0] = getAddExpr(LIOps);
2236
2237 // Build the new addrec. Propagate the NUW and NSW flags if both the
2238 // outer add and the inner addrec are guaranteed to have no overflow.
2239 // Always propagate NW.
2240 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
2241 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
2242
2243 // If all of the other operands were loop invariant, we are done.
2244 if (Ops.size() == 1) return NewRec;
2245
2246 // Otherwise, add the folded AddRec by the non-invariant parts.
2247 for (unsigned i = 0;; ++i)
2248 if (Ops[i] == AddRec) {
2249 Ops[i] = NewRec;
2250 break;
2251 }
2252 return getAddExpr(Ops);
2253 }
2254
2255 // Okay, if there weren't any loop invariants to be folded, check to see if
2256 // there are multiple AddRec's with the same loop induction variable being
2257 // added together. If so, we can fold them.
2258 for (unsigned OtherIdx = Idx+1;
2259 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2260 ++OtherIdx)
2261 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
2262 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
2263 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
2264 AddRec->op_end());
2265 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2266 ++OtherIdx)
2267 if (const SCEVAddRecExpr *OtherAddRec =
2268 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
2269 if (OtherAddRec->getLoop() == AddRecLoop) {
2270 for (unsigned i = 0, e = OtherAddRec->getNumOperands();
2271 i != e; ++i) {
2272 if (i >= AddRecOps.size()) {
2273 AddRecOps.append(OtherAddRec->op_begin()+i,
2274 OtherAddRec->op_end());
2275 break;
2276 }
2277 AddRecOps[i] = getAddExpr(AddRecOps[i],
2278 OtherAddRec->getOperand(i));
2279 }
2280 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2281 }
2282 // Step size has changed, so we cannot guarantee no self-wraparound.
2283 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
2284 return getAddExpr(Ops);
2285 }
2286
2287 // Otherwise couldn't fold anything into this recurrence. Move onto the
2288 // next one.
2289 }
2290
2291 // Okay, it looks like we really DO need an add expr. Check to see if we
2292 // already have one, otherwise create a new one.
2293 FoldingSetNodeID ID;
2294 ID.AddInteger(scAddExpr);
2295 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2296 ID.AddPointer(Ops[i]);
2297 void *IP = nullptr;
2298 SCEVAddExpr *S =
2299 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2300 if (!S) {
2301 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2302 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2303 S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
2304 O, Ops.size());
2305 UniqueSCEVs.InsertNode(S, IP);
2306 }
2307 S->setNoWrapFlags(Flags);
2308 return S;
2309 }
2310
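/// umul_ov - Multiply two unsigned 64-bit values, setting Overflow if the
/// product wraps. Overflow is only ever set, never cleared, so a caller can
/// accumulate it across a sequence of multiplications.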
2311 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
2312 uint64_t k = i*j;
2313 if (j > 1 && k / j != i) Overflow = true;
2314 return k;
2315 }
2316
2317 /// Compute the result of "n choose k", the binomial coefficient. If an
2318 /// intermediate computation overflows, Overflow will be set and the return will
2319 /// be garbage. Overflow is not cleared on absence of overflow.
2320 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
2321 // We use the multiplicative formula:
2322 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
2323   // At each iteration, we take the next term of the numerator and divide by
2324   // the next term of the denominator. This division will always produce an
2325 // integral result, and helps reduce the chance of overflow in the
2326 // intermediate computations. However, we can still overflow even when the
2327 // final result would fit.
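  //
  // For example, Choose(5, 2, Overflow) runs two iterations: r = 1*5 = 5,
  // r /= 1; then r = 5*4 = 20, r /= 2, yielding the expected C(5,2) = 10.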
2328
2329 if (n == 0 || n == k) return 1;
2330 if (k > n) return 0;
2331
2332 if (k > n/2)
2333 k = n-k;
2334
2335 uint64_t r = 1;
2336 for (uint64_t i = 1; i <= k; ++i) {
2337 r = umul_ov(r, n-(i-1), Overflow);
2338 r /= i;
2339 }
2340 return r;
2341 }
2342
2343 /// Determine if any of the operands in this SCEV are a constant or if
2344 /// any of the add or multiply expressions in this SCEV contain a constant.
2345 static bool containsConstantSomewhere(const SCEV *StartExpr) {
2346 SmallVector<const SCEV *, 4> Ops;
2347 Ops.push_back(StartExpr);
2348 while (!Ops.empty()) {
2349 const SCEV *CurrentExpr = Ops.pop_back_val();
2350 if (isa<SCEVConstant>(*CurrentExpr))
2351 return true;
2352
2353 if (isa<SCEVAddExpr>(*CurrentExpr) || isa<SCEVMulExpr>(*CurrentExpr)) {
2354 const auto *CurrentNAry = cast<SCEVNAryExpr>(CurrentExpr);
2355 Ops.append(CurrentNAry->op_begin(), CurrentNAry->op_end());
2356 }
2357 }
2358 return false;
2359 }
2360
2361 /// getMulExpr - Get a canonical multiply expression, or something simpler if
2362 /// possible.
2363 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
2364 SCEV::NoWrapFlags Flags) {
2365 assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
2366 "only nuw or nsw allowed");
2367 assert(!Ops.empty() && "Cannot get empty mul!");
2368 if (Ops.size() == 1) return Ops[0];
2369 #ifndef NDEBUG
2370 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2371 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2372 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2373 "SCEVMulExpr operand types don't match!");
2374 #endif
2375
2376 Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags);
2377
2378 // Sort by complexity, this groups all similar expression types together.
2379 GroupByComplexity(Ops, LI);
2380
2381 // If there are any constants, fold them together.
2382 unsigned Idx = 0;
2383 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2384
2385 // C1*(C2+V) -> C1*C2 + C1*V
2386 if (Ops.size() == 2)
2387 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
2388 // If any of Add's ops are Adds or Muls with a constant,
2389 // apply this transformation as well.
2390 if (Add->getNumOperands() == 2)
2391 if (containsConstantSomewhere(Add))
2392 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
2393 getMulExpr(LHSC, Add->getOperand(1)));
2394
2395 ++Idx;
2396 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2397 // We found two constants, fold them together!
2398 ConstantInt *Fold = ConstantInt::get(getContext(),
2399 LHSC->getValue()->getValue() *
2400 RHSC->getValue()->getValue());
2401 Ops[0] = getConstant(Fold);
2402 Ops.erase(Ops.begin()+1); // Erase the folded element
2403 if (Ops.size() == 1) return Ops[0];
2404 LHSC = cast<SCEVConstant>(Ops[0]);
2405 }
2406
2407 // If we are left with a constant one being multiplied, strip it off.
2408 if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
2409 Ops.erase(Ops.begin());
2410 --Idx;
2411 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
2412 // If we have a multiply of zero, it will always be zero.
2413 return Ops[0];
2414 } else if (Ops[0]->isAllOnesValue()) {
2415 // If we have a mul by -1 of an add, try distributing the -1 among the
2416 // add operands.
2417 if (Ops.size() == 2) {
2418 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
2419 SmallVector<const SCEV *, 4> NewOps;
2420 bool AnyFolded = false;
2421 for (SCEVAddRecExpr::op_iterator I = Add->op_begin(),
2422 E = Add->op_end(); I != E; ++I) {
2423 const SCEV *Mul = getMulExpr(Ops[0], *I);
2424 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
2425 NewOps.push_back(Mul);
2426 }
2427 if (AnyFolded)
2428 return getAddExpr(NewOps);
2429 }
2430 else if (const SCEVAddRecExpr *
2431 AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
2432 // Negation preserves a recurrence's no self-wrap property.
2433 SmallVector<const SCEV *, 4> Operands;
2434 for (SCEVAddRecExpr::op_iterator I = AddRec->op_begin(),
2435 E = AddRec->op_end(); I != E; ++I) {
2436 Operands.push_back(getMulExpr(Ops[0], *I));
2437 }
2438 return getAddRecExpr(Operands, AddRec->getLoop(),
2439 AddRec->getNoWrapFlags(SCEV::FlagNW));
2440 }
2441 }
2442 }
2443
2444 if (Ops.size() == 1)
2445 return Ops[0];
2446 }
2447
2448 // Skip over the add expression until we get to a multiply.
2449 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2450 ++Idx;
2451
2452 // If there are mul operands inline them all into this expression.
2453 if (Idx < Ops.size()) {
2454 bool DeletedMul = false;
2455 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2456       // If we have a mul, expand the mul operands onto the end of the operands
2457 // list.
2458 Ops.erase(Ops.begin()+Idx);
2459 Ops.append(Mul->op_begin(), Mul->op_end());
2460 DeletedMul = true;
2461 }
2462
2463 // If we deleted at least one mul, we added operands to the end of the list,
2464 // and they are not necessarily sorted. Recurse to resort and resimplify
2465 // any operands we just acquired.
2466 if (DeletedMul)
2467 return getMulExpr(Ops);
2468 }
2469
2470 // If there are any add recurrences in the operands list, see if any other
2471   // multiplied values are loop invariant. If so, we can fold them into the
2472 // recurrence.
2473 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
2474 ++Idx;
2475
2476 // Scan over all recurrences, trying to fold loop invariants into them.
2477 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2478 // Scan all of the other operands to this mul and add them to the vector if
2479 // they are loop invariant w.r.t. the recurrence.
2480 SmallVector<const SCEV *, 8> LIOps;
2481 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
2482 const Loop *AddRecLoop = AddRec->getLoop();
2483 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2484 if (isLoopInvariant(Ops[i], AddRecLoop)) {
2485 LIOps.push_back(Ops[i]);
2486 Ops.erase(Ops.begin()+i);
2487 --i; --e;
2488 }
2489
2490 // If we found some loop invariants, fold them into the recurrence.
2491 if (!LIOps.empty()) {
2492 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
2493 SmallVector<const SCEV *, 4> NewOps;
2494 NewOps.reserve(AddRec->getNumOperands());
2495 const SCEV *Scale = getMulExpr(LIOps);
2496 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
2497 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
2498
2499 // Build the new addrec. Propagate the NUW and NSW flags if both the
2500 // outer mul and the inner addrec are guaranteed to have no overflow.
2501 //
2502       // FlagNW (no self-wrap) cannot be guaranteed after changing the step
2503       // size, but will be inferred if either NUW or NSW is true.
2504 Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
2505 const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);
2506
2507 // If all of the other operands were loop invariant, we are done.
2508 if (Ops.size() == 1) return NewRec;
2509
2510 // Otherwise, multiply the folded AddRec by the non-invariant parts.
2511 for (unsigned i = 0;; ++i)
2512 if (Ops[i] == AddRec) {
2513 Ops[i] = NewRec;
2514 break;
2515 }
2516 return getMulExpr(Ops);
2517 }
2518
2519 // Okay, if there weren't any loop invariants to be folded, check to see if
2520 // there are multiple AddRec's with the same loop induction variable being
2521 // multiplied together. If so, we can fold them.
2522
2523 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
2524 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
2525 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
2526 // ]]],+,...up to x=2n}.
2527 // Note that the arguments to choose() are always integers with values
2528 // known at compile time, never SCEV objects.
2529 //
2530 // The implementation avoids pointless extra computations when the two
2531 // addrec's are of different length (mathematically, it's equivalent to
2532 // an infinite stream of zeros on the right).
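    // As a concrete instance of the formula, two affine recurrences multiply
    // as {A,+,B}<L> * {C,+,D}<L> = {A*C,+,A*D+B*C+B*D,+,2*B*D}<L>.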
2533 bool OpsModified = false;
2534 for (unsigned OtherIdx = Idx+1;
2535 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2536 ++OtherIdx) {
2537 const SCEVAddRecExpr *OtherAddRec =
2538 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2539 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
2540 continue;
2541
2542 bool Overflow = false;
2543 Type *Ty = AddRec->getType();
2544 bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
2545 SmallVector<const SCEV*, 7> AddRecOps;
2546 for (int x = 0, xe = AddRec->getNumOperands() +
2547 OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
2548 const SCEV *Term = getConstant(Ty, 0);
2549 for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
2550 uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
2551 for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
2552 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
2553 z < ze && !Overflow; ++z) {
2554 uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
2555 uint64_t Coeff;
2556 if (LargerThan64Bits)
2557 Coeff = umul_ov(Coeff1, Coeff2, Overflow);
2558 else
2559 Coeff = Coeff1*Coeff2;
2560 const SCEV *CoeffTerm = getConstant(Ty, Coeff);
2561 const SCEV *Term1 = AddRec->getOperand(y-z);
2562 const SCEV *Term2 = OtherAddRec->getOperand(z);
2563 Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1,Term2));
2564 }
2565 }
2566 AddRecOps.push_back(Term);
2567 }
2568 if (!Overflow) {
2569 const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(),
2570 SCEV::FlagAnyWrap);
2571 if (Ops.size() == 2) return NewAddRec;
2572 Ops[Idx] = NewAddRec;
2573 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2574 OpsModified = true;
2575 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
2576 if (!AddRec)
2577 break;
2578 }
2579 }
2580 if (OpsModified)
2581 return getMulExpr(Ops);
2582
2583 // Otherwise couldn't fold anything into this recurrence. Move onto the
2584 // next one.
2585 }
2586
2587 // Okay, it looks like we really DO need an mul expr. Check to see if we
2588 // already have one, otherwise create a new one.
2589 FoldingSetNodeID ID;
2590 ID.AddInteger(scMulExpr);
2591 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2592 ID.AddPointer(Ops[i]);
2593 void *IP = nullptr;
2594 SCEVMulExpr *S =
2595 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2596 if (!S) {
2597 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2598 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2599 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
2600 O, Ops.size());
2601 UniqueSCEVs.InsertNode(S, IP);
2602 }
2603 S->setNoWrapFlags(Flags);
2604 return S;
2605 }
2606
2607 /// getUDivExpr - Get a canonical unsigned division expression, or something
2608 /// simpler if possible.
2609 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
2610 const SCEV *RHS) {
2611 assert(getEffectiveSCEVType(LHS->getType()) ==
2612 getEffectiveSCEVType(RHS->getType()) &&
2613 "SCEVUDivExpr operand types don't match!");
2614
2615 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
2616 if (RHSC->getValue()->equalsInt(1))
2617 return LHS; // X udiv 1 --> x
2618 // If the denominator is zero, the result of the udiv is undefined. Don't
2619 // try to analyze it, because the resolution chosen here may differ from
2620 // the resolution chosen in other parts of the compiler.
2621 if (!RHSC->getValue()->isZero()) {
2622       // Determine if the division can be folded into the operands of
2623       // its left-hand side.
2624 // TODO: Generalize this to non-constants by using known-bits information.
2625 Type *Ty = LHS->getType();
2626 unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
2627 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
2628 // For non-power-of-two values, effectively round the value up to the
2629 // nearest power of two.
2630 if (!RHSC->getValue()->getValue().isPowerOf2())
2631 ++MaxShiftAmt;
2632 IntegerType *ExtTy =
2633 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
2634 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
2635 if (const SCEVConstant *Step =
2636 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
2637 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
2638 const APInt &StepInt = Step->getValue()->getValue();
2639 const APInt &DivInt = RHSC->getValue()->getValue();
2640 if (!StepInt.urem(DivInt) &&
2641 getZeroExtendExpr(AR, ExtTy) ==
2642 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
2643 getZeroExtendExpr(Step, ExtTy),
2644 AR->getLoop(), SCEV::FlagAnyWrap)) {
2645 SmallVector<const SCEV *, 4> Operands;
2646 for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
2647 Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
2648 return getAddRecExpr(Operands, AR->getLoop(),
2649 SCEV::FlagNW);
2650 }
2651           // Get a canonical UDivExpr for a recurrence.
2652           // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
2653           // We can currently only fold X%N if X is constant.
2654 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
2655 if (StartC && !DivInt.urem(StepInt) &&
2656 getZeroExtendExpr(AR, ExtTy) ==
2657 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
2658 getZeroExtendExpr(Step, ExtTy),
2659 AR->getLoop(), SCEV::FlagAnyWrap)) {
2660 const APInt &StartInt = StartC->getValue()->getValue();
2661 const APInt &StartRem = StartInt.urem(StepInt);
2662 if (StartRem != 0)
2663 LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step,
2664 AR->getLoop(), SCEV::FlagNW);
2665 }
2666 }
2667 // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
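      // ("Safe" here means the multiply provably does not overflow: extending
      // the whole product and extending each operand individually must give
      // the same expression in the wider type.)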
2668 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
2669 SmallVector<const SCEV *, 4> Operands;
2670 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
2671 Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
2672 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
2673 // Find an operand that's safely divisible.
2674 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
2675 const SCEV *Op = M->getOperand(i);
2676 const SCEV *Div = getUDivExpr(Op, RHSC);
2677 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
2678 Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
2679 M->op_end());
2680 Operands[i] = Div;
2681 return getMulExpr(Operands);
2682 }
2683 }
2684 }
2685 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
2686 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
2687 SmallVector<const SCEV *, 4> Operands;
2688 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
2689 Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
2690 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
2691 Operands.clear();
2692 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
2693 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
2694 if (isa<SCEVUDivExpr>(Op) ||
2695 getMulExpr(Op, RHS) != A->getOperand(i))
2696 break;
2697 Operands.push_back(Op);
2698 }
2699 if (Operands.size() == A->getNumOperands())
2700 return getAddExpr(Operands);
2701 }
2702 }
2703
2704 // Fold if both operands are constant.
2705 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
2706 Constant *LHSCV = LHSC->getValue();
2707 Constant *RHSCV = RHSC->getValue();
2708 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
2709 RHSCV)));
2710 }
2711 }
2712 }
2713
2714 FoldingSetNodeID ID;
2715 ID.AddInteger(scUDivExpr);
2716 ID.AddPointer(LHS);
2717 ID.AddPointer(RHS);
2718 void *IP = nullptr;
2719 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2720 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
2721 LHS, RHS);
2722 UniqueSCEVs.InsertNode(S, IP);
2723 return S;
2724 }
2725
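/// gcd - Compute the greatest common divisor of the absolute values of two
/// SCEV constants, zero-extending the narrower value so that both operands
/// share a bit width before calling APIntOps::GreatestCommonDivisor.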
2726 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
2727 APInt A = C1->getValue()->getValue().abs();
2728 APInt B = C2->getValue()->getValue().abs();
2729 uint32_t ABW = A.getBitWidth();
2730 uint32_t BBW = B.getBitWidth();
2731
2732 if (ABW > BBW)
2733 B = B.zext(ABW);
2734 else if (ABW < BBW)
2735 A = A.zext(BBW);
2736
2737 return APIntOps::GreatestCommonDivisor(A, B);
2738 }
2739
2740 /// getUDivExactExpr - Get a canonical unsigned division expression, or
2741 /// something simpler if possible. There is no representation for an exact udiv
2742 /// in SCEV IR, but we can attempt to remove factors from the LHS and RHS.
2743 /// We can't do this when it's not exact because the udiv may be clearing bits.
2744 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
2745 const SCEV *RHS) {
2746 // TODO: we could try to find factors in all sorts of things, but for now we
2747 // just deal with u/exact (multiply, constant). See SCEVDivision towards the
2748 // end of this file for inspiration.
2749
2750 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
2751 if (!Mul)
2752 return getUDivExpr(LHS, RHS);
2753
2754 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
2755 // If the mulexpr multiplies by a constant, then that constant must be the
2756 // first element of the mulexpr.
2757 if (const SCEVConstant *LHSCst =
2758 dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
2759 if (LHSCst == RHSCst) {
2760 SmallVector<const SCEV *, 2> Operands;
2761 Operands.append(Mul->op_begin() + 1, Mul->op_end());
2762 return getMulExpr(Operands);
2763 }
2764
2765       // We can't just assume that LHSCst divides RHSCst cleanly; it could be
2766 // that there's a factor provided by one of the other terms. We need to
2767 // check.
2768 APInt Factor = gcd(LHSCst, RHSCst);
2769 if (!Factor.isIntN(1)) {
2770 LHSCst = cast<SCEVConstant>(
2771 getConstant(LHSCst->getValue()->getValue().udiv(Factor)));
2772 RHSCst = cast<SCEVConstant>(
2773 getConstant(RHSCst->getValue()->getValue().udiv(Factor)));
2774 SmallVector<const SCEV *, 2> Operands;
2775 Operands.push_back(LHSCst);
2776 Operands.append(Mul->op_begin() + 1, Mul->op_end());
2777 LHS = getMulExpr(Operands);
2778 RHS = RHSCst;
2779 Mul = dyn_cast<SCEVMulExpr>(LHS);
2780 if (!Mul)
2781 return getUDivExactExpr(LHS, RHS);
2782 }
2783 }
2784 }
2785
2786 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
2787 if (Mul->getOperand(i) == RHS) {
2788 SmallVector<const SCEV *, 2> Operands;
2789 Operands.append(Mul->op_begin(), Mul->op_begin() + i);
2790 Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
2791 return getMulExpr(Operands);
2792 }
2793 }
2794
2795 return getUDivExpr(LHS, RHS);
2796 }
2797
2798 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
2799 /// Simplify the expression as much as possible.
2800 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
2801 const Loop *L,
2802 SCEV::NoWrapFlags Flags) {
2803 SmallVector<const SCEV *, 4> Operands;
2804 Operands.push_back(Start);
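  // If the step is itself a recurrence over the same loop, flatten the nested
  // recurrence: {X,+,{Y,+,Z}<L>}<L> becomes {X,+,Y,+,Z}<L>.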
2805 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
2806 if (StepChrec->getLoop() == L) {
2807 Operands.append(StepChrec->op_begin(), StepChrec->op_end());
2808 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
2809 }
2810
2811 Operands.push_back(Step);
2812 return getAddRecExpr(Operands, L, Flags);
2813 }
2814
2815 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
2816 /// Simplify the expression as much as possible.
2817 const SCEV *
2818 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
2819 const Loop *L, SCEV::NoWrapFlags Flags) {
2820 if (Operands.size() == 1) return Operands[0];
2821 #ifndef NDEBUG
2822 Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
2823 for (unsigned i = 1, e = Operands.size(); i != e; ++i)
2824 assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
2825 "SCEVAddRecExpr operand types don't match!");
2826 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2827 assert(isLoopInvariant(Operands[i], L) &&
2828 "SCEVAddRecExpr operand is not loop-invariant!");
2829 #endif
2830
2831 if (Operands.back()->isZero()) {
2832 Operands.pop_back();
2833 return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
2834 }
2835
2836   // It's tempting to call getMaxBackedgeTakenCount here and
2837 // use that information to infer NUW and NSW flags. However, computing a
2838 // BE count requires calling getAddRecExpr, so we may not yet have a
2839 // meaningful BE count at this point (and if we don't, we'd be stuck
2840 // with a SCEVCouldNotCompute as the cached BE count).
2841
2842 Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
2843
2844 // Canonicalize nested AddRecs by nesting them in order of loop depth.
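// Illustrative example (an assumption, not from the original comments):
// {{A,+,B}<Inner>,+,C}<Outer>, where Outer contains Inner, is rebuilt as
// {{A,+,C}<Outer>,+,B}<Inner>, so the outermost recurrence belongs to the
// outermost loop.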
2845 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
2846 const Loop *NestedLoop = NestedAR->getLoop();
2847 if (L->contains(NestedLoop) ?
2848 (L->getLoopDepth() < NestedLoop->getLoopDepth()) :
2849 (!NestedLoop->contains(L) &&
2850 DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
2851 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
2852 NestedAR->op_end());
2853 Operands[0] = NestedAR->getStart();
2854 // AddRecs require their operands be loop-invariant with respect to their
2855 // loops. Don't perform this transformation if it would break this
2856 // requirement.
2857 bool AllInvariant = true;
2858 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2859 if (!isLoopInvariant(Operands[i], L)) {
2860 AllInvariant = false;
2861 break;
2862 }
2863 if (AllInvariant) {
2864 // Create a recurrence for the outer loop with the same step size.
2865 //
2866 // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
2867 // inner recurrence has the same property.
2868 SCEV::NoWrapFlags OuterFlags =
2869 maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
2870
2871 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
2872 AllInvariant = true;
2873 for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
2874 if (!isLoopInvariant(NestedOperands[i], NestedLoop)) {
2875 AllInvariant = false;
2876 break;
2877 }
2878 if (AllInvariant) {
2879 // Ok, both add recurrences are valid after the transformation.
2880 //
2881 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
2882 // the outer recurrence has the same property.
2883 SCEV::NoWrapFlags InnerFlags =
2884 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
2885 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
2886 }
2887 }
2888 // Reset Operands to its original state.
2889 Operands[0] = NestedAR;
2890 }
2891 }
2892
2893 // Okay, it looks like we really DO need an addrec expr. Check to see if we
2894 // already have one, otherwise create a new one.
2895 FoldingSetNodeID ID;
2896 ID.AddInteger(scAddRecExpr);
2897 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2898 ID.AddPointer(Operands[i]);
2899 ID.AddPointer(L);
2900 void *IP = nullptr;
2901 SCEVAddRecExpr *S =
2902 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2903 if (!S) {
2904 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
2905 std::uninitialized_copy(Operands.begin(), Operands.end(), O);
2906 S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
2907 O, Operands.size(), L);
2908 UniqueSCEVs.InsertNode(S, IP);
2909 }
2910 S->setNoWrapFlags(Flags);
2911 return S;
2912 }
2913
2914 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
2915 const SCEV *RHS) {
2916 SmallVector<const SCEV *, 2> Ops;
2917 Ops.push_back(LHS);
2918 Ops.push_back(RHS);
2919 return getSMaxExpr(Ops);
2920 }
2921
2922 const SCEV *
2923 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
2924 assert(!Ops.empty() && "Cannot get empty smax!");
2925 if (Ops.size() == 1) return Ops[0];
2926 #ifndef NDEBUG
2927 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2928 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2929 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2930 "SCEVSMaxExpr operand types don't match!");
2931 #endif
2932
2933 // Sort by complexity; this groups all similar expression types together.
2934 GroupByComplexity(Ops, LI);
2935
2936 // If there are any constants, fold them together.
2937 unsigned Idx = 0;
2938 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2939 ++Idx;
2940 assert(Idx < Ops.size());
2941 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2942 // We found two constants, fold them together!
2943 ConstantInt *Fold = ConstantInt::get(getContext(),
2944 APIntOps::smax(LHSC->getValue()->getValue(),
2945 RHSC->getValue()->getValue()));
2946 Ops[0] = getConstant(Fold);
2947 Ops.erase(Ops.begin()+1); // Erase the folded element
2948 if (Ops.size() == 1) return Ops[0];
2949 LHSC = cast<SCEVConstant>(Ops[0]);
2950 }
2951
2952 // If we are left with a constant minimum-int, strip it off.
2953 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
2954 Ops.erase(Ops.begin());
2955 --Idx;
2956 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
2957 // If we have an smax with a constant maximum-int, it will always be
2958 // maximum-int.
2959 return Ops[0];
2960 }
2961
2962 if (Ops.size() == 1) return Ops[0];
2963 }
2964
2965 // Find the first SMax
2966 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
2967 ++Idx;
2968
2969 // Check to see if one of the operands is an SMax. If so, expand its operands
2970 // onto our operand list, and recurse to simplify.
2971 if (Idx < Ops.size()) {
2972 bool DeletedSMax = false;
2973 while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
2974 Ops.erase(Ops.begin()+Idx);
2975 Ops.append(SMax->op_begin(), SMax->op_end());
2976 DeletedSMax = true;
2977 }
2978
2979 if (DeletedSMax)
2980 return getSMaxExpr(Ops);
2981 }
2982
2983 // Okay, check to see if the same value occurs in the operand list twice. If
2984 // so, delete one. Since we sorted the list, these values are required to
2985 // be adjacent.
2986 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
2987 // X smax Y smax Y --> X smax Y
2988 // X smax Y --> X, if X is always greater than Y
2989 if (Ops[i] == Ops[i+1] ||
2990 isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
2991 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
2992 --i; --e;
2993 } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
2994 Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
2995 --i; --e;
2996 }
2997
2998 if (Ops.size() == 1) return Ops[0];
2999
3000 assert(!Ops.empty() && "Reduced smax down to nothing!");
3001
3002 // Okay, it looks like we really DO need an smax expr. Check to see if we
3003 // already have one, otherwise create a new one.
3004 FoldingSetNodeID ID;
3005 ID.AddInteger(scSMaxExpr);
3006 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3007 ID.AddPointer(Ops[i]);
3008 void *IP = nullptr;
3009 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
3010 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3011 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3012 SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
3013 O, Ops.size());
3014 UniqueSCEVs.InsertNode(S, IP);
3015 return S;
3016 }
3017
3018 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
3019 const SCEV *RHS) {
3020 SmallVector<const SCEV *, 2> Ops;
3021 Ops.push_back(LHS);
3022 Ops.push_back(RHS);
3023 return getUMaxExpr(Ops);
3024 }
3025
3026 const SCEV *
3027 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
3028 assert(!Ops.empty() && "Cannot get empty umax!");
3029 if (Ops.size() == 1) return Ops[0];
3030 #ifndef NDEBUG
3031 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
3032 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
3033 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
3034 "SCEVUMaxExpr operand types don't match!");
3035 #endif
3036
3037 // Sort by complexity; this groups all similar expression types together.
3038 GroupByComplexity(Ops, LI);
3039
3040 // If there are any constants, fold them together.
3041 unsigned Idx = 0;
3042 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3043 ++Idx;
3044 assert(Idx < Ops.size());
3045 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
3046 // We found two constants, fold them together!
3047 ConstantInt *Fold = ConstantInt::get(getContext(),
3048 APIntOps::umax(LHSC->getValue()->getValue(),
3049 RHSC->getValue()->getValue()));
3050 Ops[0] = getConstant(Fold);
3051 Ops.erase(Ops.begin()+1); // Erase the folded element
3052 if (Ops.size() == 1) return Ops[0];
3053 LHSC = cast<SCEVConstant>(Ops[0]);
3054 }
3055
3056 // If we are left with a constant minimum-int, strip it off.
3057 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
3058 Ops.erase(Ops.begin());
3059 --Idx;
3060 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
3061 // If we have an umax with a constant maximum-int, it will always be
3062 // maximum-int.
3063 return Ops[0];
3064 }
3065
3066 if (Ops.size() == 1) return Ops[0];
3067 }
3068
3069 // Find the first UMax
3070 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
3071 ++Idx;
3072
3073 // Check to see if one of the operands is a UMax. If so, expand its operands
3074 // onto our operand list, and recurse to simplify.
3075 if (Idx < Ops.size()) {
3076 bool DeletedUMax = false;
3077 while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
3078 Ops.erase(Ops.begin()+Idx);
3079 Ops.append(UMax->op_begin(), UMax->op_end());
3080 DeletedUMax = true;
3081 }
3082
3083 if (DeletedUMax)
3084 return getUMaxExpr(Ops);
3085 }
3086
3087 // Okay, check to see if the same value occurs in the operand list twice. If
3088 // so, delete one. Since we sorted the list, these values are required to
3089 // be adjacent.
3090 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
3091 // X umax Y umax Y --> X umax Y
3092 // X umax Y --> X, if X is always greater than Y
3093 if (Ops[i] == Ops[i+1] ||
3094 isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
3095 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
3096 --i; --e;
3097 } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
3098 Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
3099 --i; --e;
3100 }
3101
3102 if (Ops.size() == 1) return Ops[0];
3103
3104 assert(!Ops.empty() && "Reduced umax down to nothing!");
3105
3106 // Okay, it looks like we really DO need a umax expr. Check to see if we
3107 // already have one, otherwise create a new one.
3108 FoldingSetNodeID ID;
3109 ID.AddInteger(scUMaxExpr);
3110 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3111 ID.AddPointer(Ops[i]);
3112 void *IP = nullptr;
3113 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
3114 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3115 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3116 SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
3117 O, Ops.size());
3118 UniqueSCEVs.InsertNode(S, IP);
3119 return S;
3120 }
3121
3122 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
3123 const SCEV *RHS) {
3124 // ~smax(~x, ~y) == smin(x, y).
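// (Complementing is order-reversing for signed values: x <= y iff ~x >= ~y,
// so complementing the smax of the complements yields the signed minimum.)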
3125 return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
3126 }
3127
3128 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
3129 const SCEV *RHS) {
3130 // ~umax(~x, ~y) == umin(x, y)
3131 return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
3132 }
3133
3134 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
3135 // We can bypass creating a target-independent
3136 // constant expression and then folding it back into a ConstantInt.
3137 // This is just a compile-time optimization.
3138 return getConstant(IntTy,
3139 F->getParent()->getDataLayout().getTypeAllocSize(AllocTy));
3140 }
3141
3142 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
3143 StructType *STy,
3144 unsigned FieldNo) {
3145 // We can bypass creating a target-independent
3146 // constant expression and then folding it back into a ConstantInt.
3147 // This is just a compile-time optimization.
3148 return getConstant(
3149 IntTy,
3150 F->getParent()->getDataLayout().getStructLayout(STy)->getElementOffset(
3151 FieldNo));
3152 }
3153
3154 const SCEV *ScalarEvolution::getUnknown(Value *V) {
3155 // Don't attempt to do anything other than create a SCEVUnknown object
3156 // here. createSCEV only calls getUnknown after checking for all other
3157 // interesting possibilities, and any other code that calls getUnknown
3158 // is doing so in order to hide a value from SCEV canonicalization.
3159
3160 FoldingSetNodeID ID;
3161 ID.AddInteger(scUnknown);
3162 ID.AddPointer(V);
3163 void *IP = nullptr;
3164 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
3165 assert(cast<SCEVUnknown>(S)->getValue() == V &&
3166 "Stale SCEVUnknown in uniquing map!");
3167 return S;
3168 }
3169 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
3170 FirstUnknown);
3171 FirstUnknown = cast<SCEVUnknown>(S);
3172 UniqueSCEVs.InsertNode(S, IP);
3173 return S;
3174 }
3175
3176 //===----------------------------------------------------------------------===//
3177 // Basic SCEV Analysis and PHI Idiom Recognition Code
3178 //
3179
3180 /// isSCEVable - Test if values of the given type are analyzable within
3181 /// the SCEV framework. This primarily includes integer types, and it
3182 /// can optionally include pointer types if the ScalarEvolution class
3183 /// has access to target-specific information.
3184 bool ScalarEvolution::isSCEVable(Type *Ty) const {
3185 // Integers and pointers are always SCEVable.
3186 return Ty->isIntegerTy() || Ty->isPointerTy();
3187 }
3188
3189 /// getTypeSizeInBits - Return the size in bits of the specified type,
3190 /// for which isSCEVable must return true.
3191 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
3192 assert(isSCEVable(Ty) && "Type is not SCEVable!");
3193 return F->getParent()->getDataLayout().getTypeSizeInBits(Ty);
3194 }
3195
3196 /// getEffectiveSCEVType - Return a type with the same bitwidth as
3197 /// the given type and which represents how SCEV will treat the given
3198 /// type, for which isSCEVable must return true. For pointer types,
3199 /// this is the pointer-sized integer type.
3200 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
3201 assert(isSCEVable(Ty) && "Type is not SCEVable!");
3202
3203 if (Ty->isIntegerTy()) {
3204 return Ty;
3205 }
3206
3207 // The only other supported type is pointer.
3208 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
3209 return F->getParent()->getDataLayout().getIntPtrType(Ty);
3210 }
3211
3212 const SCEV *ScalarEvolution::getCouldNotCompute() {
3213 return &CouldNotCompute;
3214 }
3215
3216 namespace {
3217 // Helper class working with SCEVTraversal to figure out if a SCEV contains
3218 // a SCEVUnknown with null value-pointer. FindInvalidSCEVUnknown::FindOne
3219 // is set iff we find such a SCEVUnknown.
3220 //
3221 struct FindInvalidSCEVUnknown {
3222 bool FindOne;
3223 FindInvalidSCEVUnknown() { FindOne = false; }
3224 bool follow(const SCEV *S) {
3225 switch (static_cast<SCEVTypes>(S->getSCEVType())) {
3226 case scConstant:
3227 return false;
3228 case scUnknown:
3229 if (!cast<SCEVUnknown>(S)->getValue())
3230 FindOne = true;
3231 return false;
3232 default:
3233 return true;
3234 }
3235 }
3236 bool isDone() const { return FindOne; }
3237 };
3238 }
3239
3240 bool ScalarEvolution::checkValidity(const SCEV *S) const {
3241 FindInvalidSCEVUnknown F;
3242 SCEVTraversal<FindInvalidSCEVUnknown> ST(F);
3243 ST.visitAll(S);
3244
3245 return !F.FindOne;
3246 }
3247
3248 /// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
3249 /// expression and create a new one.
3250 const SCEV *ScalarEvolution::getSCEV(Value *V) {
3251 assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
3252
3253 ValueExprMapType::iterator I = ValueExprMap.find_as(V);
3254 if (I != ValueExprMap.end()) {
3255 const SCEV *S = I->second;
3256 if (checkValidity(S))
3257 return S;
3258 else
3259 ValueExprMap.erase(I);
3260 }
3261 const SCEV *S = createSCEV(V);
3262
3263 // The process of creating a SCEV for V may have caused other SCEVs
3264 // to have been created, so it's necessary to insert the new entry
3265 // from scratch, rather than trying to remember the insert position
3266 // above.
3267 ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S));
3268 return S;
3269 }
3270
3271 /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
3272 ///
3273 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
3274 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
3275 return getConstant(
3276 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
3277
3278 Type *Ty = V->getType();
3279 Ty = getEffectiveSCEVType(Ty);
3280 return getMulExpr(V,
3281 getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))));
3282 }
3283
3284 /// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
3285 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
3286 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
3287 return getConstant(
3288 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
3289
3290 Type *Ty = V->getType();
3291 Ty = getEffectiveSCEVType(Ty);
3292 const SCEV *AllOnes =
3293 getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
3294 return getMinusSCEV(AllOnes, V);
3295 }
3296
3297 /// getMinusSCEV - Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
3298 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
3299 SCEV::NoWrapFlags Flags) {
3300 assert(!maskFlags(Flags, SCEV::FlagNUW) && "subtraction does not have NUW");
3301
3302 // Fast path: X - X --> 0.
3303 if (LHS == RHS)
3304 return getConstant(LHS->getType(), 0);
3305
3306 // X - Y --> X + -Y.
3307 // X -(nsw || nuw) Y --> X + -Y.
3308 return getAddExpr(LHS, getNegativeSCEV(RHS));
3309 }
3310
3311 /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
3312 /// input value to the specified type. If the type must be extended, it is zero
3313 /// extended.
3314 const SCEV *
3315 ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
3316 Type *SrcTy = V->getType();
3317 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
3318 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
3319 "Cannot truncate or zero extend with non-integer arguments!");
3320 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
3321 return V; // No conversion
3322 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
3323 return getTruncateExpr(V, Ty);
3324 return getZeroExtendExpr(V, Ty);
3325 }
3326
3327 /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
3328 /// input value to the specified type. If the type must be extended, it is sign
3329 /// extended.
3330 const SCEV *
3331 ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
3332 Type *Ty) {
3333 Type *SrcTy = V->getType();
3334 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
3335 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
3336 "Cannot truncate or sign extend with non-integer arguments!");
3337 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
3338 return V; // No conversion
3339 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
3340 return getTruncateExpr(V, Ty);
3341 return getSignExtendExpr(V, Ty);
3342 }
3343
3344 /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
3345 /// input value to the specified type. If the type must be extended, it is zero
3346 /// extended. The conversion must not be narrowing.
3347 const SCEV *
3348 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
3349 Type *SrcTy = V->getType();
3350 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
3351 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
3352 "Cannot noop or zero extend with non-integer arguments!");
3353 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
3354 "getNoopOrZeroExtend cannot truncate!");
3355 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
3356 return V; // No conversion
3357 return getZeroExtendExpr(V, Ty);
3358 }
3359
3360 /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
3361 /// input value to the specified type. If the type must be extended, it is sign
3362 /// extended. The conversion must not be narrowing.
3363 const SCEV *
3364 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
3365 Type *SrcTy = V->getType();
3366 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
3367 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
3368 "Cannot noop or sign extend with non-integer arguments!");
3369 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
3370 "getNoopOrSignExtend cannot truncate!");
3371 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
3372 return V; // No conversion
3373 return getSignExtendExpr(V, Ty);
3374 }
3375
3376 /// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
3377 /// the input value to the specified type. If the type must be extended,
3378 /// it is extended with unspecified bits. The conversion must not be
3379 /// narrowing.
3380 const SCEV *
3381 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
3382 Type *SrcTy = V->getType();
3383 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
3384 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
3385 "Cannot noop or any extend with non-integer arguments!");
3386 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
3387 "getNoopOrAnyExtend cannot truncate!");
3388 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
3389 return V; // No conversion
3390 return getAnyExtendExpr(V, Ty);
3391 }
3392
3393 /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
3394 /// input value to the specified type. The conversion must not be widening.
3395 const SCEV *
3396 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
3397 Type *SrcTy = V->getType();
3398 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
3399 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
3400 "Cannot truncate or noop with non-integer arguments!");
3401 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
3402 "getTruncateOrNoop cannot extend!");
3403 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
3404 return V; // No conversion
3405 return getTruncateExpr(V, Ty);
3406 }
3407
3408 /// getUMaxFromMismatchedTypes - Promote the operands to the wider of
3409 /// the types using zero-extension, and then perform a umax operation
3410 /// with them.
3411 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
3412 const SCEV *RHS) {
3413 const SCEV *PromotedLHS = LHS;
3414 const SCEV *PromotedRHS = RHS;
3415
3416 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
3417 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
3418 else
3419 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
3420
3421 return getUMaxExpr(PromotedLHS, PromotedRHS);
3422 }
3423
3424 /// getUMinFromMismatchedTypes - Promote the operands to the wider of
3425 /// the types using zero-extension, and then perform a umin operation
3426 /// with them.
3427 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
3428 const SCEV *RHS) {
3429 const SCEV *PromotedLHS = LHS;
3430 const SCEV *PromotedRHS = RHS;
3431
3432 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
3433 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
3434 else
3435 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
3436
3437 return getUMinExpr(PromotedLHS, PromotedRHS);
3438 }
3439
3440 /// getPointerBase - Transitively follow the chain of pointer-type operands
3441 /// until reaching a SCEV that does not have a single pointer operand. This
3442 /// returns a SCEVUnknown pointer for well-formed pointer-type expressions,
3443 /// but corner cases do exist.
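/// Illustrative example (an assumption, not from the original comment): for an
/// expression such as ((4 * %i) + %base), the only pointer operand is %base,
/// so this returns the SCEVUnknown for %base.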
3444 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
3445 // A pointer operand may evaluate to a nonpointer expression, such as null.
3446 if (!V->getType()->isPointerTy())
3447 return V;
3448
3449 if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
3450 return getPointerBase(Cast->getOperand());
3451 }
3452 else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
3453 const SCEV *PtrOp = nullptr;
3454 for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
3455 I != E; ++I) {
3456 if ((*I)->getType()->isPointerTy()) {
3457 // Cannot find the base of an expression with multiple pointer operands.
3458 if (PtrOp)
3459 return V;
3460 PtrOp = *I;
3461 }
3462 }
3463 if (!PtrOp)
3464 return V;
3465 return getPointerBase(PtrOp);
3466 }
3467 return V;
3468 }
3469
3470 /// PushDefUseChildren - Push users of the given Instruction
3471 /// onto the given Worklist.
3472 static void
3473 PushDefUseChildren(Instruction *I,
3474 SmallVectorImpl<Instruction *> &Worklist) {
3475 // Push the def-use children onto the Worklist stack.
3476 for (User *U : I->users())
3477 Worklist.push_back(cast<Instruction>(U));
3478 }
3479
3480 /// ForgetSymbolicName - This looks up computed SCEV values for all
3481 /// instructions that depend on the given instruction and removes them from
3482 /// the ValueExprMap if they reference SymName. This is used during PHI
3483 /// resolution.
3484 void
3485 ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
3486 SmallVector<Instruction *, 16> Worklist;
3487 PushDefUseChildren(PN, Worklist);
3488
3489 SmallPtrSet<Instruction *, 8> Visited;
3490 Visited.insert(PN);
3491 while (!Worklist.empty()) {
3492 Instruction *I = Worklist.pop_back_val();
3493 if (!Visited.insert(I).second)
3494 continue;
3495
3496 ValueExprMapType::iterator It =
3497 ValueExprMap.find_as(static_cast<Value *>(I));
3498 if (It != ValueExprMap.end()) {
3499 const SCEV *Old = It->second;
3500
3501 // Short-circuit the def-use traversal if the symbolic name
3502 // ceases to appear in expressions.
3503 if (Old != SymName && !hasOperand(Old, SymName))
3504 continue;
3505
3506 // SCEVUnknown for a PHI either means that it has an unrecognized
3507 // structure, it's a PHI that's in the process of being computed
3508 // by createNodeForPHI, or it's a single-value PHI. In the first case,
3509 // additional loop trip count information isn't going to change anything.
3510 // In the second case, createNodeForPHI will perform the necessary
3511 // updates on its own when it gets to that point. In the third, we do
3512 // want to forget the SCEVUnknown.
3513 if (!isa<PHINode>(I) ||
3514 !isa<SCEVUnknown>(Old) ||
3515 (I != PN && Old == SymName)) {
3516 forgetMemoizedResults(Old);
3517 ValueExprMap.erase(It);
3518 }
3519 }
3520
3521 PushDefUseChildren(I, Worklist);
3522 }
3523 }
3524
3525 /// createNodeForPHI - PHI nodes have two cases. Either the PHI node exists in
3526 /// a loop header, making it a potential recurrence, or it doesn't.
3527 ///
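/// Illustrative example (an assumption, not from the original comment): for
///   %iv = phi i32 [ 0, %preheader ], [ %iv.next, %latch ]
///   %iv.next = add nsw i32 %iv, 1
/// in loop L, this recognizes the recurrence {0,+,1}<nsw><L>.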
3528 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
3529 if (const Loop *L = LI->getLoopFor(PN->getParent()))
3530 if (L->getHeader() == PN->getParent()) {
3531 // The loop may have multiple entrances or multiple exits; we can analyze
3532 // this phi as an addrec if it has a unique entry value and a unique
3533 // backedge value.
3534 Value *BEValueV = nullptr, *StartValueV = nullptr;
3535 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
3536 Value *V = PN->getIncomingValue(i);
3537 if (L->contains(PN->getIncomingBlock(i))) {
3538 if (!BEValueV) {
3539 BEValueV = V;
3540 } else if (BEValueV != V) {
3541 BEValueV = nullptr;
3542 break;
3543 }
3544 } else if (!StartValueV) {
3545 StartValueV = V;
3546 } else if (StartValueV != V) {
3547 StartValueV = nullptr;
3548 break;
3549 }
3550 }
3551 if (BEValueV && StartValueV) {
3552 // While we are analyzing this PHI node, handle its value symbolically.
3553 const SCEV *SymbolicName = getUnknown(PN);
3554 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
3555 "PHI node already processed?");
3556 ValueExprMap.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
3557
3558 // Using this symbolic name for the PHI, analyze the value coming around
3559 // the back-edge.
3560 const SCEV *BEValue = getSCEV(BEValueV);
3561
3562 // NOTE: If BEValue is loop invariant, we know that the PHI node just
3563 // has a special value for the first iteration of the loop.
3564
3565 // If the value coming around the backedge is an add with the symbolic
3566 // value we just inserted, then we found a simple induction variable!
3567 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
3568 // If there is a single occurrence of the symbolic value, replace it
3569 // with a recurrence.
3570 unsigned FoundIndex = Add->getNumOperands();
3571 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
3572 if (Add->getOperand(i) == SymbolicName)
3573 if (FoundIndex == e) {
3574 FoundIndex = i;
3575 break;
3576 }
3577
3578 if (FoundIndex != Add->getNumOperands()) {
3579 // Create an add with everything but the specified operand.
3580 SmallVector<const SCEV *, 8> Ops;
3581 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
3582 if (i != FoundIndex)
3583 Ops.push_back(Add->getOperand(i));
3584 const SCEV *Accum = getAddExpr(Ops);
3585
3586 // This is not a valid addrec if the step amount is varying each
3587 // loop iteration, but is not itself an addrec in this loop.
3588 if (isLoopInvariant(Accum, L) ||
3589 (isa<SCEVAddRecExpr>(Accum) &&
3590 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
3591 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
3592
3593 // If the increment doesn't overflow, then neither the addrec nor
3594 // the post-increment will overflow.
3595 if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) {
3596 if (OBO->getOperand(0) == PN) {
3597 if (OBO->hasNoUnsignedWrap())
3598 Flags = setFlags(Flags, SCEV::FlagNUW);
3599 if (OBO->hasNoSignedWrap())
3600 Flags = setFlags(Flags, SCEV::FlagNSW);
3601 }
3602 } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
3603 // If the increment is an inbounds GEP, then we know the address
3604 // space cannot be wrapped around. We cannot make any guarantee
3605 // about signed or unsigned overflow because pointers are
3606 // unsigned but we may have a negative index from the base
3607 // pointer. We can guarantee that no unsigned wrap occurs if the
3608 // indices form a positive value.
3609 if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
3610 Flags = setFlags(Flags, SCEV::FlagNW);
3611
3612 const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
3613 if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
3614 Flags = setFlags(Flags, SCEV::FlagNUW);
3615 }
3616
3617 // We cannot transfer nuw and nsw flags from subtraction
3618 // operations -- sub nuw X, Y is not the same as add nuw X, -Y
3619 // for instance.
3620 }
3621
3622 const SCEV *StartVal = getSCEV(StartValueV);
3623 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
3624
3625 // Since the no-wrap flags are on the increment, they apply to the
3626 // post-incremented value as well.
3627 if (isLoopInvariant(Accum, L))
3628 (void)getAddRecExpr(getAddExpr(StartVal, Accum),
3629 Accum, L, Flags);
3630
3631 // Okay, for the entire analysis of this edge we assumed the PHI
3632 // to be symbolic. We now need to go back and purge all of the
3633 // entries for the scalars that use the symbolic expression.
3634 ForgetSymbolicName(PN, SymbolicName);
3635 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
3636 return PHISCEV;
3637 }
3638 }
3639 } else if (const SCEVAddRecExpr *AddRec =
3640 dyn_cast<SCEVAddRecExpr>(BEValue)) {
3641 // Otherwise, this could be a loop like this:
3642 // i = 0; for (j = 1; ..; ++j) { .... i = j; }
3643 // In this case, j = {1,+,1} and BEValue is j.
3644 // Because the other in-value of i (0) fits the evolution of BEValue,
3645 // i really is an addrec evolution.
3646 if (AddRec->getLoop() == L && AddRec->isAffine()) {
3647 const SCEV *StartVal = getSCEV(StartValueV);
3648
3649 // If StartVal = j.start - j.stride, we can use StartVal as the
3650 // start of the addrec evolution.
3651 if (StartVal == getMinusSCEV(AddRec->getOperand(0),
3652 AddRec->getOperand(1))) {
3653 // FIXME: For constant StartVal, we should be able to infer
3654 // no-wrap flags.
3655 const SCEV *PHISCEV =
3656 getAddRecExpr(StartVal, AddRec->getOperand(1), L,
3657 SCEV::FlagAnyWrap);
3658
3659 // Okay, for the entire analysis of this edge we assumed the PHI
3660 // to be symbolic. We now need to go back and purge all of the
3661 // entries for the scalars that use the symbolic expression.
3662 ForgetSymbolicName(PN, SymbolicName);
3663 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
3664 return PHISCEV;
3665 }
3666 }
3667 }
3668 }
3669 }
3670
3671 // If the PHI has a single incoming value, follow that value, unless the
3672 // PHI's incoming blocks are in a different loop, in which case doing so
3673 // risks breaking LCSSA form. Instcombine would normally zap these, but
3674 // it doesn't have DominatorTree information, so it may miss cases.
3675 if (Value *V =
3676 SimplifyInstruction(PN, F->getParent()->getDataLayout(), TLI, DT, AC))
3677 if (LI->replacementPreservesLCSSAForm(PN, V))
3678 return getSCEV(V);
3679
3680 // If it's not a loop phi, we can't handle it yet.
3681 return getUnknown(PN);
3682 }
3683
3684 /// createNodeForGEP - Expand GEP instructions into add and multiply
3685 /// operations. This allows them to be analyzed by regular SCEV code.
3686 ///
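/// Illustrative example (an assumption, not from the original comment):
///   %p = getelementptr inbounds i32, i32* %base, i64 %i
/// is modeled as roughly (%base + (4 * %i)), with NSW flags derived from the
/// inbounds keyword applied to the multiply and the final add.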
3687 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
3688 Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
3689 Value *Base = GEP->getOperand(0);
3690 // Don't attempt to analyze GEPs over unsized objects.
3691 if (!Base->getType()->getPointerElementType()->isSized())
3692 return getUnknown(GEP);
3693
3694 // Don't blindly transfer the inbounds flag from the GEP instruction to the
3695 // Add expression, because the Instruction may be guarded by control flow
3696 // and the no-overflow bits may not be valid for the expression in any
3697 // context.
3698 SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
3699
3700 const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
3701 gep_type_iterator GTI = gep_type_begin(GEP);
3702 for (GetElementPtrInst::op_iterator I = std::next(GEP->op_begin()),
3703 E = GEP->op_end();
3704 I != E; ++I) {
3705 Value *Index = *I;
3706 // Compute the (potentially symbolic) offset in bytes for this index.
3707 if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
3708 // For a struct, add the member offset.
3709 unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
3710 const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);
3711
3712 // Add the field offset to the running total offset.
3713 TotalOffset = getAddExpr(TotalOffset, FieldOffset);
3714 } else {
3715 // For an array, add the element offset, explicitly scaled.
3716 const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, *GTI);
3717 const SCEV *IndexS = getSCEV(Index);
3718 // Getelementptr indices are signed.
3719 IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
3720
3721 // Multiply the index by the element size to compute the element offset.
3722 const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize, Wrap);
3723
3724 // Add the element offset to the running total offset.
3725 TotalOffset = getAddExpr(TotalOffset, LocalOffset);
3726 }
3727 }
3728
3729 // Get the SCEV for the GEP base.
3730 const SCEV *BaseS = getSCEV(Base);
3731
3732 // Add the total offset from all the GEP indices to the base.
3733 return getAddExpr(BaseS, TotalOffset, Wrap);
3734 }
3735
3736 /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
3737 /// guaranteed to end in (at every loop iteration). It is, at the same time,
3738 /// the minimum number of times S is divisible by 2. For example, given {4,+,8}
3739 /// it returns 2. If S is guaranteed to be 0, it returns the bitwidth of S.
3740 uint32_t
3741 ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
3742 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
3743 return C->getValue()->getValue().countTrailingZeros();
3744
3745 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
3746 return std::min(GetMinTrailingZeros(T->getOperand()),
3747 (uint32_t)getTypeSizeInBits(T->getType()));
3748
3749 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
3750 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
3751 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
3752 getTypeSizeInBits(E->getType()) : OpRes;
3753 }
3754
3755 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
3756 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
3757 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
3758 getTypeSizeInBits(E->getType()) : OpRes;
3759 }
3760
3761 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
3762 // The result is the min of all operands' results.
3763 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
3764 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
3765 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
3766 return MinOpRes;
3767 }
3768
3769 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
3770 // The result is the sum of all operands' results.
3771 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
3772 uint32_t BitWidth = getTypeSizeInBits(M->getType());
3773 for (unsigned i = 1, e = M->getNumOperands();
3774 SumOpRes != BitWidth && i != e; ++i)
3775 SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
3776 BitWidth);
3777 return SumOpRes;
3778 }
3779
3780 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
3781 // The result is the min of all operands' results.
3782 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
3783 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
3784 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
3785 return MinOpRes;
3786 }
3787
3788 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
3789 // The result is the min of all operands' results.
3790 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
3791 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
3792 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
3793 return MinOpRes;
3794 }
3795
3796 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
3797 // The result is the min of all operands' results.
3798 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
3799 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
3800 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
3801 return MinOpRes;
3802 }
3803
3804 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
3805 // For a SCEVUnknown, ask ValueTracking.
3806 unsigned BitWidth = getTypeSizeInBits(U->getType());
3807 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
3808 computeKnownBits(U->getValue(), Zeros, Ones,
3809 F->getParent()->getDataLayout(), 0, AC, nullptr, DT);
3810 return Zeros.countTrailingOnes();
3811 }
3812
3813 // SCEVUDivExpr
3814 return 0;
3815 }
3816
3817 /// GetRangeFromMetadata - Helper method to assign a range to V from
3818 /// metadata present in the IR.
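/// Illustrative example (an assumption, not from the original comment): a load
/// tagged with !range !{i32 0, i32 10, i32 20, i32 30} yields a single
/// conservative ConstantRange covering both [0, 10) and [20, 30).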
3819 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) {
3820 if (Instruction *I = dyn_cast<Instruction>(V)) {
3821 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) {
3822 ConstantRange TotalRange(
3823 cast<IntegerType>(I->getType())->getBitWidth(), false);
3824
3825 unsigned NumRanges = MD->getNumOperands() / 2;
3826 assert(NumRanges >= 1);
3827
3828 for (unsigned i = 0; i < NumRanges; ++i) {
3829 ConstantInt *Lower =
3830 mdconst::extract<ConstantInt>(MD->getOperand(2 * i + 0));
3831 ConstantInt *Upper =
3832 mdconst::extract<ConstantInt>(MD->getOperand(2 * i + 1));
3833 ConstantRange Range(Lower->getValue(), Upper->getValue());
3834 TotalRange = TotalRange.unionWith(Range);
3835 }
3836
3837 return TotalRange;
3838 }
3839 }
3840
3841 return None;
3842 }
3843
3844 /// getRange - Determine the range for a particular SCEV. If SignHint is
3845 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges
3846 /// with a "cleaner" unsigned (resp. signed) representation.
3847 ///
3848 ConstantRange
3849 ScalarEvolution::getRange(const SCEV *S,
3850 ScalarEvolution::RangeSignHint SignHint) {
3851 DenseMap<const SCEV *, ConstantRange> &Cache =
3852 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
3853 : SignedRanges;
3854
3855 // See if we've computed this range already.
3856 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S);
3857 if (I != Cache.end())
3858 return I->second;
3859
3860 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
3861 return setRange(C, SignHint, ConstantRange(C->getValue()->getValue()));
3862
3863 unsigned BitWidth = getTypeSizeInBits(S->getType());
3864 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
3865
3866 // If the value has known zeros, the maximum value will have those known zeros
3867 // as well.
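// Illustrative example (an assumption, not from the original comments): with
// BitWidth = 8 and TZ = 2, the unsigned case yields the range [0, 253), i.e.
// the maximum is rounded down to 252, a multiple of 4.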
3868 uint32_t TZ = GetMinTrailingZeros(S);
3869 if (TZ != 0) {
3870 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED)
3871 ConservativeResult =
3872 ConstantRange(APInt::getMinValue(BitWidth),
3873 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
3874 else
3875 ConservativeResult = ConstantRange(
3876 APInt::getSignedMinValue(BitWidth),
3877 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
3878 }
3879
3880 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
3881 ConstantRange X = getRange(Add->getOperand(0), SignHint);
3882 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
3883 X = X.add(getRange(Add->getOperand(i), SignHint));
3884 return setRange(Add, SignHint, ConservativeResult.intersectWith(X));
3885 }
3886
3887 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
3888 ConstantRange X = getRange(Mul->getOperand(0), SignHint);
3889 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
3890 X = X.multiply(getRange(Mul->getOperand(i), SignHint));
3891 return setRange(Mul, SignHint, ConservativeResult.intersectWith(X));
3892 }
3893
3894 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
3895 ConstantRange X = getRange(SMax->getOperand(0), SignHint);
3896 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
3897 X = X.smax(getRange(SMax->getOperand(i), SignHint));
3898 return setRange(SMax, SignHint, ConservativeResult.intersectWith(X));
3899 }
3900
3901 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
3902 ConstantRange X = getRange(UMax->getOperand(0), SignHint);
3903 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
3904 X = X.umax(getRange(UMax->getOperand(i), SignHint));
3905 return setRange(UMax, SignHint, ConservativeResult.intersectWith(X));
3906 }
3907
3908 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
3909 ConstantRange X = getRange(UDiv->getLHS(), SignHint);
3910 ConstantRange Y = getRange(UDiv->getRHS(), SignHint);
3911 return setRange(UDiv, SignHint,
3912 ConservativeResult.intersectWith(X.udiv(Y)));
3913 }
3914
3915 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
3916 ConstantRange X = getRange(ZExt->getOperand(), SignHint);
3917 return setRange(ZExt, SignHint,
3918 ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
3919 }
3920
3921 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
3922 ConstantRange X = getRange(SExt->getOperand(), SignHint);
3923 return setRange(SExt, SignHint,
3924 ConservativeResult.intersectWith(X.signExtend(BitWidth)));
3925 }
3926
3927 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
3928 ConstantRange X = getRange(Trunc->getOperand(), SignHint);
3929 return setRange(Trunc, SignHint,
3930 ConservativeResult.intersectWith(X.truncate(BitWidth)));
3931 }
3932
3933 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
3934 // If there's no unsigned wrap, the value will never be less than its
3935 // initial value.
3936 if (AddRec->getNoWrapFlags(SCEV::FlagNUW))
3937 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
3938 if (!C->getValue()->isZero())
3939 ConservativeResult =
3940 ConservativeResult.intersectWith(
3941 ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0)));
3942
3943 // If there's no signed wrap, and all the operands have the same sign or
3944 // zero, the value won't ever change sign.
3945 if (AddRec->getNoWrapFlags(SCEV::FlagNSW)) {
3946 bool AllNonNeg = true;
3947 bool AllNonPos = true;
3948 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
3949 if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
3950 if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
3951 }
3952 if (AllNonNeg)
3953 ConservativeResult = ConservativeResult.intersectWith(
3954 ConstantRange(APInt(BitWidth, 0),
3955 APInt::getSignedMinValue(BitWidth)));
3956 else if (AllNonPos)
3957 ConservativeResult = ConservativeResult.intersectWith(
3958 ConstantRange(APInt::getSignedMinValue(BitWidth),
3959 APInt(BitWidth, 1)));
3960 }
3961
3962 // TODO: non-affine addrec
3963 if (AddRec->isAffine()) {
3964 Type *Ty = AddRec->getType();
3965 const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
3966 if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
3967 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
3968
3969 // Check for overflow. This must be done with ConstantRange arithmetic
3970 // because we could be called from within the ScalarEvolution overflow
3971 // checking code.
3972
3973 MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
3974 ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
3975 ConstantRange ZExtMaxBECountRange =
3976 MaxBECountRange.zextOrTrunc(BitWidth * 2 + 1);
3977
3978 const SCEV *Start = AddRec->getStart();
3979 const SCEV *Step = AddRec->getStepRecurrence(*this);
3980 ConstantRange StepSRange = getSignedRange(Step);
3981 ConstantRange SExtStepSRange = StepSRange.sextOrTrunc(BitWidth * 2 + 1);
3982
3983 ConstantRange StartURange = getUnsignedRange(Start);
3984 ConstantRange EndURange =
3985 StartURange.add(MaxBECountRange.multiply(StepSRange));
3986
3987 // Check for unsigned overflow.
3988 ConstantRange ZExtStartURange =
3989 StartURange.zextOrTrunc(BitWidth * 2 + 1);
3990 ConstantRange ZExtEndURange = EndURange.zextOrTrunc(BitWidth * 2 + 1);
3991 if (ZExtStartURange.add(ZExtMaxBECountRange.multiply(SExtStepSRange)) ==
3992 ZExtEndURange) {
3993 APInt Min = APIntOps::umin(StartURange.getUnsignedMin(),
3994 EndURange.getUnsignedMin());
3995 APInt Max = APIntOps::umax(StartURange.getUnsignedMax(),
3996 EndURange.getUnsignedMax());
3997 bool IsFullRange = Min.isMinValue() && Max.isMaxValue();
3998 if (!IsFullRange)
3999 ConservativeResult =
4000 ConservativeResult.intersectWith(ConstantRange(Min, Max + 1));
4001 }
4002
4003 ConstantRange StartSRange = getSignedRange(Start);
4004 ConstantRange EndSRange =
4005 StartSRange.add(MaxBECountRange.multiply(StepSRange));
4006
4007 // Check for signed overflow. This must be done with ConstantRange
4008 // arithmetic because we could be called from within the ScalarEvolution
4009 // overflow checking code.
4010 ConstantRange SExtStartSRange =
4011 StartSRange.sextOrTrunc(BitWidth * 2 + 1);
4012 ConstantRange SExtEndSRange = EndSRange.sextOrTrunc(BitWidth * 2 + 1);
4013 if (SExtStartSRange.add(ZExtMaxBECountRange.multiply(SExtStepSRange)) ==
4014 SExtEndSRange) {
4015 APInt Min = APIntOps::smin(StartSRange.getSignedMin(),
4016 EndSRange.getSignedMin());
4017 APInt Max = APIntOps::smax(StartSRange.getSignedMax(),
4018 EndSRange.getSignedMax());
4019 bool IsFullRange = Min.isMinSignedValue() && Max.isMaxSignedValue();
4020 if (!IsFullRange)
4021 ConservativeResult =
4022 ConservativeResult.intersectWith(ConstantRange(Min, Max + 1));
4023 }
4024 }
4025 }
4026
4027 return setRange(AddRec, SignHint, ConservativeResult);
4028 }
4029
4030 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
4031 // Check if the IR explicitly contains !range metadata.
4032 Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
4033 if (MDRange.hasValue())
4034 ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue());
4035
4036 // Split here to avoid paying the compile-time cost of calling both
4037 // computeKnownBits and ComputeNumSignBits. This restriction can be lifted
4038 // if needed.
4039 const DataLayout &DL = F->getParent()->getDataLayout();
4040 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
4041 // For a SCEVUnknown, ask ValueTracking.
4042 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
4043 computeKnownBits(U->getValue(), Zeros, Ones, DL, 0, AC, nullptr, DT);
4044 if (Ones != ~Zeros + 1)
4045 ConservativeResult =
4046 ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1));
4047 } else {
4048 assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
4049 "generalize as needed!");
4050 unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, AC, nullptr, DT);
4051 if (NS > 1)
4052 ConservativeResult = ConservativeResult.intersectWith(
4053 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
4054 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1));
4055 }
4056
4057 return setRange(U, SignHint, ConservativeResult);
4058 }
4059
4060 return setRange(S, SignHint, ConservativeResult);
4061 }
4062
4063 /// createSCEV - We know that there is no SCEV for the specified value.
4064 /// Analyze the expression.
4065 ///
4066 const SCEV *ScalarEvolution::createSCEV(Value *V) {
4067 if (!isSCEVable(V->getType()))
4068 return getUnknown(V);
4069
4070 unsigned Opcode = Instruction::UserOp1;
4071 if (Instruction *I = dyn_cast<Instruction>(V)) {
4072 Opcode = I->getOpcode();
4073
4074 // Don't attempt to analyze instructions in blocks that aren't
4075 // reachable. Such instructions don't matter, and they aren't required
4076 // to obey basic rules for definitions dominating uses which this
4077 // analysis depends on.
4078 if (!DT->isReachableFromEntry(I->getParent()))
4079 return getUnknown(V);
4080 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
4081 Opcode = CE->getOpcode();
4082 else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
4083 return getConstant(CI);
4084 else if (isa<ConstantPointerNull>(V))
4085 return getConstant(V->getType(), 0);
4086 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
4087 return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee());
4088 else
4089 return getUnknown(V);
4090
4091 Operator *U = cast<Operator>(V);
4092 switch (Opcode) {
4093 case Instruction::Add: {
4094 // The simple thing to do would be to just call getSCEV on both operands
4095 // and call getAddExpr with the result. However if we're looking at a
4096 // bunch of things all added together, this can be quite inefficient,
4097 // because it leads to N-1 getAddExpr calls for N ultimate operands.
4098 // Instead, gather up all the operands and make a single getAddExpr call.
4099 // LLVM IR canonical form means we need only traverse the left operands.
4100 //
4101 // Don't apply this instruction's NSW or NUW flags to the new
4102 // expression. The instruction may be guarded by control flow that the
4103 // no-wrap behavior depends on. Non-control-equivalent instructions can be
4104 // mapped to the same SCEV expression, and it would be incorrect to transfer
4105 // NSW/NUW semantics to those operations.
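// Illustrative example (an assumption, not from the original comments): for
//   %t0 = add i32 %a, %b
//   %t1 = add i32 %t0, %c
//   %t2 = add i32 %t1, %d
// starting at %t2 we collect {%d, %c, %b, %a} and issue one getAddExpr call.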
4106 SmallVector<const SCEV *, 4> AddOps;
4107 AddOps.push_back(getSCEV(U->getOperand(1)));
4108 for (Value *Op = U->getOperand(0); ; Op = U->getOperand(0)) {
4109 unsigned Opcode = Op->getValueID() - Value::InstructionVal;
4110 if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
4111 break;
4112 U = cast<Operator>(Op);
4113 const SCEV *Op1 = getSCEV(U->getOperand(1));
4114 if (Opcode == Instruction::Sub)
4115 AddOps.push_back(getNegativeSCEV(Op1));
4116 else
4117 AddOps.push_back(Op1);
4118 }
4119 AddOps.push_back(getSCEV(U->getOperand(0)));
4120 return getAddExpr(AddOps);
4121 }
4122 case Instruction::Mul: {
4123 // Don't transfer NSW/NUW for the same reason as AddExpr.
4124 SmallVector<const SCEV *, 4> MulOps;
4125 MulOps.push_back(getSCEV(U->getOperand(1)));
4126 for (Value *Op = U->getOperand(0);
4127 Op->getValueID() == Instruction::Mul + Value::InstructionVal;
4128 Op = U->getOperand(0)) {
4129 U = cast<Operator>(Op);
4130 MulOps.push_back(getSCEV(U->getOperand(1)));
4131 }
4132 MulOps.push_back(getSCEV(U->getOperand(0)));
4133 return getMulExpr(MulOps);
4134 }
4135 case Instruction::UDiv:
4136 return getUDivExpr(getSCEV(U->getOperand(0)),
4137 getSCEV(U->getOperand(1)));
4138 case Instruction::Sub:
4139 return getMinusSCEV(getSCEV(U->getOperand(0)),
4140 getSCEV(U->getOperand(1)));
4141 case Instruction::And:
4142 // For an expression like x&255 that merely masks off the high bits,
4143 // use zext(trunc(x)) as the SCEV expression.
4144 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
4145 if (CI->isNullValue())
4146 return getSCEV(U->getOperand(1));
4147 if (CI->isAllOnesValue())
4148 return getSCEV(U->getOperand(0));
4149 const APInt &A = CI->getValue();
4150
4151 // Instcombine's ShrinkDemandedConstant may strip bits out of
4152 // constants, obscuring what would otherwise be a low-bits mask.
4153 // Use computeKnownBits to compute what ShrinkDemandedConstant
4154 // knew about to reconstruct a low-bits mask value.
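// Illustrative example (an assumption, not from the original comments): in
// i8, if the low 4 bits of x are known zero and the constant is 0xF0
// (LZ = 0, TZ = 4), then x & 0xF0 is modeled as
// (zext (trunc (x /u 16) to i4) to i8) * 16.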
4155 unsigned LZ = A.countLeadingZeros();
4156 unsigned TZ = A.countTrailingZeros();
4157 unsigned BitWidth = A.getBitWidth();
4158 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
4159 computeKnownBits(U->getOperand(0), KnownZero, KnownOne,
4160 F->getParent()->getDataLayout(), 0, AC, nullptr, DT);
4161
4162 APInt EffectiveMask =
4163 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
4164 if ((LZ != 0 || TZ != 0) && !((~A & ~KnownZero) & EffectiveMask)) {
4165 const SCEV *MulCount = getConstant(
4166 ConstantInt::get(getContext(), APInt::getOneBitSet(BitWidth, TZ)));
4167 return getMulExpr(
4168 getZeroExtendExpr(
4169 getTruncateExpr(
4170 getUDivExactExpr(getSCEV(U->getOperand(0)), MulCount),
4171 IntegerType::get(getContext(), BitWidth - LZ - TZ)),
4172 U->getType()),
4173 MulCount);
4174 }
4175 }
4176 break;
4177
4178 case Instruction::Or:
4179 // If the RHS of the Or is a constant, we may have something like:
4180 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop
4181 // optimizations will transparently handle this case.
4182 //
4183 // In order for this transformation to be safe, the LHS must be of the
4184 // form X*(2^n) and the Or constant must be less than 2^n.
4185 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
4186 const SCEV *LHS = getSCEV(U->getOperand(0));
4187 const APInt &CIVal = CI->getValue();
4188 if (GetMinTrailingZeros(LHS) >=
4189 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
4190 // Build a plain add SCEV.
4191 const SCEV *S = getAddExpr(LHS, getSCEV(CI));
4192 // If the LHS of the add was an addrec and it has no-wrap flags,
4193 // transfer the no-wrap flags, since an or won't introduce a wrap.
4194 if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) {
4195 const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS);
4196 const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags(
4197 OldAR->getNoWrapFlags());
4198 }
4199 return S;
4200 }
4201 }
4202 break;
4203 case Instruction::Xor:
4204 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
4205 // If the RHS of the xor is a signbit, then this is just an add.
4206 // Instcombine turns add of signbit into xor as a strength reduction step.
4207 if (CI->getValue().isSignBit())
4208 return getAddExpr(getSCEV(U->getOperand(0)),
4209 getSCEV(U->getOperand(1)));
4210
4211 // If the RHS of xor is -1, then this is a not operation.
4212 if (CI->isAllOnesValue())
4213 return getNotSCEV(getSCEV(U->getOperand(0)));
4214
4215 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
4216 // This is a variant of the check for xor with -1, and it handles
4217 // the case where instcombine has trimmed non-demanded bits out
4218 // of an xor with -1.
4219 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
4220 if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
4221 if (BO->getOpcode() == Instruction::And &&
4222 LCI->getValue() == CI->getValue())
4223 if (const SCEVZeroExtendExpr *Z =
4224 dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
4225 Type *UTy = U->getType();
4226 const SCEV *Z0 = Z->getOperand();
4227 Type *Z0Ty = Z0->getType();
4228 unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
4229
4230 // If C is a low-bits mask, the zero extend is serving to
4231 // mask off the high bits. Complement the operand and
4232 // re-apply the zext.
4233 if (APIntOps::isMask(Z0TySize, CI->getValue()))
4234 return getZeroExtendExpr(getNotSCEV(Z0), UTy);
4235
4236 // If C is a single bit, it may be in the sign-bit position
4237 // before the zero-extend. In this case, represent the xor
4238 // using an add, which is equivalent, and re-apply the zext.
4239 APInt Trunc = CI->getValue().trunc(Z0TySize);
4240 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
4241 Trunc.isSignBit())
4242 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
4243 UTy);
4244 }
4245 }
4246 break;
4247
4248 case Instruction::Shl:
4249 // Turn shift left of a constant amount into a multiply.
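// For example, "x << 3" would be modeled as the SCEV expression x * 8.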
4250 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
4251 uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
4252
4253 // If the shift count is not less than the bitwidth, the result of
4254 // the shift is undefined. Don't try to analyze it, because the
4255 // resolution chosen here may differ from the resolution chosen in
4256 // other parts of the compiler.
4257 if (SA->getValue().uge(BitWidth))
4258 break;
4259
4260 Constant *X = ConstantInt::get(getContext(),
4261 APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
4262 return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
4263 }
4264 break;
4265
4266 case Instruction::LShr:
4267 // Turn logical shift right of a constant into an unsigned divide.
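// For example, "x >>u 3" would be modeled as the SCEV expression x /u 8.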
4268 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
4269 uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
4270
4271 // If the shift count is not less than the bitwidth, the result of
4272 // the shift is undefined. Don't try to analyze it, because the
4273 // resolution chosen here may differ from the resolution chosen in
4274 // other parts of the compiler.
4275 if (SA->getValue().uge(BitWidth))
4276 break;
4277
4278 Constant *X = ConstantInt::get(getContext(),
4279 APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
4280 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
4281 }
4282 break;
4283
4284 case Instruction::AShr:
4285 // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
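// For example, on i32, (x << 24) >>s 24 would be modeled as
// sext(trunc(x to i8) to i32).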
4286 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
4287 if (Operator *L = dyn_cast<Operator>(U->getOperand(0)))
4288 if (L->getOpcode() == Instruction::Shl &&
4289 L->getOperand(1) == U->getOperand(1)) {
4290 uint64_t BitWidth = getTypeSizeInBits(U->getType());
4291
4292 // If the shift count is not less than the bitwidth, the result of
4293 // the shift is undefined. Don't try to analyze it, because the
4294 // resolution chosen here may differ from the resolution chosen in
4295 // other parts of the compiler.
4296 if (CI->getValue().uge(BitWidth))
4297 break;
4298
4299 uint64_t Amt = BitWidth - CI->getZExtValue();
4300 if (Amt == BitWidth)
4301 return getSCEV(L->getOperand(0)); // shift by zero --> noop
4302 return
4303 getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
4304 IntegerType::get(getContext(),
4305 Amt)),
4306 U->getType());
4307 }
4308 break;
4309
4310 case Instruction::Trunc:
4311 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
4312
4313 case Instruction::ZExt:
4314 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
4315
4316 case Instruction::SExt:
4317 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
4318
4319 case Instruction::BitCast:
4320 // BitCasts are no-op casts so we just eliminate the cast.
4321 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
4322 return getSCEV(U->getOperand(0));
4323 break;
4324
4325 // It's tempting to handle inttoptr and ptrtoint as no-ops; however, this can
4326 // lead to pointer expressions which cannot safely be expanded to GEPs,
4327 // because ScalarEvolution doesn't respect the GEP aliasing rules when
4328 // simplifying integer expressions.
4329
4330 case Instruction::GetElementPtr:
4331 return createNodeForGEP(cast<GEPOperator>(U));
4332
4333 case Instruction::PHI:
4334 return createNodeForPHI(cast<PHINode>(U));
4335
4336 case Instruction::Select:
4337 // This could be a smax or umax that was lowered earlier.
4338 // Try to recover it.
4339 if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
4340 Value *LHS = ICI->getOperand(0);
4341 Value *RHS = ICI->getOperand(1);
4342 switch (ICI->getPredicate()) {
4343 case ICmpInst::ICMP_SLT:
4344 case ICmpInst::ICMP_SLE:
4345 std::swap(LHS, RHS);
4346 // fall through
4347 case ICmpInst::ICMP_SGT:
4348 case ICmpInst::ICMP_SGE:
4349 // a >s b ? a+x : b+x -> smax(a, b)+x
4350 // a >s b ? b+x : a+x -> smin(a, b)+x
4351 if (getTypeSizeInBits(LHS->getType()) <=
4352 getTypeSizeInBits(U->getType())) {
4353 const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), U->getType());
4354 const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), U->getType());
4355 const SCEV *LA = getSCEV(U->getOperand(1));
4356 const SCEV *RA = getSCEV(U->getOperand(2));
4357 const SCEV *LDiff = getMinusSCEV(LA, LS);
4358 const SCEV *RDiff = getMinusSCEV(RA, RS);
4359 if (LDiff == RDiff)
4360 return getAddExpr(getSMaxExpr(LS, RS), LDiff);
4361 LDiff = getMinusSCEV(LA, RS);
4362 RDiff = getMinusSCEV(RA, LS);
4363 if (LDiff == RDiff)
4364 return getAddExpr(getSMinExpr(LS, RS), LDiff);
4365 }
4366 break;
4367 case ICmpInst::ICMP_ULT:
4368 case ICmpInst::ICMP_ULE:
4369 std::swap(LHS, RHS);
4370 // fall through
4371 case ICmpInst::ICMP_UGT:
4372 case ICmpInst::ICMP_UGE:
4373 // a >u b ? a+x : b+x -> umax(a, b)+x
4374 // a >u b ? b+x : a+x -> umin(a, b)+x
4375 if (getTypeSizeInBits(LHS->getType()) <=
4376 getTypeSizeInBits(U->getType())) {
4377 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), U->getType());
4378 const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), U->getType());
4379 const SCEV *LA = getSCEV(U->getOperand(1));
4380 const SCEV *RA = getSCEV(U->getOperand(2));
4381 const SCEV *LDiff = getMinusSCEV(LA, LS);
4382 const SCEV *RDiff = getMinusSCEV(RA, RS);
4383 if (LDiff == RDiff)
4384 return getAddExpr(getUMaxExpr(LS, RS), LDiff);
4385 LDiff = getMinusSCEV(LA, RS);
4386 RDiff = getMinusSCEV(RA, LS);
4387 if (LDiff == RDiff)
4388 return getAddExpr(getUMinExpr(LS, RS), LDiff);
4389 }
4390 break;
4391 case ICmpInst::ICMP_NE:
4392 // n != 0 ? n+x : 1+x -> umax(n, 1)+x
4393 if (getTypeSizeInBits(LHS->getType()) <=
4394 getTypeSizeInBits(U->getType()) &&
4395 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
4396 const SCEV *One = getConstant(U->getType(), 1);
4397 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), U->getType());
4398 const SCEV *LA = getSCEV(U->getOperand(1));
4399 const SCEV *RA = getSCEV(U->getOperand(2));
4400 const SCEV *LDiff = getMinusSCEV(LA, LS);
4401 const SCEV *RDiff = getMinusSCEV(RA, One);
4402 if (LDiff == RDiff)
4403 return getAddExpr(getUMaxExpr(One, LS), LDiff);
4404 }
4405 break;
4406 case ICmpInst::ICMP_EQ:
4407 // n == 0 ? 1+x : n+x -> umax(n, 1)+x
4408 if (getTypeSizeInBits(LHS->getType()) <=
4409 getTypeSizeInBits(U->getType()) &&
4410 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
4411 const SCEV *One = getConstant(U->getType(), 1);
4412 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), U->getType());
4413 const SCEV *LA = getSCEV(U->getOperand(1));
4414 const SCEV *RA = getSCEV(U->getOperand(2));
4415 const SCEV *LDiff = getMinusSCEV(LA, One);
4416 const SCEV *RDiff = getMinusSCEV(RA, LS);
4417 if (LDiff == RDiff)
4418 return getAddExpr(getUMaxExpr(One, LS), LDiff);
4419 }
4420 break;
4421 default:
4422 break;
4423 }
4424 }
4425
4426 default: // We cannot analyze this expression.
4427 break;
4428 }
4429
4430 return getUnknown(V);
4431 }
4432
4433
4434
4435 //===----------------------------------------------------------------------===//
4436 // Iteration Count Computation Code
4437 //
4438
4439 unsigned ScalarEvolution::getSmallConstantTripCount(Loop *L) {
4440 if (BasicBlock *ExitingBB = L->getExitingBlock())
4441 return getSmallConstantTripCount(L, ExitingBB);
4442
4443 // No trip count information for multiple exits.
4444 return 0;
4445 }
4446
4447 /// getSmallConstantTripCount - Returns the maximum trip count of this loop as a
4448 /// normal unsigned value. Returns 0 if the trip count is unknown or not
4449 /// constant. Will also return 0 if the maximum trip count is very large (>=
4450 /// 2^32).
4451 ///
4452 /// This "trip count" assumes that control exits via ExitingBlock. More
4453 /// precisely, it is the number of times that control may reach ExitingBlock
4454 /// before taking the branch. For loops with multiple exits, it may not be the
4455 /// number of times that the loop header executes because the loop may exit
4456 /// prematurely via another branch.
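/// For example, an exiting block whose backedge-taken count is the constant 9
/// would yield a small constant trip count of 10.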
4457 unsigned ScalarEvolution::getSmallConstantTripCount(Loop *L,
4458 BasicBlock *ExitingBlock) {
4459 assert(ExitingBlock && "Must pass a non-null exiting block!");
4460 assert(L->isLoopExiting(ExitingBlock) &&
4461 "Exiting block must actually branch out of the loop!");
4462 const SCEVConstant *ExitCount =
4463 dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
4464 if (!ExitCount)
4465 return 0;
4466
4467 ConstantInt *ExitConst = ExitCount->getValue();
4468
4469 // Guard against huge trip counts.
4470 if (ExitConst->getValue().getActiveBits() > 32)
4471 return 0;
4472
4473 // In case of integer overflow, this returns 0, which is correct.
4474 return ((unsigned)ExitConst->getZExtValue()) + 1;
4475 }
4476
4477 unsigned ScalarEvolution::getSmallConstantTripMultiple(Loop *L) {
4478 if (BasicBlock *ExitingBB = L->getExitingBlock())
4479 return getSmallConstantTripMultiple(L, ExitingBB);
4480
4481 // No trip multiple information for multiple exits.
4482 return 0;
4483 }
4484
4485 /// getSmallConstantTripMultiple - Returns the largest constant divisor of the
4486 /// trip count of this loop as a normal unsigned value, if possible. This
4487 /// means that the actual trip count is always a multiple of the returned
4488 /// value (don't forget the trip count could very well be zero as well!).
4489 ///
4490 /// Returns 1 if the trip count is unknown or not guaranteed to be a
4491 /// multiple of a constant (which is also the case if the trip count is simply
4492 /// constant; use getSmallConstantTripCount for that case). It will also
4493 /// return 1 if the trip count is very large (>= 2^32).
4494 ///
4495 /// As explained in the comments for getSmallConstantTripCount, this assumes
4496 /// that control exits the loop via ExitingBlock.
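/// For example, a trip count that folds to 4*n for some unknown n would yield
/// 4, while a purely constant trip count of 10 would yield 10.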
4497 unsigned
4498 ScalarEvolution::getSmallConstantTripMultiple(Loop *L,
4499 BasicBlock *ExitingBlock) {
4500 assert(ExitingBlock && "Must pass a non-null exiting block!");
4501 assert(L->isLoopExiting(ExitingBlock) &&
4502 "Exiting block must actually branch out of the loop!");
4503 const SCEV *ExitCount = getExitCount(L, ExitingBlock);
4504 if (ExitCount == getCouldNotCompute())
4505 return 1;
4506
4507 // Get the trip count from the BE count by adding 1.
4508 const SCEV *TCMul = getAddExpr(ExitCount,
4509 getConstant(ExitCount->getType(), 1));
4510 // FIXME: SCEV distributes multiplication as V1*C1 + V2*C1. We could attempt
4511 // to factor simple cases.
4512 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(TCMul))
4513 TCMul = Mul->getOperand(0);
4514
4515 const SCEVConstant *MulC = dyn_cast<SCEVConstant>(TCMul);
4516 if (!MulC)
4517 return 1;
4518
4519 ConstantInt *Result = MulC->getValue();
4520
4521 // Guard against huge trip counts (this requires checking
4522 // for zero to handle the case where the trip count == -1 and the
4523 // addition wraps).
4524 if (!Result || Result->getValue().getActiveBits() > 32 ||
4525 Result->getValue().getActiveBits() == 0)
4526 return 1;
4527
4528 return (unsigned)Result->getZExtValue();
4529 }
4530
4531 // getExitCount - Get the expression for the number of loop iterations for which
4532 // this loop is guaranteed not to exit via ExitingBlock. Otherwise return
4533 // SCEVCouldNotCompute.
4534 const SCEV *ScalarEvolution::getExitCount(Loop *L, BasicBlock *ExitingBlock) {
4535 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
4536 }
4537
4538 /// getBackedgeTakenCount - If the specified loop has a predictable
4539 /// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
4540 /// object. The backedge-taken count is the number of times the loop header
4541 /// will be branched to from within the loop. This is one less than the
4542 /// trip count of the loop, since it doesn't count the first iteration,
4543 /// when the header is branched to from outside the loop.
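/// For example, a loop whose body runs 10 times has a backedge-taken count of
/// 9, since the backedge is not taken when leaving the final iteration.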
4544 ///
4545 /// Note that it is not valid to call this method on a loop without a
4546 /// loop-invariant backedge-taken count (see
4547 /// hasLoopInvariantBackedgeTakenCount).
4548 ///
4549 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
4550 return getBackedgeTakenInfo(L).getExact(this);
4551 }
4552
4553 /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
4554 /// return the least SCEV value that is known never to be less than the
4555 /// actual backedge taken count.
4556 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
4557 return getBackedgeTakenInfo(L).getMax(this);
4558 }
4559
4560 /// PushLoopPHIs - Push PHI nodes in the header of the given loop
4561 /// onto the given Worklist.
4562 static void
4563 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
4564 BasicBlock *Header = L->getHeader();
4565
4566 // Push all Loop-header PHIs onto the Worklist stack.
4567 for (BasicBlock::iterator I = Header->begin();
4568 PHINode *PN = dyn_cast<PHINode>(I); ++I)
4569 Worklist.push_back(PN);
4570 }
4571
4572 const ScalarEvolution::BackedgeTakenInfo &
4573 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
4574 // Initially insert an invalid entry for this loop. If the insertion
4575 // succeeds, proceed to actually compute a backedge-taken count and
4576 // update the value. The temporary CouldNotCompute value tells SCEV
4577 // code elsewhere that it shouldn't attempt to request a new
4578 // backedge-taken count, which could result in infinite recursion.
4579 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
4580 BackedgeTakenCounts.insert(std::make_pair(L, BackedgeTakenInfo()));
4581 if (!Pair.second)
4582 return Pair.first->second;
4583
4584 // ComputeBackedgeTakenCount may allocate memory for its result. Inserting it
4585 // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
4586 // must be cleared in this scope.
4587 BackedgeTakenInfo Result = ComputeBackedgeTakenCount(L);
4588
4589 if (Result.getExact(this) != getCouldNotCompute()) {
4590 assert(isLoopInvariant(Result.getExact(this), L) &&
4591 isLoopInvariant(Result.getMax(this), L) &&
4592 "Computed backedge-taken count isn't loop invariant for loop!");
4593 ++NumTripCountsComputed;
4594 }
4595 else if (Result.getMax(this) == getCouldNotCompute() &&
4596 isa<PHINode>(L->getHeader()->begin())) {
4597 // Only count loops that have phi nodes as not being computable.
4598 ++NumTripCountsNotComputed;
4599 }
4600
4601 // Now that we know more about the trip count for this loop, forget any
4602 // existing SCEV values for PHI nodes in this loop since they are only
4603 // conservative estimates made without the benefit of trip count
4604 // information. This is similar to the code in forgetLoop, except that
4605 // it handles SCEVUnknown PHI nodes specially.
4606 if (Result.hasAnyInfo()) {
4607 SmallVector<Instruction *, 16> Worklist;
4608 PushLoopPHIs(L, Worklist);
4609
4610 SmallPtrSet<Instruction *, 8> Visited;
4611 while (!Worklist.empty()) {
4612 Instruction *I = Worklist.pop_back_val();
4613 if (!Visited.insert(I).second)
4614 continue;
4615
4616 ValueExprMapType::iterator It =
4617 ValueExprMap.find_as(static_cast<Value *>(I));
4618 if (It != ValueExprMap.end()) {
4619 const SCEV *Old = It->second;
4620
4621 // SCEVUnknown for a PHI either means that it has an unrecognized
4622 // structure, or it's a PHI that's in the process of being computed
4623 // by createNodeForPHI. In the former case, additional loop trip
4624 // count information isn't going to change anything. In the latter
4625 // case, createNodeForPHI will perform the necessary updates on its
4626 // own when it gets to that point.
4627 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
4628 forgetMemoizedResults(Old);
4629 ValueExprMap.erase(It);
4630 }
4631 if (PHINode *PN = dyn_cast<PHINode>(I))
4632 ConstantEvolutionLoopExitValue.erase(PN);
4633 }
4634
4635 PushDefUseChildren(I, Worklist);
4636 }
4637 }
4638
4639 // Re-lookup the insert position, since the call to
4640 // ComputeBackedgeTakenCount above could result in a
4641 // recursive call to getBackedgeTakenInfo (on a different
4642 // loop), which would invalidate the iterator computed
4643 // earlier.
4644 return BackedgeTakenCounts.find(L)->second = Result;
4645 }
4646
4647 /// forgetLoop - This method should be called by the client when it has
4648 /// changed a loop in a way that may affect ScalarEvolution's ability to
4649 /// compute a trip count, or if the loop is deleted.
4650 void ScalarEvolution::forgetLoop(const Loop *L) {
4651 // Drop any stored trip count value.
4652 DenseMap<const Loop*, BackedgeTakenInfo>::iterator BTCPos =
4653 BackedgeTakenCounts.find(L);
4654 if (BTCPos != BackedgeTakenCounts.end()) {
4655 BTCPos->second.clear();
4656 BackedgeTakenCounts.erase(BTCPos);
4657 }
4658
4659 // Drop information about expressions based on loop-header PHIs.
4660 SmallVector<Instruction *, 16> Worklist;
4661 PushLoopPHIs(L, Worklist);
4662
4663 SmallPtrSet<Instruction *, 8> Visited;
4664 while (!Worklist.empty()) {
4665 Instruction *I = Worklist.pop_back_val();
4666 if (!Visited.insert(I).second)
4667 continue;
4668
4669 ValueExprMapType::iterator It =
4670 ValueExprMap.find_as(static_cast<Value *>(I));
4671 if (It != ValueExprMap.end()) {
4672 forgetMemoizedResults(It->second);
4673 ValueExprMap.erase(It);
4674 if (PHINode *PN = dyn_cast<PHINode>(I))
4675 ConstantEvolutionLoopExitValue.erase(PN);
4676 }
4677
4678 PushDefUseChildren(I, Worklist);
4679 }
4680
4681 // Forget all contained loops too, to avoid dangling entries in the
4682 // ValuesAtScopes map.
4683 for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
4684 forgetLoop(*I);
4685 }
4686
4687 /// forgetValue - This method should be called by the client when it has
4688 /// changed a value in a way that may affect its value, or which may
4689 /// disconnect it from a def-use chain linking it to a loop.
4690 void ScalarEvolution::forgetValue(Value *V) {
4691 Instruction *I = dyn_cast<Instruction>(V);
4692 if (!I) return;
4693
4694 // Drop information about expressions based on loop-header PHIs.
4695 SmallVector<Instruction *, 16> Worklist;
4696 Worklist.push_back(I);
4697
4698 SmallPtrSet<Instruction *, 8> Visited;
4699 while (!Worklist.empty()) {
4700 I = Worklist.pop_back_val();
4701 if (!Visited.insert(I).second)
4702 continue;
4703
4704 ValueExprMapType::iterator It =
4705 ValueExprMap.find_as(static_cast<Value *>(I));
4706 if (It != ValueExprMap.end()) {
4707 forgetMemoizedResults(It->second);
4708 ValueExprMap.erase(It);
4709 if (PHINode *PN = dyn_cast<PHINode>(I))
4710 ConstantEvolutionLoopExitValue.erase(PN);
4711 }
4712
4713 PushDefUseChildren(I, Worklist);
4714 }
4715 }
4716
4717 /// getExact - Get the exact loop backedge taken count considering all loop
4718 /// exits. A computable result can only be returned for loops with a single exit.
4719 /// Returning the minimum taken count among all exits is incorrect because one
4720 /// of the loop's exit limits may have been skipped. HowFarToZero assumes that
4721 /// the limit of each loop test is never skipped. This is a valid assumption as
4722 /// long as the loop exits via that test. For precise results, it is the
4723 /// caller's responsibility to specify the relevant loop exit using
4724 /// getExact(ExitingBlock, SE).
4725 const SCEV *
4726 ScalarEvolution::BackedgeTakenInfo::getExact(ScalarEvolution *SE) const {
4727 // If any exits were not computable, the loop is not computable.
4728 if (!ExitNotTaken.isCompleteList()) return SE->getCouldNotCompute();
4729
4730 // We need exactly one computable exit.
4731 if (!ExitNotTaken.ExitingBlock) return SE->getCouldNotCompute();
4732 assert(ExitNotTaken.ExactNotTaken && "uninitialized not-taken info");
4733
4734 const SCEV *BECount = nullptr;
4735 for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
4736 ENT != nullptr; ENT = ENT->getNextExit()) {
4737
4738 assert(ENT->ExactNotTaken != SE->getCouldNotCompute() && "bad exit SCEV");
4739
4740 if (!BECount)
4741 BECount = ENT->ExactNotTaken;
4742 else if (BECount != ENT->ExactNotTaken)
4743 return SE->getCouldNotCompute();
4744 }
4745 assert(BECount && "Invalid not taken count for loop exit");
4746 return BECount;
4747 }
4748
4749 /// getExact - Get the exact not taken count for this loop exit.
4750 const SCEV *
4751 ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock,
4752 ScalarEvolution *SE) const {
4753 for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
4754 ENT != nullptr; ENT = ENT->getNextExit()) {
4755
4756 if (ENT->ExitingBlock == ExitingBlock)
4757 return ENT->ExactNotTaken;
4758 }
4759 return SE->getCouldNotCompute();
4760 }
4761
4762 /// getMax - Get the max backedge taken count for the loop.
4763 const SCEV *
4764 ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const {
4765 return Max ? Max : SE->getCouldNotCompute();
4766 }
4767
4768 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S,
4769 ScalarEvolution *SE) const {
4770 if (Max && Max != SE->getCouldNotCompute() && SE->hasOperand(Max, S))
4771 return true;
4772
4773 if (!ExitNotTaken.ExitingBlock)
4774 return false;
4775
4776 for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
4777 ENT != nullptr; ENT = ENT->getNextExit()) {
4778
4779 if (ENT->ExactNotTaken != SE->getCouldNotCompute()
4780 && SE->hasOperand(ENT->ExactNotTaken, S)) {
4781 return true;
4782 }
4783 }
4784 return false;
4785 }
4786
4787 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
4788 /// computable exit into a persistent ExitNotTakenInfo array.
4789 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
4790 SmallVectorImpl< std::pair<BasicBlock *, const SCEV *> > &ExitCounts,
4791 bool Complete, const SCEV *MaxCount) : Max(MaxCount) {
4792
4793 if (!Complete)
4794 ExitNotTaken.setIncomplete();
4795
4796 unsigned NumExits = ExitCounts.size();
4797 if (NumExits == 0) return;
4798
4799 ExitNotTaken.ExitingBlock = ExitCounts[0].first;
4800 ExitNotTaken.ExactNotTaken = ExitCounts[0].second;
4801 if (NumExits == 1) return;
4802
4803 // Handle the rare case of multiple computable exits.
4804 ExitNotTakenInfo *ENT = new ExitNotTakenInfo[NumExits-1];
4805
4806 ExitNotTakenInfo *PrevENT = &ExitNotTaken;
4807 for (unsigned i = 1; i < NumExits; ++i, PrevENT = ENT, ++ENT) {
4808 PrevENT->setNextExit(ENT);
4809 ENT->ExitingBlock = ExitCounts[i].first;
4810 ENT->ExactNotTaken = ExitCounts[i].second;
4811 }
4812 }
4813
4814 /// clear - Invalidate this result and free the ExitNotTakenInfo array.
4815 void ScalarEvolution::BackedgeTakenInfo::clear() {
4816 ExitNotTaken.ExitingBlock = nullptr;
4817 ExitNotTaken.ExactNotTaken = nullptr;
4818 delete[] ExitNotTaken.getNextExit();
4819 }
4820
4821 /// ComputeBackedgeTakenCount - Compute the number of times the backedge
4822 /// of the specified loop will execute.
4823 ScalarEvolution::BackedgeTakenInfo
4824 ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
4825 SmallVector<BasicBlock *, 8> ExitingBlocks;
4826 L->getExitingBlocks(ExitingBlocks);
4827
4828 SmallVector<std::pair<BasicBlock *, const SCEV *>, 4> ExitCounts;
4829 bool CouldComputeBECount = true;
4830 BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
4831 const SCEV *MustExitMaxBECount = nullptr;
4832 const SCEV *MayExitMaxBECount = nullptr;
4833
4834 // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
4835 // and compute maxBECount.
4836 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
4837 BasicBlock *ExitBB = ExitingBlocks[i];
4838 ExitLimit EL = ComputeExitLimit(L, ExitBB);
4839
4840 // 1. For each exit that can be computed, add an entry to ExitCounts.
4841 // CouldComputeBECount is true only if all exits can be computed.
4842 if (EL.Exact == getCouldNotCompute())
4843 // We couldn't compute an exact value for this exit, so
4844 // we won't be able to compute an exact value for the loop.
4845 CouldComputeBECount = false;
4846 else
4847 ExitCounts.push_back(std::make_pair(ExitBB, EL.Exact));
4848
4849 // 2. Derive the loop's MaxBECount from each exit's max number of
4850 // non-exiting iterations. Partition the loop exits into two kinds:
4851 // LoopMustExits and LoopMayExits.
4852 //
4853 // If the exit dominates the loop latch, it is a LoopMustExit otherwise it
4854 // is a LoopMayExit. If any computable LoopMustExit is found, then
4855 // MaxBECount is the minimum EL.Max of computable LoopMustExits. Otherwise,
4856 // MaxBECount is conservatively the maximum EL.Max, where CouldNotCompute is
4857 // considered greater than any computable EL.Max.
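// For example, if an exit that dominates the latch has EL.Max == 10 while a
// non-dominating exit has EL.Max == 100, MaxBECount would be 10.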
4858 if (EL.Max != getCouldNotCompute() && Latch &&
4859 DT->dominates(ExitBB, Latch)) {
4860 if (!MustExitMaxBECount)
4861 MustExitMaxBECount = EL.Max;
4862 else {
4863 MustExitMaxBECount =
4864 getUMinFromMismatchedTypes(MustExitMaxBECount, EL.Max);
4865 }
4866 } else if (MayExitMaxBECount != getCouldNotCompute()) {
4867 if (!MayExitMaxBECount || EL.Max == getCouldNotCompute())
4868 MayExitMaxBECount = EL.Max;
4869 else {
4870 MayExitMaxBECount =
4871 getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.Max);
4872 }
4873 }
4874 }
4875 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
4876 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
4877 return BackedgeTakenInfo(ExitCounts, CouldComputeBECount, MaxBECount);
4878 }
4879
4880 /// ComputeExitLimit - Compute the number of times the backedge of the specified
4881 /// loop will execute if it exits via the specified block.
4882 ScalarEvolution::ExitLimit
4883 ScalarEvolution::ComputeExitLimit(const Loop *L, BasicBlock *ExitingBlock) {
4884
4885 // Okay, we've chosen an exiting block. See what condition causes us to
4886 // exit at this block and remember the exit block and whether all other targets
4887 // lead to the loop header.
4888 bool MustExecuteLoopHeader = true;
4889 BasicBlock *Exit = nullptr;
4890 for (succ_iterator SI = succ_begin(ExitingBlock), SE = succ_end(ExitingBlock);
4891 SI != SE; ++SI)
4892 if (!L->contains(*SI)) {
4893 if (Exit) // Multiple exit successors.
4894 return getCouldNotCompute();
4895 Exit = *SI;
4896 } else if (*SI != L->getHeader()) {
4897 MustExecuteLoopHeader = false;
4898 }
4899
4900 // At this point, we know we have a conditional branch that determines whether
4901 // the loop is exited. However, we don't know if the branch is executed each
4902 // time through the loop. If not, then the execution count of the branch will
4903 // not be equal to the trip count of the loop.
4904 //
4905 // Currently we check for this by checking to see if the Exit branch goes to
4906 // the loop header. If so, we know it will always execute the same number of
4907 // times as the loop. We also handle the case where the exit block *is* the
4908 // loop header. This is common for un-rotated loops.
4909 //
4910 // If both of those tests fail, walk up the unique predecessor chain to the
4911 // header, stopping if there is an edge that doesn't exit the loop. If the
4912 // header is reached, the execution count of the branch will be equal to the
4913 // trip count of the loop.
4914 //
4915 // More extensive analysis could be done to handle more cases here.
4916 //
4917 if (!MustExecuteLoopHeader && ExitingBlock != L->getHeader()) {
4918 // The simple checks failed, try climbing the unique predecessor chain
4919 // up to the header.
4920 bool Ok = false;
4921 for (BasicBlock *BB = ExitingBlock; BB; ) {
4922 BasicBlock *Pred = BB->getUniquePredecessor();
4923 if (!Pred)
4924 return getCouldNotCompute();
4925 TerminatorInst *PredTerm = Pred->getTerminator();
4926 for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
4927 BasicBlock *PredSucc = PredTerm->getSuccessor(i);
4928 if (PredSucc == BB)
4929 continue;
4930 // If the predecessor has a successor that isn't BB and isn't
4931 // outside the loop, assume the worst.
4932 if (L->contains(PredSucc))
4933 return getCouldNotCompute();
4934 }
4935 if (Pred == L->getHeader()) {
4936 Ok = true;
4937 break;
4938 }
4939 BB = Pred;
4940 }
4941 if (!Ok)
4942 return getCouldNotCompute();
4943 }
4944
4945 bool IsOnlyExit = (L->getExitingBlock() != nullptr);
4946 TerminatorInst *Term = ExitingBlock->getTerminator();
4947 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
4948 assert(BI->isConditional() && "If unconditional, it can't be in loop!");
4949 // Proceed to the next level to examine the exit condition expression.
4950 return ComputeExitLimitFromCond(L, BI->getCondition(), BI->getSuccessor(0),
4951 BI->getSuccessor(1),
4952 /*ControlsExit=*/IsOnlyExit);
4953 }
4954
4955 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term))
4956 return ComputeExitLimitFromSingleExitSwitch(L, SI, Exit,
4957 /*ControlsExit=*/IsOnlyExit);
4958
4959 return getCouldNotCompute();
4960 }
4961
4962 /// ComputeExitLimitFromCond - Compute the number of times the
4963 /// backedge of the specified loop will execute if its exit condition
4964 /// were a conditional branch of ExitCond, TBB, and FBB.
4965 ///
4966 /// @param ControlsExit is true if ExitCond directly controls the exit
4967 /// branch. In this case, we can assume that the loop exits only if the
4968 /// condition is true and can infer that failing to meet the condition prior to
4969 /// integer wraparound results in undefined behavior.
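/// For example, for a branch that stays in the loop while "i < n && p != q"
/// holds, the exit count would be the unsigned minimum of the counts derived
/// from each conjunct, when both are computable.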
4970 ScalarEvolution::ExitLimit
4971 ScalarEvolution::ComputeExitLimitFromCond(const Loop *L,
4972 Value *ExitCond,
4973 BasicBlock *TBB,
4974 BasicBlock *FBB,
4975 bool ControlsExit) {
4976 // Check if the controlling expression for this loop is an And or Or.
4977 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
4978 if (BO->getOpcode() == Instruction::And) {
4979 // Recurse on the operands of the and.
4980 bool EitherMayExit = L->contains(TBB);
4981 ExitLimit EL0 = ComputeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB,
4982 ControlsExit && !EitherMayExit);
4983 ExitLimit EL1 = ComputeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB,
4984 ControlsExit && !EitherMayExit);
4985 const SCEV *BECount = getCouldNotCompute();
4986 const SCEV *MaxBECount = getCouldNotCompute();
4987 if (EitherMayExit) {
4988 // Both conditions must be true for the loop to continue executing.
4989 // Choose the less conservative count.
4990 if (EL0.Exact == getCouldNotCompute() ||
4991 EL1.Exact == getCouldNotCompute())
4992 BECount = getCouldNotCompute();
4993 else
4994 BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact);
4995 if (EL0.Max == getCouldNotCompute())
4996 MaxBECount = EL1.Max;
4997 else if (EL1.Max == getCouldNotCompute())
4998 MaxBECount = EL0.Max;
4999 else
5000 MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max);
5001 } else {
5002 // Both conditions must be true at the same time for the loop to exit.
5003 // For now, be conservative.
5004 assert(L->contains(FBB) && "Loop block has no successor in loop!");
5005 if (EL0.Max == EL1.Max)
5006 MaxBECount = EL0.Max;
5007 if (EL0.Exact == EL1.Exact)
5008 BECount = EL0.Exact;
5009 }
5010
5011 return ExitLimit(BECount, MaxBECount);
5012 }
5013 if (BO->getOpcode() == Instruction::Or) {
5014 // Recurse on the operands of the or.
5015 bool EitherMayExit = L->contains(FBB);
5016 ExitLimit EL0 = ComputeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB,
5017 ControlsExit && !EitherMayExit);
5018 ExitLimit EL1 = ComputeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB,
5019 ControlsExit && !EitherMayExit);
5020 const SCEV *BECount = getCouldNotCompute();
5021 const SCEV *MaxBECount = getCouldNotCompute();
5022 if (EitherMayExit) {
5023 // Both conditions must be false for the loop to continue executing.
5024 // Choose the less conservative count.
5025 if (EL0.Exact == getCouldNotCompute() ||
5026 EL1.Exact == getCouldNotCompute())
5027 BECount = getCouldNotCompute();
5028 else
5029 BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact);
5030 if (EL0.Max == getCouldNotCompute())
5031 MaxBECount = EL1.Max;
5032 else if (EL1.Max == getCouldNotCompute())
5033 MaxBECount = EL0.Max;
5034 else
5035 MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max);
5036 } else {
5037 // Both conditions must be false at the same time for the loop to exit.
5038 // For now, be conservative.
5039 assert(L->contains(TBB) && "Loop block has no successor in loop!");
5040 if (EL0.Max == EL1.Max)
5041 MaxBECount = EL0.Max;
5042 if (EL0.Exact == EL1.Exact)
5043 BECount = EL0.Exact;
5044 }
5045
5046 return ExitLimit(BECount, MaxBECount);
5047 }
5048 }
5049
5050 // With an icmp, it may be feasible to compute an exact backedge-taken count.
5051 // Proceed to the next level to examine the icmp.
5052 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
5053 return ComputeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, ControlsExit);
5054
5055 // Check for a constant condition. These are normally stripped out by
5056 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
5057 // preserve the CFG and is temporarily leaving constant conditions
5058 // in place.
5059 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
5060 if (L->contains(FBB) == !CI->getZExtValue())
5061 // The backedge is always taken.
5062 return getCouldNotCompute();
5063 else
5064 // The backedge is never taken.
5065 return getConstant(CI->getType(), 0);
5066 }
5067
5068 // If it's not an integer or pointer comparison then compute it the hard way.
5069 return ComputeExitCountExhaustively(L, ExitCond, !L->contains(TBB));
5070 }
5071
5072 /// ComputeExitLimitFromICmp - Compute the number of times the
5073 /// backedge of the specified loop will execute if its exit condition
5074 /// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
5075 ScalarEvolution::ExitLimit
5076 ScalarEvolution::ComputeExitLimitFromICmp(const Loop *L,
5077 ICmpInst *ExitCond,
5078 BasicBlock *TBB,
5079 BasicBlock *FBB,
5080 bool ControlsExit) {
5081
5082 // If the condition was exit on true, convert the condition to exit on false
5083 ICmpInst::Predicate Cond;
5084 if (!L->contains(FBB))
5085 Cond = ExitCond->getPredicate();
5086 else
5087 Cond = ExitCond->getInversePredicate();
5088
5089 // Handle common loops like: for (X = "string"; *X; ++X)
5090 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
5091 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
5092 ExitLimit ItCnt =
5093 ComputeLoadConstantCompareExitLimit(LI, RHS, L, Cond);
5094 if (ItCnt.hasAnyInfo())
5095 return ItCnt;
5096 }
5097
5098 const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
5099 const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
5100
5101 // Try to evaluate any dependencies out of the loop.
5102 LHS = getSCEVAtScope(LHS, L);
5103 RHS = getSCEVAtScope(RHS, L);
5104
5105 // At this point, we would like to compute how many iterations of the
5106 // loop the predicate will return true for these inputs.
5107 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
5108 // If there is a loop-invariant, force it into the RHS.
5109 std::swap(LHS, RHS);
5110 Cond = ICmpInst::getSwappedPredicate(Cond);
5111 }
5112
5113 // Simplify the operands before analyzing them.
5114 (void)SimplifyICmpOperands(Cond, LHS, RHS);
5115
5116 // If we have a comparison of a chrec against a constant, try to use value
5117 // ranges to answer this query.
5118 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
5119 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
5120 if (AddRec->getLoop() == L) {
5121 // Form the constant range.
5122 ConstantRange CompRange(
5123 ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));
5124
5125 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
5126 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
5127 }
5128
5129 switch (Cond) {
5130 case ICmpInst::ICMP_NE: { // while (X != Y)
5131 // Convert to: while (X-Y != 0)
5132 ExitLimit EL = HowFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit);
5133 if (EL.hasAnyInfo()) return EL;
5134 break;
5135 }
5136 case ICmpInst::ICMP_EQ: { // while (X == Y)
5137 // Convert to: while (X-Y == 0)
5138 ExitLimit EL = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
5139 if (EL.hasAnyInfo()) return EL;
5140 break;
5141 }
5142 case ICmpInst::ICMP_SLT:
5143 case ICmpInst::ICMP_ULT: { // while (X < Y)
5144 bool IsSigned = Cond == ICmpInst::ICMP_SLT;
5145 ExitLimit EL = HowManyLessThans(LHS, RHS, L, IsSigned, ControlsExit);
5146 if (EL.hasAnyInfo()) return EL;
5147 break;
5148 }
5149 case ICmpInst::ICMP_SGT:
5150 case ICmpInst::ICMP_UGT: { // while (X > Y)
5151 bool IsSigned = Cond == ICmpInst::ICMP_SGT;
5152 ExitLimit EL = HowManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit);
5153 if (EL.hasAnyInfo()) return EL;
5154 break;
5155 }
5156 default:
5157 #if 0
5158 dbgs() << "ComputeBackedgeTakenCount ";
5159 if (ExitCond->getOperand(0)->getType()->isUnsigned())
5160 dbgs() << "[unsigned] ";
5161 dbgs() << *LHS << " "
5162 << Instruction::getOpcodeName(Instruction::ICmp)
5163 << " " << *RHS << "\n";
5164 #endif
5165 break;
5166 }
5167 return ComputeExitCountExhaustively(L, ExitCond, !L->contains(TBB));
5168 }
5169
5170 ScalarEvolution::ExitLimit
5171 ScalarEvolution::ComputeExitLimitFromSingleExitSwitch(const Loop *L,
5172 SwitchInst *Switch,
5173 BasicBlock *ExitingBlock,
5174 bool ControlsExit) {
5175 assert(!L->contains(ExitingBlock) && "Not an exiting block!");
5176
5177 // Give up if the exit is the default dest of a switch.
5178 if (Switch->getDefaultDest() == ExitingBlock)
5179 return getCouldNotCompute();
5180
5181 assert(L->contains(Switch->getDefaultDest()) &&
5182 "Default case must not exit the loop!");
5183 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
5184 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));
5185
5186 // while (X != Y) --> while (X-Y != 0)
5187 ExitLimit EL = HowFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit);
5188 if (EL.hasAnyInfo())
5189 return EL;
5190
5191 return getCouldNotCompute();
5192 }
5193
5194 static ConstantInt *
5195 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
5196 ScalarEvolution &SE) {
5197 const SCEV *InVal = SE.getConstant(C);
5198 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
5199 assert(isa<SCEVConstant>(Val) &&
5200 "Evaluation of SCEV at constant didn't fold correctly?");
5201 return cast<SCEVConstant>(Val)->getValue();
5202 }
5203
5204 /// ComputeLoadConstantCompareExitLimit - Given an exit condition of
5205 /// 'icmp op load X, cst', try to see if we can compute the backedge
5206 /// execution count.
5207 ScalarEvolution::ExitLimit
5208 ScalarEvolution::ComputeLoadConstantCompareExitLimit(
5209 LoadInst *LI,
5210 Constant *RHS,
5211 const Loop *L,
5212 ICmpInst::Predicate predicate) {
5213
5214 if (LI->isVolatile()) return getCouldNotCompute();
5215
5216 // Check to see if the loaded pointer is a getelementptr of a global.
5217 // TODO: Use SCEV instead of manually grubbing with GEPs.
5218 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
5219 if (!GEP) return getCouldNotCompute();
5220
5221 // Make sure that it is really a constant global we are gepping, with an
5222 // initializer, and make sure the first IDX is really 0.
5223 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
5224 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
5225 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
5226 !cast<Constant>(GEP->getOperand(1))->isNullValue())
5227 return getCouldNotCompute();
5228
5229 // Okay, we allow one non-constant index into the GEP instruction.
5230 Value *VarIdx = nullptr;
5231 std::vector<Constant*> Indexes;
5232 unsigned VarIdxNum = 0;
5233 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
5234 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
5235 Indexes.push_back(CI);
5236 } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
5237 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
5238 VarIdx = GEP->getOperand(i);
5239 VarIdxNum = i-2;
5240 Indexes.push_back(nullptr);
5241 }
5242
5243 // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
5244 if (!VarIdx)
5245 return getCouldNotCompute();
5246
5247 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
5248 // Check to see if X is a loop variant variable value now.
5249 const SCEV *Idx = getSCEV(VarIdx);
5250 Idx = getSCEVAtScope(Idx, L);
5251
5252 // We can only recognize very limited forms of loop index expressions, in
5253 // particular, only affine AddRec's like {C1,+,C2}.
5254 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
5255 if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
5256 !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
5257 !isa<SCEVConstant>(IdxExpr->getOperand(1)))
5258 return getCouldNotCompute();
5259
5260 unsigned MaxSteps = MaxBruteForceIterations;
5261 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
5262 ConstantInt *ItCst = ConstantInt::get(
5263 cast<IntegerType>(IdxExpr->getType()), IterationNum);
5264 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
5265
5266 // Form the GEP offset.
5267 Indexes[VarIdxNum] = Val;
5268
5269 Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
5270 Indexes);
5271 if (!Result) break; // Cannot compute!
5272
5273 // Evaluate the condition for this iteration.
5274 Result = ConstantExpr::getICmp(predicate, Result, RHS);
5275 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
5276 if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
5277 #if 0
5278 dbgs() << "\n***\n*** Computed loop count " << *ItCst
5279 << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
5280 << "***\n";
5281 #endif
5282 ++NumArrayLenItCounts;
5283 return getConstant(ItCst); // Found terminating iteration!
5284 }
5285 }
5286 return getCouldNotCompute();
5287 }
5288
5289
5290 /// CanConstantFold - Return true if we can constant fold an instruction of the
5291 /// specified type, assuming that all operands were constants.
5292 static bool CanConstantFold(const Instruction *I) {
5293 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
5294 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
5295 isa<LoadInst>(I))
5296 return true;
5297
5298 if (const CallInst *CI = dyn_cast<CallInst>(I))
5299 if (const Function *F = CI->getCalledFunction())
5300 return canConstantFoldCallTo(F);
5301 return false;
5302 }
5303
5304 /// Determine whether this instruction can constant evolve within this loop
5305 /// assuming its operands can all constant evolve.
5306 static bool canConstantEvolve(Instruction *I, const Loop *L) {
5307 // An instruction outside of the loop can't be derived from a loop PHI.
5308 if (!L->contains(I)) return false;
5309
5310 if (isa<PHINode>(I)) {
5311 // We don't currently keep track of the control flow needed to evaluate
5312 // PHIs, so we cannot handle PHIs inside of loops.
5313 return L->getHeader() == I->getParent();
5314 }
5315
5316 // If we won't be able to constant fold this expression even if the operands
5317 // are constants, bail early.
5318 return CanConstantFold(I);
5319 }
5320
5321 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
5322 /// recursing through each instruction operand until reaching a loop header phi.
5323 static PHINode *
5324 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
5325 DenseMap<Instruction *, PHINode *> &PHIMap) {
5326
5327 // Otherwise, we can evaluate this instruction if all of its operands are
5328 // constant or derived from a PHI node themselves.
5329 PHINode *PHI = nullptr;
5330 for (Instruction::op_iterator OpI = UseInst->op_begin(),
5331 OpE = UseInst->op_end(); OpI != OpE; ++OpI) {
5332
5333 if (isa<Constant>(*OpI)) continue;
5334
5335 Instruction *OpInst = dyn_cast<Instruction>(*OpI);
5336 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;
5337
5338 PHINode *P = dyn_cast<PHINode>(OpInst);
5339 if (!P)
5340 // If this operand is already visited, reuse the prior result.
5341 // We may have P != PHI if this is the deepest point at which the
5342 // inconsistent paths meet.
5343 P = PHIMap.lookup(OpInst);
5344 if (!P) {
5345 // Recurse and memoize the results, whether a phi is found or not.
5346 // This recursive call invalidates pointers into PHIMap.
5347 P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap);
5348 PHIMap[OpInst] = P;
5349 }
5350 if (!P)
5351 return nullptr; // Not evolving from PHI
5352 if (PHI && PHI != P)
5353 return nullptr; // Evolving from multiple different PHIs.
5354 PHI = P;
5355 }
5356 // This is an expression evolving from a constant PHI!
5357 return PHI;
5358 }
5359
5360 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
5361 /// in the loop that V is derived from. We allow arbitrary operations along the
5362 /// way, but the operands of an operation must either be constants or a value
5363 /// derived from a constant PHI. If this expression does not fit with these
5364 /// constraints, return null.
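/// For example, given a header phi %i and a loop-contained "%i.next = add %i, 1",
/// calling this on %i.next would return %i (the names here are illustrative).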
5365 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
5366 Instruction *I = dyn_cast<Instruction>(V);
5367 if (!I || !canConstantEvolve(I, L)) return nullptr;
5368
5369 if (PHINode *PN = dyn_cast<PHINode>(I)) {
5370 return PN;
5371 }
5372
5373 // Record non-constant instructions contained by the loop.
5374 DenseMap<Instruction *, PHINode *> PHIMap;
5375 return getConstantEvolvingPHIOperands(I, L, PHIMap);
5376 }
5377
5378 /// EvaluateExpression - Given an expression that passes the
5379 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
5380 /// in the loop has the value PHIVal. If we can't fold this expression for some
5381 /// reason, return null.
5382 static Constant *EvaluateExpression(Value *V, const Loop *L,
5383 DenseMap<Instruction *, Constant *> &Vals,
5384 const DataLayout &DL,
5385 const TargetLibraryInfo *TLI) {
5386 // Convenient constant check, but redundant for recursive calls.
5387 if (Constant *C = dyn_cast<Constant>(V)) return C;
5388 Instruction *I = dyn_cast<Instruction>(V);
5389 if (!I) return nullptr;
5390
5391 if (Constant *C = Vals.lookup(I)) return C;
5392
5393 // An instruction inside the loop depends on a value outside the loop that we
5394 // weren't given a mapping for, or a value such as a call inside the loop.
5395 if (!canConstantEvolve(I, L)) return nullptr;
5396
5397 // An unmapped PHI can be due to a branch or another loop inside this loop,
5398 // or due to this not being the initial iteration through a loop where we
5399 // couldn't compute the evolution of this particular PHI last time.
5400 if (isa<PHINode>(I)) return nullptr;
5401
5402 std::vector<Constant*> Operands(I->getNumOperands());
5403
5404 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
5405 Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
5406 if (!Operand) {
5407 Operands[i] = dyn_cast<Constant>(I->getOperand(i));
5408 if (!Operands[i]) return nullptr;
5409 continue;
5410 }
5411 Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
5412 Vals[Operand] = C;
5413 if (!C) return nullptr;
5414 Operands[i] = C;
5415 }
5416
5417 if (CmpInst *CI = dyn_cast<CmpInst>(I))
5418 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
5419 Operands[1], DL, TLI);
5420 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
5421 if (!LI->isVolatile())
5422 return ConstantFoldLoadFromConstPtr(Operands[0], DL);
5423 }
5424 return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, DL,
5425 TLI);
5426 }
5427
5428 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
5429 /// in the header of its containing loop, we know the loop executes a
5430 /// constant number of times, and the PHI node is just a recurrence
5431 /// involving constants, fold it.
5432 Constant *
5433 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
5434 const APInt &BEs,
5435 const Loop *L) {
5436 DenseMap<PHINode*, Constant*>::const_iterator I =
5437 ConstantEvolutionLoopExitValue.find(PN);
5438 if (I != ConstantEvolutionLoopExitValue.end())
5439 return I->second;
5440
5441 if (BEs.ugt(MaxBruteForceIterations))
5442 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it.
5443
5444 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
5445
5446 DenseMap<Instruction *, Constant *> CurrentIterVals;
5447 BasicBlock *Header = L->getHeader();
5448 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
5449
5450 // Since the loop is canonicalized, the PHI node must have two entries. One
5451 // entry must be a constant (coming in from outside of the loop), and the
5452 // second must be derived from the same PHI.
5453 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
5454 PHINode *PHI = nullptr;
5455 for (BasicBlock::iterator I = Header->begin();
5456 (PHI = dyn_cast<PHINode>(I)); ++I) {
5457 Constant *StartCST =
5458 dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
5459 if (!StartCST) continue;
5460 CurrentIterVals[PHI] = StartCST;
5461 }
5462 if (!CurrentIterVals.count(PN))
5463 return RetVal = nullptr;
5464
5465 Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
5466
5467 // Execute the loop symbolically to determine the exit value.
5468 if (BEs.getActiveBits() >= 32)
5469 return RetVal = nullptr; // More than 2^32-1 iterations?? Not doing it!
5470
5471 unsigned NumIterations = BEs.getZExtValue(); // must be in range
5472 unsigned IterationNum = 0;
5473 const DataLayout &DL = F->getParent()->getDataLayout();
5474 for (; ; ++IterationNum) {
5475 if (IterationNum == NumIterations)
5476 return RetVal = CurrentIterVals[PN]; // Got exit value!
5477
5478 // Compute the value of the PHIs for the next iteration.
5479 // EvaluateExpression adds non-phi values to the CurrentIterVals map.
5480 DenseMap<Instruction *, Constant *> NextIterVals;
5481 Constant *NextPHI =
5482 EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
5483 if (!NextPHI)
5484 return nullptr; // Couldn't evaluate!
5485 NextIterVals[PN] = NextPHI;
5486
5487 bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
5488
5489 // Also evaluate the other PHI nodes. However, we don't get to stop if we
5490 // cease to be able to evaluate one of them or if they stop evolving,
5491 // because that doesn't necessarily prevent us from computing PN.
5492 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
5493 for (DenseMap<Instruction *, Constant *>::const_iterator
5494 I = CurrentIterVals.begin(), E = CurrentIterVals.end(); I != E; ++I){
5495 PHINode *PHI = dyn_cast<PHINode>(I->first);
5496 if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
5497 PHIsToCompute.push_back(std::make_pair(PHI, I->second));
5498 }
5499 // We use two distinct loops because EvaluateExpression may invalidate any
5500 // iterators into CurrentIterVals.
5501 for (SmallVectorImpl<std::pair<PHINode *, Constant*> >::const_iterator
5502 I = PHIsToCompute.begin(), E = PHIsToCompute.end(); I != E; ++I) {
5503 PHINode *PHI = I->first;
5504 Constant *&NextPHI = NextIterVals[PHI];
5505 if (!NextPHI) { // Not already computed.
5506 Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
5507 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
5508 }
5509 if (NextPHI != I->second)
5510 StoppedEvolving = false;
5511 }
5512
5513 // If all entries in CurrentIterVals == NextIterVals then we can stop
5514 // iterating; the loop can't continue to change.
5515 if (StoppedEvolving)
5516 return RetVal = CurrentIterVals[PN];
5517
5518 CurrentIterVals.swap(NextIterVals);
5519 }
5520 }
5521
5522 /// ComputeExitCountExhaustively - If the loop is known to execute a
5523 /// constant number of times (the condition evolves only from constants),
5524 /// try to evaluate a few iterations of the loop until the exit
5525 /// condition gets a value of ExitWhen (true or false). If we cannot
5526 /// evaluate the trip count of the loop, return getCouldNotCompute().
5527 const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
5528 Value *Cond,
5529 bool ExitWhen) {
5530 PHINode *PN = getConstantEvolvingPHI(Cond, L);
5531 if (!PN) return getCouldNotCompute();
5532
5533 // If the loop is canonicalized, the PHI will have exactly two entries.
5534 // That's the only form we support here.
5535 if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
5536
5537 DenseMap<Instruction *, Constant *> CurrentIterVals;
5538 BasicBlock *Header = L->getHeader();
5539 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
5540
5541 // One entry must be a constant (coming in from outside of the loop), and the
5542 // second must be derived from the same PHI.
5543 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
5544 PHINode *PHI = nullptr;
5545 for (BasicBlock::iterator I = Header->begin();
5546 (PHI = dyn_cast<PHINode>(I)); ++I) {
5547 Constant *StartCST =
5548 dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
5549 if (!StartCST) continue;
5550 CurrentIterVals[PHI] = StartCST;
5551 }
5552 if (!CurrentIterVals.count(PN))
5553 return getCouldNotCompute();
5554
5555 // Okay, we found a PHI node that defines the trip count of this loop. Execute
5556 // the loop symbolically to determine when the condition gets a value of
5557 // "ExitWhen".
5558 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
5559 const DataLayout &DL = F->getParent()->getDataLayout();
5560 for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
5561 ConstantInt *CondVal = dyn_cast_or_null<ConstantInt>(
5562 EvaluateExpression(Cond, L, CurrentIterVals, DL, TLI));
5563
5564 // Couldn't symbolically evaluate.
5565 if (!CondVal) return getCouldNotCompute();
5566
5567 if (CondVal->getValue() == uint64_t(ExitWhen)) {
5568 ++NumBruteForceTripCountsComputed;
5569 return getConstant(Type::getInt32Ty(getContext()), IterationNum);
5570 }
5571
5572 // Update all the PHI nodes for the next iteration.
5573 DenseMap<Instruction *, Constant *> NextIterVals;
5574
5575 // Create a list of which PHIs we need to compute. We want to do this before
5576 // calling EvaluateExpression on them because that may invalidate iterators
5577 // into CurrentIterVals.
5578 SmallVector<PHINode *, 8> PHIsToCompute;
5579 for (DenseMap<Instruction *, Constant *>::const_iterator
5580 I = CurrentIterVals.begin(), E = CurrentIterVals.end(); I != E; ++I){
5581 PHINode *PHI = dyn_cast<PHINode>(I->first);
5582 if (!PHI || PHI->getParent() != Header) continue;
5583 PHIsToCompute.push_back(PHI);
5584 }
5585 for (SmallVectorImpl<PHINode *>::const_iterator I = PHIsToCompute.begin(),
5586 E = PHIsToCompute.end(); I != E; ++I) {
5587 PHINode *PHI = *I;
5588 Constant *&NextPHI = NextIterVals[PHI];
5589 if (NextPHI) continue; // Already computed!
5590
5591 Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
5592 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
5593 }
5594 CurrentIterVals.swap(NextIterVals);
5595 }
5596
5597 // Too many iterations were needed to evaluate.
5598 return getCouldNotCompute();
5599 }
5600
5601 /// getSCEVAtScope - Return a SCEV expression for the specified value
5602 /// at the specified scope in the program. The L value specifies the loop
5603 /// nest in which to evaluate the expression: null means the top-level scope,
5604 /// and a non-null loop means a point immediately inside that loop.
5605 ///
5606 /// This method can be used to compute the exit value for a variable defined
5607 /// in a loop by querying what the value will hold in the parent loop.
5608 ///
5609 /// In the case that a relevant loop exit value cannot be computed, the
5610 /// original value V is returned.
5611 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
5612 // Check to see if we've folded this expression at this loop before.
5613 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = ValuesAtScopes[V];
5614 for (unsigned u = 0; u < Values.size(); u++) {
5615 if (Values[u].first == L)
5616 return Values[u].second ? Values[u].second : V;
5617 }
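// Insert a placeholder entry before recursing; the lookup above treats a
// null second member as "not yet computed" and returns V unchanged.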
5618 Values.push_back(std::make_pair(L, static_cast<const SCEV *>(nullptr)));
5619 // Otherwise compute it.
5620 const SCEV *C = computeSCEVAtScope(V, L);
5621 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values2 = ValuesAtScopes[V];
5622 for (unsigned u = Values2.size(); u > 0; u--) {
5623 if (Values2[u - 1].first == L) {
5624 Values2[u - 1].second = C;
5625 break;
5626 }
5627 }
5628 return C;
5629 }
5630
5631 /// This builds up a Constant using the ConstantExpr interface. That way, we
5632 /// will return Constants for objects which aren't represented by a
5633 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
5634 /// Returns NULL if the SCEV isn't representable as a Constant.
5635 static Constant *BuildConstantFromSCEV(const SCEV *V) {
5636 switch (static_cast<SCEVTypes>(V->getSCEVType())) {
5637 case scCouldNotCompute:
5638 case scAddRecExpr:
5639 break;
5640 case scConstant:
5641 return cast<SCEVConstant>(V)->getValue();
5642 case scUnknown:
5643 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
5644 case scSignExtend: {
5645 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
5646 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
5647 return ConstantExpr::getSExt(CastOp, SS->getType());
5648 break;
5649 }
5650 case scZeroExtend: {
5651 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
5652 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
5653 return ConstantExpr::getZExt(CastOp, SZ->getType());
5654 break;
5655 }
5656 case scTruncate: {
5657 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
5658 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
5659 return ConstantExpr::getTrunc(CastOp, ST->getType());
5660 break;
5661 }
5662 case scAddExpr: {
5663 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
5664 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
5665 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
5666 unsigned AS = PTy->getAddressSpace();
5667 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
5668 C = ConstantExpr::getBitCast(C, DestPtrTy);
5669 }
5670 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
5671 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
5672 if (!C2) return nullptr;
5673
5674 // First pointer!
5675 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
5676 unsigned AS = C2->getType()->getPointerAddressSpace();
5677 std::swap(C, C2);
5678 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
5679 // The offsets have been converted to bytes. We can add bytes to an
5680 // i8* by GEP with the byte count in the first index.
5681 C = ConstantExpr::getBitCast(C, DestPtrTy);
5682 }
5683
5684 // Don't bother trying to sum two pointers. We probably can't
5685 // statically compute a load that results from it anyway.
5686 if (C2->getType()->isPointerTy())
5687 return nullptr;
5688
5689 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
5690 if (PTy->getElementType()->isStructTy())
5691 C2 = ConstantExpr::getIntegerCast(
5692 C2, Type::getInt32Ty(C->getContext()), true);
5693 C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
5694 } else
5695 C = ConstantExpr::getAdd(C, C2);
5696 }
5697 return C;
5698 }
5699 break;
5700 }
5701 case scMulExpr: {
5702 const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
5703 if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
5704 // Don't bother with pointers at all.
5705 if (C->getType()->isPointerTy()) return nullptr;
5706 for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
5707 Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
5708 if (!C2 || C2->getType()->isPointerTy()) return nullptr;
5709 C = ConstantExpr::getMul(C, C2);
5710 }
5711 return C;
5712 }
5713 break;
5714 }
5715 case scUDivExpr: {
5716 const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
5717 if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
5718 if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
5719 if (LHS->getType() == RHS->getType())
5720 return ConstantExpr::getUDiv(LHS, RHS);
5721 break;
5722 }
5723 case scSMaxExpr:
5724 case scUMaxExpr:
5725 break; // TODO: smax, umax.
5726 }
5727 return nullptr;
5728 }
5729
5730 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
5731 if (isa<SCEVConstant>(V)) return V;
5732
5733 // If this instruction is evolved from a constant-evolving PHI, compute the
5734 // exit value from the loop without using SCEVs.
5735 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
5736 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
5737 const Loop *LI = (*this->LI)[I->getParent()];
5738 if (LI && LI->getParentLoop() == L) // Looking for loop exit value.
5739 if (PHINode *PN = dyn_cast<PHINode>(I))
5740 if (PN->getParent() == LI->getHeader()) {
5741 // Okay, there is no closed form solution for the PHI node. Check
5742 // to see if the loop that contains it has a known backedge-taken
5743 // count. If so, we may be able to force computation of the exit
5744 // value.
5745 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
5746 if (const SCEVConstant *BTCC =
5747 dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
5748 // Okay, we know how many times the containing loop executes. If
5749 // this is a constant evolving PHI node, get the final value at
5750 // the specified iteration number.
5751 Constant *RV = getConstantEvolutionLoopExitValue(PN,
5752 BTCC->getValue()->getValue(),
5753 LI);
5754 if (RV) return getSCEV(RV);
5755 }
5756 }
5757
5758 // Okay, this is an expression that we cannot symbolically evaluate
5759 // into a SCEV. Check to see if it's possible to symbolically evaluate
5760 // the arguments into constants, and if so, try to constant propagate the
5761 // result. This is particularly useful for computing loop exit values.
5762 if (CanConstantFold(I)) {
5763 SmallVector<Constant *, 4> Operands;
5764 bool MadeImprovement = false;
5765 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
5766 Value *Op = I->getOperand(i);
5767 if (Constant *C = dyn_cast<Constant>(Op)) {
5768 Operands.push_back(C);
5769 continue;
5770 }
5771
5772 // If any operand is non-constant and its type is neither integer
5773 // nor pointer, don't even try to analyze it with SCEV
5774 // techniques.
5775 if (!isSCEVable(Op->getType()))
5776 return V;
5777
5778 const SCEV *OrigV = getSCEV(Op);
5779 const SCEV *OpV = getSCEVAtScope(OrigV, L);
5780 MadeImprovement |= OrigV != OpV;
5781
5782 Constant *C = BuildConstantFromSCEV(OpV);
5783 if (!C) return V;
5784 if (C->getType() != Op->getType())
5785 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
5786 Op->getType(),
5787 false),
5788 C, Op->getType());
5789 Operands.push_back(C);
5790 }
5791
5792 // Check to see if getSCEVAtScope actually made an improvement.
5793 if (MadeImprovement) {
5794 Constant *C = nullptr;
5795 const DataLayout &DL = F->getParent()->getDataLayout();
5796 if (const CmpInst *CI = dyn_cast<CmpInst>(I))
5797 C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
5798 Operands[1], DL, TLI);
5799 else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
5800 if (!LI->isVolatile())
5801 C = ConstantFoldLoadFromConstPtr(Operands[0], DL);
5802 } else
5803 C = ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands,
5804 DL, TLI);
5805 if (!C) return V;
5806 return getSCEV(C);
5807 }
5808 }
5809 }
5810
5811 // This is some other type of SCEVUnknown, just return it.
5812 return V;
5813 }
5814
5815 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
5816 // Avoid performing the look-up in the common case where the specified
5817 // expression has no loop-variant portions.
5818 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
5819 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
5820 if (OpAtScope != Comm->getOperand(i)) {
5821 // Okay, at least one of these operands is loop variant but might be
5822 // foldable. Build a new instance of the folded commutative expression.
5823 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
5824 Comm->op_begin()+i);
5825 NewOps.push_back(OpAtScope);
5826
5827 for (++i; i != e; ++i) {
5828 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
5829 NewOps.push_back(OpAtScope);
5830 }
5831 if (isa<SCEVAddExpr>(Comm))
5832 return getAddExpr(NewOps);
5833 if (isa<SCEVMulExpr>(Comm))
5834 return getMulExpr(NewOps);
5835 if (isa<SCEVSMaxExpr>(Comm))
5836 return getSMaxExpr(NewOps);
5837 if (isa<SCEVUMaxExpr>(Comm))
5838 return getUMaxExpr(NewOps);
5839 llvm_unreachable("Unknown commutative SCEV type!");
5840 }
5841 }
5842 // If we got here, all operands are loop invariant.
5843 return Comm;
5844 }
5845
5846 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
5847 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
5848 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
5849 if (LHS == Div->getLHS() && RHS == Div->getRHS())
5850 return Div; // must be loop invariant
5851 return getUDivExpr(LHS, RHS);
5852 }
5853
5854 // If this is a loop recurrence for a loop that does not contain L, then we
5855 // are dealing with the final value computed by the loop.
5856 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
5857 // First, attempt to evaluate each operand.
5858 // Avoid performing the look-up in the common case where the specified
5859 // expression has no loop-variant portions.
5860 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
5861 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
5862 if (OpAtScope == AddRec->getOperand(i))
5863 continue;
5864
5865 // Okay, at least one of these operands is loop variant but might be
5866 // foldable. Build a new instance of the folded add recurrence.
5867 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
5868 AddRec->op_begin()+i);
5869 NewOps.push_back(OpAtScope);
5870 for (++i; i != e; ++i)
5871 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));
5872
5873 const SCEV *FoldedRec =
5874 getAddRecExpr(NewOps, AddRec->getLoop(),
5875 AddRec->getNoWrapFlags(SCEV::FlagNW));
5876 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
5877 // The addrec may be folded to a nonrecurrence, for example, if the
5878 // induction variable is multiplied by zero after constant folding. Go
5879 // ahead and return the folded value.
5880 if (!AddRec)
5881 return FoldedRec;
5882 break;
5883 }
5884
5885 // If the scope is outside the addrec's loop, evaluate it by using the
5886 // loop exit value of the addrec.
5887 if (!AddRec->getLoop()->contains(L)) {
5888 // To evaluate this recurrence, we need to know how many times the AddRec
5889 // loop iterates. Compute this now.
5890 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
5891 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
5892
5893 // Then, evaluate the AddRec.
5894 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
5895 }
5896
5897 return AddRec;
5898 }
5899
5900 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
5901 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
5902 if (Op == Cast->getOperand())
5903 return Cast; // must be loop invariant
5904 return getZeroExtendExpr(Op, Cast->getType());
5905 }
5906
5907 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
5908 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
5909 if (Op == Cast->getOperand())
5910 return Cast; // must be loop invariant
5911 return getSignExtendExpr(Op, Cast->getType());
5912 }
5913
5914 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
5915 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
5916 if (Op == Cast->getOperand())
5917 return Cast; // must be loop invariant
5918 return getTruncateExpr(Op, Cast->getType());
5919 }
5920
5921 llvm_unreachable("Unknown SCEV type!");
5922 }
5923
5924 /// getSCEVAtScope - This is a convenience function which does
5925 /// getSCEVAtScope(getSCEV(V), L).
5926 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
5927 return getSCEVAtScope(getSCEV(V), L);
5928 }
5929
5930 /// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
5931 /// following equation:
5932 ///
5933 /// A * X = B (mod N)
5934 ///
5935 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of
5936 /// A and B isn't important.
5937 ///
5938 /// If the equation does not have a solution, SCEVCouldNotCompute is returned.
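///
/// For example (purely illustrative): with BW = 8, A = 4 and B = 8, we get
/// D = gcd(4, 256) = 4; B is divisible by D; A/D = 1 and its inverse modulo
/// N/D = 64 is 1, so the minimum root is X = 1 * (B/D) = 2, and indeed
/// 4 * 2 == 8 (mod 256).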
5939 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
5940 ScalarEvolution &SE) {
5941 uint32_t BW = A.getBitWidth();
5942 assert(BW == B.getBitWidth() && "Bit widths must be the same.");
5943 assert(A != 0 && "A must be non-zero.");
5944
5945 // 1. D = gcd(A, N)
5946 //
5947 // The gcd of A and N may have only one prime factor: 2. The number of
5948 // trailing zeros in A is its multiplicity.
5949 uint32_t Mult2 = A.countTrailingZeros();
5950 // D = 2^Mult2
5951
5952 // 2. Check if B is divisible by D.
5953 //
5954 // B is divisible by D if and only if the multiplicity of prime factor 2 for B
5955 // is not less than multiplicity of this prime factor for D.
5956 if (B.countTrailingZeros() < Mult2)
5957 return SE.getCouldNotCompute();
5958
5959 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
5960 // modulo (N / D).
5961 //
5962 // (N / D) may need BW+1 bits in its representation. Hence, we'll use this
5963 // bit width during computations.
5964 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
5965 APInt Mod(BW + 1, 0);
5966 Mod.setBit(BW - Mult2); // Mod = N / D
5967 APInt I = AD.multiplicativeInverse(Mod);
5968
5969 // 4. Compute the minimum unsigned root of the equation:
5970 // I * (B / D) mod (N / D)
5971 APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
5972
5973 // The result is guaranteed to be less than 2^BW so we may truncate it to BW
5974 // bits.
5975 return SE.getConstant(Result.trunc(BW));
5976 }
5977
5978 /// SolveQuadraticEquation - Find the roots of the quadratic equation for the
5979 /// given quadratic chrec {L,+,M,+,N}. This returns either the two roots (which
5980 /// might be the same) or two SCEVCouldNotCompute objects.
5981 ///
5982 static std::pair<const SCEV *,const SCEV *>
5983 SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
5984 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
5985 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
5986 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
5987 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
5988
5989 // We currently can only solve this if the coefficients are constants.
5990 if (!LC || !MC || !NC) {
5991 const SCEV *CNC = SE.getCouldNotCompute();
5992 return std::make_pair(CNC, CNC);
5993 }
5994
5995 uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
5996 const APInt &L = LC->getValue()->getValue();
5997 const APInt &M = MC->getValue()->getValue();
5998 const APInt &N = NC->getValue()->getValue();
5999 APInt Two(BitWidth, 2);
6000 APInt Four(BitWidth, 4);
6001
6002 {
6003 using namespace APIntOps;
6004 const APInt& C = L;
6005 // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
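// (At iteration i, {L,+,M,+,N} evaluates to L + M*i + N*i*(i-1)/2, which
// expands to (N/2)*i^2 + (M - N/2)*i + L; that is where A, B and C come from.)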
6006 // The B coefficient is M-N/2
6007 APInt B(M);
6008 B -= sdiv(N,Two);
6009
6010 // The A coefficient is N/2
6011 APInt A(N.sdiv(Two));
6012
6013 // Compute the B^2-4ac term.
6014 APInt SqrtTerm(B);
6015 SqrtTerm *= B;
6016 SqrtTerm -= Four * (A * C);
6017
6018 if (SqrtTerm.isNegative()) {
6019 // The loop is provably infinite.
6020 const SCEV *CNC = SE.getCouldNotCompute();
6021 return std::make_pair(CNC, CNC);
6022 }
6023
6024 // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
6025 // integer value or else APInt::sqrt() will assert.
6026 APInt SqrtVal(SqrtTerm.sqrt());
6027
6028 // Compute the two solutions for the quadratic formula.
6029 // The divisions must be performed as signed divisions.
6030 APInt NegB(-B);
6031 APInt TwoA(A << 1);
6032 if (TwoA.isMinValue()) {
6033 const SCEV *CNC = SE.getCouldNotCompute();
6034 return std::make_pair(CNC, CNC);
6035 }
6036
6037 LLVMContext &Context = SE.getContext();
6038
6039 ConstantInt *Solution1 =
6040 ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
6041 ConstantInt *Solution2 =
6042 ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));
6043
6044 return std::make_pair(SE.getConstant(Solution1),
6045 SE.getConstant(Solution2));
6046 } // end APIntOps namespace
6047 }
6048
6049 /// HowFarToZero - Return the number of times a backedge comparing the specified
6050 /// value to zero will execute. If not computable, return CouldNotCompute.
6051 ///
6052 /// This is only used for loops with an "x != y" exit test. The exit condition
6053 /// is now expressed as a single expression, V = x-y. So the exit test is
6054 /// effectively V != 0. We know and take advantage of the fact that this
6055 /// expression is only used in a comparison-with-zero context.
6056 ScalarEvolution::ExitLimit
6057 ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L, bool ControlsExit) {
6058 // If the value is a constant
6059 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
6060 // If the value is already zero, the branch will execute zero times.
6061 if (C->getValue()->isZero()) return C;
6062 return getCouldNotCompute(); // Otherwise it will loop infinitely.
6063 }
6064
6065 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
6066 if (!AddRec || AddRec->getLoop() != L)
6067 return getCouldNotCompute();
6068
6069 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
6070 // the quadratic equation to solve it.
6071 if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
6072 std::pair<const SCEV *,const SCEV *> Roots =
6073 SolveQuadraticEquation(AddRec, *this);
6074 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
6075 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
6076 if (R1 && R2) {
6077 #if 0
6078 dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1
6079 << " sol#2: " << *R2 << "\n";
6080 #endif
6081 // Pick the smallest positive root value.
6082 if (ConstantInt *CB =
6083 dyn_cast<ConstantInt>(ConstantExpr::getICmp(CmpInst::ICMP_ULT,
6084 R1->getValue(),
6085 R2->getValue()))) {
6086 if (!CB->getZExtValue())
6087 std::swap(R1, R2); // R1 is the minimum root now.
6088
6089 // We can only use this value if the chrec ends up with an exact zero
6090 // value at this index. When solving for "X*X != 5", for example, we
6091 // should not accept a root of 2.
6092 const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
6093 if (Val->isZero())
6094 return R1; // We found a quadratic root!
6095 }
6096 }
6097 return getCouldNotCompute();
6098 }
6099
6100 // Otherwise we can only handle this if it is affine.
6101 if (!AddRec->isAffine())
6102 return getCouldNotCompute();
6103
6104 // If this is an affine expression, the execution count of this branch is
6105 // the minimum unsigned root of the following equation:
6106 //
6107 // Start + Step*N = 0 (mod 2^BW)
6108 //
6109 // equivalent to:
6110 //
6111 // Step*N = -Start (mod 2^BW)
6112 //
6113 // where BW is the common bit width of Start and Step.
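//
// For example (illustrative only), {10,+,-2} satisfies 10 + (-2)*N == 0 at
// N = 5, so the backedge executes five times.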
6114
6115 // Get the initial value for the loop.
6116 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
6117 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
6118
6119 // For now we handle only constant steps.
6120 //
6121 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
6122 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
6123 // to 0; it must be counting down to equal 0. Consequently, N = Start / -Step.
6124 // We have not yet seen any such cases.
6125 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
6126 if (!StepC || StepC->getValue()->equalsInt(0))
6127 return getCouldNotCompute();
6128
6129 // For positive steps (counting up until unsigned overflow):
6130 // N = -Start/Step (as unsigned)
6131 // For negative steps (counting down to zero):
6132 // N = Start/-Step
6133 // First compute the unsigned distance from zero in the direction of Step.
6134 bool CountDown = StepC->getValue()->getValue().isNegative();
6135 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);
6136
6137 // Handle unitary steps, which cannot wraparound.
6138 // 1*N = -Start; -1*N = Start (mod 2^BW), so:
6139 // N = Distance (as unsigned)
6140 if (StepC->getValue()->equalsInt(1) || StepC->getValue()->isAllOnesValue()) {
6141 ConstantRange CR = getUnsignedRange(Start);
6142 const SCEV *MaxBECount;
6143 if (!CountDown && CR.getUnsignedMin().isMinValue())
6144 // When counting up, the worst starting value is 1, not 0.
6145 MaxBECount = CR.getUnsignedMax().isMinValue()
6146 ? getConstant(APInt::getMinValue(CR.getBitWidth()))
6147 : getConstant(APInt::getMaxValue(CR.getBitWidth()));
6148 else
6149 MaxBECount = getConstant(CountDown ? CR.getUnsignedMax()
6150 : -CR.getUnsignedMin());
6151 return ExitLimit(Distance, MaxBECount);
6152 }
6153
6154 // As a special case, handle the instance where Step is a positive power of
6155 // two. In this case, determining whether Step divides Distance evenly can be
6156 // done by counting and comparing the number of trailing zeros of Step and
6157 // Distance.
6158 if (!CountDown) {
6159 const APInt &StepV = StepC->getValue()->getValue();
6160 // StepV.isPowerOf2() returns true if StepV is a positive power of two. It
6161 // also returns true if StepV is maximally negative (eg, INT_MIN), but that
6162 // case is not handled as this code is guarded by !CountDown.
6163 if (StepV.isPowerOf2() &&
6164 GetMinTrailingZeros(Distance) >= StepV.countTrailingZeros())
6165 return getUDivExactExpr(Distance, Step);
6166 }
6167
6168 // If the condition controls loop exit (the loop exits only if the expression
6169 // is true) and the addition is no-wrap we can use unsigned divide to
6170 // compute the backedge count. In this case, the step may not divide the
6171 // distance, but we don't care because if the condition is "missed" the loop
6172 // will have undefined behavior due to wrapping.
6173 if (ControlsExit && AddRec->getNoWrapFlags(SCEV::FlagNW)) {
6174 const SCEV *Exact =
6175 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
6176 return ExitLimit(Exact, Exact);
6177 }
6178
6179 // Then, try to solve the above equation provided that Start is constant.
6180 if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
6181 return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
6182 -StartC->getValue()->getValue(),
6183 *this);
6184 return getCouldNotCompute();
6185 }
6186
6187 /// HowFarToNonZero - Return the number of times a backedge checking the
6188 /// specified value for nonzero will execute. If not computable, return
6189 /// CouldNotCompute.
6190 ScalarEvolution::ExitLimit
6191 ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
6192 // Loops that look like: while (X == 0) are very strange indeed. We don't
6193 // handle them yet except for the trivial case. This could be expanded in the
6194 // future as needed.
6195
6196 // If the value is a constant, check to see if it is known to be non-zero
6197 // already. If so, the backedge will execute zero times.
6198 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
6199 if (!C->getValue()->isNullValue())
6200 return getConstant(C->getType(), 0);
6201 return getCouldNotCompute(); // Otherwise it will loop infinitely.
6202 }
6203
6204 // We could implement others, but I really doubt anyone writes loops like
6205 // this, and if they did, they would already be constant folded.
6206 return getCouldNotCompute();
6207 }
6208
6209 /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
6210 /// (which may not be an immediate predecessor) which has exactly one
6211 /// successor from which BB is reachable, paired with that successor; the
6212 /// pair holds null pointers if no such block is found.
6213 ///
6214 std::pair<BasicBlock *, BasicBlock *>
6215 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
6216 // If the block has a unique predecessor, then there is no path from the
6217 // predecessor to the block that does not go through the direct edge
6218 // from the predecessor to the block.
6219 if (BasicBlock *Pred = BB->getSinglePredecessor())
6220 return std::make_pair(Pred, BB);
6221
6222 // A loop's header is defined to be a block that dominates the loop.
6223 // If the header has a unique predecessor outside the loop, it must be
6224 // a block that has exactly one successor that can reach the loop.
6225 if (Loop *L = LI->getLoopFor(BB))
6226 return std::make_pair(L->getLoopPredecessor(), L->getHeader());
6227
6228 return std::pair<BasicBlock *, BasicBlock *>();
6229 }
6230
6231 /// HasSameValue - SCEV structural equivalence is usually sufficient for
6232 /// testing whether two expressions are equal; however, for the purposes of
6233 /// looking for a condition guarding a loop, it can be useful to be a little
6234 /// more general, since a front-end may have replicated the controlling
6235 /// expression.
6236 ///
6237 static bool HasSameValue(const SCEV *A, const SCEV *B) {
6238 // Quick check to see if they are the same SCEV.
6239 if (A == B) return true;
6240
6241 // Otherwise, if they're both SCEVUnknown, it's possible that they hold
6242 // two different instructions with the same value. Check for this case.
6243 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
6244 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
6245 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
6246 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
6247 if (AI->isIdenticalTo(BI) && !AI->mayReadFromMemory())
6248 return true;
6249
6250 // Otherwise assume they may have a different value.
6251 return false;
6252 }
6253
6254 /// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with
6255 /// predicate Pred. Return true iff any changes were made.
6256 ///
6257 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
6258 const SCEV *&LHS, const SCEV *&RHS,
6259 unsigned Depth) {
6260 bool Changed = false;
6261
6262 // If we hit the max recursion limit bail out.
6263 if (Depth >= 3)
6264 return false;
6265
6266 // Canonicalize a constant to the right side.
6267 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
6268 // Check for both operands constant.
6269 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
6270 if (ConstantExpr::getICmp(Pred,
6271 LHSC->getValue(),
6272 RHSC->getValue())->isNullValue())
6273 goto trivially_false;
6274 else
6275 goto trivially_true;
6276 }
6277 // Otherwise swap the operands to put the constant on the right.
6278 std::swap(LHS, RHS);
6279 Pred = ICmpInst::getSwappedPredicate(Pred);
6280 Changed = true;
6281 }
6282
6283 // If we're comparing an addrec with a value which is loop-invariant in the
6284 // addrec's loop, put the addrec on the left. Also make a dominance check,
6285 // as both operands could be addrecs loop-invariant in each other's loop.
6286 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
6287 const Loop *L = AR->getLoop();
6288 if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
6289 std::swap(LHS, RHS);
6290 Pred = ICmpInst::getSwappedPredicate(Pred);
6291 Changed = true;
6292 }
6293 }
6294
6295 // If there's a constant operand, canonicalize comparisons with boundary
6296 // cases, and canonicalize *-or-equal comparisons to regular comparisons.
6297 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
6298 const APInt &RA = RC->getValue()->getValue();
6299 switch (Pred) {
6300 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
6301 case ICmpInst::ICMP_EQ:
6302 case ICmpInst::ICMP_NE:
6303 // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
6304 if (!RA)
6305 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
6306 if (const SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
6307 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
6308 ME->getOperand(0)->isAllOnesValue()) {
6309 RHS = AE->getOperand(1);
6310 LHS = ME->getOperand(1);
6311 Changed = true;
6312 }
6313 break;
6314 case ICmpInst::ICMP_UGE:
6315 if ((RA - 1).isMinValue()) {
6316 Pred = ICmpInst::ICMP_NE;
6317 RHS = getConstant(RA - 1);
6318 Changed = true;
6319 break;
6320 }
6321 if (RA.isMaxValue()) {
6322 Pred = ICmpInst::ICMP_EQ;
6323 Changed = true;
6324 break;
6325 }
6326 if (RA.isMinValue()) goto trivially_true;
6327
6328 Pred = ICmpInst::ICMP_UGT;
6329 RHS = getConstant(RA - 1);
6330 Changed = true;
6331 break;
6332 case ICmpInst::ICMP_ULE:
6333 if ((RA + 1).isMaxValue()) {
6334 Pred = ICmpInst::ICMP_NE;
6335 RHS = getConstant(RA + 1);
6336 Changed = true;
6337 break;
6338 }
6339 if (RA.isMinValue()) {
6340 Pred = ICmpInst::ICMP_EQ;
6341 Changed = true;
6342 break;
6343 }
6344 if (RA.isMaxValue()) goto trivially_true;
6345
6346 Pred = ICmpInst::ICMP_ULT;
6347 RHS = getConstant(RA + 1);
6348 Changed = true;
6349 break;
6350 case ICmpInst::ICMP_SGE:
6351 if ((RA - 1).isMinSignedValue()) {
6352 Pred = ICmpInst::ICMP_NE;
6353 RHS = getConstant(RA - 1);
6354 Changed = true;
6355 break;
6356 }
6357 if (RA.isMaxSignedValue()) {
6358 Pred = ICmpInst::ICMP_EQ;
6359 Changed = true;
6360 break;
6361 }
6362 if (RA.isMinSignedValue()) goto trivially_true;
6363
6364 Pred = ICmpInst::ICMP_SGT;
6365 RHS = getConstant(RA - 1);
6366 Changed = true;
6367 break;
6368 case ICmpInst::ICMP_SLE:
6369 if ((RA + 1).isMaxSignedValue()) {
6370 Pred = ICmpInst::ICMP_NE;
6371 RHS = getConstant(RA + 1);
6372 Changed = true;
6373 break;
6374 }
6375 if (RA.isMinSignedValue()) {
6376 Pred = ICmpInst::ICMP_EQ;
6377 Changed = true;
6378 break;
6379 }
6380 if (RA.isMaxSignedValue()) goto trivially_true;
6381
6382 Pred = ICmpInst::ICMP_SLT;
6383 RHS = getConstant(RA + 1);
6384 Changed = true;
6385 break;
6386 case ICmpInst::ICMP_UGT:
6387 if (RA.isMinValue()) {
6388 Pred = ICmpInst::ICMP_NE;
6389 Changed = true;
6390 break;
6391 }
6392 if ((RA + 1).isMaxValue()) {
6393 Pred = ICmpInst::ICMP_EQ;
6394 RHS = getConstant(RA + 1);
6395 Changed = true;
6396 break;
6397 }
6398 if (RA.isMaxValue()) goto trivially_false;
6399 break;
6400 case ICmpInst::ICMP_ULT:
6401 if (RA.isMaxValue()) {
6402 Pred = ICmpInst::ICMP_NE;
6403 Changed = true;
6404 break;
6405 }
6406 if ((RA - 1).isMinValue()) {
6407 Pred = ICmpInst::ICMP_EQ;
6408 RHS = getConstant(RA - 1);
6409 Changed = true;
6410 break;
6411 }
6412 if (RA.isMinValue()) goto trivially_false;
6413 break;
6414 case ICmpInst::ICMP_SGT:
6415 if (RA.isMinSignedValue()) {
6416 Pred = ICmpInst::ICMP_NE;
6417 Changed = true;
6418 break;
6419 }
6420 if ((RA + 1).isMaxSignedValue()) {
6421 Pred = ICmpInst::ICMP_EQ;
6422 RHS = getConstant(RA + 1);
6423 Changed = true;
6424 break;
6425 }
6426 if (RA.isMaxSignedValue()) goto trivially_false;
6427 break;
6428 case ICmpInst::ICMP_SLT:
6429 if (RA.isMaxSignedValue()) {
6430 Pred = ICmpInst::ICMP_NE;
6431 Changed = true;
6432 break;
6433 }
6434 if ((RA - 1).isMinSignedValue()) {
6435 Pred = ICmpInst::ICMP_EQ;
6436 RHS = getConstant(RA - 1);
6437 Changed = true;
6438 break;
6439 }
6440 if (RA.isMinSignedValue()) goto trivially_false;
6441 break;
6442 }
6443 }
6444
6445 // Check for obvious equality.
6446 if (HasSameValue(LHS, RHS)) {
6447 if (ICmpInst::isTrueWhenEqual(Pred))
6448 goto trivially_true;
6449 if (ICmpInst::isFalseWhenEqual(Pred))
6450 goto trivially_false;
6451 }
6452
6453 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
6454 // adding or subtracting 1 from one of the operands.
6455 switch (Pred) {
6456 case ICmpInst::ICMP_SLE:
6457 if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) {
6458 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
6459 SCEV::FlagNSW);
6460 Pred = ICmpInst::ICMP_SLT;
6461 Changed = true;
6462 } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) {
6463 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
6464 SCEV::FlagNSW);
6465 Pred = ICmpInst::ICMP_SLT;
6466 Changed = true;
6467 }
6468 break;
6469 case ICmpInst::ICMP_SGE:
6470 if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) {
6471 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
6472 SCEV::FlagNSW);
6473 Pred = ICmpInst::ICMP_SGT;
6474 Changed = true;
6475 } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) {
6476 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
6477 SCEV::FlagNSW);
6478 Pred = ICmpInst::ICMP_SGT;
6479 Changed = true;
6480 }
6481 break;
6482 case ICmpInst::ICMP_ULE:
6483 if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) {
6484 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
6485 SCEV::FlagNUW);
6486 Pred = ICmpInst::ICMP_ULT;
6487 Changed = true;
6488 } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) {
6489 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
6490 SCEV::FlagNUW);
6491 Pred = ICmpInst::ICMP_ULT;
6492 Changed = true;
6493 }
6494 break;
6495 case ICmpInst::ICMP_UGE:
6496 if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) {
6497 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
6498 SCEV::FlagNUW);
6499 Pred = ICmpInst::ICMP_UGT;
6500 Changed = true;
6501 } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) {
6502 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
6503 SCEV::FlagNUW);
6504 Pred = ICmpInst::ICMP_UGT;
6505 Changed = true;
6506 }
6507 break;
6508 default:
6509 break;
6510 }
6511
6512 // TODO: More simplifications are possible here.
6513
6514 // Recursively simplify until we either hit a recursion limit or nothing
6515 // changes.
6516 if (Changed)
6517 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1);
6518
6519 return Changed;
6520
6521 trivially_true:
6522 // Return 0 == 0.
6523 LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
6524 Pred = ICmpInst::ICMP_EQ;
6525 return true;
6526
6527 trivially_false:
6528 // Return 0 != 0.
6529 LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
6530 Pred = ICmpInst::ICMP_NE;
6531 return true;
6532 }
6533
6534 bool ScalarEvolution::isKnownNegative(const SCEV *S) {
6535 return getSignedRange(S).getSignedMax().isNegative();
6536 }
6537
6538 bool ScalarEvolution::isKnownPositive(const SCEV *S) {
6539 return getSignedRange(S).getSignedMin().isStrictlyPositive();
6540 }
6541
6542 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
6543 return !getSignedRange(S).getSignedMin().isNegative();
6544 }
6545
6546 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
6547 return !getSignedRange(S).getSignedMax().isStrictlyPositive();
6548 }
6549
6550 bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
6551 return isKnownNegative(S) || isKnownPositive(S);
6552 }
6553
6554 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
6555 const SCEV *LHS, const SCEV *RHS) {
6556 // Canonicalize the inputs first.
6557 (void)SimplifyICmpOperands(Pred, LHS, RHS);
6558
6559 // If LHS or RHS is an addrec, check to see if the condition is true in
6560 // every iteration of the loop.
6561 // If LHS and RHS are both addrec, both conditions must be true in
6562 // every iteration of the loop.
6563 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
6564 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
6565 bool LeftGuarded = false;
6566 bool RightGuarded = false;
6567 if (LAR) {
6568 const Loop *L = LAR->getLoop();
6569 if (isLoopEntryGuardedByCond(L, Pred, LAR->getStart(), RHS) &&
6570 isLoopBackedgeGuardedByCond(L, Pred, LAR->getPostIncExpr(*this), RHS)) {
6571 if (!RAR) return true;
6572 LeftGuarded = true;
6573 }
6574 }
6575 if (RAR) {
6576 const Loop *L = RAR->getLoop();
6577 if (isLoopEntryGuardedByCond(L, Pred, LHS, RAR->getStart()) &&
6578 isLoopBackedgeGuardedByCond(L, Pred, LHS, RAR->getPostIncExpr(*this))) {
6579 if (!LAR) return true;
6580 RightGuarded = true;
6581 }
6582 }
6583 if (LeftGuarded && RightGuarded)
6584 return true;
6585
6586 // Otherwise see what can be done with known constant ranges.
6587 return isKnownPredicateWithRanges(Pred, LHS, RHS);
6588 }
6589
6590 bool
6591 ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
6592 const SCEV *LHS, const SCEV *RHS) {
6593 if (HasSameValue(LHS, RHS))
6594 return ICmpInst::isTrueWhenEqual(Pred);
6595
6596 // This code is split out from isKnownPredicate because it is called from
6597 // within isLoopEntryGuardedByCond.
6598 switch (Pred) {
6599 default:
6600 llvm_unreachable("Unexpected ICmpInst::Predicate value!");
6601 case ICmpInst::ICMP_SGT:
6602 std::swap(LHS, RHS);
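// fall through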
6603 case ICmpInst::ICMP_SLT: {
6604 ConstantRange LHSRange = getSignedRange(LHS);
6605 ConstantRange RHSRange = getSignedRange(RHS);
6606 if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin()))
6607 return true;
6608 if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax()))
6609 return false;
6610 break;
6611 }
6612 case ICmpInst::ICMP_SGE:
6613 std::swap(LHS, RHS);
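// fall through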
6614 case ICmpInst::ICMP_SLE: {
6615 ConstantRange LHSRange = getSignedRange(LHS);
6616 ConstantRange RHSRange = getSignedRange(RHS);
6617 if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin()))
6618 return true;
6619 if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax()))
6620 return false;
6621 break;
6622 }
6623 case ICmpInst::ICMP_UGT:
6624 std::swap(LHS, RHS);
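// fall through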
6625 case ICmpInst::ICMP_ULT: {
6626 ConstantRange LHSRange = getUnsignedRange(LHS);
6627 ConstantRange RHSRange = getUnsignedRange(RHS);
6628 if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin()))
6629 return true;
6630 if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax()))
6631 return false;
6632 break;
6633 }
6634 case ICmpInst::ICMP_UGE:
6635 std::swap(LHS, RHS);
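// fall through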
6636 case ICmpInst::ICMP_ULE: {
6637 ConstantRange LHSRange = getUnsignedRange(LHS);
6638 ConstantRange RHSRange = getUnsignedRange(RHS);
6639 if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin()))
6640 return true;
6641 if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax()))
6642 return false;
6643 break;
6644 }
6645 case ICmpInst::ICMP_NE: {
6646 if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet())
6647 return true;
6648 if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
6649 return true;
6650
6651 const SCEV *Diff = getMinusSCEV(LHS, RHS);
6652 if (isKnownNonZero(Diff))
6653 return true;
6654 break;
6655 }
6656 case ICmpInst::ICMP_EQ:
6657 // The check at the top of the function catches the case where
6658 // the values are known to be equal.
6659 break;
6660 }
6661 return false;
6662 }
6663
6664 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
6665 /// protected by a conditional between LHS and RHS. This is used to
6666 /// eliminate casts.
6667 bool
6668 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
6669 ICmpInst::Predicate Pred,
6670 const SCEV *LHS, const SCEV *RHS) {
6671 // Interpret a null as meaning no loop, where there is obviously no guard
6672 // (interprocedural conditions notwithstanding).
6673 if (!L) return true;
6674
6675 if (isKnownPredicateWithRanges(Pred, LHS, RHS)) return true;
6676
6677 BasicBlock *Latch = L->getLoopLatch();
6678 if (!Latch)
6679 return false;
6680
6681 BranchInst *LoopContinuePredicate =
6682 dyn_cast<BranchInst>(Latch->getTerminator());
6683 if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
6684 isImpliedCond(Pred, LHS, RHS,
6685 LoopContinuePredicate->getCondition(),
6686 LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
6687 return true;
6688
6689 // Check conditions due to any @llvm.assume intrinsics.
6690 for (auto &AssumeVH : AC->assumptions()) {
6691 if (!AssumeVH)
6692 continue;
6693 auto *CI = cast<CallInst>(AssumeVH);
6694 if (!DT->dominates(CI, Latch->getTerminator()))
6695 continue;
6696
6697 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
6698 return true;
6699 }
6700
6701 struct ClearWalkingBEDominatingCondsOnExit {
6702 ScalarEvolution &SE;
6703
6704 explicit ClearWalkingBEDominatingCondsOnExit(ScalarEvolution &SE)
6705 : SE(SE) {}
6706
6707 ~ClearWalkingBEDominatingCondsOnExit() {
6708 SE.WalkingBEDominatingConds = false;
6709 }
6710 };
6711
6712 // We don't want more than one activation of the following loop on the stack
6713 // -- that can lead to O(n!) time complexity.
6714 if (WalkingBEDominatingConds)
6715 return false;
6716
6717 WalkingBEDominatingConds = true;
6718 ClearWalkingBEDominatingCondsOnExit ClearOnExit(*this);
6719
6720 // If the loop is not reachable from the entry block, we risk running into an
6721 // infinite loop as we walk up into the dom tree. These loops do not matter
6722 // anyway, so we just return a conservative answer when we see them.
6723 if (!DT->isReachableFromEntry(L->getHeader()))
6724 return false;
6725
6726 for (DomTreeNode *DTN = (*DT)[Latch], *HeaderDTN = (*DT)[L->getHeader()];
6727 DTN != HeaderDTN;
6728 DTN = DTN->getIDom()) {
6729
6730 assert(DTN && "should reach the loop header before reaching the root!");
6731
6732 BasicBlock *BB = DTN->getBlock();
6733 BasicBlock *PBB = BB->getSinglePredecessor();
6734 if (!PBB)
6735 continue;
6736
6737 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
6738 if (!ContinuePredicate || !ContinuePredicate->isConditional())
6739 continue;
6740
6741 Value *Condition = ContinuePredicate->getCondition();
6742
6743 // If we have an edge `E` within the loop body that dominates the only
6744 // latch, the condition guarding `E` also guards the backedge. This
6745 // reasoning works only for loops with a single latch.
6746
6747 BasicBlockEdge DominatingEdge(PBB, BB);
6748 if (DominatingEdge.isSingleEdge()) {
6749 // We're constructively (and conservatively) enumerating edges within the
6750 // loop body that dominate the latch. The dominator tree better agree
6751 // with us on this:
6752 assert(DT->dominates(DominatingEdge, Latch) && "should be!");
6753
6754 if (isImpliedCond(Pred, LHS, RHS, Condition,
6755 BB != ContinuePredicate->getSuccessor(0)))
6756 return true;
6757 }
6758 }
6759
6760 return false;
6761 }
6762
6763 /// isLoopEntryGuardedByCond - Test whether entry to the loop is protected
6764 /// by a conditional between LHS and RHS. This is used to help avoid max
6765 /// expressions in loop trip counts, and to eliminate casts.
6766 bool
6767 ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
6768 ICmpInst::Predicate Pred,
6769 const SCEV *LHS, const SCEV *RHS) {
6770 // Interpret a null as meaning no loop, where there is obviously no guard
6771 // (interprocedural conditions notwithstanding).
6772 if (!L) return false;
6773
6774 if (isKnownPredicateWithRanges(Pred, LHS, RHS)) return true;
6775
6776 // Starting at the loop predecessor, climb up the predecessor chain, as long
6777 // as there are predecessors that can be found that have unique successors
6778 // leading to the original header.
6779 for (std::pair<BasicBlock *, BasicBlock *>
6780 Pair(L->getLoopPredecessor(), L->getHeader());
6781 Pair.first;
6782 Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
6783
6784 BranchInst *LoopEntryPredicate =
6785 dyn_cast<BranchInst>(Pair.first->getTerminator());
6786 if (!LoopEntryPredicate ||
6787 LoopEntryPredicate->isUnconditional())
6788 continue;
6789
6790 if (isImpliedCond(Pred, LHS, RHS,
6791 LoopEntryPredicate->getCondition(),
6792 LoopEntryPredicate->getSuccessor(0) != Pair.second))
6793 return true;
6794 }
6795
6796 // Check conditions due to any @llvm.assume intrinsics.
6797 for (auto &AssumeVH : AC->assumptions()) {
6798 if (!AssumeVH)
6799 continue;
6800 auto *CI = cast<CallInst>(AssumeVH);
6801 if (!DT->dominates(CI, L->getHeader()))
6802 continue;
6803
6804 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
6805 return true;
6806 }
6807
6808 return false;
6809 }
6810
6811 /// RAII wrapper to prevent recursive application of isImpliedCond.
6812 /// ScalarEvolution's PendingLoopPredicates set must be empty unless we are
6813 /// currently evaluating isImpliedCond.
6814 struct MarkPendingLoopPredicate {
6815 Value *Cond;
6816 DenseSet<Value*> &LoopPreds;
6817 bool Pending;
6818
6819 MarkPendingLoopPredicate(Value *C, DenseSet<Value*> &LP)
6820 : Cond(C), LoopPreds(LP) {
6821 Pending = !LoopPreds.insert(Cond).second;
6822 }
6823 ~MarkPendingLoopPredicate() {
6824 if (!Pending)
6825 LoopPreds.erase(Cond);
6826 }
6827 };
6828
6829 /// isImpliedCond - Test whether the condition described by Pred, LHS,
6830 /// and RHS is true whenever the given Cond value evaluates to true.
6831 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
6832 const SCEV *LHS, const SCEV *RHS,
6833 Value *FoundCondValue,
6834 bool Inverse) {
6835 MarkPendingLoopPredicate Mark(FoundCondValue, PendingLoopPredicates);
6836 if (Mark.Pending)
6837 return false;
6838
6839 // Recursively handle And and Or conditions.
6840 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
6841 if (BO->getOpcode() == Instruction::And) {
6842 if (!Inverse)
6843 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
6844 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
6845 } else if (BO->getOpcode() == Instruction::Or) {
6846 if (Inverse)
6847 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
6848 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
6849 }
6850 }
6851
6852 ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
6853 if (!ICI) return false;
6854
6855 // We have now found a conditional branch that dominates the loop or controls
6856 // the loop latch. Check to see if it is the comparison we are looking for.
6857 ICmpInst::Predicate FoundPred;
6858 if (Inverse)
6859 FoundPred = ICI->getInversePredicate();
6860 else
6861 FoundPred = ICI->getPredicate();
6862
6863 const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
6864 const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
6865
6866 // Balance the types.
6867 if (getTypeSizeInBits(LHS->getType()) <
6868 getTypeSizeInBits(FoundLHS->getType())) {
6869 if (CmpInst::isSigned(Pred)) {
6870 LHS = getSignExtendExpr(LHS, FoundLHS->getType());
6871 RHS = getSignExtendExpr(RHS, FoundLHS->getType());
6872 } else {
6873 LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
6874 RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
6875 }
6876 } else if (getTypeSizeInBits(LHS->getType()) >
6877 getTypeSizeInBits(FoundLHS->getType())) {
6878 if (CmpInst::isSigned(FoundPred)) {
6879 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
6880 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
6881 } else {
6882 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
6883 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
6884 }
6885 }
6886
6887 // Canonicalize the query to match the way instcombine will have
6888 // canonicalized the comparison.
6889 if (SimplifyICmpOperands(Pred, LHS, RHS))
6890 if (LHS == RHS)
6891 return CmpInst::isTrueWhenEqual(Pred);
6892 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
6893 if (FoundLHS == FoundRHS)
6894 return CmpInst::isFalseWhenEqual(FoundPred);
6895
6896 // Check to see if we can make the LHS or RHS match.
6897 if (LHS == FoundRHS || RHS == FoundLHS) {
6898 if (isa<SCEVConstant>(RHS)) {
6899 std::swap(FoundLHS, FoundRHS);
6900 FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
6901 } else {
6902 std::swap(LHS, RHS);
6903 Pred = ICmpInst::getSwappedPredicate(Pred);
6904 }
6905 }
6906
6907 // Check whether the found predicate is the same as the desired predicate.
6908 if (FoundPred == Pred)
6909 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);
6910
6911 // Check whether swapping the found predicate makes it the same as the
6912 // desired predicate.
6913 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
6914 if (isa<SCEVConstant>(RHS))
6915 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
6916 else
6917 return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
6918 RHS, LHS, FoundLHS, FoundRHS);
6919 }
6920
6921 // Check if we can make progress by sharpening ranges.
6922 if (FoundPred == ICmpInst::ICMP_NE &&
6923 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {
6924
6925 const SCEVConstant *C = nullptr;
6926 const SCEV *V = nullptr;
6927
6928 if (isa<SCEVConstant>(FoundLHS)) {
6929 C = cast<SCEVConstant>(FoundLHS);
6930 V = FoundRHS;
6931 } else {
6932 C = cast<SCEVConstant>(FoundRHS);
6933 V = FoundLHS;
6934 }
6935
6936 // The guarding predicate tells us that C != V. If the known range
6937 // of V is [C, t), we can sharpen the range to [C + 1, t). The
6938 // range we consider has to correspond to the same signedness as the
6939 // predicate we're interested in folding.
6940
6941 APInt Min = ICmpInst::isSigned(Pred) ?
6942 getSignedRange(V).getSignedMin() : getUnsignedRange(V).getUnsignedMin();
6943
6944 if (Min == C->getValue()->getValue()) {
6945 // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
6946 // This is true even if (Min + 1) wraps around -- in case of
6947 // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).
6948
6949 APInt SharperMin = Min + 1;
6950
6951 switch (Pred) {
6952 case ICmpInst::ICMP_SGE:
6953 case ICmpInst::ICMP_UGE:
6954 // We know V `Pred` SharperMin. If this implies LHS `Pred`
6955 // RHS, we're done.
6956 if (isImpliedCondOperands(Pred, LHS, RHS, V,
6957 getConstant(SharperMin)))
6958 return true;
6959
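// fall through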
6960 case ICmpInst::ICMP_SGT:
6961 case ICmpInst::ICMP_UGT:
6962 // We know from the range information that (V `Pred` Min ||
6963 // V == Min). We know from the guarding condition that !(V
6964 // == Min). This gives us
6965 //
6966 // V `Pred` Min || V == Min && !(V == Min)
6967 // => V `Pred` Min
6968 //
6969 // If V `Pred` Min implies LHS `Pred` RHS, we're done.
6970
6971 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min)))
6972 return true;
6973
6974 default:
6975 // No change
6976 break;
6977 }
6978 }
6979 }
6980
6981 // Check whether the actual condition is beyond sufficient.
6982 if (FoundPred == ICmpInst::ICMP_EQ)
6983 if (ICmpInst::isTrueWhenEqual(Pred))
6984 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
6985 return true;
6986 if (Pred == ICmpInst::ICMP_NE)
6987 if (!ICmpInst::isTrueWhenEqual(FoundPred))
6988 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
6989 return true;
6990
6991 // Otherwise assume the worst.
6992 return false;
6993 }
6994
6995 /// isImpliedCondOperands - Test whether the condition described by Pred,
6996 /// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
6997 /// and FoundRHS is true.
6998 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
6999 const SCEV *LHS, const SCEV *RHS,
7000 const SCEV *FoundLHS,
7001 const SCEV *FoundRHS) {
7002 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
7003 return true;
7004
7005 return isImpliedCondOperandsHelper(Pred, LHS, RHS,
7006 FoundLHS, FoundRHS) ||
7007 // ~x < ~y --> x > y
7008 isImpliedCondOperandsHelper(Pred, LHS, RHS,
7009 getNotSCEV(FoundRHS),
7010 getNotSCEV(FoundLHS));
7011 }
7012
7013
7014 /// If Expr computes ~A, return A else return nullptr
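/// (In SCEV form, ~A is -1 - A, i.e. the add expression (-1) + (-1) * A,
/// which is the shape this matcher looks for.)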
7015 static const SCEV *MatchNotExpr(const SCEV *Expr) {
7016 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
7017 if (!Add || Add->getNumOperands() != 2) return nullptr;
7018
7019 const SCEVConstant *AddLHS = dyn_cast<SCEVConstant>(Add->getOperand(0));
7020 if (!(AddLHS && AddLHS->getValue()->getValue().isAllOnesValue()))
7021 return nullptr;
7022
7023 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
7024 if (!AddRHS || AddRHS->getNumOperands() != 2) return nullptr;
7025
7026 const SCEVConstant *MulLHS = dyn_cast<SCEVConstant>(AddRHS->getOperand(0));
7027 if (!(MulLHS && MulLHS->getValue()->getValue().isAllOnesValue()))
7028 return nullptr;
7029
7030 return AddRHS->getOperand(1);
7031 }
7032
7033
7034 /// Is MaybeMaxExpr an SMax or UMax of Candidate and some other values?
7035 template<typename MaxExprType>
7036 static bool IsMaxConsistingOf(const SCEV *MaybeMaxExpr,
7037 const SCEV *Candidate) {
7038 const MaxExprType *MaxExpr = dyn_cast<MaxExprType>(MaybeMaxExpr);
7039 if (!MaxExpr) return false;
7040
7041 auto It = std::find(MaxExpr->op_begin(), MaxExpr->op_end(), Candidate);
7042 return It != MaxExpr->op_end();
7043 }
7044
7045
7046 /// Is MaybeMinExpr an SMin or UMin of Candidate and some other values?
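///
/// Note (informal): there is no dedicated SMin/UMin SCEV node here; a min is
/// represented through its complement, e.g. smin(A, B) is built as
/// ~smax(~A, ~B).  That is why this helper first strips a "not" with
/// MatchNotExpr and then looks for Candidate's complement inside the
/// resulting max expression.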
7047 template<typename MaxExprType>
7048 static bool IsMinConsistingOf(ScalarEvolution &SE,
7049 const SCEV *MaybeMinExpr,
7050 const SCEV *Candidate) {
7051 const SCEV *MaybeMaxExpr = MatchNotExpr(MaybeMinExpr);
7052 if (!MaybeMaxExpr)
7053 return false;
7054
7055 return IsMaxConsistingOf<MaxExprType>(MaybeMaxExpr, SE.getNotSCEV(Candidate));
7056 }
7057
7058
7059 /// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
7060 /// expression?
7061 static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
7062 ICmpInst::Predicate Pred,
7063 const SCEV *LHS, const SCEV *RHS) {
7064 switch (Pred) {
7065 default:
7066 return false;
7067
7068 case ICmpInst::ICMP_SGE:
7069 std::swap(LHS, RHS);
7070 // fall through
7071 case ICmpInst::ICMP_SLE:
7072 return
7073 // min(A, ...) <= A
7074 IsMinConsistingOf<SCEVSMaxExpr>(SE, LHS, RHS) ||
7075 // A <= max(A, ...)
7076 IsMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);
7077
7078 case ICmpInst::ICMP_UGE:
7079 std::swap(LHS, RHS);
7080 // fall through
7081 case ICmpInst::ICMP_ULE:
7082 return
7083 // min(A, ...) <= A
7084 IsMinConsistingOf<SCEVUMaxExpr>(SE, LHS, RHS) ||
7085 // A <= max(A, ...)
7086 IsMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
7087 }
7088
7089 llvm_unreachable("covered switch fell through?!");
7090 }
7091
7092 /// isImpliedCondOperandsHelper - Test whether the condition described by
7093 /// Pred, LHS, and RHS is true whenever the condition described by Pred,
7094 /// FoundLHS, and FoundRHS is true.
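///
/// Informally, each case below is a transitivity argument; e.g. in the signed
/// less-than case, proving LHS s<= FoundLHS and FoundRHS s<= RHS lets us chain
/// LHS s<= FoundLHS s< FoundRHS s<= RHS into LHS s< RHS.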
7095 bool
7096 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
7097 const SCEV *LHS, const SCEV *RHS,
7098 const SCEV *FoundLHS,
7099 const SCEV *FoundRHS) {
7100 auto IsKnownPredicateFull =
7101 [this](ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
7102 return isKnownPredicateWithRanges(Pred, LHS, RHS) ||
7103 IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS);
7104 };
7105
7106 switch (Pred) {
7107 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
7108 case ICmpInst::ICMP_EQ:
7109 case ICmpInst::ICMP_NE:
7110 if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
7111 return true;
7112 break;
7113 case ICmpInst::ICMP_SLT:
7114 case ICmpInst::ICMP_SLE:
7115 if (IsKnownPredicateFull(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
7116 IsKnownPredicateFull(ICmpInst::ICMP_SGE, RHS, FoundRHS))
7117 return true;
7118 break;
7119 case ICmpInst::ICMP_SGT:
7120 case ICmpInst::ICMP_SGE:
7121 if (IsKnownPredicateFull(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
7122 IsKnownPredicateFull(ICmpInst::ICMP_SLE, RHS, FoundRHS))
7123 return true;
7124 break;
7125 case ICmpInst::ICMP_ULT:
7126 case ICmpInst::ICMP_ULE:
7127 if (IsKnownPredicateFull(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
7128 IsKnownPredicateFull(ICmpInst::ICMP_UGE, RHS, FoundRHS))
7129 return true;
7130 break;
7131 case ICmpInst::ICMP_UGT:
7132 case ICmpInst::ICMP_UGE:
7133 if (IsKnownPredicateFull(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
7134 IsKnownPredicateFull(ICmpInst::ICMP_ULE, RHS, FoundRHS))
7135 return true;
7136 break;
7137 }
7138
7139 return false;
7140 }
7141
7142 /// isImpliedCondOperandsViaRanges - helper function for isImpliedCondOperands.
7143 /// Tries to get cases like "X `sgt` 0 => X - 1 `sgt` -1".
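///
/// Informal trace of that example: with FoundLHS = X, FoundRHS = 0 and
/// Pred = sgt, the antecedent restricts X to [1, SINT_MIN); LHS = X + (-1)
/// then lies in [0, SINT_MAX); every value in that range satisfies
/// "LHS s> -1", i.e. it is contained in [0, SINT_MIN), so the implication
/// holds.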
7144 bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
7145 const SCEV *LHS,
7146 const SCEV *RHS,
7147 const SCEV *FoundLHS,
7148 const SCEV *FoundRHS) {
7149 if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
7150 // The restriction on `FoundRHS` can be lifted easily -- it exists only to
7151 // reduce the compile time impact of this optimization.
7152 return false;
7153
7154 const SCEVAddExpr *AddLHS = dyn_cast<SCEVAddExpr>(LHS);
7155 if (!AddLHS || AddLHS->getOperand(1) != FoundLHS ||
7156 !isa<SCEVConstant>(AddLHS->getOperand(0)))
7157 return false;
7158
7159 APInt ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getValue()->getValue();
7160
7161 // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
7162 // antecedent "`FoundLHS` `Pred` `FoundRHS`".
7163 ConstantRange FoundLHSRange =
7164 ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS);
7165
7166 // Since `LHS` is `FoundLHS` + `AddLHS->getOperand(0)`, we can compute a range
7167 // for `LHS`:
7168 APInt Addend =
7169 cast<SCEVConstant>(AddLHS->getOperand(0))->getValue()->getValue();
7170 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(Addend));
7171
7172 // We can also compute the range of values for `LHS` that satisfy the
7173 // consequent, "`LHS` `Pred` `RHS`":
7174 APInt ConstRHS = cast<SCEVConstant>(RHS)->getValue()->getValue();
7175 ConstantRange SatisfyingLHSRange =
7176 ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS);
7177
7178 // The antecedent implies the consequent if every value of `LHS` that
7179 // satisfies the antecedent also satisfies the consequent.
7180 return SatisfyingLHSRange.contains(LHSRange);
7181 }
7182
7183 // Verify if a linear IV with positive stride can overflow when in a
7184 // less-than comparison, knowing the invariant term of the comparison, the
7185 // stride and the knowledge of NSW/NUW flags on the recurrence.
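// Informal example (i8, unsigned, illustrative numbers): if the range of RHS
// has an unsigned max of 250 and the stride can be as large as 10, then
// MaxValue - MaxStrideMinusOne = 255 - 9 = 246 < 250, so the IV may step past
// RHS and wrap; we conservatively report a possible overflow.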
7186 bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
7187 bool IsSigned, bool NoWrap) {
7188 if (NoWrap) return false;
7189
7190 unsigned BitWidth = getTypeSizeInBits(RHS->getType());
7191 const SCEV *One = getConstant(Stride->getType(), 1);
7192
7193 if (IsSigned) {
7194 APInt MaxRHS = getSignedRange(RHS).getSignedMax();
7195 APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
7196 APInt MaxStrideMinusOne = getSignedRange(getMinusSCEV(Stride, One))
7197 .getSignedMax();
7198
7199 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
7200 return (MaxValue - MaxStrideMinusOne).slt(MaxRHS);
7201 }
7202
7203 APInt MaxRHS = getUnsignedRange(RHS).getUnsignedMax();
7204 APInt MaxValue = APInt::getMaxValue(BitWidth);
7205 APInt MaxStrideMinusOne = getUnsignedRange(getMinusSCEV(Stride, One))
7206 .getUnsignedMax();
7207
7208 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
7209 return (MaxValue - MaxStrideMinusOne).ult(MaxRHS);
7210 }
7211
7212 // Verify if a linear IV with negative stride can overflow when in a
7213 // greater-than comparison, knowing the invariant term of the comparison,
7214 // the stride and the knowledge of NSW/NUW flags on the recurrence.
7215 bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
7216 bool IsSigned, bool NoWrap) {
7217 if (NoWrap) return false;
7218
7219 unsigned BitWidth = getTypeSizeInBits(RHS->getType());
7220 const SCEV *One = getConstant(Stride->getType(), 1);
7221
7222 if (IsSigned) {
7223 APInt MinRHS = getSignedRange(RHS).getSignedMin();
7224 APInt MinValue = APInt::getSignedMinValue(BitWidth);
7225 APInt MaxStrideMinusOne = getSignedRange(getMinusSCEV(Stride, One))
7226 .getSignedMax();
7227
7228 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
7229 return (MinValue + MaxStrideMinusOne).sgt(MinRHS);
7230 }
7231
7232 APInt MinRHS = getUnsignedRange(RHS).getUnsignedMin();
7233 APInt MinValue = APInt::getMinValue(BitWidth);
7234 APInt MaxStrideMinusOne = getUnsignedRange(getMinusSCEV(Stride, One))
7235 .getUnsignedMax();
7236
7237 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
7238 return (MinValue + MaxStrideMinusOne).ugt(MinRHS);
7239 }
7240
7241 // Compute the backedge taken count knowing the interval difference, the
7242 // stride and presence of the equality in the comparison.
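// Informal example with made-up numbers: for Delta = 10 and Step = 2 this
// returns (10 + (2 - 1)) /u 2 = 5 when Equality is false, and
// (10 + 2) /u 2 = 6 when Equality is true (the iteration that reaches
// equality is counted as well).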
7243 const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step,
7244 bool Equality) {
7245 const SCEV *One = getConstant(Step->getType(), 1);
7246 Delta = Equality ? getAddExpr(Delta, Step)
7247 : getAddExpr(Delta, getMinusSCEV(Step, One));
7248 return getUDivExpr(Delta, Step);
7249 }
7250
7251 /// HowManyLessThans - Return the number of times a backedge containing the
7252 /// specified less-than comparison will execute. If not computable, return
7253 /// CouldNotCompute.
7254 ///
7255 /// @param ControlsExit is true when the LHS < RHS condition directly controls
7256 /// the branch (the loop exits only if the condition is true). In this case, we
7257 /// can use NoWrapFlags to skip overflow checks.
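///
/// Minimal sketch (illustrative only): for an IV {0,+,1}<%L> tested as
/// "IV u< %n" with a loop-invariant %n, Start = 0, Stride = 1, End folds to
/// %n, and BECount = ((%n - 0) + (1 - 1)) /u 1 = %n.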
7258 ScalarEvolution::ExitLimit
7259 ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
7260 const Loop *L, bool IsSigned,
7261 bool ControlsExit) {
7262 // We handle only IV < Invariant
7263 if (!isLoopInvariant(RHS, L))
7264 return getCouldNotCompute();
7265
7266 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
7267
7268 // Avoid weird loops
7269 if (!IV || IV->getLoop() != L || !IV->isAffine())
7270 return getCouldNotCompute();
7271
7272 bool NoWrap = ControlsExit &&
7273 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);
7274
7275 const SCEV *Stride = IV->getStepRecurrence(*this);
7276
7277 // Avoid negative or zero stride values
7278 if (!isKnownPositive(Stride))
7279 return getCouldNotCompute();
7280
7281 // Avoid proven overflow cases: this will ensure that the backedge taken count
7282 // will not generate any unsigned overflow. Relaxed no-overflow conditions
7283 // exploit NoWrapFlags, allowing us to optimize in the presence of undefined
7284 // behavior, as in the C language.
7285 if (!Stride->isOne() && doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
7286 return getCouldNotCompute();
7287
7288 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
7289 : ICmpInst::ICMP_ULT;
7290 const SCEV *Start = IV->getStart();
7291 const SCEV *End = RHS;
7292 if (!isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS)) {
7293 const SCEV *Diff = getMinusSCEV(RHS, Start);
7294 // If we have NoWrap set, then we can assume that the increment won't
7295 // overflow, in which case if RHS - Start is a constant, we don't need to
7296 // do a max operation since we can just figure it out statically
7297 if (NoWrap && isa<SCEVConstant>(Diff)) {
7298 APInt D = cast<SCEVConstant>(Diff)->getValue()->getValue();
7299 if (D.isNegative())
7300 End = Start;
7301 } else
7302 End = IsSigned ? getSMaxExpr(RHS, Start)
7303 : getUMaxExpr(RHS, Start);
7304 }
7305
7306 const SCEV *BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);
7307
7308 APInt MinStart = IsSigned ? getSignedRange(Start).getSignedMin()
7309 : getUnsignedRange(Start).getUnsignedMin();
7310
7311 APInt MinStride = IsSigned ? getSignedRange(Stride).getSignedMin()
7312 : getUnsignedRange(Stride).getUnsignedMin();
7313
7314 unsigned BitWidth = getTypeSizeInBits(LHS->getType());
7315 APInt Limit = IsSigned ? APInt::getSignedMaxValue(BitWidth) - (MinStride - 1)
7316 : APInt::getMaxValue(BitWidth) - (MinStride - 1);
7317
7318 // Although End can be a MAX expression, we estimate MaxEnd considering only
7319 // the case End = RHS. This is safe because in the other case (End - Start)
7320 // is zero, leading to a zero maximum backedge taken count.
7321 APInt MaxEnd =
7322 IsSigned ? APIntOps::smin(getSignedRange(RHS).getSignedMax(), Limit)
7323 : APIntOps::umin(getUnsignedRange(RHS).getUnsignedMax(), Limit);
7324
7325 const SCEV *MaxBECount;
7326 if (isa<SCEVConstant>(BECount))
7327 MaxBECount = BECount;
7328 else
7329 MaxBECount = computeBECount(getConstant(MaxEnd - MinStart),
7330 getConstant(MinStride), false);
7331
7332 if (isa<SCEVCouldNotCompute>(MaxBECount))
7333 MaxBECount = BECount;
7334
7335 return ExitLimit(BECount, MaxBECount);
7336 }
7337
7338 ScalarEvolution::ExitLimit
7339 ScalarEvolution::HowManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
7340 const Loop *L, bool IsSigned,
7341 bool ControlsExit) {
7342 // We handle only IV > Invariant
7343 if (!isLoopInvariant(RHS, L))
7344 return getCouldNotCompute();
7345
7346 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
7347
7348 // Avoid weird loops
7349 if (!IV || IV->getLoop() != L || !IV->isAffine())
7350 return getCouldNotCompute();
7351
7352 bool NoWrap = ControlsExit &&
7353 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);
7354
7355 const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));
7356
7357 // Avoid negative or zero stride values
7358 if (!isKnownPositive(Stride))
7359 return getCouldNotCompute();
7360
7361 // Avoid proven overflow cases: this will ensure that the backedge taken count
7362 // will not generate any unsigned overflow. Relaxed no-overflow conditions
7363 // exploit NoWrapFlags, allowing us to optimize in the presence of undefined
7364 // behavior, as in the C language.
7365 if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
7366 return getCouldNotCompute();
7367
7368 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
7369 : ICmpInst::ICMP_UGT;
7370
7371 const SCEV *Start = IV->getStart();
7372 const SCEV *End = RHS;
7373 if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) {
7374 const SCEV *Diff = getMinusSCEV(RHS, Start);
7375 // If we have NoWrap set, then we can assume that the increment won't
7376 // overflow, in which case if RHS - Start is a constant, we don't need to
7377 // do a max operation since we can just figure it out statically
7378 if (NoWrap && isa<SCEVConstant>(Diff)) {
7379 APInt D = cast<SCEVConstant>(Diff)->getValue()->getValue();
7380 if (!D.isNegative())
7381 End = Start;
7382 } else
7383 End = IsSigned ? getSMinExpr(RHS, Start)
7384 : getUMinExpr(RHS, Start);
7385 }
7386
7387 const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false);
7388
7389 APInt MaxStart = IsSigned ? getSignedRange(Start).getSignedMax()
7390 : getUnsignedRange(Start).getUnsignedMax();
7391
7392 APInt MinStride = IsSigned ? getSignedRange(Stride).getSignedMin()
7393 : getUnsignedRange(Stride).getUnsignedMin();
7394
7395 unsigned BitWidth = getTypeSizeInBits(LHS->getType());
7396 APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
7397 : APInt::getMinValue(BitWidth) + (MinStride - 1);
7398
7399 // Although End can be a MIN expression, we estimate MinEnd considering only
7400 // the case End = RHS. This is safe because in the other case (Start - End)
7401 // is zero, leading to a zero maximum backedge taken count.
7402 APInt MinEnd =
7403 IsSigned ? APIntOps::smax(getSignedRange(RHS).getSignedMin(), Limit)
7404 : APIntOps::umax(getUnsignedRange(RHS).getUnsignedMin(), Limit);
7405
7406
7407 const SCEV *MaxBECount = getCouldNotCompute();
7408 if (isa<SCEVConstant>(BECount))
7409 MaxBECount = BECount;
7410 else
7411 MaxBECount = computeBECount(getConstant(MaxStart - MinEnd),
7412 getConstant(MinStride), false);
7413
7414 if (isa<SCEVCouldNotCompute>(MaxBECount))
7415 MaxBECount = BECount;
7416
7417 return ExitLimit(BECount, MaxBECount);
7418 }
7419
7420 /// getNumIterationsInRange - Return the number of iterations of this loop that
7421 /// produce values in the specified constant range. Another way of looking at
7422 /// this is that it returns the first iteration number where the value is not in
7423 /// the range, thus computing the exit count. If the iteration count can't
7424 /// be computed, an instance of SCEVCouldNotCompute is returned.
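///
/// Informal example: for {0,+,4} and the range [0, 10), the values are
/// 0, 4, 8, 12, ...; the affine case below computes End = 10 - 1 = 9 and
/// ExitVal = (9 + 4) /u 4 = 3, and indeed iteration 3 produces 12, the first
/// value outside the range, while iteration 2 still produces 8 inside it.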
7425 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
7426 ScalarEvolution &SE) const {
7427 if (Range.isFullSet()) // Infinite loop.
7428 return SE.getCouldNotCompute();
7429
7430 // If the start is a non-zero constant, shift the range to simplify things.
7431 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
7432 if (!SC->getValue()->isZero()) {
7433 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
7434 Operands[0] = SE.getConstant(SC->getType(), 0);
7435 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
7436 getNoWrapFlags(FlagNW));
7437 if (const SCEVAddRecExpr *ShiftedAddRec =
7438 dyn_cast<SCEVAddRecExpr>(Shifted))
7439 return ShiftedAddRec->getNumIterationsInRange(
7440 Range.subtract(SC->getValue()->getValue()), SE);
7441 // This is strange and shouldn't happen.
7442 return SE.getCouldNotCompute();
7443 }
7444
7445 // The only time we can solve this is when we have all constant indices.
7446 // Otherwise, we cannot determine the overflow conditions.
7447 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
7448 if (!isa<SCEVConstant>(getOperand(i)))
7449 return SE.getCouldNotCompute();
7450
7451
7452 // Okay at this point we know that all elements of the chrec are constants and
7453 // that the start element is zero.
7454
7455 // First check to see if the range contains zero. If not, the first
7456 // iteration exits.
7457 unsigned BitWidth = SE.getTypeSizeInBits(getType());
7458 if (!Range.contains(APInt(BitWidth, 0)))
7459 return SE.getConstant(getType(), 0);
7460
7461 if (isAffine()) {
7462 // If this is an affine expression then we have this situation:
7463 // Solve {0,+,A} in Range === Ax in Range
7464
7465 // We know that zero is in the range. If A is positive then we know that
7466 // the upper value of the range must be the first possible exit value.
7467 // If A is negative then the lower of the range is the last possible loop
7468 // value. Also note that we already checked for a full range.
7469 APInt One(BitWidth,1);
7470 APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
7471 APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
7472
7473 // The exit value should be (End+A)/A.
7474 APInt ExitVal = (End + A).udiv(A);
7475 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);
7476
7477 // Evaluate at the exit value. If we really did fall out of the valid
7478 // range, then we computed our trip count, otherwise wrap around or other
7479 // things must have happened.
7480 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
7481 if (Range.contains(Val->getValue()))
7482 return SE.getCouldNotCompute(); // Something strange happened
7483
7484 // Ensure that the previous value is in the range. This is a sanity check.
7485 assert(Range.contains(
7486 EvaluateConstantChrecAtConstant(this,
7487 ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
7488 "Linear scev computation is off in a bad way!");
7489 return SE.getConstant(ExitValue);
7490 } else if (isQuadratic()) {
7491 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
7492 // quadratic equation to solve it. To do this, we must frame our problem in
7493 // terms of figuring out when zero is crossed, instead of when
7494 // Range.getUpper() is crossed.
7495 SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
7496 NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
7497 const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop(),
7498 // getNoWrapFlags(FlagNW)
7499 FlagAnyWrap);
7500
7501 // Next, solve the constructed addrec
7502 std::pair<const SCEV *,const SCEV *> Roots =
7503 SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
7504 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
7505 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
7506 if (R1) {
7507 // Pick the smallest positive root value.
7508 if (ConstantInt *CB =
7509 dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
7510 R1->getValue(), R2->getValue()))) {
7511 if (!CB->getZExtValue())
7512 std::swap(R1, R2); // R1 is the minimum root now.
7513
7514 // Make sure the root is not off by one. The returned iteration should
7515 // not be in the range, but the previous one should be. When solving
7516 // for "X*X < 5", for example, we should not return a root of 2.
7517 ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
7518 R1->getValue(),
7519 SE);
7520 if (Range.contains(R1Val->getValue())) {
7521 // The next iteration must be out of the range...
7522 ConstantInt *NextVal =
7523 ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1);
7524
7525 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
7526 if (!Range.contains(R1Val->getValue()))
7527 return SE.getConstant(NextVal);
7528 return SE.getCouldNotCompute(); // Something strange happened
7529 }
7530
7531 // If R1 was not in the range, then it is a good return value. Make
7532 // sure that R1-1 WAS in the range though, just in case.
7533 ConstantInt *NextVal =
7534 ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1);
7535 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
7536 if (Range.contains(R1Val->getValue()))
7537 return R1;
7538 return SE.getCouldNotCompute(); // Something strange happened
7539 }
7540 }
7541 }
7542
7543 return SE.getCouldNotCompute();
7544 }
7545
7546 namespace {
7547 struct FindUndefs {
7548 bool Found;
7549 FindUndefs() : Found(false) {}
7550
7551 bool follow(const SCEV *S) {
7552 if (const SCEVUnknown *C = dyn_cast<SCEVUnknown>(S)) {
7553 if (isa<UndefValue>(C->getValue()))
7554 Found = true;
7555 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
7556 if (isa<UndefValue>(C->getValue()))
7557 Found = true;
7558 }
7559
7560 // Keep looking if we haven't found it yet.
7561 return !Found;
7562 }
7563 bool isDone() const {
7564 // Stop recursion if we have found an undef.
7565 return Found;
7566 }
7567 };
7568 }
7569
7570 // Return true when S contains at least one undef value.
7571 static inline bool
7572 containsUndefs(const SCEV *S) {
7573 FindUndefs F;
7574 SCEVTraversal<FindUndefs> ST(F);
7575 ST.visitAll(S);
7576
7577 return F.Found;
7578 }
7579
7580 namespace {
7581 // Collect all steps of SCEV expressions.
7582 struct SCEVCollectStrides {
7583 ScalarEvolution &SE;
7584 SmallVectorImpl<const SCEV *> &Strides;
7585
7586 SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
7587 : SE(SE), Strides(S) {}
7588
7589 bool follow(const SCEV *S) {
7590 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
7591 Strides.push_back(AR->getStepRecurrence(SE));
7592 return true;
7593 }
7594 bool isDone() const { return false; }
7595 };
7596
7597 // Collect all SCEVUnknown and SCEVMulExpr expressions.
7598 struct SCEVCollectTerms {
7599 SmallVectorImpl<const SCEV *> &Terms;
7600
7601 SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T)
7602 : Terms(T) {}
7603
7604 bool follow(const SCEV *S) {
7605 if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S)) {
7606 if (!containsUndefs(S))
7607 Terms.push_back(S);
7608
7609 // Stop recursion: once we collected a term, do not walk its operands.
7610 return false;
7611 }
7612
7613 // Keep looking.
7614 return true;
7615 }
7616 bool isDone() const { return false; }
7617 };
7618 }
7619
7620 /// Find parametric terms in this SCEVAddRecExpr.
7621 void SCEVAddRecExpr::collectParametricTerms(
7622 ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &Terms) const {
7623 SmallVector<const SCEV *, 4> Strides;
7624 SCEVCollectStrides StrideCollector(SE, Strides);
7625 visitAll(this, StrideCollector);
7626
7627 DEBUG({
7628 dbgs() << "Strides:\n";
7629 for (const SCEV *S : Strides)
7630 dbgs() << *S << "\n";
7631 });
7632
7633 for (const SCEV *S : Strides) {
7634 SCEVCollectTerms TermCollector(Terms);
7635 visitAll(S, TermCollector);
7636 }
7637
7638 DEBUG({
7639 dbgs() << "Terms:\n";
7640 for (const SCEV *T : Terms)
7641 dbgs() << *T << "\n";
7642 });
7643 }
7644
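// Informal trace (reusing the A[n][m][o] example documented before
// delinearize below): after the terms have been divided by the element size
// and constants dropped, Terms = {%m * %o, %o}.  The last term %o becomes
// Step, every term is divided by it and constants are removed, leaving {%m}
// for the recursive call; the collected Sizes come back as {%m, %o}, and the
// caller appends the element size afterwards.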
7645 static bool findArrayDimensionsRec(ScalarEvolution &SE,
7646 SmallVectorImpl<const SCEV *> &Terms,
7647 SmallVectorImpl<const SCEV *> &Sizes) {
7648 int Last = Terms.size() - 1;
7649 const SCEV *Step = Terms[Last];
7650
7651 // End of recursion.
7652 if (Last == 0) {
7653 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) {
7654 SmallVector<const SCEV *, 2> Qs;
7655 for (const SCEV *Op : M->operands())
7656 if (!isa<SCEVConstant>(Op))
7657 Qs.push_back(Op);
7658
7659 Step = SE.getMulExpr(Qs);
7660 }
7661
7662 Sizes.push_back(Step);
7663 return true;
7664 }
7665
7666 for (const SCEV *&Term : Terms) {
7667 // Normalize the terms before the next call to findArrayDimensionsRec.
7668 const SCEV *Q, *R;
7669 SCEVDivision::divide(SE, Term, Step, &Q, &R);
7670
7671 // Bail out when GCD does not evenly divide one of the terms.
7672 if (!R->isZero())
7673 return false;
7674
7675 Term = Q;
7676 }
7677
7678 // Remove all SCEVConstants.
7679 Terms.erase(std::remove_if(Terms.begin(), Terms.end(), [](const SCEV *E) {
7680 return isa<SCEVConstant>(E);
7681 }),
7682 Terms.end());
7683
7684 if (Terms.size() > 0)
7685 if (!findArrayDimensionsRec(SE, Terms, Sizes))
7686 return false;
7687
7688 Sizes.push_back(Step);
7689 return true;
7690 }
7691
7692 namespace {
7693 struct FindParameter {
7694 bool FoundParameter;
7695 FindParameter() : FoundParameter(false) {}
7696
7697 bool follow(const SCEV *S) {
7698 if (isa<SCEVUnknown>(S)) {
7699 FoundParameter = true;
7700 // Stop recursion: we found a parameter.
7701 return false;
7702 }
7703 // Keep looking.
7704 return true;
7705 }
7706 bool isDone() const {
7707 // Stop recursion if we have found a parameter.
7708 return FoundParameter;
7709 }
7710 };
7711 }
7712
7713 // Returns true when S contains at least one SCEVUnknown parameter.
7714 static inline bool
7715 containsParameters(const SCEV *S) {
7716 FindParameter F;
7717 SCEVTraversal<FindParameter> ST(F);
7718 ST.visitAll(S);
7719
7720 return F.FoundParameter;
7721 }
7722
7723 // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter.
7724 static inline bool
7725 containsParameters(SmallVectorImpl<const SCEV *> &Terms) {
7726 for (const SCEV *T : Terms)
7727 if (containsParameters(T))
7728 return true;
7729 return false;
7730 }
7731
7732 // Return the number of product terms in S.
7733 static inline int numberOfTerms(const SCEV *S) {
7734 if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S))
7735 return Expr->getNumOperands();
7736 return 1;
7737 }
7738
7739 static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) {
7740 if (isa<SCEVConstant>(T))
7741 return nullptr;
7742
7743 if (isa<SCEVUnknown>(T))
7744 return T;
7745
7746 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) {
7747 SmallVector<const SCEV *, 2> Factors;
7748 for (const SCEV *Op : M->operands())
7749 if (!isa<SCEVConstant>(Op))
7750 Factors.push_back(Op);
7751
7752 return SE.getMulExpr(Factors);
7753 }
7754
7755 return T;
7756 }
7757
7758 /// Return the size of an element read or written by Inst.
7759 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
7760 Type *Ty;
7761 if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
7762 Ty = Store->getValueOperand()->getType();
7763 else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
7764 Ty = Load->getType();
7765 else
7766 return nullptr;
7767
7768 Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
7769 return getSizeOfExpr(ETy, Ty);
7770 }
7771
7772 /// Second step of delinearization: compute the array dimensions Sizes from the
7773 /// set of Terms extracted from the memory access function of this SCEVAddRec.
7774 void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
7775 SmallVectorImpl<const SCEV *> &Sizes,
7776 const SCEV *ElementSize) const {
7777
7778 if (Terms.size() < 1 || !ElementSize)
7779 return;
7780
7781 // Early return when Terms do not contain parameters: we do not delinearize
7782 // non-parametric SCEVs.
7783 if (!containsParameters(Terms))
7784 return;
7785
7786 DEBUG({
7787 dbgs() << "Terms:\n";
7788 for (const SCEV *T : Terms)
7789 dbgs() << *T << "\n";
7790 });
7791
7792 // Remove duplicates.
7793 std::sort(Terms.begin(), Terms.end());
7794 Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());
7795
7796 // Put larger terms first.
7797 std::sort(Terms.begin(), Terms.end(), [](const SCEV *LHS, const SCEV *RHS) {
7798 return numberOfTerms(LHS) > numberOfTerms(RHS);
7799 });
7800
7801 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
7802
7803 // Divide all terms by the element size.
7804 for (const SCEV *&Term : Terms) {
7805 const SCEV *Q, *R;
7806 SCEVDivision::divide(SE, Term, ElementSize, &Q, &R);
7807 Term = Q;
7808 }
7809
7810 SmallVector<const SCEV *, 4> NewTerms;
7811
7812 // Remove constant factors.
7813 for (const SCEV *T : Terms)
7814 if (const SCEV *NewT = removeConstantFactors(SE, T))
7815 NewTerms.push_back(NewT);
7816
7817 DEBUG({
7818 dbgs() << "Terms after sorting:\n";
7819 for (const SCEV *T : NewTerms)
7820 dbgs() << *T << "\n";
7821 });
7822
7823 if (NewTerms.empty() ||
7824 !findArrayDimensionsRec(SE, NewTerms, Sizes)) {
7825 Sizes.clear();
7826 return;
7827 }
7828
7829 // The last element to be pushed into Sizes is the size of an element.
7830 Sizes.push_back(ElementSize);
7831
7832 DEBUG({
7833 dbgs() << "Sizes:\n";
7834 for (const SCEV *S : Sizes)
7835 dbgs() << *S << "\n";
7836 });
7837 }
7838
7839 /// Third step of delinearization: compute the access functions for the
7840 /// Subscripts based on the dimensions in Sizes.
7841 void SCEVAddRecExpr::computeAccessFunctions(
7842 ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &Subscripts,
7843 SmallVectorImpl<const SCEV *> &Sizes) const {
7844
7845 // Early exit in case this SCEV is not an affine multivariate function.
7846 if (Sizes.empty() || !this->isAffine())
7847 return;
7848
7849 const SCEV *Res = this;
7850 int Last = Sizes.size() - 1;
7851 for (int i = Last; i >= 0; i--) {
7852 const SCEV *Q, *R;
7853 SCEVDivision::divide(SE, Res, Sizes[i], &Q, &R);
7854
7855 DEBUG({
7856 dbgs() << "Res: " << *Res << "\n";
7857 dbgs() << "Sizes[i]: " << *Sizes[i] << "\n";
7858 dbgs() << "Res divided by Sizes[i]:\n";
7859 dbgs() << "Quotient: " << *Q << "\n";
7860 dbgs() << "Remainder: " << *R << "\n";
7861 });
7862
7863 Res = Q;
7864
7865 // Do not record the last subscript corresponding to the size of elements in
7866 // the array.
7867 if (i == Last) {
7868
7869 // Bail out if the remainder is too complex.
7870 if (isa<SCEVAddRecExpr>(R)) {
7871 Subscripts.clear();
7872 Sizes.clear();
7873 return;
7874 }
7875
7876 continue;
7877 }
7878
7879 // Record the access function for the current subscript.
7880 Subscripts.push_back(R);
7881 }
7882
7883 // Also push the remainder of the last division in the last position: it will
7884 // be the access function of the innermost dimension.
7885 Subscripts.push_back(Res);
7886
7887 std::reverse(Subscripts.begin(), Subscripts.end());
7888
7889 DEBUG({
7890 dbgs() << "Subscripts:\n";
7891 for (const SCEV *S : Subscripts)
7892 dbgs() << *S << "\n";
7893 });
7894 }
7895
7896 /// Splits the SCEV into two vectors of SCEVs representing the subscripts and
7897 /// sizes of an array access. Returns the remainder of the delinearization that
7898 /// is the offset start of the array. The SCEV->delinearize algorithm computes
7899 /// the multiples of SCEV coefficients: that is a pattern matching of
7900 /// subexpressions in the stride and base of a SCEV corresponding to the
7901 /// computation of a GCD (greatest common divisor) of base and stride. When
7902 /// SCEV->delinearize fails, it returns the SCEV unchanged.
7903 ///
7904 /// For example: when analyzing the memory access A[i][j][k] in this loop nest
7905 ///
7906 /// void foo(long n, long m, long o, double A[n][m][o]) {
7907 ///
7908 /// for (long i = 0; i < n; i++)
7909 /// for (long j = 0; j < m; j++)
7910 /// for (long k = 0; k < o; k++)
7911 /// A[i][j][k] = 1.0;
7912 /// }
7913 ///
7914 /// the delinearization input is the following AddRec SCEV:
7915 ///
7916 /// AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
7917 ///
7918 /// From this SCEV, we are able to say that the base offset of the access is %A
7919 /// because it appears as an offset that does not divide any of the strides in
7920 /// the loops:
7921 ///
7922 /// CHECK: Base offset: %A
7923 ///
7924 /// and then SCEV->delinearize determines the size of some of the dimensions of
7925 /// the array as these are the multiples by which the strides are happening:
7926 ///
7927 /// CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
7928 ///
7929 /// Note that the outermost dimension remains of UnknownSize because there are
7930 /// no strides that would help identify the size of the last dimension: when
7931 /// the array has been statically allocated, one could compute the size of that
7932 /// dimension by dividing the overall size of the array by the size of the known
7933 /// dimensions: %m * %o * 8.
7934 ///
7935 /// Finally, delinearize provides the access functions for the array reference
7936 /// that corresponds to A[i][j][k] of the above C testcase:
7937 ///
7938 /// CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
7939 ///
7940 /// The testcases check the output of a function pass, DelinearizationPass,
7941 /// which walks through all loads and stores of a function asking for the SCEV
7942 /// of the memory access with respect to all enclosing loops, calling
7943 /// SCEV->delinearize on that and printing the results.
7944
7945 void SCEVAddRecExpr::delinearize(ScalarEvolution &SE,
7946 SmallVectorImpl<const SCEV *> &Subscripts,
7947 SmallVectorImpl<const SCEV *> &Sizes,
7948 const SCEV *ElementSize) const {
7949 // First step: collect parametric terms.
7950 SmallVector<const SCEV *, 4> Terms;
7951 collectParametricTerms(SE, Terms);
7952
7953 if (Terms.empty())
7954 return;
7955
7956 // Second step: find subscript sizes.
7957 SE.findArrayDimensions(Terms, Sizes, ElementSize);
7958
7959 if (Sizes.empty())
7960 return;
7961
7962 // Third step: compute the access functions for each subscript.
7963 computeAccessFunctions(SE, Subscripts, Sizes);
7964
7965 if (Subscripts.empty())
7966 return;
7967
7968 DEBUG({
7969 dbgs() << "succeeded to delinearize " << *this << "\n";
7970 dbgs() << "ArrayDecl[UnknownSize]";
7971 for (const SCEV *S : Sizes)
7972 dbgs() << "[" << *S << "]";
7973
7974 dbgs() << "\nArrayRef";
7975 for (const SCEV *S : Subscripts)
7976 dbgs() << "[" << *S << "]";
7977 dbgs() << "\n";
7978 });
7979 }
7980
7981 //===----------------------------------------------------------------------===//
7982 // SCEVCallbackVH Class Implementation
7983 //===----------------------------------------------------------------------===//
7984
7985 void ScalarEvolution::SCEVCallbackVH::deleted() {
7986 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
7987 if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
7988 SE->ConstantEvolutionLoopExitValue.erase(PN);
7989 SE->ValueExprMap.erase(getValPtr());
7990 // this now dangles!
7991 }
7992
7993 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
7994 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
7995
7996 // Forget all the expressions associated with users of the old value,
7997 // so that future queries will recompute the expressions using the new
7998 // value.
7999 Value *Old = getValPtr();
8000 SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end());
8001 SmallPtrSet<User *, 8> Visited;
8002 while (!Worklist.empty()) {
8003 User *U = Worklist.pop_back_val();
8004 // Deleting the Old value will cause this to dangle. Postpone
8005 // that until everything else is done.
8006 if (U == Old)
8007 continue;
8008 if (!Visited.insert(U).second)
8009 continue;
8010 if (PHINode *PN = dyn_cast<PHINode>(U))
8011 SE->ConstantEvolutionLoopExitValue.erase(PN);
8012 SE->ValueExprMap.erase(U);
8013 Worklist.insert(Worklist.end(), U->user_begin(), U->user_end());
8014 }
8015 // Delete the Old value.
8016 if (PHINode *PN = dyn_cast<PHINode>(Old))
8017 SE->ConstantEvolutionLoopExitValue.erase(PN);
8018 SE->ValueExprMap.erase(Old);
8019 // this now dangles!
8020 }
8021
8022 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
8023 : CallbackVH(V), SE(se) {}
8024
8025 //===----------------------------------------------------------------------===//
8026 // ScalarEvolution Class Implementation
8027 //===----------------------------------------------------------------------===//
8028
8029 ScalarEvolution::ScalarEvolution()
8030 : FunctionPass(ID), WalkingBEDominatingConds(false), ValuesAtScopes(64),
8031 LoopDispositions(64), BlockDispositions(64), FirstUnknown(nullptr) {
8032 initializeScalarEvolutionPass(*PassRegistry::getPassRegistry());
8033 }
8034
8035 bool ScalarEvolution::runOnFunction(Function &F) {
8036 this->F = &F;
8037 AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
8038 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
8039 TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
8040 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
8041 return false;
8042 }
8043
8044 void ScalarEvolution::releaseMemory() {
8045 // Iterate through all the SCEVUnknown instances and call their
8046 // destructors, so that they release their references to their values.
8047 for (SCEVUnknown *U = FirstUnknown; U; U = U->Next)
8048 U->~SCEVUnknown();
8049 FirstUnknown = nullptr;
8050
8051 ValueExprMap.clear();
8052
8053 // Free any extra memory created for ExitNotTakenInfo in the unlikely event
8054 // that a loop had multiple computable exits.
8055 for (DenseMap<const Loop*, BackedgeTakenInfo>::iterator I =
8056 BackedgeTakenCounts.begin(), E = BackedgeTakenCounts.end();
8057 I != E; ++I) {
8058 I->second.clear();
8059 }
8060
8061 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
8062 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!");
8063
8064 BackedgeTakenCounts.clear();
8065 ConstantEvolutionLoopExitValue.clear();
8066 ValuesAtScopes.clear();
8067 LoopDispositions.clear();
8068 BlockDispositions.clear();
8069 UnsignedRanges.clear();
8070 SignedRanges.clear();
8071 UniqueSCEVs.clear();
8072 SCEVAllocator.Reset();
8073 }
8074
8075 void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
8076 AU.setPreservesAll();
8077 AU.addRequired<AssumptionCacheTracker>();
8078 AU.addRequiredTransitive<LoopInfoWrapperPass>();
8079 AU.addRequiredTransitive<DominatorTreeWrapperPass>();
8080 AU.addRequired<TargetLibraryInfoWrapperPass>();
8081 }
8082
8083 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
8084 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
8085 }
8086
8087 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
8088 const Loop *L) {
8089 // Print all inner loops first
8090 for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
8091 PrintLoopInfo(OS, SE, *I);
8092
8093 OS << "Loop ";
8094 L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
8095 OS << ": ";
8096
8097 SmallVector<BasicBlock *, 8> ExitBlocks;
8098 L->getExitBlocks(ExitBlocks);
8099 if (ExitBlocks.size() != 1)
8100 OS << "<multiple exits> ";
8101
8102 if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
8103 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
8104 } else {
8105 OS << "Unpredictable backedge-taken count. ";
8106 }
8107
8108 OS << "\n"
8109 "Loop ";
8110 L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
8111 OS << ": ";
8112
8113 if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
8114 OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
8115 } else {
8116 OS << "Unpredictable max backedge-taken count. ";
8117 }
8118
8119 OS << "\n";
8120 }
8121
8122 void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
8123 // ScalarEvolution's implementation of the print method is to print
8124 // out SCEV values of all instructions that are interesting. Doing
8125 // this potentially causes it to create new SCEV objects though,
8126 // which technically conflicts with the const qualifier. This isn't
8127 // observable from outside the class though, so casting away the
8128 // const isn't dangerous.
8129 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
8130
8131 OS << "Classifying expressions for: ";
8132 F->printAsOperand(OS, /*PrintType=*/false);
8133 OS << "\n";
8134 for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
8135 if (isSCEVable(I->getType()) && !isa<CmpInst>(*I)) {
8136 OS << *I << '\n';
8137 OS << " --> ";
8138 const SCEV *SV = SE.getSCEV(&*I);
8139 SV->print(OS);
8140 if (!isa<SCEVCouldNotCompute>(SV)) {
8141 OS << " U: ";
8142 SE.getUnsignedRange(SV).print(OS);
8143 OS << " S: ";
8144 SE.getSignedRange(SV).print(OS);
8145 }
8146
8147 const Loop *L = LI->getLoopFor((*I).getParent());
8148
8149 const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
8150 if (AtUse != SV) {
8151 OS << " --> ";
8152 AtUse->print(OS);
8153 if (!isa<SCEVCouldNotCompute>(AtUse)) {
8154 OS << " U: ";
8155 SE.getUnsignedRange(AtUse).print(OS);
8156 OS << " S: ";
8157 SE.getSignedRange(AtUse).print(OS);
8158 }
8159 }
8160
8161 if (L) {
8162 OS << "\t\t" "Exits: ";
8163 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
8164 if (!SE.isLoopInvariant(ExitValue, L)) {
8165 OS << "<<Unknown>>";
8166 } else {
8167 OS << *ExitValue;
8168 }
8169 }
8170
8171 OS << "\n";
8172 }
8173
8174 OS << "Determining loop execution counts for: ";
8175 F->printAsOperand(OS, /*PrintType=*/false);
8176 OS << "\n";
8177 for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
8178 PrintLoopInfo(OS, &SE, *I);
8179 }
8180
8181 ScalarEvolution::LoopDisposition
8182 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
8183 auto &Values = LoopDispositions[S];
8184 for (auto &V : Values) {
8185 if (V.getPointer() == L)
8186 return V.getInt();
8187 }
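// Conservatively record LoopVariant for (S, L) before recursing so a cyclic
// query terminates; the entry is fixed up with the computed disposition
// below.  LoopDispositions is looked up again afterwards because the
// recursive computation may grow the map and invalidate the reference
// obtained above.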
8188 Values.emplace_back(L, LoopVariant);
8189 LoopDisposition D = computeLoopDisposition(S, L);
8190 auto &Values2 = LoopDispositions[S];
8191 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
8192 if (V.getPointer() == L) {
8193 V.setInt(D);
8194 break;
8195 }
8196 }
8197 return D;
8198 }
8199
8200 ScalarEvolution::LoopDisposition
8201 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
8202 switch (static_cast<SCEVTypes>(S->getSCEVType())) {
8203 case scConstant:
8204 return LoopInvariant;
8205 case scTruncate:
8206 case scZeroExtend:
8207 case scSignExtend:
8208 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
8209 case scAddRecExpr: {
8210 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
8211
8212 // If L is the addrec's loop, it's computable.
8213 if (AR->getLoop() == L)
8214 return LoopComputable;
8215
8216 // Add recurrences are never invariant in the function-body (null loop).
8217 if (!L)
8218 return LoopVariant;
8219
8220 // This recurrence is variant w.r.t. L if L contains AR's loop.
8221 if (L->contains(AR->getLoop()))
8222 return LoopVariant;
8223
8224 // This recurrence is invariant w.r.t. L if AR's loop contains L.
8225 if (AR->getLoop()->contains(L))
8226 return LoopInvariant;
8227
8228 // This recurrence is variant w.r.t. L if any of its operands
8229 // are variant.
8230 for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
8231 I != E; ++I)
8232 if (!isLoopInvariant(*I, L))
8233 return LoopVariant;
8234
8235 // Otherwise it's loop-invariant.
8236 return LoopInvariant;
8237 }
8238 case scAddExpr:
8239 case scMulExpr:
8240 case scUMaxExpr:
8241 case scSMaxExpr: {
8242 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
8243 bool HasVarying = false;
8244 for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
8245 I != E; ++I) {
8246 LoopDisposition D = getLoopDisposition(*I, L);
8247 if (D == LoopVariant)
8248 return LoopVariant;
8249 if (D == LoopComputable)
8250 HasVarying = true;
8251 }
8252 return HasVarying ? LoopComputable : LoopInvariant;
8253 }
8254 case scUDivExpr: {
8255 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
8256 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
8257 if (LD == LoopVariant)
8258 return LoopVariant;
8259 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
8260 if (RD == LoopVariant)
8261 return LoopVariant;
8262 return (LD == LoopInvariant && RD == LoopInvariant) ?
8263 LoopInvariant : LoopComputable;
8264 }
8265 case scUnknown:
8266 // All non-instruction values are loop invariant. All instructions are loop
8267 // invariant if they are not contained in the specified loop.
8268 // Instructions are never considered invariant in the function body
8269 // (null loop) because they are defined within the "loop".
8270 if (Instruction *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
8271 return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
8272 return LoopInvariant;
8273 case scCouldNotCompute:
8274 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
8275 }
8276 llvm_unreachable("Unknown SCEV kind!");
8277 }
8278
8279 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
8280 return getLoopDisposition(S, L) == LoopInvariant;
8281 }
8282
8283 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
8284 return getLoopDisposition(S, L) == LoopComputable;
8285 }
8286
8287 ScalarEvolution::BlockDisposition
8288 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
8289 auto &Values = BlockDispositions[S];
8290 for (auto &V : Values) {
8291 if (V.getPointer() == BB)
8292 return V.getInt();
8293 }
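// Conservative placeholder first, as in getLoopDisposition above.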
8294 Values.emplace_back(BB, DoesNotDominateBlock);
8295 BlockDisposition D = computeBlockDisposition(S, BB);
8296 auto &Values2 = BlockDispositions[S];
8297 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
8298 if (V.getPointer() == BB) {
8299 V.setInt(D);
8300 break;
8301 }
8302 }
8303 return D;
8304 }
8305
8306 ScalarEvolution::BlockDisposition
8307 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
8308 switch (static_cast<SCEVTypes>(S->getSCEVType())) {
8309 case scConstant:
8310 return ProperlyDominatesBlock;
8311 case scTruncate:
8312 case scZeroExtend:
8313 case scSignExtend:
8314 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
8315 case scAddRecExpr: {
8316 // This uses a "dominates" query instead of "properly dominates" query
8317 // to test for proper dominance too, because the instruction which
8318 // produces the addrec's value is a PHI, and a PHI effectively properly
8319 // dominates its entire containing block.
8320 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
8321 if (!DT->dominates(AR->getLoop()->getHeader(), BB))
8322 return DoesNotDominateBlock;
8323 }
8324 // FALL THROUGH into SCEVNAryExpr handling.
8325 case scAddExpr:
8326 case scMulExpr:
8327 case scUMaxExpr:
8328 case scSMaxExpr: {
8329 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
8330 bool Proper = true;
8331 for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
8332 I != E; ++I) {
8333 BlockDisposition D = getBlockDisposition(*I, BB);
8334 if (D == DoesNotDominateBlock)
8335 return DoesNotDominateBlock;
8336 if (D == DominatesBlock)
8337 Proper = false;
8338 }
8339 return Proper ? ProperlyDominatesBlock : DominatesBlock;
8340 }
8341 case scUDivExpr: {
8342 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
8343 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
8344 BlockDisposition LD = getBlockDisposition(LHS, BB);
8345 if (LD == DoesNotDominateBlock)
8346 return DoesNotDominateBlock;
8347 BlockDisposition RD = getBlockDisposition(RHS, BB);
8348 if (RD == DoesNotDominateBlock)
8349 return DoesNotDominateBlock;
8350 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
8351 ProperlyDominatesBlock : DominatesBlock;
8352 }
8353 case scUnknown:
8354 if (Instruction *I =
8355 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
8356 if (I->getParent() == BB)
8357 return DominatesBlock;
8358 if (DT->properlyDominates(I->getParent(), BB))
8359 return ProperlyDominatesBlock;
8360 return DoesNotDominateBlock;
8361 }
8362 return ProperlyDominatesBlock;
8363 case scCouldNotCompute:
8364 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
8365 }
8366 llvm_unreachable("Unknown SCEV kind!");
8367 }
8368
8369 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
8370 return getBlockDisposition(S, BB) >= DominatesBlock;
8371 }
8372
8373 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
8374 return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
8375 }
8376
8377 namespace {
8378 // Search for a SCEV expression node within an expression tree.
8379 // Implements SCEVTraversal::Visitor.
8380 struct SCEVSearch {
8381 const SCEV *Node;
8382 bool IsFound;
8383
8384 SCEVSearch(const SCEV *N): Node(N), IsFound(false) {}
8385
8386 bool follow(const SCEV *S) {
8387 IsFound |= (S == Node);
8388 return !IsFound;
8389 }
8390 bool isDone() const { return IsFound; }
8391 };
8392 }
8393
8394 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
8395 SCEVSearch Search(Op);
8396 visitAll(S, Search);
8397 return Search.IsFound;
8398 }
8399
8400 void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
8401 ValuesAtScopes.erase(S);
8402 LoopDispositions.erase(S);
8403 BlockDispositions.erase(S);
8404 UnsignedRanges.erase(S);
8405 SignedRanges.erase(S);
8406
8407 for (DenseMap<const Loop*, BackedgeTakenInfo>::iterator I =
8408 BackedgeTakenCounts.begin(), E = BackedgeTakenCounts.end(); I != E; ) {
8409 BackedgeTakenInfo &BEInfo = I->second;
8410 if (BEInfo.hasOperand(S, this)) {
8411 BEInfo.clear();
8412 BackedgeTakenCounts.erase(I++);
8413 }
8414 else
8415 ++I;
8416 }
8417 }
8418
8419 typedef DenseMap<const Loop *, std::string> VerifyMap;
8420
8421 /// replaceSubString - Replaces all occurrences of From in Str with To.
8422 static void replaceSubString(std::string &Str, StringRef From, StringRef To) {
8423 size_t Pos = 0;
8424 while ((Pos = Str.find(From, Pos)) != std::string::npos) {
8425 Str.replace(Pos, From.size(), To.data(), To.size());
8426 Pos += To.size();
8427 }
8428 }
8429
8430 /// getLoopBackedgeTakenCounts - Helper method for verifyAnalysis.
8431 static void
8432 getLoopBackedgeTakenCounts(Loop *L, VerifyMap &Map, ScalarEvolution &SE) {
8433 for (Loop::reverse_iterator I = L->rbegin(), E = L->rend(); I != E; ++I) {
8434 getLoopBackedgeTakenCounts(*I, Map, SE); // recurse.
8435
8436 std::string &S = Map[L];
8437 if (S.empty()) {
8438 raw_string_ostream OS(S);
8439 SE.getBackedgeTakenCount(L)->print(OS);
8440
8441 // false and 0 are semantically equivalent. This can happen in dead loops.
8442 replaceSubString(OS.str(), "false", "0");
8443 // Remove wrap flags; their use in SCEV is highly fragile.
8444 // FIXME: Remove this when SCEV gets smarter about them.
8445 replaceSubString(OS.str(), "<nw>", "");
8446 replaceSubString(OS.str(), "<nsw>", "");
8447 replaceSubString(OS.str(), "<nuw>", "");
8448 }
8449 }
8450 }
8451
8452 void ScalarEvolution::verifyAnalysis() const {
8453 if (!VerifySCEV)
8454 return;
8455
8456 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
8457
8458 // Gather stringified backedge taken counts for all loops using SCEV's caches.
8459 // FIXME: It would be much better to store actual values instead of strings,
8460 // but SCEV pointers will change if we drop the caches.
8461 VerifyMap BackedgeDumpsOld, BackedgeDumpsNew;
8462 for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I)
8463 getLoopBackedgeTakenCounts(*I, BackedgeDumpsOld, SE);
8464
8465 // Gather stringified backedge taken counts for all loops without using
8466 // SCEV's caches.
8467 SE.releaseMemory();
8468 for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I)
8469 getLoopBackedgeTakenCounts(*I, BackedgeDumpsNew, SE);
8470
8471 // Now compare whether they're the same with and without caches. This allows
8472 // verifying that no pass changed the cache.
8473 assert(BackedgeDumpsOld.size() == BackedgeDumpsNew.size() &&
8474 "New loops suddenly appeared!");
8475
8476 for (VerifyMap::iterator OldI = BackedgeDumpsOld.begin(),
8477 OldE = BackedgeDumpsOld.end(),
8478 NewI = BackedgeDumpsNew.begin();
8479 OldI != OldE; ++OldI, ++NewI) {
8480 assert(OldI->first == NewI->first && "Loop order changed!");
8481
8482 // Compare the stringified SCEVs. We don't care if an undef backedge-taken
8483 // count changes.
8484 // FIXME: We currently ignore SCEV changes from/to CouldNotCompute. Such a
8485 // change can mean that a pass is buggy or that SCEV has to learn a new
8486 // pattern, but it is usually not harmful.
8487 if (OldI->second != NewI->second &&
8488 OldI->second.find("undef") == std::string::npos &&
8489 NewI->second.find("undef") == std::string::npos &&
8490 OldI->second != "***COULDNOTCOMPUTE***" &&
8491 NewI->second != "***COULDNOTCOMPUTE***") {
8492 dbgs() << "SCEVValidator: SCEV for loop '"
8493 << OldI->first->getHeader()->getName()
8494 << "' changed from '" << OldI->second
8495 << "' to '" << NewI->second << "'!\n";
8496 std::abort();
8497 }
8498 }
8499
8500 // TODO: Verify more things.
8501 }
8502