1 //===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the implementation of the scalar evolution expander,
11 // which is used to generate the code corresponding to a given scalar evolution
12 // expression.
13 //
14 //===----------------------------------------------------------------------===//
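
// A rough usage sketch (illustrative only; the exact SCEVExpander constructor
// and expandCodeFor overloads are declared in ScalarEvolutionExpander.h and
// have varied across LLVM revisions):
//
//   SCEVExpander Expander(SE, DL, "scev");
//   const SCEV *Expr = SE.getSCEV(SomeValue);
//   Value *Code = Expander.expandCodeFor(Expr, Expr->getType(), InsertPt);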
15
16 #include "llvm/Analysis/ScalarEvolutionExpander.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SmallSet.h"
19 #include "llvm/Analysis/InstructionSimplify.h"
20 #include "llvm/Analysis/LoopInfo.h"
21 #include "llvm/Analysis/TargetTransformInfo.h"
22 #include "llvm/IR/DataLayout.h"
23 #include "llvm/IR/Dominators.h"
24 #include "llvm/IR/IntrinsicInst.h"
25 #include "llvm/IR/LLVMContext.h"
26 #include "llvm/IR/Module.h"
27 #include "llvm/Support/Debug.h"
28 #include "llvm/Support/raw_ostream.h"
29
30 using namespace llvm;
31
32 /// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
33 /// reusing an existing cast if a suitable one exists, moving an existing
34 /// cast if a suitable one exists but isn't in the right place, or
35 /// creating a new one.
36 Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
37 Instruction::CastOps Op,
38 BasicBlock::iterator IP) {
39 // This function must be called with the builder having a valid insertion
40 // point. It doesn't need to be the actual IP where the uses of the returned
41 // cast will be added, but it must dominate such IP.
42 // We use this precondition to produce a cast that will dominate all its
43 // uses. In particular, this is crucial for the case where the builder's
44 // insertion point *is* the point where we were asked to put the cast.
45 // Since we don't know the builder's insertion point is actually
46 // where the uses will be added (only that it dominates it), we are
47 // not allowed to move it.
48 BasicBlock::iterator BIP = Builder.GetInsertPoint();
49
50 Instruction *Ret = nullptr;
51
52 // Check to see if there is already a cast!
53 for (User *U : V->users())
54 if (U->getType() == Ty)
55 if (CastInst *CI = dyn_cast<CastInst>(U))
56 if (CI->getOpcode() == Op) {
57 // If the cast isn't where we want it, create a new cast at IP.
58 // Likewise, do not reuse a cast at BIP because it must dominate
59 // instructions that might be inserted before BIP.
60 if (BasicBlock::iterator(CI) != IP || BIP == IP) {
61 // Create a new cast, and leave the old cast in place in case
62 // it is being used as an insert point. Clear its operand
63 // so that it doesn't hold anything live.
64 Ret = CastInst::Create(Op, V, Ty, "", IP);
65 Ret->takeName(CI);
66 CI->replaceAllUsesWith(Ret);
67 CI->setOperand(0, UndefValue::get(V->getType()));
68 break;
69 }
70 Ret = CI;
71 break;
72 }
73
74 // Create a new cast.
75 if (!Ret)
76 Ret = CastInst::Create(Op, V, Ty, V->getName(), IP);
77
78 // We assert at the end of the function since IP might point to an
79 // instruction with different dominance properties than a cast
80 // (an invoke for example) and not dominate BIP (but the cast does).
81 assert(SE.DT->dominates(Ret, BIP));
82
83 rememberInstruction(Ret);
84 return Ret;
85 }
86
87 /// InsertNoopCastOfTo - Insert a cast of V to the specified type,
88 /// which must be possible with a noop cast, doing what we can to share
89 /// the casts.
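/// For example, assuming 64-bit pointers, an i8*-to-i64 ptrtoint and the
/// i64-to-i8* inttoptr back are both no-op casts and legal here, as is an
/// i8*-to-i32* bitcast; a trunc or zext would change the bit width and is
/// rejected by the assertions below.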
90 Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
91 Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
92 assert((Op == Instruction::BitCast ||
93 Op == Instruction::PtrToInt ||
94 Op == Instruction::IntToPtr) &&
95 "InsertNoopCastOfTo cannot perform non-noop casts!");
96 assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
97 "InsertNoopCastOfTo cannot change sizes!");
98
99 // Short-circuit unnecessary bitcasts.
100 if (Op == Instruction::BitCast) {
101 if (V->getType() == Ty)
102 return V;
103 if (CastInst *CI = dyn_cast<CastInst>(V)) {
104 if (CI->getOperand(0)->getType() == Ty)
105 return CI->getOperand(0);
106 }
107 }
108 // Short-circuit unnecessary inttoptr<->ptrtoint casts.
109 if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
110 SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
111 if (CastInst *CI = dyn_cast<CastInst>(V))
112 if ((CI->getOpcode() == Instruction::PtrToInt ||
113 CI->getOpcode() == Instruction::IntToPtr) &&
114 SE.getTypeSizeInBits(CI->getType()) ==
115 SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
116 return CI->getOperand(0);
117 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
118 if ((CE->getOpcode() == Instruction::PtrToInt ||
119 CE->getOpcode() == Instruction::IntToPtr) &&
120 SE.getTypeSizeInBits(CE->getType()) ==
121 SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
122 return CE->getOperand(0);
123 }
124
125 // Fold a cast of a constant.
126 if (Constant *C = dyn_cast<Constant>(V))
127 return ConstantExpr::getCast(Op, C, Ty);
128
129 // Cast the argument at the beginning of the entry block, after
130 // any bitcasts of other arguments.
131 if (Argument *A = dyn_cast<Argument>(V)) {
132 BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
133 while ((isa<BitCastInst>(IP) &&
134 isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
135 cast<BitCastInst>(IP)->getOperand(0) != A) ||
136 isa<DbgInfoIntrinsic>(IP) ||
137 isa<LandingPadInst>(IP))
138 ++IP;
139 return ReuseOrCreateCast(A, Ty, Op, IP);
140 }
141
142 // Cast the instruction immediately after the instruction.
143 Instruction *I = cast<Instruction>(V);
144 BasicBlock::iterator IP = I; ++IP;
145 if (InvokeInst *II = dyn_cast<InvokeInst>(I))
146 IP = II->getNormalDest()->begin();
147 while (isa<PHINode>(IP) || isa<LandingPadInst>(IP))
148 ++IP;
149 return ReuseOrCreateCast(I, Ty, Op, IP);
150 }
151
152 /// InsertBinop - Insert the specified binary operator, doing a small amount
153 /// of work to avoid inserting an obviously redundant operation.
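/// For instance, if an identical "add i64 %a, %b" was emitted within the last
/// few (non-debug) instructions before the insertion point, that instruction
/// is returned instead of creating a duplicate.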
154 Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
155 Value *LHS, Value *RHS) {
156 // Fold a binop with constant operands.
157 if (Constant *CLHS = dyn_cast<Constant>(LHS))
158 if (Constant *CRHS = dyn_cast<Constant>(RHS))
159 return ConstantExpr::get(Opcode, CLHS, CRHS);
160
161 // Do a quick scan to see if we have this binop nearby. If so, reuse it.
162 unsigned ScanLimit = 6;
163 BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
164 // Scanning starts from the last instruction before the insertion point.
165 BasicBlock::iterator IP = Builder.GetInsertPoint();
166 if (IP != BlockBegin) {
167 --IP;
168 for (; ScanLimit; --IP, --ScanLimit) {
169 // Don't count dbg.value against the ScanLimit, to avoid perturbing the
170 // generated code.
171 if (isa<DbgInfoIntrinsic>(IP))
172 ScanLimit++;
173 if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
174 IP->getOperand(1) == RHS)
175 return IP;
176 if (IP == BlockBegin) break;
177 }
178 }
179
180 // Save the original insertion point so we can restore it when we're done.
181 DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
182 BuilderType::InsertPointGuard Guard(Builder);
183
184 // Move the insertion point out of as many loops as we can.
185 while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
186 if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
187 BasicBlock *Preheader = L->getLoopPreheader();
188 if (!Preheader) break;
189
190 // Ok, move up a level.
191 Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
192 }
193
194 // If we haven't found this binop, insert it.
195 Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
196 BO->setDebugLoc(Loc);
197 rememberInstruction(BO);
198
199 return BO;
200 }
201
202 /// FactorOutConstant - Test if S is divisible by Factor, using signed
203 /// division. If so, update S with Factor divided out and return true.
204 /// S need not be evenly divisible if a reasonable remainder can be
205 /// computed.
206 /// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
207 /// unnecessary; in its place, just signed-divide Ops[i] by the scale and
208 /// check to see if the divide was folded.
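/// For example, with Factor == 4, a constant 10 becomes 2 with the leftover 2
/// added to Remainder, and the addrec {8,+,4} becomes {2,+,1}; an addrec whose
/// step is not evenly divisible (such as {8,+,6}) is rejected at this scale.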
209 static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder,
210 const SCEV *Factor, ScalarEvolution &SE,
211 const DataLayout &DL) {
212 // Everything is divisible by one.
213 if (Factor->isOne())
214 return true;
215
216 // x/x == 1.
217 if (S == Factor) {
218 S = SE.getConstant(S->getType(), 1);
219 return true;
220 }
221
222 // For a Constant, check for a multiple of the given factor.
223 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
224 // 0/x == 0.
225 if (C->isZero())
226 return true;
227 // Check for divisibility.
228 if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
229 ConstantInt *CI =
230 ConstantInt::get(SE.getContext(),
231 C->getValue()->getValue().sdiv(
232 FC->getValue()->getValue()));
233 // If the quotient is zero and the remainder is non-zero, reject
234 // the value at this scale. It will be considered for subsequent
235 // smaller scales.
236 if (!CI->isZero()) {
237 const SCEV *Div = SE.getConstant(CI);
238 S = Div;
239 Remainder =
240 SE.getAddExpr(Remainder,
241 SE.getConstant(C->getValue()->getValue().srem(
242 FC->getValue()->getValue())));
243 return true;
244 }
245 }
246 }
247
248 // In a Mul, check if there is a constant operand which is a multiple
249 // of the given factor.
250 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
251 // Size is known, check if there is a constant operand which is a multiple
252 // of the given factor. If so, we can factor it.
253 const SCEVConstant *FC = cast<SCEVConstant>(Factor);
254 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
255 if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
256 SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
257 NewMulOps[0] = SE.getConstant(
258 C->getValue()->getValue().sdiv(FC->getValue()->getValue()));
259 S = SE.getMulExpr(NewMulOps);
260 return true;
261 }
262 }
263
264 // In an AddRec, check if both start and step are divisible.
265 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
266 const SCEV *Step = A->getStepRecurrence(SE);
267 const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
268 if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
269 return false;
270 if (!StepRem->isZero())
271 return false;
272 const SCEV *Start = A->getStart();
273 if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
274 return false;
275 S = SE.getAddRecExpr(Start, Step, A->getLoop(),
276 A->getNoWrapFlags(SCEV::FlagNW));
277 return true;
278 }
279
280 return false;
281 }
282
283 /// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
284 /// is the number of SCEVAddRecExprs present, which are kept at the end of
285 /// the list.
286 ///
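/// For instance, given (%a, 5, %c, {%x,+,1}) the non-addrec part (%a, 5, %c)
/// is folded and re-sorted by ScalarEvolution, and the addrec {%x,+,1} is
/// re-appended at the end.
///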
287 static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
288 Type *Ty,
289 ScalarEvolution &SE) {
290 unsigned NumAddRecs = 0;
291 for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
292 ++NumAddRecs;
293 // Group Ops into non-addrecs and addrecs.
294 SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
295 SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
296 // Let ScalarEvolution sort and simplify the non-addrecs list.
297 const SCEV *Sum = NoAddRecs.empty() ?
298 SE.getConstant(Ty, 0) :
299 SE.getAddExpr(NoAddRecs);
300 // If it returned an add, use the operands. Otherwise it simplified
301 // the sum into a single value, so just use that.
302 Ops.clear();
303 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
304 Ops.append(Add->op_begin(), Add->op_end());
305 else if (!Sum->isZero())
306 Ops.push_back(Sum);
307 // Then append the addrecs.
308 Ops.append(AddRecs.begin(), AddRecs.end());
309 }
310
311 /// SplitAddRecs - Flatten a list of add operands, moving addrec start values
312 /// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
313 /// This helps expose more opportunities for folding parts of the expressions
314 /// into GEP indices.
315 ///
316 static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
317 Type *Ty,
318 ScalarEvolution &SE) {
319 // Find the addrecs.
320 SmallVector<const SCEV *, 8> AddRecs;
321 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
322 while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
323 const SCEV *Start = A->getStart();
324 if (Start->isZero()) break;
325 const SCEV *Zero = SE.getConstant(Ty, 0);
326 AddRecs.push_back(SE.getAddRecExpr(Zero,
327 A->getStepRecurrence(SE),
328 A->getLoop(),
329 A->getNoWrapFlags(SCEV::FlagNW)));
330 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
331 Ops[i] = Zero;
332 Ops.append(Add->op_begin(), Add->op_end());
333 e += Add->getNumOperands();
334 } else {
335 Ops[i] = Start;
336 }
337 }
338 if (!AddRecs.empty()) {
339 // Add the addrecs onto the end of the list.
340 Ops.append(AddRecs.begin(), AddRecs.end());
341 // Resort the operand list, moving any constants to the front.
342 SimplifyAddOperands(Ops, Ty, SE);
343 }
344 }
345
346 /// expandAddToGEP - Expand an addition expression with a pointer type into
347 /// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
348 /// BasicAliasAnalysis and other passes analyze the result. See the rules
349 /// for getelementptr vs. inttoptr in
350 /// http://llvm.org/docs/LangRef.html#pointeraliasing
351 /// for details.
352 ///
353 /// Design note: The correctness of using getelementptr here depends on
354 /// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
355 /// they may introduce pointer arithmetic which may not be safely converted
356 /// into getelementptr.
357 ///
358 /// Design note: It might seem desirable for this function to be more
359 /// loop-aware. If some of the indices are loop-invariant while others
360 /// aren't, it might seem desirable to emit multiple GEPs, keeping the
361 /// loop-invariant portions of the overall computation outside the loop.
362 /// However, there are a few reasons this is not done here. Hoisting simple
363 /// arithmetic is a low-level optimization that often isn't very
364 /// important until late in the optimization process. In fact, passes
365 /// like InstructionCombining will combine GEPs, even if it means
366 /// pushing loop-invariant computation down into loops, so even if the
367 /// GEPs were split here, the work would quickly be undone. The
368 /// LoopStrengthReduction pass, which is usually run quite late (and
369 /// after the last InstructionCombining pass), takes care of hoisting
370 /// loop-invariant portions of expressions, after considering what
371 /// can be folded using target addressing modes.
372 ///
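/// As a rough illustration: expanding the address %p + 4*%i, where %p has type
/// i32*, should ideally become
///   %scevgep = getelementptr i32, i32* %p, i64 %i
/// while the fallback path below bitcasts the base to i8* and emits an
/// "uglygep" indexed by the raw byte offset.
///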
373 Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
374 const SCEV *const *op_end,
375 PointerType *PTy,
376 Type *Ty,
377 Value *V) {
378 Type *OriginalElTy = PTy->getElementType();
379 Type *ElTy = OriginalElTy;
380 SmallVector<Value *, 4> GepIndices;
381 SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
382 bool AnyNonZeroIndices = false;
383
384 // Split AddRecs up into parts as either of the parts may be usable
385 // without the other.
386 SplitAddRecs(Ops, Ty, SE);
387
388 Type *IntPtrTy = DL.getIntPtrType(PTy);
389
390 // Descend down the pointer's type and attempt to convert the other
391 // operands into GEP indices, at each level. The first index in a GEP
392 // indexes into the array implied by the pointer operand; the rest of
393 // the indices index into the element or field type selected by the
394 // preceding index.
395 for (;;) {
396 // If the scale size is not 0, attempt to factor out a scale for
397 // array indexing.
398 SmallVector<const SCEV *, 8> ScaledOps;
399 if (ElTy->isSized()) {
400 const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy);
401 if (!ElSize->isZero()) {
402 SmallVector<const SCEV *, 8> NewOps;
403 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
404 const SCEV *Op = Ops[i];
405 const SCEV *Remainder = SE.getConstant(Ty, 0);
406 if (FactorOutConstant(Op, Remainder, ElSize, SE, DL)) {
407 // Op now has ElSize factored out.
408 ScaledOps.push_back(Op);
409 if (!Remainder->isZero())
410 NewOps.push_back(Remainder);
411 AnyNonZeroIndices = true;
412 } else {
413 // The operand was not divisible, so add it to the list of operands
414 // we'll scan next iteration.
415 NewOps.push_back(Ops[i]);
416 }
417 }
418 // If we made any changes, update Ops.
419 if (!ScaledOps.empty()) {
420 Ops = NewOps;
421 SimplifyAddOperands(Ops, Ty, SE);
422 }
423 }
424 }
425
426 // Record the scaled array index for this level of the type. If
427 // we didn't find any operands that could be factored, tentatively
428 // assume that element zero was selected (since the zero offset
429 // would obviously be folded away).
430 Value *Scaled = ScaledOps.empty() ?
431 Constant::getNullValue(Ty) :
432 expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
433 GepIndices.push_back(Scaled);
434
435 // Collect struct field index operands.
436 while (StructType *STy = dyn_cast<StructType>(ElTy)) {
437 bool FoundFieldNo = false;
438 // An empty struct has no fields.
439 if (STy->getNumElements() == 0) break;
440 // Field offsets are known. See if a constant offset falls within any of
441 // the struct fields.
442 if (Ops.empty())
443 break;
444 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
445 if (SE.getTypeSizeInBits(C->getType()) <= 64) {
446 const StructLayout &SL = *DL.getStructLayout(STy);
447 uint64_t FullOffset = C->getValue()->getZExtValue();
448 if (FullOffset < SL.getSizeInBytes()) {
449 unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
450 GepIndices.push_back(
451 ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
452 ElTy = STy->getTypeAtIndex(ElIdx);
453 Ops[0] =
454 SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
455 AnyNonZeroIndices = true;
456 FoundFieldNo = true;
457 }
458 }
459 // If no struct field offsets were found, tentatively assume that
460 // field zero was selected (since the zero offset would obviously
461 // be folded away).
462 if (!FoundFieldNo) {
463 ElTy = STy->getTypeAtIndex(0u);
464 GepIndices.push_back(
465 Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
466 }
467 }
468
469 if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
470 ElTy = ATy->getElementType();
471 else
472 break;
473 }
474
475 // If none of the operands were convertible to proper GEP indices, cast
476 // the base to i8* and do an ugly getelementptr with that. It's still
477 // better than ptrtoint+arithmetic+inttoptr at least.
478 if (!AnyNonZeroIndices) {
479 // Cast the base to i8*.
480 V = InsertNoopCastOfTo(V,
481 Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));
482
483 assert(!isa<Instruction>(V) ||
484 SE.DT->dominates(cast<Instruction>(V), Builder.GetInsertPoint()));
485
486 // Expand the operands for a plain byte offset.
487 Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);
488
489 // Fold a GEP with constant operands.
490 if (Constant *CLHS = dyn_cast<Constant>(V))
491 if (Constant *CRHS = dyn_cast<Constant>(Idx))
492 return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ty->getContext()),
493 CLHS, CRHS);
494
495 // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
496 unsigned ScanLimit = 6;
497 BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
498 // Scanning starts from the last instruction before the insertion point.
499 BasicBlock::iterator IP = Builder.GetInsertPoint();
500 if (IP != BlockBegin) {
501 --IP;
502 for (; ScanLimit; --IP, --ScanLimit) {
503 // Don't count dbg.value against the ScanLimit, to avoid perturbing the
504 // generated code.
505 if (isa<DbgInfoIntrinsic>(IP))
506 ScanLimit++;
507 if (IP->getOpcode() == Instruction::GetElementPtr &&
508 IP->getOperand(0) == V && IP->getOperand(1) == Idx)
509 return IP;
510 if (IP == BlockBegin) break;
511 }
512 }
513
514 // Save the original insertion point so we can restore it when we're done.
515 BuilderType::InsertPointGuard Guard(Builder);
516
517 // Move the insertion point out of as many loops as we can.
518 while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
519 if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
520 BasicBlock *Preheader = L->getLoopPreheader();
521 if (!Preheader) break;
522
523 // Ok, move up a level.
524 Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
525 }
526
527 // Emit a GEP.
528 Value *GEP = Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "uglygep");
529 rememberInstruction(GEP);
530
531 return GEP;
532 }
533
534 // Save the original insertion point so we can restore it when we're done.
535 BuilderType::InsertPoint SaveInsertPt = Builder.saveIP();
536
537 // Move the insertion point out of as many loops as we can.
538 while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
539 if (!L->isLoopInvariant(V)) break;
540
541 bool AnyIndexNotLoopInvariant = false;
542 for (SmallVectorImpl<Value *>::const_iterator I = GepIndices.begin(),
543 E = GepIndices.end(); I != E; ++I)
544 if (!L->isLoopInvariant(*I)) {
545 AnyIndexNotLoopInvariant = true;
546 break;
547 }
548 if (AnyIndexNotLoopInvariant)
549 break;
550
551 BasicBlock *Preheader = L->getLoopPreheader();
552 if (!Preheader) break;
553
554 // Ok, move up a level.
555 Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
556 }
557
558 // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
559 // because ScalarEvolution may have changed the address arithmetic to
560 // compute a value which is beyond the end of the allocated object.
561 Value *Casted = V;
562 if (V->getType() != PTy)
563 Casted = InsertNoopCastOfTo(Casted, PTy);
564 Value *GEP = Builder.CreateGEP(OriginalElTy, Casted,
565 GepIndices,
566 "scevgep");
567 Ops.push_back(SE.getUnknown(GEP));
568 rememberInstruction(GEP);
569
570 // Restore the original insert point.
571 Builder.restoreIP(SaveInsertPt);
572
573 return expand(SE.getAddExpr(Ops));
574 }
575
576 /// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
577 /// SCEV expansion. If they are nested, this is the most nested. If they are
578 /// neighboring, pick the later one.
579 static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
580 DominatorTree &DT) {
581 if (!A) return B;
582 if (!B) return A;
583 if (A->contains(B)) return B;
584 if (B->contains(A)) return A;
585 if (DT.dominates(A->getHeader(), B->getHeader())) return B;
586 if (DT.dominates(B->getHeader(), A->getHeader())) return A;
587 return A; // Arbitrarily break the tie.
588 }
589
590 /// getRelevantLoop - Get the most relevant loop associated with the given
591 /// expression, according to PickMostRelevantLoop.
592 const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
593 // Test whether we've already computed the most relevant loop for this SCEV.
594 std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair =
595 RelevantLoops.insert(std::make_pair(S, nullptr));
596 if (!Pair.second)
597 return Pair.first->second;
598
599 if (isa<SCEVConstant>(S))
600 // A constant has no relevant loops.
601 return nullptr;
602 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
603 if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
604 return Pair.first->second = SE.LI->getLoopFor(I->getParent());
605 // A non-instruction has no relevant loops.
606 return nullptr;
607 }
608 if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
609 const Loop *L = nullptr;
610 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
611 L = AR->getLoop();
612 for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
613 I != E; ++I)
614 L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT);
615 return RelevantLoops[N] = L;
616 }
617 if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
618 const Loop *Result = getRelevantLoop(C->getOperand());
619 return RelevantLoops[C] = Result;
620 }
621 if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
622 const Loop *Result =
623 PickMostRelevantLoop(getRelevantLoop(D->getLHS()),
624 getRelevantLoop(D->getRHS()),
625 *SE.DT);
626 return RelevantLoops[D] = Result;
627 }
628 llvm_unreachable("Unexpected SCEV type!");
629 }
630
631 namespace {
632
633 /// LoopCompare - Compare loops by PickMostRelevantLoop.
634 class LoopCompare {
635 DominatorTree &DT;
636 public:
637 explicit LoopCompare(DominatorTree &dt) : DT(dt) {}
638
639 bool operator()(std::pair<const Loop *, const SCEV *> LHS,
640 std::pair<const Loop *, const SCEV *> RHS) const {
641 // Keep pointer operands sorted at the end.
642 if (LHS.second->getType()->isPointerTy() !=
643 RHS.second->getType()->isPointerTy())
644 return LHS.second->getType()->isPointerTy();
645
646 // Compare loops with PickMostRelevantLoop.
647 if (LHS.first != RHS.first)
648 return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;
649
650 // If one operand is a non-constant negative and the other is not,
651 // put the non-constant negative on the right so that a sub can
652 // be used instead of a negate and add.
653 if (LHS.second->isNonConstantNegative()) {
654 if (!RHS.second->isNonConstantNegative())
655 return false;
656 } else if (RHS.second->isNonConstantNegative())
657 return true;
658
659 // Otherwise they are equivalent according to this comparison.
660 return false;
661 }
662 };
663
664 }
665
666 Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
667 Type *Ty = SE.getEffectiveSCEVType(S->getType());
668
669 // Collect all the add operands in a loop, along with their associated loops.
670 // Iterate in reverse so that constants are emitted last, all else equal, and
671 // so that pointer operands are inserted first, which the code below relies on
672 // to form more involved GEPs.
673 SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
674 for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
675 E(S->op_begin()); I != E; ++I)
676 OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));
677
678 // Sort by loop. Use a stable sort so that constants follow non-constants and
679 // pointer operands precede non-pointer operands.
680 std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));
681
682 // Emit instructions to add all the operands. Hoist as much as possible
683 // out of loops, and form meaningful getelementptrs where possible.
684 Value *Sum = nullptr;
685 for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
686 I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
687 const Loop *CurLoop = I->first;
688 const SCEV *Op = I->second;
689 if (!Sum) {
690 // This is the first operand. Just expand it.
691 Sum = expand(Op);
692 ++I;
693 } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
694 // The running sum expression is a pointer. Try to form a getelementptr
695 // at this level with that as the base.
696 SmallVector<const SCEV *, 4> NewOps;
697 for (; I != E && I->first == CurLoop; ++I) {
698 // If the operand is a SCEVUnknown of a non-instruction value, peek through
699 // it, to enable more of it to be folded into the GEP.
700 const SCEV *X = I->second;
701 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
702 if (!isa<Instruction>(U->getValue()))
703 X = SE.getSCEV(U->getValue());
704 NewOps.push_back(X);
705 }
706 Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
707 } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
708 // The running sum is an integer, and there's a pointer at this level.
709 // Try to form a getelementptr. If the running sum is instructions,
710 // use a SCEVUnknown to avoid re-analyzing them.
711 SmallVector<const SCEV *, 4> NewOps;
712 NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
713 SE.getSCEV(Sum));
714 for (++I; I != E && I->first == CurLoop; ++I)
715 NewOps.push_back(I->second);
716 Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
717 } else if (Op->isNonConstantNegative()) {
718 // Instead of doing a negate and add, just do a subtract.
719 Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
720 Sum = InsertNoopCastOfTo(Sum, Ty);
721 Sum = InsertBinop(Instruction::Sub, Sum, W);
722 ++I;
723 } else {
724 // A simple add.
725 Value *W = expandCodeFor(Op, Ty);
726 Sum = InsertNoopCastOfTo(Sum, Ty);
727 // Canonicalize a constant to the RHS.
728 if (isa<Constant>(Sum)) std::swap(Sum, W);
729 Sum = InsertBinop(Instruction::Add, Sum, W);
730 ++I;
731 }
732 }
733
734 return Sum;
735 }
736
737 Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
738 Type *Ty = SE.getEffectiveSCEVType(S->getType());
739
740 // Collect all the mul operands in a loop, along with their associated loops.
741 // Iterate in reverse so that constants are emitted last, all else equal.
742 SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
743 for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
744 E(S->op_begin()); I != E; ++I)
745 OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));
746
747 // Sort by loop. Use a stable sort so that constants follow non-constants.
748 std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));
749
750 // Emit instructions to mul all the operands. Hoist as much as possible
751 // out of loops.
752 Value *Prod = nullptr;
753 for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
754 I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
755 const SCEV *Op = I->second;
756 if (!Prod) {
757 // This is the first operand. Just expand it.
758 Prod = expand(Op);
759 ++I;
760 } else if (Op->isAllOnesValue()) {
761 // Instead of doing a multiply by negative one, just do a negate.
762 Prod = InsertNoopCastOfTo(Prod, Ty);
763 Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
764 ++I;
765 } else {
766 // A simple mul.
767 Value *W = expandCodeFor(Op, Ty);
768 Prod = InsertNoopCastOfTo(Prod, Ty);
769 // Canonicalize a constant to the RHS.
770 if (isa<Constant>(Prod)) std::swap(Prod, W);
771 Prod = InsertBinop(Instruction::Mul, Prod, W);
772 ++I;
773 }
774 }
775
776 return Prod;
777 }
778
779 Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
780 Type *Ty = SE.getEffectiveSCEVType(S->getType());
781
782 Value *LHS = expandCodeFor(S->getLHS(), Ty);
783 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
784 const APInt &RHS = SC->getValue()->getValue();
785 if (RHS.isPowerOf2())
786 return InsertBinop(Instruction::LShr, LHS,
787 ConstantInt::get(Ty, RHS.logBase2()));
788 }
789
790 Value *RHS = expandCodeFor(S->getRHS(), Ty);
791 return InsertBinop(Instruction::UDiv, LHS, RHS);
792 }
793
794 /// Move parts of Base into Rest to leave Base with the minimal
795 /// expression that provides a pointer operand suitable for a
796 /// GEP expansion.
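/// For example, given Base == {(4 + %p),+,8}, this leaves Base == %p and folds
/// the constant 4 and the {0,+,8} recurrence into Rest, so that %p can serve
/// as the pointer operand of a GEP.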
797 static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
798 ScalarEvolution &SE) {
799 while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
800 Base = A->getStart();
801 Rest = SE.getAddExpr(Rest,
802 SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
803 A->getStepRecurrence(SE),
804 A->getLoop(),
805 A->getNoWrapFlags(SCEV::FlagNW)));
806 }
807 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
808 Base = A->getOperand(A->getNumOperands()-1);
809 SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
810 NewAddOps.back() = Rest;
811 Rest = SE.getAddExpr(NewAddOps);
812 ExposePointerBase(Base, Rest, SE);
813 }
814 }
815
816 /// Determine if this is a well-behaved chain of instructions leading back to
817 /// the PHI. If so, it may be reused by expanded expressions.
818 bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
819 const Loop *L) {
820 if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
821 (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
822 return false;
823 // If any of the operands don't dominate the insert position, bail.
824 // Addrec operands are always loop-invariant, so this can only happen
825 // if there are instructions which haven't been hoisted.
826 if (L == IVIncInsertLoop) {
827 for (User::op_iterator OI = IncV->op_begin()+1,
828 OE = IncV->op_end(); OI != OE; ++OI)
829 if (Instruction *OInst = dyn_cast<Instruction>(OI))
830 if (!SE.DT->dominates(OInst, IVIncInsertPos))
831 return false;
832 }
833 // Advance to the next instruction.
834 IncV = dyn_cast<Instruction>(IncV->getOperand(0));
835 if (!IncV)
836 return false;
837
838 if (IncV->mayHaveSideEffects())
839 return false;
840
841 if (IncV != PN)
842 return true;
843
844 return isNormalAddRecExprPHI(PN, IncV, L);
845 }
846
847 /// getIVIncOperand returns an induction variable increment's induction
848 /// variable operand.
849 ///
850 /// If allowScale is set, any type of GEP is allowed as long as the nonIV
851 /// operands dominate InsertPos.
852 ///
853 /// If allowScale is not set, ensure that a GEP increment conforms to one of the
854 /// simple patterns generated by getAddRecExprPHILiterally and
855 /// expandAddToGEP. If the pattern isn't recognized, return NULL.
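///
/// For example, for "%iv.next = getelementptr i8, i8* %iv, i64 %step" the
/// returned IV operand is %iv, provided %step dominates InsertPos; a GEP that
/// scales a non-constant index by a wider element type is only accepted when
/// allowScale is set.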
856 Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
857 Instruction *InsertPos,
858 bool allowScale) {
859 if (IncV == InsertPos)
860 return nullptr;
861
862 switch (IncV->getOpcode()) {
863 default:
864 return nullptr;
865 // Check for a simple Add/Sub or GEP of a loop invariant step.
866 case Instruction::Add:
867 case Instruction::Sub: {
868 Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
869 if (!OInst || SE.DT->dominates(OInst, InsertPos))
870 return dyn_cast<Instruction>(IncV->getOperand(0));
871 return nullptr;
872 }
873 case Instruction::BitCast:
874 return dyn_cast<Instruction>(IncV->getOperand(0));
875 case Instruction::GetElementPtr:
876 for (Instruction::op_iterator I = IncV->op_begin()+1, E = IncV->op_end();
877 I != E; ++I) {
878 if (isa<Constant>(*I))
879 continue;
880 if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
881 if (!SE.DT->dominates(OInst, InsertPos))
882 return nullptr;
883 }
884 if (allowScale) {
885 // allow any kind of GEP as long as it can be hoisted.
886 continue;
887 }
888 // This must be a pointer addition of constants (pretty), which is already
889 // handled, or some number of address-size elements (ugly). Ugly geps
890 // have 2 operands. i1* is used by the expander to represent an
891 // address-size element.
892 if (IncV->getNumOperands() != 2)
893 return nullptr;
894 unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
895 if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
896 && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
897 return nullptr;
898 break;
899 }
900 return dyn_cast<Instruction>(IncV->getOperand(0));
901 }
902 }
903
904 /// hoistStep - Attempt to hoist a simple IV increment above InsertPos to make
905 /// it available to other uses in this loop. Recursively hoist any operands,
906 /// until we reach a value that dominates InsertPos.
907 bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
908 if (SE.DT->dominates(IncV, InsertPos))
909 return true;
910
911 // InsertPos must itself dominate IncV so that IncV's new position satisfies
912 // its existing users.
913 if (isa<PHINode>(InsertPos)
914 || !SE.DT->dominates(InsertPos->getParent(), IncV->getParent()))
915 return false;
916
917 // Check that the chain of IV operands leading back to Phi can be hoisted.
918 SmallVector<Instruction*, 4> IVIncs;
919 for(;;) {
920 Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
921 if (!Oper)
922 return false;
923 // IncV is safe to hoist.
924 IVIncs.push_back(IncV);
925 IncV = Oper;
926 if (SE.DT->dominates(IncV, InsertPos))
927 break;
928 }
929 for (SmallVectorImpl<Instruction*>::reverse_iterator I = IVIncs.rbegin(),
930 E = IVIncs.rend(); I != E; ++I) {
931 (*I)->moveBefore(InsertPos);
932 }
933 return true;
934 }
935
936 /// Determine if this cyclic phi is in a form that would have been generated by
937 /// LSR. We don't care if the phi was actually expanded in this pass, as long
938 /// as it is in a low-cost form, for example, no implied multiplication. This
939 /// should match any patterns generated by getAddRecExprPHILiterally and
940 /// expandAddToGEP.
941 bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
942 const Loop *L) {
943 for(Instruction *IVOper = IncV;
944 (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
945 /*allowScale=*/false));) {
946 if (IVOper == PN)
947 return true;
948 }
949 return false;
950 }
951
952 /// expandIVInc - Expand an IV increment at Builder's current InsertPos.
953 /// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
954 /// need to materialize IV increments elsewhere to handle difficult situations.
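///
/// For example, an integer IV stepping by %n expands to something like
///   %x.iv.next = add i64 %iv, %n
/// (or a sub when useSubtract is set), while a pointer IV with a non-constant
/// step is incremented through an i1* GEP so that no multiply is needed inside
/// the loop.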
955 Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
956 Type *ExpandTy, Type *IntTy,
957 bool useSubtract) {
958 Value *IncV;
959 // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
960 if (ExpandTy->isPointerTy()) {
961 PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
962 // If the step isn't constant, don't use an implicitly scaled GEP, because
963 // that would require a multiply inside the loop.
964 if (!isa<ConstantInt>(StepV))
965 GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
966 GEPPtrTy->getAddressSpace());
967 const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
968 IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
969 if (IncV->getType() != PN->getType()) {
970 IncV = Builder.CreateBitCast(IncV, PN->getType());
971 rememberInstruction(IncV);
972 }
973 } else {
974 IncV = useSubtract ?
975 Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
976 Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
977 rememberInstruction(IncV);
978 }
979 return IncV;
980 }
981
982 /// \brief Hoist the addrec instruction chain rooted in the loop phi above the
983 /// position. This routine assumes that this is possible (has been checked).
984 static void hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
985 Instruction *Pos, PHINode *LoopPhi) {
986 do {
987 if (DT->dominates(InstToHoist, Pos))
988 break;
989 // Make sure the increment is where we want it. But don't move it
990 // down past a potential existing post-inc user.
991 InstToHoist->moveBefore(Pos);
992 Pos = InstToHoist;
993 InstToHoist = cast<Instruction>(InstToHoist->getOperand(0));
994 } while (InstToHoist != LoopPhi);
995 }
996
997 /// \brief Check whether we can cheaply express the requested SCEV in terms of
998 /// the available PHI SCEV by truncation and/or inversion of the step.
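///
/// For example, an i64 phi for {%r,+,1} can stand in for a requested i32
/// {trunc %r,+,1} after a trunc, and a requested {%r,+,-1} can be rebuilt from
/// an available {0,+,1} phi as %r - {0,+,1} with InvertStep set.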
999 static bool canBeCheaplyTransformed(ScalarEvolution &SE,
1000 const SCEVAddRecExpr *Phi,
1001 const SCEVAddRecExpr *Requested,
1002 bool &InvertStep) {
1003 Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
1004 Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());
1005
1006 if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
1007 return false;
1008
1009 // Try truncating it if necessary.
1010 Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
1011 if (!Phi)
1012 return false;
1013
1014 // Check whether truncation will help.
1015 if (Phi == Requested) {
1016 InvertStep = false;
1017 return true;
1018 }
1019
1020 // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
1021 if (SE.getAddExpr(Requested->getStart(),
1022 SE.getNegativeSCEV(Requested)) == Phi) {
1023 InvertStep = true;
1024 return true;
1025 }
1026
1027 return false;
1028 }
1029
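// IsIncrementNSW/IsIncrementNUW check whether the increment of this addrec may
// be flagged no-signed-wrap/no-unsigned-wrap: the add AR + Step is safe to
// flag when sign-extending (resp. zero-extending) the operands to twice the
// bit width and then adding provably yields the same value as adding first and
// extending the result, e.g. for i8 operands, sext(AR) + sext(Step) ==
// sext(AR + Step) over i16.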
1030 static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
1031 if (!isa<IntegerType>(AR->getType()))
1032 return false;
1033
1034 unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
1035 Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
1036 const SCEV *Step = AR->getStepRecurrence(SE);
1037 const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
1038 SE.getSignExtendExpr(AR, WideTy));
1039 const SCEV *ExtendAfterOp =
1040 SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
1041 return ExtendAfterOp == OpAfterExtend;
1042 }
1043
1044 static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
1045 if (!isa<IntegerType>(AR->getType()))
1046 return false;
1047
1048 unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
1049 Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
1050 const SCEV *Step = AR->getStepRecurrence(SE);
1051 const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
1052 SE.getZeroExtendExpr(AR, WideTy));
1053 const SCEV *ExtendAfterOp =
1054 SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
1055 return ExtendAfterOp == OpAfterExtend;
1056 }
1057
1058 /// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
1059 /// the base addrec, which is the addrec without any non-loop-dominating
1060 /// values, and return the PHI.
1061 PHINode *
1062 SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
1063 const Loop *L,
1064 Type *ExpandTy,
1065 Type *IntTy,
1066 Type *&TruncTy,
1067 bool &InvertStep) {
1068 assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");
1069
1070 // Reuse a previously-inserted PHI, if present.
1071 BasicBlock *LatchBlock = L->getLoopLatch();
1072 if (LatchBlock) {
1073 PHINode *AddRecPhiMatch = nullptr;
1074 Instruction *IncV = nullptr;
1075 TruncTy = nullptr;
1076 InvertStep = false;
1077
1078 // Only try partially matching scevs that need truncation and/or
1079 // step-inversion if we know this loop is outside the current loop.
1080 bool TryNonMatchingSCEV = IVIncInsertLoop &&
1081 SE.DT->properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());
1082
1083 for (BasicBlock::iterator I = L->getHeader()->begin();
1084 PHINode *PN = dyn_cast<PHINode>(I); ++I) {
1085 if (!SE.isSCEVable(PN->getType()))
1086 continue;
1087
1088 const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PN));
1089 if (!PhiSCEV)
1090 continue;
1091
1092 bool IsMatchingSCEV = PhiSCEV == Normalized;
1093 // We only handle truncation and inversion of phi recurrences for the
1094 // expanded expression if the expanded expression's loop dominates the
1095 // loop we insert to. Check now, so we can bail out early.
1096 if (!IsMatchingSCEV && !TryNonMatchingSCEV)
1097 continue;
1098
1099 Instruction *TempIncV =
1100 cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));
1101
1102 // Check whether we can reuse this PHI node.
1103 if (LSRMode) {
1104 if (!isExpandedAddRecExprPHI(PN, TempIncV, L))
1105 continue;
1106 if (L == IVIncInsertLoop && !hoistIVInc(TempIncV, IVIncInsertPos))
1107 continue;
1108 } else {
1109 if (!isNormalAddRecExprPHI(PN, TempIncV, L))
1110 continue;
1111 }
1112
1113 // Stop if we have found an exact match SCEV.
1114 if (IsMatchingSCEV) {
1115 IncV = TempIncV;
1116 TruncTy = nullptr;
1117 InvertStep = false;
1118 AddRecPhiMatch = PN;
1119 break;
1120 }
1121
1122 // Try whether the phi can be translated into the requested form
1123 // (truncated and/or offset by a constant).
1124 if ((!TruncTy || InvertStep) &&
1125 canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
1126 // Record the phi node, but don't stop; we might find an exact match
1127 // later.
1128 AddRecPhiMatch = PN;
1129 IncV = TempIncV;
1130 TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
1131 }
1132 }
1133
1134 if (AddRecPhiMatch) {
1135 // Potentially, move the increment. We have made sure in
1136 // isExpandedAddRecExprPHI or hoistIVInc that this is possible.
1137 if (L == IVIncInsertLoop)
1138 hoistBeforePos(SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);
1139
1140 // Ok, the add recurrence looks usable.
1141 // Remember this PHI, even in post-inc mode.
1142 InsertedValues.insert(AddRecPhiMatch);
1143 // Remember the increment.
1144 rememberInstruction(IncV);
1145 return AddRecPhiMatch;
1146 }
1147 }
1148
1149 // Save the original insertion point so we can restore it when we're done.
1150 BuilderType::InsertPointGuard Guard(Builder);
1151
1152 // Another AddRec may need to be recursively expanded below. For example, if
1153 // this AddRec is quadratic, the StepV may itself be an AddRec in this
1154 // loop. Remove this loop from the PostIncLoops set before expanding such
1155 // AddRecs. Otherwise, we cannot find a valid position for the step
1156 // (i.e. StepV can never dominate its loop header). Ideally, we could do
1157 // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
1158 // so it's not worth implementing SmallPtrSet::swap.
1159 PostIncLoopSet SavedPostIncLoops = PostIncLoops;
1160 PostIncLoops.clear();
1161
1162 // Expand code for the start value.
1163 Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
1164 L->getHeader()->begin());
1165
1166 // StartV must be hoisted into L's preheader to dominate the new phi.
1167 assert(!isa<Instruction>(StartV) ||
1168 SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(),
1169 L->getHeader()));
1170
1171 // Expand code for the step value. Do this before creating the PHI so that PHI
1172 // reuse code doesn't see an incomplete PHI.
1173 const SCEV *Step = Normalized->getStepRecurrence(SE);
1174 // If the stride is negative, insert a sub instead of an add for the increment
1175 // (unless it's a constant, because subtracts of constants are canonicalized
1176 // to adds).
1177 bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
1178 if (useSubtract)
1179 Step = SE.getNegativeSCEV(Step);
1180 // Expand the step somewhere that dominates the loop header.
1181 Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
1182
1183 // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
1184 // we actually do emit an addition. It does not apply if we emit a
1185 // subtraction.
1186 bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
1187 bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);
1188
1189 // Create the PHI.
1190 BasicBlock *Header = L->getHeader();
1191 Builder.SetInsertPoint(Header, Header->begin());
1192 pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
1193 PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
1194 Twine(IVName) + ".iv");
1195 rememberInstruction(PN);
1196
1197 // Create the step instructions and populate the PHI.
1198 for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
1199 BasicBlock *Pred = *HPI;
1200
1201 // Add a start value.
1202 if (!L->contains(Pred)) {
1203 PN->addIncoming(StartV, Pred);
1204 continue;
1205 }
1206
1207 // Create a step value and add it to the PHI.
1208 // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
1209 // instructions at IVIncInsertPos.
1210 Instruction *InsertPos = L == IVIncInsertLoop ?
1211 IVIncInsertPos : Pred->getTerminator();
1212 Builder.SetInsertPoint(InsertPos);
1213 Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
1214
1215 if (isa<OverflowingBinaryOperator>(IncV)) {
1216 if (IncrementIsNUW)
1217 cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
1218 if (IncrementIsNSW)
1219 cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
1220 }
1221 PN->addIncoming(IncV, Pred);
1222 }
1223
1224 // After expanding subexpressions, restore the PostIncLoops set so the caller
1225 // can ensure that IVIncrement dominates the current uses.
1226 PostIncLoops = SavedPostIncLoops;
1227
1228 // Remember this PHI, even in post-inc mode.
1229 InsertedValues.insert(PN);
1230
1231 return PN;
1232 }
1233
1234 Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
1235 Type *STy = S->getType();
1236 Type *IntTy = SE.getEffectiveSCEVType(STy);
1237 const Loop *L = S->getLoop();
1238
1239 // Determine a normalized form of this expression, which is the expression
1240 // before any post-inc adjustment is made.
1241 const SCEVAddRecExpr *Normalized = S;
1242 if (PostIncLoops.count(L)) {
1243 PostIncLoopSet Loops;
1244 Loops.insert(L);
1245 Normalized =
1246 cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, nullptr,
1247 nullptr, Loops, SE, *SE.DT));
1248 }
1249
1250 // Strip off any non-loop-dominating component from the addrec start.
1251 const SCEV *Start = Normalized->getStart();
1252 const SCEV *PostLoopOffset = nullptr;
1253 if (!SE.properlyDominates(Start, L->getHeader())) {
1254 PostLoopOffset = Start;
1255 Start = SE.getConstant(Normalized->getType(), 0);
1256 Normalized = cast<SCEVAddRecExpr>(
1257 SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
1258 Normalized->getLoop(),
1259 Normalized->getNoWrapFlags(SCEV::FlagNW)));
1260 }
1261
1262 // Strip off any non-loop-dominating component from the addrec step.
1263 const SCEV *Step = Normalized->getStepRecurrence(SE);
1264 const SCEV *PostLoopScale = nullptr;
1265 if (!SE.dominates(Step, L->getHeader())) {
1266 PostLoopScale = Step;
1267 Step = SE.getConstant(Normalized->getType(), 1);
1268 Normalized =
1269 cast<SCEVAddRecExpr>(SE.getAddRecExpr(
1270 Start, Step, Normalized->getLoop(),
1271 Normalized->getNoWrapFlags(SCEV::FlagNW)));
1272 }
1273
1274 // Expand the core addrec. If we need post-loop scaling, force it to
1275 // expand to an integer type to avoid the need for additional casting.
1276 Type *ExpandTy = PostLoopScale ? IntTy : STy;
1277 // In some cases, we decide to reuse an existing phi node but need to truncate
1278 // it and/or invert the step.
1279 Type *TruncTy = nullptr;
1280 bool InvertStep = false;
1281 PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy,
1282 TruncTy, InvertStep);
1283
1284 // Accommodate post-inc mode, if necessary.
1285 Value *Result;
1286 if (!PostIncLoops.count(L))
1287 Result = PN;
1288 else {
1289 // In PostInc mode, use the post-incremented value.
1290 BasicBlock *LatchBlock = L->getLoopLatch();
1291 assert(LatchBlock && "PostInc mode requires a unique loop latch!");
1292 Result = PN->getIncomingValueForBlock(LatchBlock);
1293
1294 // For an expansion to use the postinc form, the client must call
1295 // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
1296 // or dominated by IVIncInsertPos.
1297 if (isa<Instruction>(Result)
1298 && !SE.DT->dominates(cast<Instruction>(Result),
1299 Builder.GetInsertPoint())) {
1300 // The induction variable's postinc expansion does not dominate this use.
1301 // IVUsers tries to prevent this case, so it is rare. However, it can
1302 // happen when an IVUser outside the loop is not dominated by the latch
1303 // block. Adjusting IVIncInsertPos before expansion begins cannot handle
1304 // all cases. Consider a phi outside the loop whose operand is replaced during
1305 // expansion with the value of the postinc user. Without fundamentally
1306 // changing the way postinc users are tracked, the only remedy is
1307 // inserting an extra IV increment. StepV might fold into PostLoopOffset,
1308 // but hopefully expandCodeFor handles that.
1309 bool useSubtract =
1310 !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
1311 if (useSubtract)
1312 Step = SE.getNegativeSCEV(Step);
1313 Value *StepV;
1314 {
1315 // Expand the step somewhere that dominates the loop header.
1316 BuilderType::InsertPointGuard Guard(Builder);
1317 StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
1318 }
1319 Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
1320 }
1321 }
1322
1323 // We have decided to reuse an induction variable of a dominating loop. Apply
1324 // truncation and/or inversion of the step.
1325 if (TruncTy) {
1326 Type *ResTy = Result->getType();
1327 // Normalize the result type.
1328 if (ResTy != SE.getEffectiveSCEVType(ResTy))
1329 Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
1330 // Truncate the result.
1331 if (TruncTy != Result->getType()) {
1332 Result = Builder.CreateTrunc(Result, TruncTy);
1333 rememberInstruction(Result);
1334 }
1335 // Invert the result.
1336 if (InvertStep) {
1337 Result = Builder.CreateSub(expandCodeFor(Normalized->getStart(), TruncTy),
1338 Result);
1339 rememberInstruction(Result);
1340 }
1341 }
1342
1343 // Re-apply any non-loop-dominating scale.
1344 if (PostLoopScale) {
1345 assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
1346 Result = InsertNoopCastOfTo(Result, IntTy);
1347 Result = Builder.CreateMul(Result,
1348 expandCodeFor(PostLoopScale, IntTy));
1349 rememberInstruction(Result);
1350 }
1351
1352 // Re-apply any non-loop-dominating offset.
1353 if (PostLoopOffset) {
1354 if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
1355 const SCEV *const OffsetArray[1] = { PostLoopOffset };
1356 Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
1357 } else {
1358 Result = InsertNoopCastOfTo(Result, IntTy);
1359 Result = Builder.CreateAdd(Result,
1360 expandCodeFor(PostLoopOffset, IntTy));
1361 rememberInstruction(Result);
1362 }
1363 }
1364
1365 return Result;
1366 }
1367
1368 Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
1369 if (!CanonicalMode) return expandAddRecExprLiterally(S);
1370
1371 Type *Ty = SE.getEffectiveSCEVType(S->getType());
1372 const Loop *L = S->getLoop();
1373
1374 // First check for an existing canonical IV in a suitable type.
1375 PHINode *CanonicalIV = nullptr;
1376 if (PHINode *PN = L->getCanonicalInductionVariable())
1377 if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
1378 CanonicalIV = PN;
1379
1380 // Rewrite an AddRec in terms of the canonical induction variable, if
1381 // its type is more narrow.
1382 if (CanonicalIV &&
1383 SE.getTypeSizeInBits(CanonicalIV->getType()) >
1384 SE.getTypeSizeInBits(Ty)) {
1385 SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
1386 for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
1387 NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
1388 Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
1389 S->getNoWrapFlags(SCEV::FlagNW)));
1390 BasicBlock::iterator NewInsertPt =
1391 std::next(BasicBlock::iterator(cast<Instruction>(V)));
1392 BuilderType::InsertPointGuard Guard(Builder);
1393 while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt) ||
1394 isa<LandingPadInst>(NewInsertPt))
1395 ++NewInsertPt;
1396 V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
1397 NewInsertPt);
1398 return V;
1399 }
1400
1401 // {X,+,F} --> X + {0,+,F}
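// For example (illustrative): {%base,+,4}<%loop> with non-zero start %base is
// split into %base plus {0,+,4}; if %base is a pointer the sum is emitted as
// a GEP off %base, otherwise as a plain add.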
1402 if (!S->getStart()->isZero()) {
1403 SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
1404 NewOps[0] = SE.getConstant(Ty, 0);
1405 const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
1406 S->getNoWrapFlags(SCEV::FlagNW));
1407
1408 // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
1409 // comments on expandAddToGEP for details.
1410 const SCEV *Base = S->getStart();
1411 const SCEV *RestArray[1] = { Rest };
1412 // Dig into the expression to find the pointer base for a GEP.
1413 ExposePointerBase(Base, RestArray[0], SE);
1414 // If we found a pointer, expand the AddRec with a GEP.
1415 if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
1416 // Make sure the Base isn't something exotic, such as a multiplied
1417 // or divided pointer value. In those cases, the result type isn't
1418 // actually a pointer type.
1419 if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
1420 Value *StartV = expand(Base);
1421 assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
1422 return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
1423 }
1424 }
1425
1426 // Just do a normal add. Pre-expand the operands to suppress folding.
1427 return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
1428 SE.getUnknown(expand(Rest))));
1429 }
1430
1431 // If we don't yet have a canonical IV, create one.
1432 if (!CanonicalIV) {
1433 // Create and insert the PHI node for the induction variable in the
1434 // specified loop.
1435 BasicBlock *Header = L->getHeader();
1436 pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
1437 CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
1438 Header->begin());
1439 rememberInstruction(CanonicalIV);
1440
1441 SmallSet<BasicBlock *, 4> PredSeen;
1442 Constant *One = ConstantInt::get(Ty, 1);
1443 for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
1444 BasicBlock *HP = *HPI;
1445 if (!PredSeen.insert(HP).second) {
1446 // There must be an incoming value for each predecessor, even the
1447 // duplicates!
1448 CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
1449 continue;
1450 }
1451
1452 if (L->contains(HP)) {
1453 // Insert a unit add instruction right before the terminator
1454 // corresponding to the back-edge.
1455 Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
1456 "indvar.next",
1457 HP->getTerminator());
1458 Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
1459 rememberInstruction(Add);
1460 CanonicalIV->addIncoming(Add, HP);
1461 } else {
1462 CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
1463 }
1464 }
1465 }
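// The canonical IV created above has the following shape in IR (sketch; block
// names, value names, and the type are arbitrary):
//   header:
//     %indvar = phi i32 [ 0, %preheader ], [ %indvar.next, %latch ]
//     ...
//   latch:
//     %indvar.next = add i32 %indvar, 1
//     br label %header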
1466
1467 // {0,+,1} --> Insert a canonical induction variable into the loop!
1468 if (S->isAffine() && S->getOperand(1)->isOne()) {
1469 assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
1470 "IVs with types different from the canonical IV should "
1471 "already have been handled!");
1472 return CanonicalIV;
1473 }
1474
1475 // {0,+,F} --> {0,+,1} * F
1476
1477 // If this is a simple linear addrec, emit it now as a special case.
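// For example (sketch): {0,+,4} expands to a multiply of the canonical IV by
// the step, i.e. roughly "mul i64 %indvar, 4", truncated or extended to the
// requested type as needed.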
1478 if (S->isAffine()) // {0,+,F} --> i*F
1479 return
1480 expand(SE.getTruncateOrNoop(
1481 SE.getMulExpr(SE.getUnknown(CanonicalIV),
1482 SE.getNoopOrAnyExtend(S->getOperand(1),
1483 CanonicalIV->getType())),
1484 Ty));
1485
1486 // If this is a chain of recurrences, turn it into a closed form, using the
1487 // folders, then expandCodeFor the closed form. This allows the folders to
1488 // simplify the expression without having to build a bunch of special code
1489 // into this folder.
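// For example (illustrative): the chrec {0,+,1,+,1} evaluated at the symbolic
// iteration i folds to the closed form i + i*(i-1)/2 via the binomial
// expansion performed by evaluateAtIteration.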
1490 const SCEV *IH = SE.getUnknown(CanonicalIV); // Get I as a "symbolic" SCEV.
1491
1492 // Promote S up to the canonical IV type, if the cast is foldable.
1493 const SCEV *NewS = S;
1494 const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
1495 if (isa<SCEVAddRecExpr>(Ext))
1496 NewS = Ext;
1497
1498 const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
1499 //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n";
1500
1501 // Truncate the result down to the original type, if needed.
1502 const SCEV *T = SE.getTruncateOrNoop(V, Ty);
1503 return expand(T);
1504 }
1505
1506 Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
1507 Type *Ty = SE.getEffectiveSCEVType(S->getType());
1508 Value *V = expandCodeFor(S->getOperand(),
1509 SE.getEffectiveSCEVType(S->getOperand()->getType()));
1510 Value *I = Builder.CreateTrunc(V, Ty);
1511 rememberInstruction(I);
1512 return I;
1513 }
1514
1515 Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
1516 Type *Ty = SE.getEffectiveSCEVType(S->getType());
1517 Value *V = expandCodeFor(S->getOperand(),
1518 SE.getEffectiveSCEVType(S->getOperand()->getType()));
1519 Value *I = Builder.CreateZExt(V, Ty);
1520 rememberInstruction(I);
1521 return I;
1522 }
1523
1524 Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
1525 Type *Ty = SE.getEffectiveSCEVType(S->getType());
1526 Value *V = expandCodeFor(S->getOperand(),
1527 SE.getEffectiveSCEVType(S->getOperand()->getType()));
1528 Value *I = Builder.CreateSExt(V, Ty);
1529 rememberInstruction(I);
1530 return I;
1531 }
1532
1533 Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
1534 Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
1535 Type *Ty = LHS->getType();
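// smax is lowered as a right-to-left chain of compare+select. For operands
// (a, b, c) this yields (sketch): s1 = select (c s> b), c, b and then
// select (s1 s> a), s1, a.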
1536 for (int i = S->getNumOperands()-2; i >= 0; --i) {
1537 // In the case of mixed integer and pointer types, do the
1538 // rest of the comparisons as integer.
1539 if (S->getOperand(i)->getType() != Ty) {
1540 Ty = SE.getEffectiveSCEVType(Ty);
1541 LHS = InsertNoopCastOfTo(LHS, Ty);
1542 }
1543 Value *RHS = expandCodeFor(S->getOperand(i), Ty);
1544 Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
1545 rememberInstruction(ICmp);
1546 Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
1547 rememberInstruction(Sel);
1548 LHS = Sel;
1549 }
1550 // In the case of mixed integer and pointer types, cast the
1551 // final result back to the pointer type.
1552 if (LHS->getType() != S->getType())
1553 LHS = InsertNoopCastOfTo(LHS, S->getType());
1554 return LHS;
1555 }
1556
1557 Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
1558 Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
1559 Type *Ty = LHS->getType();
1560 for (int i = S->getNumOperands()-2; i >= 0; --i) {
1561 // In the case of mixed integer and pointer types, do the
1562 // rest of the comparisons as integer.
1563 if (S->getOperand(i)->getType() != Ty) {
1564 Ty = SE.getEffectiveSCEVType(Ty);
1565 LHS = InsertNoopCastOfTo(LHS, Ty);
1566 }
1567 Value *RHS = expandCodeFor(S->getOperand(i), Ty);
1568 Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
1569 rememberInstruction(ICmp);
1570 Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
1571 rememberInstruction(Sel);
1572 LHS = Sel;
1573 }
1574 // In the case of mixed integer and pointer types, cast the
1575 // final result back to the pointer type.
1576 if (LHS->getType() != S->getType())
1577 LHS = InsertNoopCastOfTo(LHS, S->getType());
1578 return LHS;
1579 }
1580
1581 Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
1582 Instruction *IP) {
1583 Builder.SetInsertPoint(IP->getParent(), IP);
1584 return expandCodeFor(SH, Ty);
1585 }
1586
1587 Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
1588 // Expand the code for this SCEV.
1589 Value *V = expand(SH);
1590 if (Ty) {
1591 assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
1592 "non-trivial casts should be done with the SCEVs directly!");
1593 V = InsertNoopCastOfTo(V, Ty);
1594 }
1595 return V;
1596 }
1597
1598 Value *SCEVExpander::expand(const SCEV *S) {
1599 // Compute an insertion point for this SCEV object. Hoist the instructions
1600 // as far out in the loop nest as possible.
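// For example (illustrative): if S is invariant in both the innermost loop
// and its parent, the insertion point is hoisted to the preheader of the
// outermost loop in which S is still invariant.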
1601 Instruction *InsertPt = Builder.GetInsertPoint();
1602 for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
1603 L = L->getParentLoop())
1604 if (SE.isLoopInvariant(S, L)) {
1605 if (!L) break;
1606 if (BasicBlock *Preheader = L->getLoopPreheader())
1607 InsertPt = Preheader->getTerminator();
1608 else {
1609 // LSR sets the insertion point for AddRec start/step values to the
1610 // block start to simplify value reuse, even though it's an invalid
1611 // position. SCEVExpander must correct for this in all cases.
1612 InsertPt = L->getHeader()->getFirstInsertionPt();
1613 }
1614 } else {
1615 // If the SCEV is computable at this level, insert it into the header
1616 // after the PHIs (and after any other instructions that we've inserted
1617 // there) so that it is guaranteed to dominate any user inside the loop.
1618 if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
1619 InsertPt = L->getHeader()->getFirstInsertionPt();
1620 while (InsertPt != Builder.GetInsertPoint()
1621 && (isInsertedInstruction(InsertPt)
1622 || isa<DbgInfoIntrinsic>(InsertPt))) {
1623 InsertPt = std::next(BasicBlock::iterator(InsertPt));
1624 }
1625 break;
1626 }
1627
1628 // Check to see if we already expanded this here.
1629 std::map<std::pair<const SCEV *, Instruction *>, TrackingVH<Value> >::iterator
1630 I = InsertedExpressions.find(std::make_pair(S, InsertPt));
1631 if (I != InsertedExpressions.end())
1632 return I->second;
1633
1634 BuilderType::InsertPointGuard Guard(Builder);
1635 Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
1636
1637 // Expand the expression into instructions.
1638 Value *V = visit(S);
1639
1640 // Remember the expanded value for this SCEV at this location.
1641 //
1642 // This is independent of PostIncLoops. The mapped value simply materializes
1643 // the expression at this insertion point. If the mapped value happened to be
1644 // a postinc expansion, it could be reused by a non-postinc user, but only if
1645 // its insertion point was already at the head of the loop.
1646 InsertedExpressions[std::make_pair(S, InsertPt)] = V;
1647 return V;
1648 }
1649
1650 void SCEVExpander::rememberInstruction(Value *I) {
1651 if (!PostIncLoops.empty())
1652 InsertedPostIncValues.insert(I);
1653 else
1654 InsertedValues.insert(I);
1655 }
1656
1657 /// getOrInsertCanonicalInductionVariable - This method returns the
1658 /// canonical induction variable of the specified type for the specified
1659 /// loop (inserting one if there is none). A canonical induction variable
1660 /// starts at zero and steps by one on each iteration.
1661 PHINode *
1662 SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
1663 Type *Ty) {
1664 assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");
1665
1666 // Build a SCEV for {0,+,1}<L>.
1667 // Conservatively use FlagAnyWrap for now.
1668 const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
1669 SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);
1670
1671 // Emit code for it.
1672 BuilderType::InsertPointGuard Guard(Builder);
1673 PHINode *V = cast<PHINode>(expandCodeFor(H, nullptr,
1674 L->getHeader()->begin()));
1675
1676 return V;
1677 }
1678
1679 /// replaceCongruentIVs - Check for congruent phis in this loop header and
1680 /// replace them with their most canonical representative. Return the number of
1681 /// phis eliminated.
1682 ///
1683 /// This does not depend on any SCEVExpander state but should be used in
1684 /// the same context that SCEVExpander is used.
1685 unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
1686 SmallVectorImpl<WeakVH> &DeadInsts,
1687 const TargetTransformInfo *TTI) {
1688 // Find integer phis in order of increasing width.
1689 SmallVector<PHINode*, 8> Phis;
1690 for (BasicBlock::iterator I = L->getHeader()->begin();
1691 PHINode *Phi = dyn_cast<PHINode>(I); ++I) {
1692 Phis.push_back(Phi);
1693 }
1694 if (TTI)
1695 std::sort(Phis.begin(), Phis.end(), [](Value *LHS, Value *RHS) {
1696 // Put pointers at the back and make sure pointer < pointer = false.
1697 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
1698 return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
1699 return RHS->getType()->getPrimitiveSizeInBits() <
1700 LHS->getType()->getPrimitiveSizeInBits();
1701 });
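// For example (illustrative): phis of types {i32, i64, i16, i8*} are visited
// in the order i64, i32, i16, i8* -- integers widest first, pointers last.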
1702
1703 unsigned NumElim = 0;
1704 DenseMap<const SCEV *, PHINode *> ExprToIVMap;
1705 // Process phis from wide to narrow. Map wide phis to their truncation
1706 // so that narrow phis can reuse them.
1707 for (SmallVectorImpl<PHINode*>::const_iterator PIter = Phis.begin(),
1708 PEnd = Phis.end(); PIter != PEnd; ++PIter) {
1709 PHINode *Phi = *PIter;
1710
1711 // Fold constant phis. They may be congruent to other constant phis and
1712 // would confuse the logic below that expects proper IVs.
1713 if (Value *V = SimplifyInstruction(Phi, DL, SE.TLI, SE.DT, SE.AC)) {
1714 Phi->replaceAllUsesWith(V);
1715 DeadInsts.push_back(Phi);
1716 ++NumElim;
1717 DEBUG_WITH_TYPE(DebugType, dbgs()
1718 << "INDVARS: Eliminated constant iv: " << *Phi << '\n');
1719 continue;
1720 }
1721
1722 if (!SE.isSCEVable(Phi->getType()))
1723 continue;
1724
1725 PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
1726 if (!OrigPhiRef) {
1727 OrigPhiRef = Phi;
1728 if (Phi->getType()->isIntegerTy() && TTI
1729 && TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
1730 // This phi can be freely truncated to the narrowest phi type. Map the
1731 // truncated expression to it so it will be reused for narrow types.
1732 const SCEV *TruncExpr =
1733 SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
1734 ExprToIVMap[TruncExpr] = Phi;
1735 }
1736 continue;
1737 }
1738
1739 // Replacing a pointer phi with an integer phi or vice-versa doesn't make
1740 // sense.
1741 if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
1742 continue;
1743
1744 if (BasicBlock *LatchBlock = L->getLoopLatch()) {
1745 Instruction *OrigInc =
1746 cast<Instruction>(OrigPhiRef->getIncomingValueForBlock(LatchBlock));
1747 Instruction *IsomorphicInc =
1748 cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));
1749
1750 // If this phi has the same width but is more canonical, replace the
1751 // original with it. As part of the "more canonical" determination,
1752 // respect a prior decision to use an IV chain.
1753 if (OrigPhiRef->getType() == Phi->getType()
1754 && !(ChainedPhis.count(Phi)
1755 || isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L))
1756 && (ChainedPhis.count(Phi)
1757 || isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
1758 std::swap(OrigPhiRef, Phi);
1759 std::swap(OrigInc, IsomorphicInc);
1760 }
1761 // Replacing the congruent phi is sufficient because acyclic redundancy
1762 // elimination, CSE/GVN, should handle the rest. However, once SCEV proves
1763 // that a phi is congruent, it's often the head of an IV user cycle that
1764 // is isomorphic with the original phi. It's worth eagerly cleaning up the
1765 // common case of a single IV increment so that DeleteDeadPHIs can remove
1766 // cycles that had postinc uses.
1767 const SCEV *TruncExpr = SE.getTruncateOrNoop(SE.getSCEV(OrigInc),
1768 IsomorphicInc->getType());
1769 if (OrigInc != IsomorphicInc
1770 && TruncExpr == SE.getSCEV(IsomorphicInc)
1771 && ((isa<PHINode>(OrigInc) && isa<PHINode>(IsomorphicInc))
1772 || hoistIVInc(OrigInc, IsomorphicInc))) {
1773 DEBUG_WITH_TYPE(DebugType, dbgs()
1774 << "INDVARS: Eliminated congruent iv.inc: "
1775 << *IsomorphicInc << '\n');
1776 Value *NewInc = OrigInc;
1777 if (OrigInc->getType() != IsomorphicInc->getType()) {
1778 Instruction *IP = nullptr;
1779 if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
1780 IP = PN->getParent()->getFirstInsertionPt();
1781 else
1782 IP = OrigInc->getNextNode();
1783
1784 IRBuilder<> Builder(IP);
1785 Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
1786 NewInc = Builder.
1787 CreateTruncOrBitCast(OrigInc, IsomorphicInc->getType(), IVName);
1788 }
1789 IsomorphicInc->replaceAllUsesWith(NewInc);
1790 DeadInsts.push_back(IsomorphicInc);
1791 }
1792 }
1793 DEBUG_WITH_TYPE(DebugType, dbgs()
1794 << "INDVARS: Eliminated congruent iv: " << *Phi << '\n');
1795 ++NumElim;
1796 Value *NewIV = OrigPhiRef;
1797 if (OrigPhiRef->getType() != Phi->getType()) {
1798 IRBuilder<> Builder(L->getHeader()->getFirstInsertionPt());
1799 Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
1800 NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
1801 }
1802 Phi->replaceAllUsesWith(NewIV);
1803 DeadInsts.push_back(Phi);
1804 }
1805 return NumElim;
1806 }
1807
1808 bool SCEVExpander::isHighCostExpansionHelper(
1809 const SCEV *S, Loop *L, SmallPtrSetImpl<const SCEV *> &Processed) {
1810 if (!Processed.insert(S).second)
1811 return false;
1812
1813 if (auto *UDivExpr = dyn_cast<SCEVUDivExpr>(S)) {
1814 // If the divisor is a power of two and the SCEV type fits in a native
1815 // integer, consider the division cheap irrespective of whether it occurs in
1816 // the user code since it can be lowered into a right shift.
1817 if (auto *SC = dyn_cast<SCEVConstant>(UDivExpr->getRHS()))
1818 if (SC->getValue()->getValue().isPowerOf2()) {
1819 const DataLayout &DL =
1820 L->getHeader()->getParent()->getParent()->getDataLayout();
1821 unsigned Width = cast<IntegerType>(UDivExpr->getType())->getBitWidth();
1822 return DL.isIllegalInteger(Width);
1823 }
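// For example (illustrative): %x /u 8 with a 32-bit type lowers to
// "lshr i32 %x, 3" and is treated as cheap whenever i32 is a legal integer
// type for the target.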
1824
1825 // UDivExpr is very likely a UDiv that ScalarEvolution's HowFarToZero or
1826 // HowManyLessThans produced to compute a precise expression, rather than a
1827 // UDiv from the user's code. If we can't find a UDiv in the code with some
1828 // simple searching, assume the former and consider UDivExpr expensive to
1829 // compute.
1830 BasicBlock *ExitingBB = L->getExitingBlock();
1831 if (!ExitingBB)
1832 return true;
1833
1834 BranchInst *ExitingBI = dyn_cast<BranchInst>(ExitingBB->getTerminator());
1835 if (!ExitingBI || !ExitingBI->isConditional())
1836 return true;
1837
1838 ICmpInst *OrigCond = dyn_cast<ICmpInst>(ExitingBI->getCondition());
1839 if (!OrigCond)
1840 return true;
1841
1842 const SCEV *RHS = SE.getSCEV(OrigCond->getOperand(1));
1843 RHS = SE.getMinusSCEV(RHS, SE.getConstant(RHS->getType(), 1));
1844 if (RHS != S) {
1845 const SCEV *LHS = SE.getSCEV(OrigCond->getOperand(0));
1846 LHS = SE.getMinusSCEV(LHS, SE.getConstant(LHS->getType(), 1));
1847 if (LHS != S)
1848 return true;
1849 }
1850 }
1851
1852 // Recurse past add expressions, which commonly occur in the
1853 // BackedgeTakenCount. They may already exist in program code, and if not,
1854 // they are not too expensive to rematerialize.
1855 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
1856 for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
1857 I != E; ++I) {
1858 if (isHighCostExpansionHelper(*I, L, Processed))
1859 return true;
1860 }
1861 return false;
1862 }
1863
1864 // HowManyLessThans uses a Max expression whenever the loop is not guarded by
1865 // the exit condition.
1866 if (isa<SCEVSMaxExpr>(S) || isa<SCEVUMaxExpr>(S))
1867 return true;
1868
1869 // If we haven't recognized an expensive SCEV pattern, assume it's an
1870 // expression produced by program code.
1871 return false;
1872 }
1873
1874 namespace {
1875 // Search for a SCEV subexpression that is not safe to expand. Any expression
1876 // that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
1877 // UDiv expressions. We don't know if the UDiv is derived from an IR divide
1878 // instruction, but the important thing is that we prove the denominator is
1879 // nonzero before expansion.
1880 //
1881 // IVUsers already checks that IV-derived expressions are safe. So this check is
1882 // only needed when the expression includes some subexpression that is not IV
1883 // derived.
1884 //
1885 // Currently, we only allow division by a nonzero constant here. If this is
1886 // inadequate, we could easily allow division by SCEVUnknown by using
1887 // ValueTracking to check isKnownNonZero().
1888 //
1889 // We cannot generally expand recurrences unless the step dominates the loop
1890 // header. The expander handles the special case of affine recurrences by
1891 // scaling the recurrence outside the loop, but this technique isn't generally
1892 // applicable. Expanding a nested recurrence outside a loop requires computing
1893 // binomial coefficients. This could be done, but the recurrence has to be in a
1894 // perfectly reduced form, which can't be guaranteed.
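// For example (illustrative): (%a /u %b) is flagged unsafe unless %b is a
// nonzero constant, and a non-affine recurrence whose step does not dominate
// its loop header is flagged unsafe as well.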
1895 struct SCEVFindUnsafe {
1896 ScalarEvolution &SE;
1897 bool IsUnsafe;
1898
1899   SCEVFindUnsafe(ScalarEvolution &se): SE(se), IsUnsafe(false) {}
1900
1901   bool follow(const SCEV *S) {
1902 if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
1903 const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
1904 if (!SC || SC->getValue()->isZero()) {
1905 IsUnsafe = true;
1906 return false;
1907 }
1908 }
1909 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
1910 const SCEV *Step = AR->getStepRecurrence(SE);
1911 if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
1912 IsUnsafe = true;
1913 return false;
1914 }
1915 }
1916 return true;
1917 }
1918   bool isDone() const { return IsUnsafe; }
1919 };
1920 }
1921
1922 namespace llvm {
1923 bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) {
1924 SCEVFindUnsafe Search(SE);
1925 visitAll(S, Search);
1926 return !Search.IsUnsafe;
1927 }
1928 }
1929