//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basicaa-recphi", cl::Hidden,
                                          cl::init(false));
/// SearchLimitReached / SearchTimes shows how often the limit to
/// decompose GEPs is reached. It will affect the precision
/// of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(); both functions need to use the same search
// depth, otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is to a function-local object that never
/// escapes from the function.
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr())
      // Note even if the argument is marked nocapture, we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  return false;
}

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool RoundToAlign = false) {
  uint64_t Size;
  if (getObjectSize(V, Size, DL, &TLI, RoundToAlign))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI) {
  // Note that the meanings of "object" are slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100);
  //   char *q = p + 80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points into the
  // middle of the "object". If q is passed to isObjectSmallerThan() as the
  // 1st parameter, before llvm::getObjectSize() is called to get the size of
  // the entire object, we should:
  //   - either rewind the pointer q to the base address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Returns the scale and offset values as APInts, returns V as a Value*, and
/// returns whether we looked through any sign or zero extends. The incoming
/// Value is known to have IntegerType, and it may already be sign or zero
/// extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
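///
/// For example (illustrative): decomposing "add nsw i32 %x, 4" yields V = %x,
/// Scale = 1, Offset = 4 with NSW preserved, while "shl i32 %x, 1" yields
/// V = %x, Scale = 2, Offset = 0 with NSW/NUW cleared, since shifts don't
/// propagate those flags here.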
/*static*/ const Value *BasicAAResult::GetLinearExpression(
    const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
    unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the variable.
    // If we've been called recursively, the Offset bit width will be greater
    // than the constant's (the Offset's always as wide as the outermost call),
    // so we'll zext here and process any extension in the isa<SExtInst> &
    // isa<ZExtInst> cases below.
    Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {

      // If we've been called recursively, then Offset and Scale will be wider
      // than the BOp operands. We'll always zext it here as we'll process sign
      // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
      APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());

      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        Scale = 1;
        Offset = 0;
        return V;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT)) {
          Scale = 1;
          Offset = 0;
          return V;
        }
        // FALL THROUGH.
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset += RHS;
        break;
      case Instruction::Sub:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset -= RHS;
        break;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset *= RHS;
        Scale *= RHS;
        break;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset <<= RHS.getLimitedValue();
        Scale <<= RHS.getLimitedValue();
        // The semantics of nsw and nuw for left shifts don't match those of
        // multiplications, so we won't propagate them.
        NSW = NUW = false;
        return V;
      }

      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      return V;
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
    const Value *Result =
        GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                            Depth + 1, AC, DT, NSW, NUW);

    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
    // by just incrementing the number of bits we've extended by.
    unsigned ExtendedBy = NewWidth - SmallWidth;

    if (isa<SExtInst>(V) && ZExtBits == 0) {
      // sext(sext(%x, a), b) == sext(%x, a + b)

      if (NSW) {
        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
        unsigned OldWidth = Offset.getBitWidth();
        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
      } else {
        // We may have signed-wrapped, so don't decompose sext(%x + c) into
        // sext(%x) + sext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      SExtBits += ExtendedBy;
    } else {
      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)

      if (!NUW) {
        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
        // zext(%x) + zext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      ZExtBits += ExtendedBy;
    }

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}
/// Ensures that a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than 64. This is an issue in
/// particular for 32b programs with negative indices that rely on two's
/// complement wrap-arounds for precise alias information.
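///
/// For example (illustrative): with PointerSize == 32, the shift-and-restore
/// below sign-extends the low 32 bits, so an Offset of 0xFFFFFFFF comes back
/// as -1 rather than 4294967295.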
static int64_t adjustToPointerSize(int64_t Offset, unsigned PointerSize) {
  assert(PointerSize <= 64 && "Invalid PointerSize!");
  unsigned ShiftBits = 64 - PointerSize;
  return (int64_t)((uint64_t)Offset << ShiftBits) >> ShiftBits;
}

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth). When DataLayout is not around, it just looks
/// through pointer casts.
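///
/// For example (illustrative), for a struct %S = { i32, [8 x i32] },
/// "getelementptr inbounds %S, %S* %p, i64 0, i32 1, i64 %i" would decompose
/// into Base = %p, StructOffset = 4 (the offset of field 1), OtherOffset = 0,
/// and a single VarIndices entry of {%i, Scale = 4}.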
bool BasicAAResult::DecomposeGEPExpression(const Value *V,
    DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
    DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;

  Decomposed.StructOffset = 0;
  Decomposed.OtherOffset = 0;
  Decomposed.VarIndices.clear();
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle is a GlobalAlias.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return false;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (auto CS = ImmutableCallSite(V))
        if (const Value *RV = CS.getReturnedArgOperand()) {
          V = RV;
          continue;
        }

      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      Decomposed.Base = V;
      return false;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getSourceElementType()->isSized()) {
      Decomposed.Base = V;
      return false;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into the constant offsets
    // and VarIndices of the decomposition.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.StructOffset +=
            DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.OtherOffset +=
            DL.getTypeAllocSize(*GTI) * CIdx->getSExtValue();
        continue;
      }

      uint64_t Scale = DL.getTypeAllocSize(*GTI);
      unsigned ZExtBits = 0, SExtBits = 0;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (PointerSize > Width)
        SExtBits += PointerSize - Width;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      bool NSW = true, NUW = true;
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                  SExtBits, DL, 0, AC, DT, NSW, NUW);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      Decomposed.OtherOffset += IndexOffset.getSExtValue() * Scale;
      Scale *= IndexScale.getSExtValue();

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].V == Index &&
            Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
            Decomposed.VarIndices[i].SExtBits == SExtBits) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (Scale) {
        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits,
                                  static_cast<int64_t>(Scale)};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds.
    Decomposed.StructOffset =
        adjustToPointerSize(Decomposed.StructOffset, PointerSize);
    Decomposed.OtherOffset =
        adjustToPointerSize(Decomposed.OtherOffset, PointerSize);

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return true;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, OrLocal);

  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(ImmutableCallSite CS) {
  if (CS.doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (CS.onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (CS.doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (CS.onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
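
  // These behaviors are bitmasks over mod/ref and location bits, so
  // intersecting with '&' accumulates constraints; e.g. (illustrative) a
  // readonly, argmemonly call site narrows Min to reads-of-argument-memory.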

  // If CS has operand bundles then aliasing attributes from the function it
  // calls do not directly apply to the CallSite. This can be made more
  // precise in the future.
  if (!CS.hasOperandBundles())
    if (const Function *F = CS.getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);

  return Min;
}

/// Returns true if this is a writeonly (i.e., Mod only) parameter.
static bool isWriteOnlyParam(ImmutableCallSite CS, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (CS.paramHasAttr(ArgIdx + 1, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME Consider handling this in InferFunctionAttr.cpp together with other
  // attributes.
  LibFunc::Func F;
  if (CS.getCalledFunction() && TLI.getLibFunc(*CS.getCalledFunction(), F) &&
      F == LibFunc::memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(ImmutableCallSite CS,
                                           unsigned ArgIdx) {

  // Checking for known builtin intrinsics and target library functions.
  if (isWriteOnlyParam(CS, ArgIdx, TLI))
    return MRI_Mod;

  if (CS.paramHasAttr(ArgIdx + 1, Attribute::ReadOnly))
    return MRI_Ref;

  if (CS.paramHasAttr(ArgIdx + 1, Attribute::ReadNone))
    return MRI_NoModRef;

  return AAResultBase::getArgModRefInfo(CS, ArgIdx);
}

static bool isIntrinsicCall(ImmutableCallSite CS, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  return II && II->getIntrinsicID() == IID;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V))
    return inst->getParent()->getParent();

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");

  // If we have a directly cached entry for these locations, we have recursed
  // through this once, so just return the cached results. Notably, when this
  // happens, we don't clear the cache.
  auto CacheIt = AliasCache.find(LocPair(LocA, LocB));
  if (CacheIt != AliasCache.end())
    return CacheIt->second;

  AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
                                 LocB.Size, LocB.AATags);
  // AliasCache rarely has more than 1 or 2 elements; always use
  // shrink_and_clear so it quickly returns to the inline capacity of the
  // SmallDenseMap if it ever grows larger.
  // FIXME: This should really be shrink_to_inline_capacity_and_clear().
  AliasCache.shrink_and_clear();
  VisitedPhiBBs.clear();
  return Alias;
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
                                        const MemoryLocation &Loc) {
  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);

  // If this is a tail call and Loc.Ptr points to a stack location, we know
  // that the tail call cannot access or modify the local stack.
  // We cannot exclude byval arguments here; these belong to the caller of
  // the current function, not to the current function, and a tail callee
  // may reference them.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall())
        return MRI_NoModRef;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      isNonEscapingLocalObject(Object)) {
    bool PassedAsArg = false;
    unsigned OperandNo = 0;
    for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!CS.doesNotCapture(OperandNo) && !CS.isByValArgument(OperandNo)))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking. If not, we have to
      // assume that the call could touch the pointer, even though it doesn't
      // escape.
      AliasResult AR =
          getBestAAResults().alias(MemoryLocation(*CI), MemoryLocation(Object));
      if (AR) {
        PassedAsArg = true;
        break;
      }
    }

    if (!PassedAsArg)
      return MRI_NoModRef;
  }

  // If the CallSite is to malloc or calloc, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  auto *Inst = CS.getInstruction();
  if (isMallocLikeFn(Inst, &TLI) || isCallocLikeFn(Inst, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation(Inst), Loc) == NoAlias)
      return MRI_NoModRef;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(CS, Intrinsic::assume))
    return MRI_NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained, but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(CS, Intrinsic::experimental_guard))
    return MRI_Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(CS, Loc);
}

ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
                                        ImmutableCallSite CS2) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(CS1, Intrinsic::assume) ||
      isIntrinsicCall(CS2, Intrinsic::assume))
    return MRI_NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained, but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(CS1, Intrinsic::experimental_guard))
    return getModRefBehavior(CS2) & MRI_Mod ? MRI_Ref : MRI_NoModRef;

  if (isIntrinsicCall(CS2, Intrinsic::experimental_guard))
    return getModRefBehavior(CS1) & MRI_Mod ? MRI_Mod : MRI_NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(CS1, CS2);
}

/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// both having the exact same pointer operand.
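///
/// For example (illustrative), with a common base %p of type %struct.S*
/// where %struct.S = { i32, i32 }, "gep inbounds %p, i64 0, i32 0" and
/// "gep inbounds %p, i64 0, i32 1" address disjoint fields, so 4-byte
/// accesses through them can be reported NoAlias.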
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            uint64_t V1Size,
                                            const GEPOperator *GEP2,
                                            uint64_t V2Size,
                                            const DataLayout &DL) {

  assert(GEP1->getPointerOperand()->stripPointerCasts() ==
             GEP2->getPointerOperand()->stripPointerCasts() &&
         GEP1->getPointerOperand()->getType() ==
             GEP2->getPointerOperand()->getType() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (V1Size == MemoryLocation::UnknownSize ||
      V2Size == MemoryLocation::UnknownSize)
    return MayAlias;

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices are constants and are equal, the other
  // indices might also be dynamically equal, so the GEPs can alias.
  if (C1 && C2 && C1->getSExtValue() == C2->getSExtValue())
    return MayAlias;

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type. If there's something other
  // than an array, different indices can lead to different final types.
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  auto *Ty = GetElementPtrInst::getIndexedType(
      GEP1->getSourceElementType(), IntermediateIndices);
  StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);

  if (isa<SequentialType>(Ty)) {
    // We know that:
    // - both GEPs begin indexing from the exact same pointer;
    // - the last indices in both GEPs are constants, indexing into a sequential
    //   type (array or pointer);
    // - both GEPs only index through arrays prior to that.
    //
    // Because array indices greater than the number of elements are valid in
    // GEPs, unless we know the intermediate indices are identical between
    // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
    // partially overlap. We also need to check that the loaded size matches
    // the element size, otherwise we could still have overlap.
    const uint64_t ElementSize =
        DL.getTypeStoreSize(cast<SequentialType>(Ty)->getElementType());
    if (V1Size != ElementSize || V2Size != ElementSize)
      return MayAlias;

    for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
      if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
        return MayAlias;
    // Now we know that the array/pointer that GEP1 indexes into and the one
    // that GEP2 indexes into must either precisely overlap or be disjoint.
    // Because they cannot partially overlap and because fields in an array
    // cannot overlap, if we can prove the final indices are different between
    // GEP1 and GEP2, we can conclude GEP1 and GEP2 don't alias.

    // If the last indices are constants, we've already checked they don't
    // equal each other so we can exit early.
    if (C1 && C2)
      return NoAlias;
    if (isKnownNonEqual(GEP1->getOperand(GEP1->getNumOperands() - 1),
                        GEP2->getOperand(GEP2->getNumOperands() - 1),
                        DL))
      return NoAlias;
    return MayAlias;
  } else if (!LastIndexedStruct || !C1 || !C2) {
    return MayAlias;
  }

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint. Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.

  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return NoAlias;

  return MayAlias;
}

// If we have (a) a GEP and (b) a pointer based on an alloca, and the
// beginning of the object the GEP points to would have a negative offset with
// respect to the alloca, that means the GEP cannot alias pointer (b).
// Note that the pointer based on the alloca may not be a GEP. For
// example, it may be the alloca itself.
// The same applies if (b) is based on a GlobalVariable. Note that just being
// based on isIdentifiedObject() is not enough - we need an identified object
// that does not permit access to negative offsets. For example, a negative
// offset from a noalias argument or call can be inbounds w.r.t the actual
// underlying object.
//
// For example, consider:
//
//   struct { int f0, f1, ... } foo;
//   foo alloca;
//   foo* random = bar(alloca);
//   int *f0 = &alloca.f0;
//   int *f1 = &random->f1;
//
// Which is lowered, approximately, to:
//
//   %alloca = alloca %struct.foo
//   %random = call %struct.foo* @random(%struct.foo* %alloca)
//   %f0 = getelementptr inbounds %struct, %struct.foo* %alloca, i32 0, i32 0
//   %f1 = getelementptr inbounds %struct, %struct.foo* %random, i32 0, i32 1
//
// Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
// by %alloca. Since the %f1 GEP is inbounds, that means %random must also
// point into the same object. But since %f0 points to the beginning of
// %alloca, the highest %f1 can be is (%alloca + 3). This means %random cannot
// be higher than (%alloca - 1), and so is not inbounds, a contradiction.
bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
    const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
    uint64_t ObjectAccessSize) {
  // If the object access size is unknown, or the GEP isn't inbounds, bail.
  if (ObjectAccessSize == MemoryLocation::UnknownSize || !GEPOp->isInBounds())
    return false;

  // We need the object to be an alloca or a GlobalVariable, and want to know
  // the offset of the pointer from the object precisely, so no variable
  // indices are allowed.
  if (!(isa<AllocaInst>(DecompObject.Base) ||
        isa<GlobalVariable>(DecompObject.Base)) ||
      !DecompObject.VarIndices.empty())
    return false;

  int64_t ObjectBaseOffset = DecompObject.StructOffset +
                             DecompObject.OtherOffset;

  // If the GEP has no variable indices, we know the precise offset
  // from the base, so use it. If the GEP has variable indices, we're in
  // a bit more trouble: we can't count on the constant offsets that come
  // from non-struct sources, since these can be "rewound" by a negative
  // variable offset. So use only offsets that came from structs.
  int64_t GEPBaseOffset = DecompGEP.StructOffset;
  if (DecompGEP.VarIndices.empty())
    GEPBaseOffset += DecompGEP.OtherOffset;

  return (GEPBaseOffset >= ObjectBaseOffset + (int64_t)ObjectAccessSize);
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
                                    const AAMDNodes &V1AAInfo, const Value *V2,
                                    uint64_t V2Size, const AAMDNodes &V2AAInfo,
                                    const Value *UnderlyingV1,
                                    const Value *UnderlyingV2) {
  DecomposedGEP DecompGEP1, DecompGEP2;
  bool GEP1MaxLookupReached =
      DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
  bool GEP2MaxLookupReached =
      DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);

  int64_t GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
  int64_t GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;

  assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
         "DecomposeGEPExpression returned a result different from "
         "GetUnderlyingObject");

  // If the GEP's offset relative to its base is such that the base would
  // fall below the start of the object underlying V2, then the GEP and V2
  // cannot alias.
  if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
      isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
    return NoAlias;
  // If we have two gep instructions with must-alias or not-alias'ing base
  // pointers, figure out if the indexes to the GEP tell us anything about the
  // derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Check for the GEP base being at a negative offset, this time in the other
    // direction.
    if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
        isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
      return NoAlias;
    // Do the base pointers alias?
    AliasResult BaseAlias =
        aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize, AAMDNodes(),
                   UnderlyingV2, MemoryLocation::UnknownSize, AAMDNodes());

    // Check for geps of non-aliasing underlying pointers where the offsets are
    // identical.
    if ((BaseAlias == MayAlias) && V1Size == V2Size) {
      // Do the base pointers alias assuming type and size.
      AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size, V1AAInfo,
                                                UnderlyingV2, V2Size, V2AAInfo);
      if (PreciseBaseAlias == NoAlias) {
        // See if the computed offset from the common pointer tells us about the
        // relation of the resulting pointer.
        // If the max search depth is reached, the result is undefined.
        if (GEP2MaxLookupReached || GEP1MaxLookupReached)
          return MayAlias;

        // Same offsets.
        if (GEP1BaseOffset == GEP2BaseOffset &&
            DecompGEP1.VarIndices == DecompGEP2.VarIndices)
          return NoAlias;
      }
    }

    // If we get a No or May, then return it immediately, no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias)
      return BaseAlias;

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    // If we know the two GEPs are based off of the exact same pointer (and not
    // just the same underlying object), see if that tells us anything about
    // the resulting pointers.
    if (GEP1->getPointerOperand()->stripPointerCasts() ==
            GEP2->getPointerOperand()->stripPointerCasts() &&
        GEP1->getPointerOperand()->getType() ==
            GEP2->getPointerOperand()->getType()) {
      AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
      // If we couldn't find anything interesting, don't abandon just yet.
      if (R != MayAlias)
        return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP2MaxLookupReached || GEP1MaxLookupReached)
      return MayAlias;

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.

    // If both accesses are unknown size, we can't do anything useful here.
    if (V1Size == MemoryLocation::UnknownSize &&
        V2Size == MemoryLocation::UnknownSize)
      return MayAlias;

    AliasResult R = aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize,
                               AAMDNodes(), V2, V2Size, V2AAInfo);
    if (R != MustAlias)
      // If V2 may alias the GEP base pointer, conservatively return MayAlias.
      // If V2 is known not to alias the GEP base pointer, then the two values
      // cannot alias per GEP semantics: "A pointer value formed from a
      // getelementptr instruction is associated with the addresses associated
      // with the first operand of the getelementptr".
      return R;

    // If the max search depth is reached, the result is undefined.
    if (GEP1MaxLookupReached)
      return MayAlias;
  }

  // In the two-GEP case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEPs (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
  if (GEP1BaseOffset == 0 && DecompGEP1.VarIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
    if (GEP1BaseOffset >= 0) {
      if (V2Size != MemoryLocation::UnknownSize) {
        if ((uint64_t)GEP1BaseOffset < V2Size)
          return PartialAlias;
        return NoAlias;
      }
    } else {
      // We have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      // We need to know that V2Size is not unknown, otherwise we might have
      // stripped a gep with a negative index ('gep <ptr>, -1, ...').
      if (V1Size != MemoryLocation::UnknownSize &&
          V2Size != MemoryLocation::UnknownSize) {
        if (-(uint64_t)GEP1BaseOffset < V1Size)
          return PartialAlias;
        return NoAlias;
      }
    }
  }

  if (!DecompGEP1.VarIndices.empty()) {
    uint64_t Modulo = 0;
    bool AllPositive = true;
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {

      // Try to distinguish something like &A[i][1] against &A[42][0].
      // Grab the least significant bit set in any of the scales. We
      // don't need std::abs here (even if the scale's negative) as we'll
      // be ^'ing Modulo with itself later.
      Modulo |= (uint64_t)DecompGEP1.VarIndices[i].Scale;

      if (AllPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = DecompGEP1.VarIndices[i].V;

        bool SignKnownZero, SignKnownOne;
        ComputeSignBit(const_cast<Value *>(V), SignKnownZero, SignKnownOne, DL,
                       0, &AC, nullptr, DT);

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        // If the variable begins with a zero then we know it's
        // positive, regardless of whether the value is signed or
        // unsigned.
        int64_t Scale = DecompGEP1.VarIndices[i].Scale;
        AllPositive =
            (SignKnownZero && Scale >= 0) || (SignKnownOne && Scale < 0);
      }
    }

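    // Keep only the least significant set bit of Modulo: x ^ (x & (x - 1))
    // clears all but the lowest one bit. For example (illustrative), scales of
    // 4 and 16 OR together to 20, which reduces to Modulo = 4.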
    Modulo = Modulo ^ (Modulo & (Modulo - 1));

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
    if (V1Size != MemoryLocation::UnknownSize &&
        V2Size != MemoryLocation::UnknownSize && ModOffset >= V2Size &&
        V1Size <= Modulo - ModOffset)
      return NoAlias;

    // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
    // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
    // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
    if (AllPositive && GEP1BaseOffset > 0 && V2Size <= (uint64_t)GEP1BaseOffset)
      return NoAlias;

    if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                GEP1BaseOffset, &AC, DT))
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  //
  // TODO: Returning PartialAlias instead of MayAlias is a mild hack; the
  // practical effect of this is protecting TBAA in the case of dynamic
  // indices into arrays of unions or malloc'd memory.
  return PartialAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == PartialAlias && B == MustAlias) ||
      (B == PartialAlias && A == MustAlias))
    return PartialAlias;
  // Otherwise, we don't know anything.
  return MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
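///
/// For example (illustrative), "select i1 %c, i32* %p1, i32* %q1" and
/// "select i1 %c, i32* %p2, i32* %q2" share a condition, so only the
/// corresponding arms (%p1 vs. %p2, and %q1 vs. %q2) need to be compared.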
AliasResult BasicAAResult::aliasSelect(const SelectInst *SI, uint64_t SISize,
                                       const AAMDNodes &SIAAInfo,
                                       const Value *V2, uint64_t V2Size,
                                       const AAMDNodes &V2AAInfo) {
  // If the values are Selects with the same condition, we can do a more precise
  // check: just check for aliases between the values on corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias = aliasCheck(SI->getTrueValue(), SISize, SIAAInfo,
                                     SI2->getTrueValue(), V2Size, V2AAInfo);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
          aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
                     SI2->getFalseValue(), V2Size, V2AAInfo);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then we can
  // return NoAlias / MustAlias. Otherwise, we return MayAlias.
  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(), SISize, SIAAInfo);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo);
  return MergeAliasResults(ThisAlias, Alias);
}

/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, uint64_t PNSize,
                                    const AAMDNodes &PNAAInfo, const Value *V2,
                                    uint64_t V2Size,
                                    const AAMDNodes &V2AAInfo) {
  // Track phi nodes we have visited. We use this information when we determine
  // value equivalence.
  VisitedPhiBBs.insert(PN->getParent());

  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
                   MemoryLocation(V2, V2Size, V2AAInfo));
      if (PN > V2)
        std::swap(Locs.first, Locs.second);
      // Analyse the PHIs' inputs under the assumption that the PHIs are
      // NoAlias.
      // If the PHIs are May/MustAlias there must be (recursively) an input
      // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
      // there must be an operation on the PHIs within the PHIs' value cycle
      // that causes a MayAlias.
      // Pretend the phis do not alias.
      AliasResult Alias = NoAlias;
      assert(AliasCache.count(Locs) &&
             "There must exist an entry for the phi node");
      AliasResult OrigAliasResult = AliasCache[Locs];
      AliasCache[Locs] = NoAlias;

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
            aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
                       PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                       V2Size, V2AAInfo);
        Alias = MergeAliasResults(ThisAlias, Alias);
        if (Alias == MayAlias)
          break;
      }

      // Reset if speculation failed.
      if (Alias != NoAlias)
        AliasCache[Locs] = OrigAliasResult;

      return Alias;
    }

  SmallPtrSet<Value *, 4> UniqueSrc;
  SmallVector<Value *, 4> V1Srcs;
  bool isRecursive = false;
  for (Value *PV1 : PN->incoming_values()) {
    if (isa<PHINode>(PV1))
      // If any source itself is a PHI, return MayAlias conservatively to avoid
      // compile time explosion. The worst possible case is if both sides are
      // PHI nodes, in which case this is O(m x n) time where 'm' and 'n' are
      // the number of PHI sources.
      return MayAlias;

    if (EnableRecPhiAnalysis)
      if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
        // Check whether the incoming value is a GEP that advances the pointer
        // result of this PHI node (e.g. in a loop). If this is the case, we
        // would recurse and always get a MayAlias. Handle this case specially
        // below.
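        // Illustrative IR sketch of the pattern (hypothetical, for
        // exposition only):
        //   loop:
        //     %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
        //     %p.next = getelementptr i8, i8* %p, i64 4
        // Here the incoming value %p.next strides off the PHI itself.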
        if (PV1GEP->getPointerOperand() == PN && PV1GEP->getNumIndices() == 1 &&
            isa<ConstantInt>(PV1GEP->idx_begin())) {
          isRecursive = true;
          continue;
        }
      }

    if (UniqueSrc.insert(PV1).second)
      V1Srcs.push_back(PV1);
  }

  // If this PHI node is recursive, set the size of the accessed memory to
  // unknown to represent all the possible values the GEP could advance the
  // pointer to.
  if (isRecursive)
    PNSize = MemoryLocation::UnknownSize;

  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0], PNSize, PNAAInfo);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == MayAlias)
    return MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then return
  // NoAlias or MustAlias respectively; otherwise, return MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias =
        aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == MayAlias)
      break;
  }

  return Alias;
}

/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAAResult::aliasCheck(const Value *V1, uint64_t V1Size,
                                      AAMDNodes V1AAInfo, const Value *V2,
                                      uint64_t V2Size, AAMDNodes V2AAInfo) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size == 0 || V2Size == 0)
    return NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCasts();
  V2 = V2->stripPointerCasts();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers from
  // different iterations. We must therefore make sure that this is not the
  // case. The function isValueEqualInPotentialCycles ensures that this cannot
  // happen by looking at the visited phi nodes and making sure they cannot
  // reach the value.
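  // Illustrative IR sketch of the hazard (hypothetical):
  //   loop:
  //     %p = phi i8* [ %a, %entry ], [ %q, %loop ]
  //     %q = getelementptr i8, i8* %p, i64 4
  // After looking through %p, the single SSA value %q can stand for pointers
  // from different loop iterations, which need not be equal.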
  if (isValueEqualInPotentialCycles(V1, V2))
    return MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return NoAlias; // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);
  const Value *O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return NoAlias;

    // Constant pointers can't alias with non-const isIdentifiedObject objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return NoAlias;

    // Most objects can't alias null.
    if ((isa<ConstantPointerNull>(O2) && isKnownNonNull(O1)) ||
        (isa<ConstantPointerNull>(O1) && isKnownNonNull(O2)))
      return NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a memory location
    // that doesn't escape. Or it may pass a nocapture value to other functions
    // as long as they don't capture it.
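    // Illustrative IR sketch (hypothetical):
    //   %obj = alloca i32            ; local object, never captured
    //   %p   = call i32* @get_ptr()  ; escape source
    // @get_ptr cannot have obtained %obj (it never escapes), so the two
    // pointers cannot alias.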
    if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
      return NoAlias;
    if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
      return NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
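  // Illustrative sketch (hypothetical): if one access is 8 bytes wide and the
  // other pointer's underlying object is
  //   %a = alloca i32   ; 4-byte object
  // then an overlapping 8-byte access could not stay within %a, which would
  // be undefined behavior, so the accesses may be assumed not to alias.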
  if ((V1Size != MemoryLocation::UnknownSize &&
       isObjectSmallerThan(O2, V1Size, DL, TLI)) ||
      (V2Size != MemoryLocation::UnknownSize &&
       isObjectSmallerThan(O1, V2Size, DL, TLI)))
    return NoAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
               MemoryLocation(V2, V2Size, V2AAInfo));
  if (V1 > V2)
    std::swap(Locs.first, Locs.second);
  std::pair<AliasCacheTy::iterator, bool> Pair =
      AliasCache.insert(std::make_pair(Locs, MayAlias));
  if (!Pair.second)
    return Pair.first->second;

  // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if
  // the GEP can't simplify, we don't even look at the PHI cases.
  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(O1, O2);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result =
        aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo, V2, V2Size, V2AAInfo);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result =
        aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
  if (O1 == O2)
    if ((V1Size != MemoryLocation::UnknownSize &&
         isObjectSize(O1, V1Size, DL, TLI)) ||
        (V2Size != MemoryLocation::UnknownSize &&
         isObjectSize(O2, V2Size, DL, TLI)))
      return AliasCache[Locs] = PartialAlias;

  // Recurse back into the best AA results we have, potentially with refined
  // memory locations. We have already ensured that BasicAA has a MayAlias
  // cache result for these, so any recursion back into BasicAA won't loop.
  AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second);
  return AliasCache[Locs] = Result;
}

/// Check whether two Values can be considered equivalent.
///
/// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
/// they can not be part of a cycle in the value graph by looking at all
/// visited phi nodes and making sure that the phis cannot reach the value. We
/// have to do this because we are looking through phi nodes (That is, we say
/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2) {
  if (V != V2)
    return false;

  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  if (VisitedPhiBBs.empty())
    return true;

  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
    return false;

  // Make sure that the visited phis cannot reach the Value. This ensures that
  // the Values cannot come from different iterations of a potential cycle the
  // phi nodes could be involved in.
  for (auto *P : VisitedPhiBBs)
    if (isPotentiallyReachable(&P->front(), Inst, DT, LI))
      return false;

  return true;
}

/// Computes the symbolic difference between two de-composed GEPs.
///
/// Dest and Src are the variable indices from two decomposed GetElementPtr
/// instructions GEP1 and GEP2 which have common base pointers.
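///
/// A worked sketch with hypothetical values: with Dest = {%x*4, %y*1} and
/// Src = {%x*4}, the %x terms cancel and Dest becomes {%y*1}; with
/// Src = {%x*1} instead, Dest becomes {%x*3, %y*1}.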
void BasicAAResult::GetIndexDifference(
    SmallVectorImpl<VariableGEPIndex> &Dest,
    const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty())
    return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
    int64_t Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indexes.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
          Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
        continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin() + j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (Scale) {
      VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
      Dest.push_back(Entry);
    }
  }
}

bool BasicAAResult::constantOffsetHeuristic(
    const SmallVectorImpl<VariableGEPIndex> &VarIndices, uint64_t V1Size,
    uint64_t V2Size, int64_t BaseOffset, AssumptionCache *AC,
    DominatorTree *DT) {
  if (VarIndices.size() != 2 || V1Size == MemoryLocation::UnknownSize ||
      V2Size == MemoryLocation::UnknownSize)
    return false;

  const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];

  if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
      Var0.Scale != -Var1.Scale)
    return false;

  unsigned Width = Var1.V->getType()->getIntegerBitWidth();

  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. In the example above, if Var0
  // is zext(%x + 1) we should get V0 == %x and V0Offset == 1.
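  // A hypothetical decomposition in that spirit:
  //   Var0.V == zext(%x + 1)  =>  V0 == %x, V0Offset == 1
  //   Var1.V == zext(%x)      =>  V1 == %x, V1Offset == 0
  // so the two indices differ only by the constant 1.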

  APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
      V1Offset(Width, 0);
  bool NSW = true, NUW = true;
  unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
  const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
                                        V0SExtBits, DL, 0, AC, DT, NSW, NUW);
  NSW = true;
  NUW = true;
  const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
                                        V1SExtBits, DL, 0, AC, DT, NSW, NUW);

  if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
      V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // If we've been sext'ed then zext'd, the maximum difference between Var0 and
  // Var1 is possible to calculate, but we're just interested in the absolute
  // minimum difference between the two. The minimum distance may occur due to
  // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
  // the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  uint64_t MinDiffBytes = MinDiff.getZExtValue() * std::abs(Var0.Scale);

  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
  // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
  // V2Size can fit in the MinDiffBytes gap.
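  // Numeric sketch (hypothetical): with MinDiffBytes == 12, 4-byte accesses,
  // and BaseOffset == 0, each access fits inside its own 12-byte slot, so the
  // two accesses cannot overlap.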
  return V1Size + std::abs(BaseOffset) <= MinDiffBytes &&
         V2Size + std::abs(BaseOffset) <= MinDiffBytes;
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

char BasicAA::PassID;

BasicAAResult BasicAA::run(Function &F, AnalysisManager<Function> &AM) {
  return BasicAAResult(F.getParent()->getDataLayout(),
                       AM.getResult<TargetLibraryAnalysis>(F),
                       AM.getResult<AssumptionAnalysis>(F),
                       &AM.getResult<DominatorTreeAnalysis>(F),
                       AM.getCachedResult<LoopAnalysis>(F));
}

BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

char BasicAAWrapperPass::ID = 0;
void BasicAAWrapperPass::anchor() {}

INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basicaa",
                      "Basic Alias Analysis (stateless AA impl)", true, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basicaa",
                    "Basic Alias Analysis (stateless AA impl)", true, true)

FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}

bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();

  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), TLIWP.getTLI(),
                                 ACT.getAssumptionCache(F), &DTWP.getDomTree(),
                                 LIWP ? &LIWP->getLoopInfo() : nullptr));

  return false;
}

void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
}

BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
  return BasicAAResult(
      F.getParent()->getDataLayout(),
      P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
      P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
}