//===- AliasAnalysis.cpp - Generic Alias Analysis Interface Implementation -==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the generic AliasAnalysis interface, which is the
// common interface used by all clients and implementations of alias analysis.
//
// This file also implements the default version of the AliasAnalysis interface
// that is to be used when no other implementation is specified. This does some
// simple tests that detect obvious cases: two different global pointers cannot
// alias, a global cannot alias a malloc, two different mallocs cannot alias,
// etc.
//
// This alias analysis implementation really isn't very good for anything, but
// it is very fast, and makes a nice clean default implementation. Because it
// handles lots of little corner cases, other, more complex, alias analysis
// implementations may choose to rely on this pass to resolve these simple and
// easy cases.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CFLAliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/ObjCARCAliasAnalysis.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/ScopedNoAliasAA.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TypeBasedAliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include "llvm/Pass.h"
using namespace llvm;

/// Allow disabling BasicAA from the AA results. This is particularly useful
/// when testing to isolate a single AA implementation.
static cl::opt<bool> DisableBasicAA("disable-basicaa", cl::Hidden,
                                    cl::init(false));
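
// For example, one way to exercise a single AA implementation in isolation is
// to combine this flag with just that analysis; an illustrative invocation,
// assuming the tbaa and aa-eval passes are available in this build of opt:
//
//   opt -disable-basicaa -tbaa -aa-eval -disable-output test.ll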

AAResults::AAResults(AAResults &&Arg) : AAs(std::move(Arg.AAs)) {
  for (auto &AA : AAs)
    AA->setAAResults(this);
}

AAResults &AAResults::operator=(AAResults &&Arg) {
  AAs = std::move(Arg.AAs);
  for (auto &AA : AAs)
    AA->setAAResults(this);
  return *this;
}

AAResults::~AAResults() {
// FIXME: It would be nice to at least clear out the pointers back to this
// aggregation here, but we end up with non-nesting lifetimes in the legacy
// pass manager that prevent this from working. In the legacy pass manager
// we'll end up with dangling references here in some cases.
#if 0
  for (auto &AA : AAs)
    AA->setAAResults(nullptr);
#endif
}

//===----------------------------------------------------------------------===//
// Default chaining methods
//===----------------------------------------------------------------------===//

AliasResult AAResults::alias(const MemoryLocation &LocA,
                             const MemoryLocation &LocB) {
  for (const auto &AA : AAs) {
    auto Result = AA->alias(LocA, LocB);
    if (Result != MayAlias)
      return Result;
  }
  return MayAlias;
}
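
// Each of the chaining methods below consults every registered AA result in
// turn and keeps the most precise combined answer: alias() returns the first
// result that is better than MayAlias, while the mod/ref queries intersect the
// individual answers. A minimal client sketch, assuming an AAResults object AA
// and two MemoryLocations LocA and LocB are already in scope:
//
//   if (AA.alias(LocA, LocB) == NoAlias) {
//     // The two accesses never overlap and can typically be reordered.
//   }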

bool AAResults::pointsToConstantMemory(const MemoryLocation &Loc,
                                       bool OrLocal) {
  for (const auto &AA : AAs)
    if (AA->pointsToConstantMemory(Loc, OrLocal))
      return true;

  return false;
}

ModRefInfo AAResults::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
  ModRefInfo Result = MRI_ModRef;

  for (const auto &AA : AAs) {
    Result = ModRefInfo(Result & AA->getArgModRefInfo(CS, ArgIdx));

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result == MRI_NoModRef)
      return Result;
  }

  return Result;
}
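
// ModRefInfo forms a small lattice with MRI_Ref and MRI_Mod as independent
// bits, MRI_ModRef as both, and MRI_NoModRef as neither, so the bitwise '&'
// above computes the meet of the individual answers. For example, if one AA
// reports MRI_Mod and another reports MRI_Ref, the combined result is
// MRI_NoModRef; if one reports MRI_ModRef and another MRI_Mod, the combined
// result is MRI_Mod.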

ModRefInfo AAResults::getModRefInfo(Instruction *I, ImmutableCallSite Call) {
  // We may have two calls
  if (auto CS = ImmutableCallSite(I)) {
    // Check if the two calls modify the same memory
    return getModRefInfo(Call, CS);
  } else {
    // Otherwise, check if the call modifies or references the
    // location this memory access defines. The best we can say
    // is that if the call references what this instruction
    // defines, it must be clobbered by this location.
    const MemoryLocation DefLoc = MemoryLocation::get(I);
    if (getModRefInfo(Call, DefLoc) != MRI_NoModRef)
      return MRI_ModRef;
  }
  return MRI_NoModRef;
}

ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
                                    const MemoryLocation &Loc) {
  ModRefInfo Result = MRI_ModRef;

  for (const auto &AA : AAs) {
    Result = ModRefInfo(Result & AA->getModRefInfo(CS, Loc));

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result == MRI_NoModRef)
      return Result;
  }

  return Result;
}

ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
                                    ImmutableCallSite CS2) {
  ModRefInfo Result = MRI_ModRef;

  for (const auto &AA : AAs) {
    Result = ModRefInfo(Result & AA->getModRefInfo(CS1, CS2));

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result == MRI_NoModRef)
      return Result;
  }

  return Result;
}

FunctionModRefBehavior AAResults::getModRefBehavior(ImmutableCallSite CS) {
  FunctionModRefBehavior Result = FMRB_UnknownModRefBehavior;

  for (const auto &AA : AAs) {
    Result = FunctionModRefBehavior(Result & AA->getModRefBehavior(CS));

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result == FMRB_DoesNotAccessMemory)
      return Result;
  }

  return Result;
}

FunctionModRefBehavior AAResults::getModRefBehavior(const Function *F) {
  FunctionModRefBehavior Result = FMRB_UnknownModRefBehavior;

  for (const auto &AA : AAs) {
    Result = FunctionModRefBehavior(Result & AA->getModRefBehavior(F));

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result == FMRB_DoesNotAccessMemory)
      return Result;
  }

  return Result;
}

//===----------------------------------------------------------------------===//
// Helper method implementation
//===----------------------------------------------------------------------===//

ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
                                    const MemoryLocation &Loc) {
  // Be conservative in the face of volatile/atomic.
  if (!L->isUnordered())
    return MRI_ModRef;

  // If the load address doesn't alias the given address, it doesn't read
  // or write the specified memory.
  if (Loc.Ptr && !alias(MemoryLocation::get(L), Loc))
    return MRI_NoModRef;

  // Otherwise, a load just reads.
  return MRI_Ref;
}

ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
                                    const MemoryLocation &Loc) {
  // Be conservative in the face of volatile/atomic.
  if (!S->isUnordered())
    return MRI_ModRef;

  if (Loc.Ptr) {
    // If the store address cannot alias the pointer in question, then the
    // specified memory cannot be modified by the store.
    if (!alias(MemoryLocation::get(S), Loc))
      return MRI_NoModRef;

    // If the pointer is a pointer to constant memory, then it could not have
    // been modified by this store.
    if (pointsToConstantMemory(Loc))
      return MRI_NoModRef;
  }

  // Otherwise, a store just writes.
  return MRI_Mod;
}

ModRefInfo AAResults::getModRefInfo(const VAArgInst *V,
                                    const MemoryLocation &Loc) {
  if (Loc.Ptr) {
    // If the va_arg address cannot alias the pointer in question, then the
    // specified memory cannot be accessed by the va_arg.
    if (!alias(MemoryLocation::get(V), Loc))
      return MRI_NoModRef;

    // If the pointer is a pointer to constant memory, then it could not have
    // been modified by this va_arg.
    if (pointsToConstantMemory(Loc))
      return MRI_NoModRef;
  }

  // Otherwise, a va_arg reads and writes.
  return MRI_ModRef;
}

ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad,
                                    const MemoryLocation &Loc) {
  if (Loc.Ptr) {
    // If the pointer is a pointer to constant memory,
    // then it could not have been modified by this catchpad.
    if (pointsToConstantMemory(Loc))
      return MRI_NoModRef;
  }

  // Otherwise, a catchpad reads and writes.
  return MRI_ModRef;
}

ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
                                    const MemoryLocation &Loc) {
  if (Loc.Ptr) {
    // If the pointer is a pointer to constant memory,
    // then it could not have been modified by this catchret.
    if (pointsToConstantMemory(Loc))
      return MRI_NoModRef;
  }

  // Otherwise, a catchret reads and writes.
  return MRI_ModRef;
}

ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
                                    const MemoryLocation &Loc) {
  // Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
  if (CX->getSuccessOrdering() > Monotonic)
    return MRI_ModRef;

  // If the cmpxchg address does not alias the location, it does not access it.
  if (Loc.Ptr && !alias(MemoryLocation::get(CX), Loc))
    return MRI_NoModRef;

  return MRI_ModRef;
}

ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
                                    const MemoryLocation &Loc) {
  // Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
  if (RMW->getOrdering() > Monotonic)
    return MRI_ModRef;

  // If the atomicrmw address does not alias the location, it does not access it.
  if (Loc.Ptr && !alias(MemoryLocation::get(RMW), Loc))
    return MRI_NoModRef;

  return MRI_ModRef;
}

/// \brief Return information about whether a particular call site modifies
/// or reads the specified memory location \p MemLoc before instruction \p I
/// in a BasicBlock. An ordered basic block \p OBB can be used to speed up
/// instruction-ordering queries inside the BasicBlock containing \p I.
/// FIXME: this is really just shoring-up a deficiency in alias analysis.
/// BasicAA isn't willing to spend linear time determining whether an alloca
/// was captured before or after this particular call, while we are. However,
/// with a smarter AA in place, this test is just wasting compile time.
ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
                                         const MemoryLocation &MemLoc,
                                         DominatorTree *DT,
                                         OrderedBasicBlock *OBB) {
  if (!DT)
    return MRI_ModRef;

  const Value *Object =
      GetUnderlyingObject(MemLoc.Ptr, I->getModule()->getDataLayout());
  if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
      isa<Constant>(Object))
    return MRI_ModRef;

  ImmutableCallSite CS(I);
  if (!CS.getInstruction() || CS.getInstruction() == Object)
    return MRI_ModRef;

  if (llvm::PointerMayBeCapturedBefore(Object, /* ReturnCaptures */ true,
                                       /* StoreCaptures */ true, I, DT,
                                       /* include Object */ true,
                                       /* OrderedBasicBlock */ OBB))
    return MRI_ModRef;

  unsigned ArgNo = 0;
  ModRefInfo R = MRI_NoModRef;
  for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
       CI != CE; ++CI, ++ArgNo) {
    // Only look at the no-capture or byval pointer arguments. If this
    // pointer were passed to arguments that were neither of these, then it
    // couldn't be no-capture.
    if (!(*CI)->getType()->isPointerTy() ||
        (!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo)))
      continue;

    // If this is a no-capture pointer argument, see if we can tell that it
    // is impossible to alias the pointer we're checking. If not, we have to
    // assume that the call could touch the pointer, even though it doesn't
    // escape.
    if (isNoAlias(MemoryLocation(*CI), MemoryLocation(Object)))
      continue;
    if (CS.doesNotAccessMemory(ArgNo))
      continue;
    if (CS.onlyReadsMemory(ArgNo)) {
      R = MRI_Ref;
      continue;
    }
    return MRI_ModRef;
  }
  return R;
}
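
// A typical client query, sketched under the assumption that AA is an
// AAResults object, CallI is the call instruction of interest, LI is a nearby
// load, and DT/OBB are the dominator tree and ordered-block helpers for the
// enclosing function:
//
//   if (AA.callCapturesBefore(CallI, MemoryLocation::get(LI), DT, OBB) ==
//       MRI_NoModRef) {
//     // The call cannot read or write the loaded location at this point.
//   }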

/// canBasicBlockModify - Return true if it is possible for execution of the
/// specified basic block to modify the location Loc.
///
bool AAResults::canBasicBlockModify(const BasicBlock &BB,
                                    const MemoryLocation &Loc) {
  return canInstructionRangeModRef(BB.front(), BB.back(), Loc, MRI_Mod);
}

/// canInstructionRangeModRef - Return true if it is possible for the
/// execution of the specified instructions to mod/ref (according to the
/// mode) the location Loc. The instructions to consider are all
/// of the instructions in the range of [I1,I2] INCLUSIVE.
/// I1 and I2 must be in the same basic block.
bool AAResults::canInstructionRangeModRef(const Instruction &I1,
                                          const Instruction &I2,
                                          const MemoryLocation &Loc,
                                          const ModRefInfo Mode) {
  assert(I1.getParent() == I2.getParent() &&
         "Instructions not in same basic block!");
  BasicBlock::const_iterator I = I1.getIterator();
  BasicBlock::const_iterator E = I2.getIterator();
  ++E; // Convert from inclusive to exclusive range.

  for (; I != E; ++I) // Check every instruction in range
    if (getModRefInfo(&*I, Loc) & Mode)
      return true;
  return false;
}
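
// For example, a pass deciding whether a load's value can survive a block
// might check, as a sketch (assuming a BasicBlock BB and a MemoryLocation Loc
// are in scope):
//
//   if (!AA.canBasicBlockModify(BB, Loc)) {
//     // Nothing in BB can write Loc, so the loaded value is unchanged by BB.
//   }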

// Provide a definition for the root virtual destructor.
AAResults::Concept::~Concept() {}

namespace {
/// A wrapper pass for external alias analyses. This just squirrels away the
/// callback used to run any analyses and register their results.
struct ExternalAAWrapperPass : ImmutablePass {
  typedef std::function<void(Pass &, Function &, AAResults &)> CallbackT;

  CallbackT CB;

  static char ID;

  ExternalAAWrapperPass() : ImmutablePass(ID) {
    initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry());
  }
  explicit ExternalAAWrapperPass(CallbackT CB)
      : ImmutablePass(ID), CB(std::move(CB)) {
    initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
  }
};
}

char ExternalAAWrapperPass::ID = 0;
INITIALIZE_PASS(ExternalAAWrapperPass, "external-aa", "External Alias Analysis",
                false, true)

ImmutablePass *
llvm::createExternalAAWrapperPass(ExternalAAWrapperPass::CallbackT Callback) {
  return new ExternalAAWrapperPass(std::move(Callback));
}
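
// An out-of-tree or target-specific alias analysis can hook into the
// aggregation by scheduling this pass with a callback. A minimal sketch,
// assuming a hypothetical MyAAWrapperPass whose getResult() returns an
// AAResults-compatible result object:
//
//   PM.add(createExternalAAWrapperPass(
//       [](Pass &P, Function &F, AAResults &AAR) {
//         if (auto *WrapperPass = P.getAnalysisIfAvailable<MyAAWrapperPass>())
//           AAR.addAAResult(WrapperPass->getResult());
//       }));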

AAResultsWrapperPass::AAResultsWrapperPass() : FunctionPass(ID) {
  initializeAAResultsWrapperPassPass(*PassRegistry::getPassRegistry());
}

char AAResultsWrapperPass::ID = 0;

INITIALIZE_PASS_BEGIN(AAResultsWrapperPass, "aa",
                      "Function Alias Analysis Results", false, true)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(CFLAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ExternalAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ObjCARCAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScopedNoAliasAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TypeBasedAAWrapperPass)
INITIALIZE_PASS_END(AAResultsWrapperPass, "aa",
                    "Function Alias Analysis Results", false, true)

FunctionPass *llvm::createAAResultsWrapperPass() {
  return new AAResultsWrapperPass();
}

/// Run the wrapper pass to rebuild an aggregation over known AA passes.
///
/// This is the legacy pass manager's interface to the new-style AA results
/// aggregation object. Because this is somewhat shoe-horned into the legacy
/// pass manager, we hard code all the specific alias analyses available into
/// it. While the particular set enabled is configured via commandline flags,
/// adding a new alias analysis to LLVM will require adding support for it to
/// this list.
bool AAResultsWrapperPass::runOnFunction(Function &F) {
  // NB! This *must* be reset before adding new AA results to the new
  // AAResults object because in the legacy pass manager, each instance
  // of these will refer to the *same* immutable analyses, registering and
  // unregistering themselves with them. We need to carefully tear down the
  // previous object first, in this case replacing it with an empty one, before
  // registering new results.
  AAR.reset(new AAResults());

  // BasicAA is always available for function analyses. Also, we add it first
  // so that it can trump TBAA results when it proves MustAlias.
  // FIXME: TBAA should have an explicit mode to support this and then we
  // should reconsider the ordering here.
  if (!DisableBasicAA)
    AAR->addAAResult(getAnalysis<BasicAAWrapperPass>().getResult());

  // Populate the results with the currently available AAs.
  if (auto *WrapperPass = getAnalysisIfAvailable<ScopedNoAliasAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<TypeBasedAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass =
          getAnalysisIfAvailable<objcarc::ObjCARCAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<GlobalsAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<SCEVAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<CFLAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());

  // If available, run an external AA providing callback over the results as
  // well.
  if (auto *WrapperPass = getAnalysisIfAvailable<ExternalAAWrapperPass>())
    if (WrapperPass->CB)
      WrapperPass->CB(*this, F, *AAR);

  // Analyses don't mutate the IR, so return false.
  return false;
}
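
// A downstream function pass can then consume the aggregation; a sketch,
// assuming the pass declares AU.addRequired<AAResultsWrapperPass>() in its
// getAnalysisUsage:
//
//   AAResults &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();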

void AAResultsWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<BasicAAWrapperPass>();

  // We also need to mark all the alias analysis passes we will potentially
  // probe in runOnFunction as used here to ensure the legacy pass manager
  // preserves them. This hard coding of lists of alias analyses is specific to
  // the legacy pass manager.
  AU.addUsedIfAvailable<ScopedNoAliasAAWrapperPass>();
  AU.addUsedIfAvailable<TypeBasedAAWrapperPass>();
  AU.addUsedIfAvailable<objcarc::ObjCARCAAWrapperPass>();
  AU.addUsedIfAvailable<GlobalsAAWrapperPass>();
  AU.addUsedIfAvailable<SCEVAAWrapperPass>();
  AU.addUsedIfAvailable<CFLAAWrapperPass>();
}

AAResults llvm::createLegacyPMAAResults(Pass &P, Function &F,
                                        BasicAAResult &BAR) {
  AAResults AAR;

  // Add in our explicitly constructed BasicAA results.
  if (!DisableBasicAA)
    AAR.addAAResult(BAR);

  // Populate the results with the other currently available AAs.
  if (auto *WrapperPass =
          P.getAnalysisIfAvailable<ScopedNoAliasAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<TypeBasedAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass =
          P.getAnalysisIfAvailable<objcarc::ObjCARCAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<GlobalsAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<SCEVAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<CFLAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());

  return AAR;
}

/// isNoAliasCall - Return true if this pointer is returned by a noalias
/// function.
bool llvm::isNoAliasCall(const Value *V) {
  if (auto CS = ImmutableCallSite(V))
    return CS.paramHasAttr(0, Attribute::NoAlias);
  return false;
}

/// isNoAliasArgument - Return true if this is an argument with the noalias
/// attribute.
bool llvm::isNoAliasArgument(const Value *V) {
  if (const Argument *A = dyn_cast<Argument>(V))
    return A->hasNoAliasAttr();
  return false;
}

/// isIdentifiedObject - Return true if this pointer refers to a distinct and
/// identifiable object. This returns true for:
///    Global Variables and Functions (but not Global Aliases)
///    Allocas and Mallocs
///    ByVal and NoAlias Arguments
///    NoAlias returns
///
bool llvm::isIdentifiedObject(const Value *V) {
  if (isa<AllocaInst>(V))
    return true;
  if (isa<GlobalValue>(V) && !isa<GlobalAlias>(V))
    return true;
  if (isNoAliasCall(V))
    return true;
  if (const Argument *A = dyn_cast<Argument>(V))
    return A->hasNoAliasAttr() || A->hasByValAttr();
  return false;
}

/// isIdentifiedFunctionLocal - Return true if V is unambiguously identified
/// at the function-level. Different IdentifiedFunctionLocals can't alias.
/// Further, an IdentifiedFunctionLocal cannot alias with any function
/// arguments other than itself, which is not necessarily true for
/// IdentifiedObjects.
bool llvm::isIdentifiedFunctionLocal(const Value *V) {
  return isa<AllocaInst>(V) || isNoAliasCall(V) || isNoAliasArgument(V);
}