//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"

using namespace llvm;

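/// Test whether \p Base + \p Offset is known to be aligned to at least
/// \p Align bytes, i.e. the base pointer is at least that aligned and the
/// offset is a multiple of the alignment.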
static bool isAligned(const Value *Base, const APInt &Offset, unsigned Align,
                      const DataLayout &DL) {
  APInt BaseAlign(Offset.getBitWidth(), Base->getPointerAlignment(DL));

  if (!BaseAlign) {
    Type *Ty = Base->getType()->getPointerElementType();
    if (!Ty->isSized())
      return false;
    BaseAlign = DL.getABITypeAlignment(Ty);
  }

  APInt Alignment(Offset.getBitWidth(), Align);

  assert(Alignment.isPowerOf2() && "must be a power of 2!");
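  // Since Alignment is a power of two, Offset is a multiple of Alignment
  // exactly when its low bits, i.e. Offset & (Alignment - 1), are all zero.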
  return BaseAlign.uge(Alignment) && !(Offset & (Alignment - 1));
}

static bool isAligned(const Value *Base, unsigned Align, const DataLayout &DL) {
  Type *Ty = Base->getType();
  assert(Ty->isSized() && "must be sized");
  APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
  return isAligned(Base, Offset, Align, DL);
}

/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
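///
/// For example (an illustrative sketch), given
/// \code
///   define void @f(i32* dereferenceable(8) align 4 %p) {
///     %q = getelementptr i32, i32* %p, i64 1
///     %v = load i32, i32* %q
///     ret void
///   }
/// \endcode
/// the load from %q is dereferenceable and aligned: %p is known
/// dereferenceable for 8 bytes and 4-byte aligned, so the 4-byte load at
/// offset 4 cannot trap.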
static bool isDereferenceableAndAlignedPointer(
    const Value *V, unsigned Align, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, const DominatorTree *DT,
    SmallPtrSetImpl<const Value *> &Visited) {
  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V))
    return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, Size,
                                              DL, CtxI, DT, Visited);

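  // getPointerDereferenceableBytes reports, via CheckForNonNull, whether its
  // answer only holds for a non-null pointer (e.g. it came from a
  // dereferenceable_or_null attribute), in which case we must additionally
  // prove the pointer non-null at the context instruction.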
  bool CheckForNonNull = false;
  APInt KnownDerefBytes(Size.getBitWidth(),
                        V->getPointerDereferenceableBytes(DL, CheckForNonNull));
  if (KnownDerefBytes.getBoolValue()) {
    if (KnownDerefBytes.uge(Size))
      if (!CheckForNonNull || isKnownNonNullAt(V, CtxI, DT))
        return isAligned(V, Align, DL);
  }

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getPointerTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Align)).isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes.  If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align,
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.

    return Visited.insert(Base).second &&
           isDereferenceableAndAlignedPointer(Base, Align, Offset + Size, DL,
                                              CtxI, DT, Visited);
  }

  // For gc.relocate, look through relocations.
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(
        RelocateInst->getDerivedPtr(), Align, Size, DL, CtxI, DT, Visited);

  if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, Size,
                                              DL, CtxI, DT, Visited);

  if (auto CS = ImmutableCallSite(V))
    if (const Value *RV = CS.getReturnedArgOperand())
      return isDereferenceableAndAlignedPointer(RV, Align, Size, DL, CtxI, DT,
                                                Visited);

  // If we don't know, assume the worst.
  return false;
}

bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              const DominatorTree *DT) {
  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.
  Type *VTy = V->getType();
  Type *Ty = VTy->getPointerElementType();

  // Bail out on unsized types: we cannot compute a size or an ABI alignment
  // for them.
  if (!Ty->isSized())
    return false;

  // Require ABI alignment for loads without alignment specification.
  if (Align == 0)
    Align = DL.getABITypeAlignment(Ty);

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(
      V, Align, APInt(DL.getTypeSizeInBits(VTy), DL.getTypeStoreSize(Ty)), DL,
      CtxI, DT, Visited);
}
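/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store, with no alignment requirement beyond a single byte.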
bool llvm::isDereferenceablePointer(const Value *V, const DataLayout &DL,
                                    const Instruction *CtxI,
                                    const DominatorTree *DT) {
  return isDereferenceableAndAlignedPointer(V, 1, DL, CtxI, DT);
}

/// \brief Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
/// \code
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
/// \endcode
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

/// \brief Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
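///
/// For example (an illustrative sketch), in
/// \code
///   store i32 0, i32* %p
///   %v = load i32, i32* %p
/// \endcode
/// the load cannot trap: if %p were not a valid address, the store
/// immediately before it would already have trapped.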
bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       const DominatorTree *DT) {
  // Zero alignment means that the load has the ABI alignment for the target.
  if (Align == 0)
    Align = DL.getABITypeAlignment(V->getType()->getPointerElementType());
  assert(isPowerOf2_32(Align));

  // If DT is not specified we can't make a context-sensitive query.
  const Instruction *CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Align, DL, CtxI, DT))
    return true;

  int64_t ByteOffset = 0;
  Value *Base = GetPointerBaseWithConstantOffset(V, ByteOffset, DL);

  if (ByteOffset < 0) // out of bounds
    return false;

  Type *BaseType = nullptr;
  unsigned BaseAlign = 0;
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
    // An alloca is safe to load from as long as it is suitably aligned.
    BaseType = AI->getAllocatedType();
    BaseAlign = AI->getAlignment();
  } else if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
    // Global variables are not necessarily safe to load from if they are
    // interposed arbitrarily. Their size may change or they may be weak and
    // require a test to determine if they were in fact provided.
    if (!GV->isInterposable()) {
      BaseType = GV->getType()->getElementType();
      BaseAlign = GV->getAlignment();
    }
  }

  PointerType *AddrTy = cast<PointerType>(V->getType());
  uint64_t LoadSize = DL.getTypeStoreSize(AddrTy->getElementType());

  // If we found a base allocated type from either an alloca or global variable,
  // try to see if we are definitively within the allocated region. We need to
  // know the size of the base type and the loaded type to do anything in this
  // case.
  if (BaseType && BaseType->isSized()) {
    if (BaseAlign == 0)
      BaseAlign = DL.getPrefTypeAlignment(BaseType);

    if (Align <= BaseAlign) {
      // Check if the load is within the bounds of the underlying object.
      if (ByteOffset + LoadSize <= DL.getTypeAllocSize(BaseType) &&
          ((ByteOffset % Align) == 0))
        return true;
    }
  }

  if (!ScanFrom)
    return false;

  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to. If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();

  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e. which might
    // do a free), the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<DbgInfoIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    unsigned AccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      AccessedPtr = LI->getPointerOperand();
      AccessedAlign = LI->getAlignment();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      AccessedPtr = SI->getPointerOperand();
      AccessedAlign = SI->getAlignment();
    } else
      continue;

    Type *AccessedTy = AccessedPtr->getType()->getPointerElementType();
    if (AccessedAlign == 0)
      AccessedAlign = DL.getABITypeAlignment(AccessedTy);
    if (AccessedAlign < Align)
      continue;

    // Handle trivial cases.
    if (AccessedPtr == V)
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;
  }
  return false;
}

/// DefMaxInstsToScan - the default number of maximum instructions
/// to scan in the block, used by FindAvailableLoadedValue().
/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
/// threading in part by eliminating partially redundant loads.
/// At that point, the value of MaxInstsToScan was already set to '6'
/// without documented explanation.
cl::opt<unsigned>
llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
  cl::desc("Use this to specify the default maximum number of instructions "
           "to scan backward from a given instruction, when searching for "
           "available loaded value"));

Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      AliasAnalysis *AA, AAMDNodes *AATags,
                                      bool *IsLoadCSE) {
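  // A scan limit of zero means "no limit": scan all the way back to the start
  // of the block if necessary.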
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;

  Value *Ptr = Load->getPointerOperand();
  Type *AccessTy = Load->getType();

  // We can never remove a volatile load.
  if (Load->isVolatile())
    return nullptr;

  // Anything stronger than unordered is currently unimplemented.
  if (!Load->isUnordered())
    return nullptr;

  const DataLayout &DL = ScanBB->getModule()->getDataLayout();

  // Try to get the store size for the type.
  uint64_t AccessSize = DL.getTypeStoreSize(AccessTy);

  Value *StrippedPtr = Ptr->stripPointerCasts();

  while (ScanFrom != ScanBB->begin()) {
    // We must ignore debug info directives when counting (otherwise they
    // would affect codegen).
    Instruction *Inst = &*--ScanFrom;
    if (isa<DbgInfoIntrinsic>(Inst))
      continue;

    // Restore ScanFrom to expected value in case next test succeeds
    ScanFrom++;

    // Don't scan huge blocks.
    if (MaxInstsToScan-- == 0)
      return nullptr;

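    // We are going to examine this instruction after all, so step the
    // iterator back onto it.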
    --ScanFrom;
    // If this is a load of Ptr, the loaded value is available.
    // (This is true even if the load is volatile or atomic, although
    // those cases are unlikely.)
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
      if (AreEquivalentAddressValues(
              LI->getPointerOperand()->stripPointerCasts(), StrippedPtr) &&
          CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {

        // We can value forward from an atomic to a non-atomic, but not the
        // other way around.
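        // (Both operands are bools, so the comparison below is true exactly
        // when LI is non-atomic and the load being replaced is atomic.)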
        if (LI->isAtomic() < Load->isAtomic())
          return nullptr;

        if (AATags)
          LI->getAAMetadata(*AATags);
        if (IsLoadCSE)
          *IsLoadCSE = true;
        return LI;
      }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
      // If this is a store through Ptr, the value is available!
      // (This is true even if the store is volatile or atomic, although
      // those cases are unlikely.)
      if (AreEquivalentAddressValues(StorePtr, StrippedPtr) &&
          CastInst::isBitOrNoopPointerCastable(SI->getValueOperand()->getType(),
                                               AccessTy, DL)) {

        // We can value forward from an atomic to a non-atomic, but not the
        // other way around.
        if (SI->isAtomic() < Load->isAtomic())
          return nullptr;

        if (AATags)
          SI->getAAMetadata(*AATags);
        return SI->getOperand(0);
      }

      // If both StrippedPtr and StorePtr reach all the way to an alloca or
      // global and they are different, ignore the store. This is a trivial form
      // of alias analysis that is important for reg2mem'd code.
      if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
          (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
          StrippedPtr != StorePtr)
        continue;

      // If we have alias analysis and it says the store won't modify the loaded
      // value, ignore the store.
      if (AA && (AA->getModRefInfo(SI, StrippedPtr, AccessSize) & MRI_Mod) == 0)
        continue;

      // Otherwise the store may or may not alias the pointer; bail out.
      ++ScanFrom;
      return nullptr;
    }

    // If this is some other instruction that may clobber Ptr, bail out.
    if (Inst->mayWriteToMemory()) {
      // If alias analysis claims that it really won't modify the load,
      // ignore it.
      if (AA &&
          (AA->getModRefInfo(Inst, StrippedPtr, AccessSize) & MRI_Mod) == 0)
        continue;

      // May modify the pointer; bail out.
      ++ScanFrom;
      return nullptr;
    }
  }

  // Got to the start of the block; we didn't find the value, but we are done
  // scanning this block.
  return nullptr;
}