1 //===- InferAddressSpace.cpp - --------------------------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // CUDA C/C++ includes memory space designation as variable type qualifiers (such
11 // as __global__ and __shared__). Knowing the space of a memory access allows
12 // CUDA compilers to emit faster PTX loads and stores. For example, a load from
13 // shared memory can be translated to `ld.shared` which is roughly 10% faster
14 // than a generic `ld` on an NVIDIA Tesla K40c.
15 //
16 // Unfortunately, type qualifiers only apply to variable declarations, so CUDA
17 // compilers must infer the memory space of an address expression from
18 // type-qualified variables.
19 //
20 // LLVM IR uses non-zero, so-called specific, address spaces to represent memory
21 // spaces (e.g. addrspace(3) means shared memory). The Clang frontend
22 // places only type-qualified variables in specific address spaces, and then
23 // conservatively `addrspacecast`s each type-qualified variable to addrspace(0)
24 // (the so-called generic address space) for other instructions to use.
25 //
26 // For example, Clang translates the following CUDA code
27 // __shared__ float a[10];
28 // float v = a[i];
29 // to
30 // %0 = addrspacecast [10 x float] addrspace(3)* @a to [10 x float]*
31 // %1 = gep [10 x float], [10 x float]* %0, i64 0, i64 %i
32 // %v = load float, float* %1 ; emits ld.f32
33 // @a is in addrspace(3) since it's type-qualified, but its use from %1 is
34 // redirected to %0 (the generic version of @a).
35 //
36 // The optimization implemented in this file propagates specific address spaces
37 // from type-qualified variable declarations to their users. For example, it
38 // optimizes the above IR to
39 // %1 = gep [10 x float] addrspace(3)* @a, i64 0, i64 %i
40 // %v = load float addrspace(3)* %1 ; emits ld.shared.f32
41 // propagating the addrspace(3) from @a to %1. As a result, the NVPTX
42 // codegen is able to emit ld.shared.f32 for %v.
43 //
44 // Address space inference works in two steps. First, it uses a data-flow
45 // analysis to prove that as many generic pointers as possible point to only one
46 // specific address space. In the above example, it can prove that %1 only
47 // points to addrspace(3). This algorithm was published in
48 // CUDA: Compiling and optimizing for a GPU platform
49 // Chakrabarti, Grover, Aarts, Kong, Kudlur, Lin, Marathe, Murphy, Wang
50 // ICCS 2012
51 //
52 // Then, address space inference replaces all refinable generic pointers with
53 // equivalent specific pointers.
54 //
55 // The major challenge of implementing this optimization is handling PHINodes,
56 // which may create loops in the data flow graph. This brings two complications.
57 //
58 // First, the data flow analysis in Step 1 needs to be circular. For example,
59 // %generic.input = addrspacecast float addrspace(3)* %input to float*
60 // loop:
61 // %y = phi [ %generic.input, %y2 ]
62 // %y2 = getelementptr %y, 1
63 // %v = load %y2
64 // br ..., label %loop, ...
65 // proving %y specific requires proving both %generic.input and %y2 specific,
66 // but proving %y2 specific circles back to %y. To address this complication,
67 // the data flow analysis operates on a lattice:
68 // uninitialized > specific address spaces > generic.
69 // All address expressions (our implementation considers phi, select, bitcast,
70 // addrspacecast, and getelementptr) start with the uninitialized address space.
71 // The monotone transfer function moves the address space of a pointer down a
72 // lattice path from uninitialized to specific and then to generic. A join
73 // operation of two different specific address spaces pushes the expression down
74 // to the generic address space. The analysis completes once it reaches a fixed
75 // point.
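//
// As a small illustration of the join (the precise rule is joinAddressSpaces()
// below):
//   join(uninitialized, addrspace(3)) = addrspace(3)
//   join(addrspace(3),  addrspace(3)) = addrspace(3)
//   join(addrspace(3),  addrspace(1)) = generic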
76 //
77 // Second, IR rewriting in Step 2 also needs to be circular. For example,
78 // converting %y to addrspace(3) requires the compiler to know the converted
79 // %y2, but converting %y2 needs the converted %y. To address this complication,
80 // we break these cycles using "undef" placeholders. When converting an
81 // instruction `I` to a new address space, if its operand `Op` is not converted
82 // yet, we let `I` temporarily use `undef` and fix all the uses of undef later.
83 // For instance, our algorithm first converts %y to
84 // %y' = phi float addrspace(3)* [ %input, undef ]
85 // Then, it converts %y2 to
86 // %y2' = getelementptr %y', 1
87 // Finally, it fixes the undef in %y' so that
88 // %y' = phi float addrspace(3)* [ %input, %y2' ]
89 //
90 //===----------------------------------------------------------------------===//
91
92 #include "llvm/ADT/ArrayRef.h"
93 #include "llvm/ADT/DenseMap.h"
94 #include "llvm/ADT/DenseSet.h"
95 #include "llvm/ADT/None.h"
96 #include "llvm/ADT/Optional.h"
97 #include "llvm/ADT/SetVector.h"
98 #include "llvm/ADT/SmallVector.h"
99 #include "llvm/Analysis/TargetTransformInfo.h"
100 #include "llvm/Transforms/Utils/Local.h"
101 #include "llvm/IR/BasicBlock.h"
102 #include "llvm/IR/Constant.h"
103 #include "llvm/IR/Constants.h"
104 #include "llvm/IR/Function.h"
105 #include "llvm/IR/IRBuilder.h"
106 #include "llvm/IR/InstIterator.h"
107 #include "llvm/IR/Instruction.h"
108 #include "llvm/IR/Instructions.h"
109 #include "llvm/IR/IntrinsicInst.h"
110 #include "llvm/IR/Intrinsics.h"
111 #include "llvm/IR/LLVMContext.h"
112 #include "llvm/IR/Operator.h"
113 #include "llvm/IR/Type.h"
114 #include "llvm/IR/Use.h"
115 #include "llvm/IR/User.h"
116 #include "llvm/IR/Value.h"
117 #include "llvm/IR/ValueHandle.h"
118 #include "llvm/Pass.h"
119 #include "llvm/Support/Casting.h"
120 #include "llvm/Support/Compiler.h"
121 #include "llvm/Support/Debug.h"
122 #include "llvm/Support/ErrorHandling.h"
123 #include "llvm/Support/raw_ostream.h"
124 #include "llvm/Transforms/Scalar.h"
125 #include "llvm/Transforms/Utils/ValueMapper.h"
126 #include <cassert>
127 #include <iterator>
128 #include <limits>
129 #include <utility>
130 #include <vector>
131
132 #define DEBUG_TYPE "infer-address-spaces"
133
134 using namespace llvm;
135
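// The "uninitialized" value is the top of the lattice described in the file
// header; it is a sentinel that does not correspond to any real address space.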
136 static const unsigned UninitializedAddressSpace =
137 std::numeric_limits<unsigned>::max();
138
139 namespace {
140
141 using ValueToAddrSpaceMapTy = DenseMap<const Value *, unsigned>;
142
143 /// InferAddressSpaces
144 class InferAddressSpaces : public FunctionPass {
145 /// The target-specific flat address space; uses of pointers in this address
146 /// space should be replaced with specific address spaces where possible.
147 unsigned FlatAddrSpace;
148
149 public:
150 static char ID;
151
152 InferAddressSpaces() : FunctionPass(ID) {}
153
154 void getAnalysisUsage(AnalysisUsage &AU) const override {
155 AU.setPreservesCFG();
156 AU.addRequired<TargetTransformInfoWrapperPass>();
157 }
158
159 bool runOnFunction(Function &F) override;
160
161 private:
162 // Returns the new address space of V if updated; otherwise, returns None.
163 Optional<unsigned>
164 updateAddressSpace(const Value &V,
165 const ValueToAddrSpaceMapTy &InferredAddrSpace) const;
166
167 // Tries to infer the specific address space of each address expression in
168 // Postorder.
169 void inferAddressSpaces(ArrayRef<WeakTrackingVH> Postorder,
170 ValueToAddrSpaceMapTy *InferredAddrSpace) const;
171
172 bool isSafeToCastConstAddrSpace(Constant *C, unsigned NewAS) const;
173
174 // Changes the flat address expressions in function F to point to specific
175 // address spaces if InferredAddrSpace says so. Postorder is the postorder of
176 // all flat expressions in the use-def graph of function F.
177 bool rewriteWithNewAddressSpaces(
178 const TargetTransformInfo &TTI, ArrayRef<WeakTrackingVH> Postorder,
179 const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) const;
180
181 void appendsFlatAddressExpressionToPostorderStack(
182 Value *V, std::vector<std::pair<Value *, bool>> &PostorderStack,
183 DenseSet<Value *> &Visited) const;
184
185 bool rewriteIntrinsicOperands(IntrinsicInst *II,
186 Value *OldV, Value *NewV) const;
187 void collectRewritableIntrinsicOperands(
188 IntrinsicInst *II,
189 std::vector<std::pair<Value *, bool>> &PostorderStack,
190 DenseSet<Value *> &Visited) const;
191
192 std::vector<WeakTrackingVH> collectFlatAddressExpressions(Function &F) const;
193
194 Value *cloneValueWithNewAddressSpace(
195 Value *V, unsigned NewAddrSpace,
196 const ValueToValueMapTy &ValueWithNewAddrSpace,
197 SmallVectorImpl<const Use *> *UndefUsesToFix) const;
198 unsigned joinAddressSpaces(unsigned AS1, unsigned AS2) const;
199 };
200
201 } // end anonymous namespace
202
203 char InferAddressSpaces::ID = 0;
204
205 namespace llvm {
206
207 void initializeInferAddressSpacesPass(PassRegistry &);
208
209 } // end namespace llvm
210
211 INITIALIZE_PASS(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces",
212 false, false)
213
214 // Returns true if V is an address expression.
215 // TODO: Currently, we consider only phi, select, bitcast, addrspacecast, and
216 // getelementptr operators.
217 static bool isAddressExpression(const Value &V) {
218 if (!isa<Operator>(V))
219 return false;
220
221 switch (cast<Operator>(V).getOpcode()) {
222 case Instruction::PHI:
223 case Instruction::BitCast:
224 case Instruction::AddrSpaceCast:
225 case Instruction::GetElementPtr:
226 case Instruction::Select:
227 return true;
228 default:
229 return false;
230 }
231 }
232
233 // Returns the pointer operands of V.
234 //
235 // Precondition: V is an address expression.
236 static SmallVector<Value *, 2> getPointerOperands(const Value &V) {
237 const Operator &Op = cast<Operator>(V);
238 switch (Op.getOpcode()) {
239 case Instruction::PHI: {
240 auto IncomingValues = cast<PHINode>(Op).incoming_values();
241 return SmallVector<Value *, 2>(IncomingValues.begin(),
242 IncomingValues.end());
243 }
244 case Instruction::BitCast:
245 case Instruction::AddrSpaceCast:
246 case Instruction::GetElementPtr:
247 return {Op.getOperand(0)};
248 case Instruction::Select:
249 return {Op.getOperand(1), Op.getOperand(2)};
250 default:
251 llvm_unreachable("Unexpected instruction type.");
252 }
253 }
254
255 // TODO: Move logic to TTI?
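//
// For example (a sketch of the llvm.objectsize case): if the pointer argument
// of
//   %n = call i64 @llvm.objectsize.i64.p0i8(i8* %p, ...)
// is inferred to be in addrspace(3), the call is redeclared and rewritten to
// use @llvm.objectsize.i64.p3i8 with the new addrspace(3) pointer operand.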
256 bool InferAddressSpaces::rewriteIntrinsicOperands(IntrinsicInst *II,
257 Value *OldV,
258 Value *NewV) const {
259 Module *M = II->getParent()->getParent()->getParent();
260
261 switch (II->getIntrinsicID()) {
262 case Intrinsic::amdgcn_atomic_inc:
263 case Intrinsic::amdgcn_atomic_dec:
264 case Intrinsic::amdgcn_ds_fadd:
265 case Intrinsic::amdgcn_ds_fmin:
266 case Intrinsic::amdgcn_ds_fmax: {
267 const ConstantInt *IsVolatile = dyn_cast<ConstantInt>(II->getArgOperand(4));
268 if (!IsVolatile || !IsVolatile->isZero())
269 return false;
270
271 LLVM_FALLTHROUGH;
272 }
273 case Intrinsic::objectsize: {
274 Type *DestTy = II->getType();
275 Type *SrcTy = NewV->getType();
276 Function *NewDecl =
277 Intrinsic::getDeclaration(M, II->getIntrinsicID(), {DestTy, SrcTy});
278 II->setArgOperand(0, NewV);
279 II->setCalledFunction(NewDecl);
280 return true;
281 }
282 default:
283 return false;
284 }
285 }
286
287 // TODO: Move logic to TTI?
288 void InferAddressSpaces::collectRewritableIntrinsicOperands(
289 IntrinsicInst *II, std::vector<std::pair<Value *, bool>> &PostorderStack,
290 DenseSet<Value *> &Visited) const {
291 switch (II->getIntrinsicID()) {
292 case Intrinsic::objectsize:
293 case Intrinsic::amdgcn_atomic_inc:
294 case Intrinsic::amdgcn_atomic_dec:
295 case Intrinsic::amdgcn_ds_fadd:
296 case Intrinsic::amdgcn_ds_fmin:
297 case Intrinsic::amdgcn_ds_fmax:
298 appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(0),
299 PostorderStack, Visited);
300 break;
301 default:
302 break;
303 }
304 }
305
307 // If V is an unvisited flat address expression, appends V to PostorderStack
308 // and marks it as visited.
309 void InferAddressSpaces::appendsFlatAddressExpressionToPostorderStack(
310 Value *V, std::vector<std::pair<Value *, bool>> &PostorderStack,
311 DenseSet<Value *> &Visited) const {
312 assert(V->getType()->isPointerTy());
313
314 // Generic addressing expressions may be hidden in nested constant
315 // expressions.
316 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
317 // TODO: Look in non-address parts, like icmp operands.
318 if (isAddressExpression(*CE) && Visited.insert(CE).second)
319 PostorderStack.push_back(std::make_pair(CE, false));
320
321 return;
322 }
323
324 if (isAddressExpression(*V) &&
325 V->getType()->getPointerAddressSpace() == FlatAddrSpace) {
326 if (Visited.insert(V).second) {
327 PostorderStack.push_back(std::make_pair(V, false));
328
329 Operator *Op = cast<Operator>(V);
330 for (unsigned I = 0, E = Op->getNumOperands(); I != E; ++I) {
331 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op->getOperand(I))) {
332 if (isAddressExpression(*CE) && Visited.insert(CE).second)
333 PostorderStack.emplace_back(CE, false);
334 }
335 }
336 }
337 }
338 }
339
340 // Returns all flat address expressions in function F. The elements are
341 // ordered in postorder.
342 std::vector<WeakTrackingVH>
343 InferAddressSpaces::collectFlatAddressExpressions(Function &F) const {
344 // This function implements a non-recursive postorder traversal of a partial
345 // use-def graph of function F.
346 std::vector<std::pair<Value *, bool>> PostorderStack;
347 // The set of visited expressions.
348 DenseSet<Value *> Visited;
349
350 auto PushPtrOperand = [&](Value *Ptr) {
351 appendsFlatAddressExpressionToPostorderStack(Ptr, PostorderStack,
352 Visited);
353 };
354
355 // Look at operations that may be interesting to accelerate by moving to a
356 // known address space. We mainly aim at loads and stores, but pure
357 // addressing calculations may also be faster.
358 for (Instruction &I : instructions(F)) {
359 if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
360 if (!GEP->getType()->isVectorTy())
361 PushPtrOperand(GEP->getPointerOperand());
362 } else if (auto *LI = dyn_cast<LoadInst>(&I))
363 PushPtrOperand(LI->getPointerOperand());
364 else if (auto *SI = dyn_cast<StoreInst>(&I))
365 PushPtrOperand(SI->getPointerOperand());
366 else if (auto *RMW = dyn_cast<AtomicRMWInst>(&I))
367 PushPtrOperand(RMW->getPointerOperand());
368 else if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(&I))
369 PushPtrOperand(CmpX->getPointerOperand());
370 else if (auto *MI = dyn_cast<MemIntrinsic>(&I)) {
371 // For memset/memcpy/memmove, any pointer operand can be replaced.
372 PushPtrOperand(MI->getRawDest());
373
374 // Handle 2nd operand for memcpy/memmove.
375 if (auto *MTI = dyn_cast<MemTransferInst>(MI))
376 PushPtrOperand(MTI->getRawSource());
377 } else if (auto *II = dyn_cast<IntrinsicInst>(&I))
378 collectRewritableIntrinsicOperands(II, PostorderStack, Visited);
379 else if (ICmpInst *Cmp = dyn_cast<ICmpInst>(&I)) {
380 // FIXME: Handle vectors of pointers
381 if (Cmp->getOperand(0)->getType()->isPointerTy()) {
382 PushPtrOperand(Cmp->getOperand(0));
383 PushPtrOperand(Cmp->getOperand(1));
384 }
385 } else if (auto *ASC = dyn_cast<AddrSpaceCastInst>(&I)) {
386 if (!ASC->getType()->isVectorTy())
387 PushPtrOperand(ASC->getPointerOperand());
388 }
389 }
390
391 std::vector<WeakTrackingVH> Postorder; // The resultant postorder.
392 while (!PostorderStack.empty()) {
393 Value *TopVal = PostorderStack.back().first;
394 // If the operands of the expression on the top are already explored,
395 // adds that expression to the resultant postorder.
396 if (PostorderStack.back().second) {
397 if (TopVal->getType()->getPointerAddressSpace() == FlatAddrSpace)
398 Postorder.push_back(TopVal);
399 PostorderStack.pop_back();
400 continue;
401 }
402 // Otherwise, adds its operands to the stack and explores them.
403 PostorderStack.back().second = true;
404 for (Value *PtrOperand : getPointerOperands(*TopVal)) {
405 appendsFlatAddressExpressionToPostorderStack(PtrOperand, PostorderStack,
406 Visited);
407 }
408 }
409 return Postorder;
410 }
411
412 // A helper function for cloneInstructionWithNewAddressSpace. Returns the clone
413 // of OperandUse.get() in the new address space. If the clone is not ready yet,
414 // returns an undef in the new address space as a placeholder.
415 static Value *operandWithNewAddressSpaceOrCreateUndef(
416 const Use &OperandUse, unsigned NewAddrSpace,
417 const ValueToValueMapTy &ValueWithNewAddrSpace,
418 SmallVectorImpl<const Use *> *UndefUsesToFix) {
419 Value *Operand = OperandUse.get();
420
421 Type *NewPtrTy =
422 Operand->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);
423
424 if (Constant *C = dyn_cast<Constant>(Operand))
425 return ConstantExpr::getAddrSpaceCast(C, NewPtrTy);
426
427 if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand))
428 return NewOperand;
429
430 UndefUsesToFix->push_back(&OperandUse);
431 return UndefValue::get(NewPtrTy);
432 }
433
434 // Returns a clone of `I` with its operands converted to those specified in
435 // ValueWithNewAddrSpace. Due to potential cycles in the data flow graph, an
436 // operand whose address space needs to be modified might not exist in
437 // ValueWithNewAddrSpace. In that case, uses undef as a placeholder operand and
438 // adds that operand use to UndefUsesToFix so that the caller can fix them later.
439 //
440 // Note that we do not necessarily clone `I`, e.g., if it is an addrspacecast
441 // from a pointer whose type already matches. Therefore, this function returns a
442 // Value* instead of an Instruction*.
443 static Value *cloneInstructionWithNewAddressSpace(
444 Instruction *I, unsigned NewAddrSpace,
445 const ValueToValueMapTy &ValueWithNewAddrSpace,
446 SmallVectorImpl<const Use *> *UndefUsesToFix) {
447 Type *NewPtrType =
448 I->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);
449
450 if (I->getOpcode() == Instruction::AddrSpaceCast) {
451 Value *Src = I->getOperand(0);
452 // Because `I` is flat, the source address space must be specific.
453 // Therefore, the inferred address space must be the source space, according
454 // to our algorithm.
455 assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
456 if (Src->getType() != NewPtrType)
457 return new BitCastInst(Src, NewPtrType);
458 return Src;
459 }
460
461 // Computes the converted pointer operands.
462 SmallVector<Value *, 4> NewPointerOperands;
463 for (const Use &OperandUse : I->operands()) {
464 if (!OperandUse.get()->getType()->isPointerTy())
465 NewPointerOperands.push_back(nullptr);
466 else
467 NewPointerOperands.push_back(operandWithNewAddressSpaceOrCreateUndef(
468 OperandUse, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix));
469 }
470
471 switch (I->getOpcode()) {
472 case Instruction::BitCast:
473 return new BitCastInst(NewPointerOperands[0], NewPtrType);
474 case Instruction::PHI: {
475 assert(I->getType()->isPointerTy());
476 PHINode *PHI = cast<PHINode>(I);
477 PHINode *NewPHI = PHINode::Create(NewPtrType, PHI->getNumIncomingValues());
478 for (unsigned Index = 0; Index < PHI->getNumIncomingValues(); ++Index) {
479 unsigned OperandNo = PHINode::getOperandNumForIncomingValue(Index);
480 NewPHI->addIncoming(NewPointerOperands[OperandNo],
481 PHI->getIncomingBlock(Index));
482 }
483 return NewPHI;
484 }
485 case Instruction::GetElementPtr: {
486 GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
487 GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
488 GEP->getSourceElementType(), NewPointerOperands[0],
489 SmallVector<Value *, 4>(GEP->idx_begin(), GEP->idx_end()));
490 NewGEP->setIsInBounds(GEP->isInBounds());
491 return NewGEP;
492 }
493 case Instruction::Select:
494 assert(I->getType()->isPointerTy());
495 return SelectInst::Create(I->getOperand(0), NewPointerOperands[1],
496 NewPointerOperands[2], "", nullptr, I);
497 default:
498 llvm_unreachable("Unexpected opcode");
499 }
500 }
501
502 // Similar to cloneInstructionWithNewAddressSpace, returns a clone of the
503 // constant expression `CE` with its operands replaced as specified in
504 // ValueWithNewAddrSpace.
505 static Value *cloneConstantExprWithNewAddressSpace(
506 ConstantExpr *CE, unsigned NewAddrSpace,
507 const ValueToValueMapTy &ValueWithNewAddrSpace) {
508 Type *TargetType =
509 CE->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);
510
511 if (CE->getOpcode() == Instruction::AddrSpaceCast) {
512 // Because CE is flat, the source address space must be specific.
513 // Therefore, the inferred address space must be the source space according
514 // to our algorithm.
515 assert(CE->getOperand(0)->getType()->getPointerAddressSpace() ==
516 NewAddrSpace);
517 return ConstantExpr::getBitCast(CE->getOperand(0), TargetType);
518 }
519
520 if (CE->getOpcode() == Instruction::BitCast) {
521 if (Value *NewOperand = ValueWithNewAddrSpace.lookup(CE->getOperand(0)))
522 return ConstantExpr::getBitCast(cast<Constant>(NewOperand), TargetType);
523 return ConstantExpr::getAddrSpaceCast(CE, TargetType);
524 }
525
526 if (CE->getOpcode() == Instruction::Select) {
527 Constant *Src0 = CE->getOperand(1);
528 Constant *Src1 = CE->getOperand(2);
529 if (Src0->getType()->getPointerAddressSpace() ==
530 Src1->getType()->getPointerAddressSpace()) {
531
532 return ConstantExpr::getSelect(
533 CE->getOperand(0), ConstantExpr::getAddrSpaceCast(Src0, TargetType),
534 ConstantExpr::getAddrSpaceCast(Src1, TargetType));
535 }
536 }
537
538 // Computes the operands of the new constant expression.
539 bool IsNew = false;
540 SmallVector<Constant *, 4> NewOperands;
541 for (unsigned Index = 0; Index < CE->getNumOperands(); ++Index) {
542 Constant *Operand = CE->getOperand(Index);
543 // If the address space of `Operand` needs to be modified, the new operand
544 // with the new address space should already be in ValueWithNewAddrSpace
545 // because (1) the constant expressions we consider (i.e. addrspacecast,
546 // bitcast, and getelementptr) do not incur cycles in the data flow graph
547 // and (2) this function is called on constant expressions in postorder.
548 if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand)) {
549 IsNew = true;
550 NewOperands.push_back(cast<Constant>(NewOperand));
551 } else {
552 // Otherwise, reuses the old operand.
553 NewOperands.push_back(Operand);
554 }
555 }
556
557 // If !IsNew, we will replace the Value with itself. However, replaced values
558 // are assumed to be wrapped in an addrspacecast later, so drop it now.
559 if (!IsNew)
560 return nullptr;
561
562 if (CE->getOpcode() == Instruction::GetElementPtr) {
563 // Needs to specify the source type while constructing a getelementptr
564 // constant expression.
565 return CE->getWithOperands(
566 NewOperands, TargetType, /*OnlyIfReduced=*/false,
567 NewOperands[0]->getType()->getPointerElementType());
568 }
569
570 return CE->getWithOperands(NewOperands, TargetType);
571 }
572
573 // Returns a clone of the value `V`, with its operands replaced as specified in
574 // ValueWithNewAddrSpace. This function is called on every flat address
575 // expression whose address space needs to be modified, in postorder.
576 //
577 // See cloneInstructionWithNewAddressSpace for the meaning of UndefUsesToFix.
578 Value *InferAddressSpaces::cloneValueWithNewAddressSpace(
579 Value *V, unsigned NewAddrSpace,
580 const ValueToValueMapTy &ValueWithNewAddrSpace,
581 SmallVectorImpl<const Use *> *UndefUsesToFix) const {
582 // All values in Postorder are flat address expressions.
583 assert(isAddressExpression(*V) &&
584 V->getType()->getPointerAddressSpace() == FlatAddrSpace);
585
586 if (Instruction *I = dyn_cast<Instruction>(V)) {
587 Value *NewV = cloneInstructionWithNewAddressSpace(
588 I, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix);
589 if (Instruction *NewI = dyn_cast<Instruction>(NewV)) {
590 if (NewI->getParent() == nullptr) {
591 NewI->insertBefore(I);
592 NewI->takeName(I);
593 }
594 }
595 return NewV;
596 }
597
598 return cloneConstantExprWithNewAddressSpace(
599 cast<ConstantExpr>(V), NewAddrSpace, ValueWithNewAddrSpace);
600 }
601
602 // Defines the join operation on the address space lattice (see the file header
603 // comments).
604 unsigned InferAddressSpaces::joinAddressSpaces(unsigned AS1,
605 unsigned AS2) const {
606 if (AS1 == FlatAddrSpace || AS2 == FlatAddrSpace)
607 return FlatAddrSpace;
608
609 if (AS1 == UninitializedAddressSpace)
610 return AS2;
611 if (AS2 == UninitializedAddressSpace)
612 return AS1;
613
614 // The join of two different specific address spaces is flat.
615 return (AS1 == AS2) ? AS1 : FlatAddrSpace;
616 }
617
618 bool InferAddressSpaces::runOnFunction(Function &F) {
619 if (skipFunction(F))
620 return false;
621
622 const TargetTransformInfo &TTI =
623 getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
624 FlatAddrSpace = TTI.getFlatAddressSpace();
625 if (FlatAddrSpace == UninitializedAddressSpace)
626 return false;
627
628 // Collects all flat address expressions in postorder.
629 std::vector<WeakTrackingVH> Postorder = collectFlatAddressExpressions(F);
630
631 // Runs a data-flow analysis to refine the address spaces of every expression
632 // in Postorder.
633 ValueToAddrSpaceMapTy InferredAddrSpace;
634 inferAddressSpaces(Postorder, &InferredAddrSpace);
635
636 // Changes the address spaces of the flat address expressions that are inferred
637 // to point to a specific address space.
638 return rewriteWithNewAddressSpaces(TTI, Postorder, InferredAddrSpace, &F);
639 }
640
641 // Constants need to be tracked through RAUW to handle cases with nested
642 // constant expressions, so wrap values in WeakTrackingVH.
643 void InferAddressSpaces::inferAddressSpaces(
644 ArrayRef<WeakTrackingVH> Postorder,
645 ValueToAddrSpaceMapTy *InferredAddrSpace) const {
646 SetVector<Value *> Worklist(Postorder.begin(), Postorder.end());
647 // Initially, all expressions are in the uninitialized address space.
648 for (Value *V : Postorder)
649 (*InferredAddrSpace)[V] = UninitializedAddressSpace;
650
651 while (!Worklist.empty()) {
652 Value *V = Worklist.pop_back_val();
653
654 // Tries to update the address space of the stack top according to the
655 // address spaces of its operands.
656 LLVM_DEBUG(dbgs() << "Updating the address space of\n " << *V << '\n');
657 Optional<unsigned> NewAS = updateAddressSpace(*V, *InferredAddrSpace);
658 if (!NewAS.hasValue())
659 continue;
660 // If any update is made, adds the users of V to the worklist because
661 // their address spaces may also need to be updated.
662 LLVM_DEBUG(dbgs() << " to " << NewAS.getValue() << '\n');
663 (*InferredAddrSpace)[V] = NewAS.getValue();
664
665 for (Value *User : V->users()) {
666 // Skip if User is already in the worklist.
667 if (Worklist.count(User))
668 continue;
669
670 auto Pos = InferredAddrSpace->find(User);
671 // Our algorithm only updates the address spaces of flat address
672 // expressions, which are those in InferredAddrSpace.
673 if (Pos == InferredAddrSpace->end())
674 continue;
675
676 // Function updateAddressSpace moves the address space down a lattice
677 // path. Therefore, nothing to do if User is already inferred as flat (the
678 // bottom element in the lattice).
679 if (Pos->second == FlatAddrSpace)
680 continue;
681
682 Worklist.insert(User);
683 }
684 }
685 }
686
687 Optional<unsigned> InferAddressSpaces::updateAddressSpace(
688 const Value &V, const ValueToAddrSpaceMapTy &InferredAddrSpace) const {
689 assert(InferredAddrSpace.count(&V));
690
691 // The new inferred address space equals the join of the address spaces
692 // of all its pointer operands.
693 unsigned NewAS = UninitializedAddressSpace;
694
695 const Operator &Op = cast<Operator>(V);
696 if (Op.getOpcode() == Instruction::Select) {
697 Value *Src0 = Op.getOperand(1);
698 Value *Src1 = Op.getOperand(2);
699
700 auto I = InferredAddrSpace.find(Src0);
701 unsigned Src0AS = (I != InferredAddrSpace.end()) ?
702 I->second : Src0->getType()->getPointerAddressSpace();
703
704 auto J = InferredAddrSpace.find(Src1);
705 unsigned Src1AS = (J != InferredAddrSpace.end()) ?
706 J->second : Src1->getType()->getPointerAddressSpace();
707
708 auto *C0 = dyn_cast<Constant>(Src0);
709 auto *C1 = dyn_cast<Constant>(Src1);
710
711 // If one of the inputs is a constant, we may be able to do a constant
712 // addrspacecast of it. Defer inferring the address space until the input
713 // address space is known.
714 if ((C1 && Src0AS == UninitializedAddressSpace) ||
715 (C0 && Src1AS == UninitializedAddressSpace))
716 return None;
717
718 if (C0 && isSafeToCastConstAddrSpace(C0, Src1AS))
719 NewAS = Src1AS;
720 else if (C1 && isSafeToCastConstAddrSpace(C1, Src0AS))
721 NewAS = Src0AS;
722 else
723 NewAS = joinAddressSpaces(Src0AS, Src1AS);
724 } else {
725 for (Value *PtrOperand : getPointerOperands(V)) {
726 auto I = InferredAddrSpace.find(PtrOperand);
727 unsigned OperandAS = I != InferredAddrSpace.end() ?
728 I->second : PtrOperand->getType()->getPointerAddressSpace();
729
730 // join(flat, *) = flat. So we can break if NewAS is already flat.
731 NewAS = joinAddressSpaces(NewAS, OperandAS);
732 if (NewAS == FlatAddrSpace)
733 break;
734 }
735 }
736
737 unsigned OldAS = InferredAddrSpace.lookup(&V);
738 assert(OldAS != FlatAddrSpace);
739 if (OldAS == NewAS)
740 return None;
741 return NewAS;
742 }
743
744 /// Returns true if \p U is the pointer operand of a memory instruction with
745 /// a single pointer operand that can have its address space changed by simply
746 /// mutating the use to a new value. If the memory instruction is volatile,
747 /// return true only if the target allows the memory instruction to be volatile
748 /// in the new address space.
749 static bool isSimplePointerUseValidToReplace(const TargetTransformInfo &TTI,
750 Use &U, unsigned AddrSpace) {
751 User *Inst = U.getUser();
752 unsigned OpNo = U.getOperandNo();
753 bool VolatileIsAllowed = false;
754 if (auto *I = dyn_cast<Instruction>(Inst))
755 VolatileIsAllowed = TTI.hasVolatileVariant(I, AddrSpace);
756
757 if (auto *LI = dyn_cast<LoadInst>(Inst))
758 return OpNo == LoadInst::getPointerOperandIndex() &&
759 (VolatileIsAllowed || !LI->isVolatile());
760
761 if (auto *SI = dyn_cast<StoreInst>(Inst))
762 return OpNo == StoreInst::getPointerOperandIndex() &&
763 (VolatileIsAllowed || !SI->isVolatile());
764
765 if (auto *RMW = dyn_cast<AtomicRMWInst>(Inst))
766 return OpNo == AtomicRMWInst::getPointerOperandIndex() &&
767 (VolatileIsAllowed || !RMW->isVolatile());
768
769 if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst))
770 return OpNo == AtomicCmpXchgInst::getPointerOperandIndex() &&
771 (VolatileIsAllowed || !CmpX->isVolatile());
772
773 return false;
774 }
775
776 /// Update memory intrinsic uses that require more complex processing than
777 /// simple memory instructions. These require re-mangling and may have multiple
778 /// pointer operands.
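///
/// For example (a sketch): a memcpy whose destination pointer was inferred to
/// be in addrspace(3) is recreated through IRBuilder with the new addrspace(3)
/// destination operand, carrying over the TBAA, alias.scope, and noalias
/// metadata, and the original call is erased.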
779 static bool handleMemIntrinsicPtrUse(MemIntrinsic *MI, Value *OldV,
780 Value *NewV) {
781 IRBuilder<> B(MI);
782 MDNode *TBAA = MI->getMetadata(LLVMContext::MD_tbaa);
783 MDNode *ScopeMD = MI->getMetadata(LLVMContext::MD_alias_scope);
784 MDNode *NoAliasMD = MI->getMetadata(LLVMContext::MD_noalias);
785
786 if (auto *MSI = dyn_cast<MemSetInst>(MI)) {
787 B.CreateMemSet(NewV, MSI->getValue(),
788 MSI->getLength(), MSI->getDestAlignment(),
789 false, // isVolatile
790 TBAA, ScopeMD, NoAliasMD);
791 } else if (auto *MTI = dyn_cast<MemTransferInst>(MI)) {
792 Value *Src = MTI->getRawSource();
793 Value *Dest = MTI->getRawDest();
794
795 // Be careful in case this is a self-to-self copy.
796 if (Src == OldV)
797 Src = NewV;
798
799 if (Dest == OldV)
800 Dest = NewV;
801
802 if (isa<MemCpyInst>(MTI)) {
803 MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
804 B.CreateMemCpy(Dest, MTI->getDestAlignment(),
805 Src, MTI->getSourceAlignment(),
806 MTI->getLength(),
807 false, // isVolatile
808 TBAA, TBAAStruct, ScopeMD, NoAliasMD);
809 } else {
810 assert(isa<MemMoveInst>(MTI));
811 B.CreateMemMove(Dest, MTI->getDestAlignment(),
812 Src, MTI->getSourceAlignment(),
813 MTI->getLength(),
814 false, // isVolatile
815 TBAA, ScopeMD, NoAliasMD);
816 }
817 } else
818 llvm_unreachable("unhandled MemIntrinsic");
819
820 MI->eraseFromParent();
821 return true;
822 }
823
824 // Returns true if it is OK to change the address space of constant \p C with
825 // a ConstantExpr addrspacecast.
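//
// For example, undef can always be cast; casts between two different non-flat
// address spaces are rejected; otherwise null pointers, inttoptr constants
// producing a flat pointer, and constant addrspacecasts (looked through
// recursively) are accepted.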
826 bool InferAddressSpaces::isSafeToCastConstAddrSpace(Constant *C, unsigned NewAS) const {
827 assert(NewAS != UninitializedAddressSpace);
828
829 unsigned SrcAS = C->getType()->getPointerAddressSpace();
830 if (SrcAS == NewAS || isa<UndefValue>(C))
831 return true;
832
833 // Prevent illegal casts between different non-flat address spaces.
834 if (SrcAS != FlatAddrSpace && NewAS != FlatAddrSpace)
835 return false;
836
837 if (isa<ConstantPointerNull>(C))
838 return true;
839
840 if (auto *Op = dyn_cast<Operator>(C)) {
841 // If we already have a constant addrspacecast, it should be safe to cast it
842 // off.
843 if (Op->getOpcode() == Instruction::AddrSpaceCast)
844 return isSafeToCastConstAddrSpace(cast<Constant>(Op->getOperand(0)), NewAS);
845
846 if (Op->getOpcode() == Instruction::IntToPtr &&
847 Op->getType()->getPointerAddressSpace() == FlatAddrSpace)
848 return true;
849 }
850
851 return false;
852 }
853
854 static Value::use_iterator skipToNextUser(Value::use_iterator I,
855 Value::use_iterator End) {
856 User *CurUser = I->getUser();
857 ++I;
858
859 while (I != End && I->getUser() == CurUser)
860 ++I;
861
862 return I;
863 }
864
865 bool InferAddressSpaces::rewriteWithNewAddressSpaces(
866 const TargetTransformInfo &TTI, ArrayRef<WeakTrackingVH> Postorder,
867 const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) const {
868 // For each address expression to be modified, creates a clone of it with its
869 // pointer operands converted to the new address space. Since the pointer
870 // operands are converted, the clone is naturally in the new address space by
871 // construction.
872 ValueToValueMapTy ValueWithNewAddrSpace;
873 SmallVector<const Use *, 32> UndefUsesToFix;
874 for (Value* V : Postorder) {
875 unsigned NewAddrSpace = InferredAddrSpace.lookup(V);
876 if (V->getType()->getPointerAddressSpace() != NewAddrSpace) {
877 ValueWithNewAddrSpace[V] = cloneValueWithNewAddressSpace(
878 V, NewAddrSpace, ValueWithNewAddrSpace, &UndefUsesToFix);
879 }
880 }
881
882 if (ValueWithNewAddrSpace.empty())
883 return false;
884
885 // Fixes all the undef uses generated by cloneInstructionWithNewAddressSpace.
886 for (const Use *UndefUse : UndefUsesToFix) {
887 User *V = UndefUse->getUser();
888 User *NewV = cast<User>(ValueWithNewAddrSpace.lookup(V));
889 unsigned OperandNo = UndefUse->getOperandNo();
890 assert(isa<UndefValue>(NewV->getOperand(OperandNo)));
891 NewV->setOperand(OperandNo, ValueWithNewAddrSpace.lookup(UndefUse->get()));
892 }
893
894 SmallVector<Instruction *, 16> DeadInstructions;
895
896 // Replaces the uses of the old address expressions with the new ones.
897 for (const WeakTrackingVH &WVH : Postorder) {
898 assert(WVH && "value was unexpectedly deleted");
899 Value *V = WVH;
900 Value *NewV = ValueWithNewAddrSpace.lookup(V);
901 if (NewV == nullptr)
902 continue;
903
904 LLVM_DEBUG(dbgs() << "Replacing the uses of " << *V << "\n with\n "
905 << *NewV << '\n');
906
907 if (Constant *C = dyn_cast<Constant>(V)) {
908 Constant *Replace = ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV),
909 C->getType());
910 if (C != Replace) {
911 LLVM_DEBUG(dbgs() << "Inserting replacement const cast: " << Replace
912 << ": " << *Replace << '\n');
913 C->replaceAllUsesWith(Replace);
914 V = Replace;
915 }
916 }
917
918 Value::use_iterator I, E, Next;
919 for (I = V->use_begin(), E = V->use_end(); I != E; ) {
920 Use &U = *I;
921
922 // Some users may see the same pointer operand in multiple operands. Skip
923 // to the next instruction.
924 I = skipToNextUser(I, E);
925
926 if (isSimplePointerUseValidToReplace(
927 TTI, U, V->getType()->getPointerAddressSpace())) {
928 // If V is used as the pointer operand of a compatible memory operation,
929 // sets the pointer operand to NewV. This replacement does not change
930 // the element type, so the resultant load/store is still valid.
931 U.set(NewV);
932 continue;
933 }
934
935 User *CurUser = U.getUser();
936 // Handle more complex cases like intrinsics that need to be remangled.
937 if (auto *MI = dyn_cast<MemIntrinsic>(CurUser)) {
938 if (!MI->isVolatile() && handleMemIntrinsicPtrUse(MI, V, NewV))
939 continue;
940 }
941
942 if (auto *II = dyn_cast<IntrinsicInst>(CurUser)) {
943 if (rewriteIntrinsicOperands(II, V, NewV))
944 continue;
945 }
946
947 if (isa<Instruction>(CurUser)) {
948 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(CurUser)) {
949 // If we can infer that both pointers are in the same addrspace,
950 // transform e.g.
951 // %cmp = icmp eq float* %p, %q
952 // into
953 // %cmp = icmp eq float addrspace(3)* %new_p, %new_q
954
955 unsigned NewAS = NewV->getType()->getPointerAddressSpace();
956 int SrcIdx = U.getOperandNo();
957 int OtherIdx = (SrcIdx == 0) ? 1 : 0;
958 Value *OtherSrc = Cmp->getOperand(OtherIdx);
959
960 if (Value *OtherNewV = ValueWithNewAddrSpace.lookup(OtherSrc)) {
961 if (OtherNewV->getType()->getPointerAddressSpace() == NewAS) {
962 Cmp->setOperand(OtherIdx, OtherNewV);
963 Cmp->setOperand(SrcIdx, NewV);
964 continue;
965 }
966 }
967
968 // Even if the type mismatches, we can cast the constant.
969 if (auto *KOtherSrc = dyn_cast<Constant>(OtherSrc)) {
970 if (isSafeToCastConstAddrSpace(KOtherSrc, NewAS)) {
971 Cmp->setOperand(SrcIdx, NewV);
972 Cmp->setOperand(OtherIdx,
973 ConstantExpr::getAddrSpaceCast(KOtherSrc, NewV->getType()));
974 continue;
975 }
976 }
977 }
978
979 if (AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(CurUser)) {
980 unsigned NewAS = NewV->getType()->getPointerAddressSpace();
981 if (ASC->getDestAddressSpace() == NewAS) {
982 if (ASC->getType()->getPointerElementType() !=
983 NewV->getType()->getPointerElementType()) {
984 NewV = CastInst::Create(Instruction::BitCast, NewV,
985 ASC->getType(), "", ASC);
986 }
987 ASC->replaceAllUsesWith(NewV);
988 DeadInstructions.push_back(ASC);
989 continue;
990 }
991 }
992
993 // Otherwise, replaces the use with flat(NewV).
994 if (Instruction *I = dyn_cast<Instruction>(V)) {
995 BasicBlock::iterator InsertPos = std::next(I->getIterator());
996 while (isa<PHINode>(InsertPos))
997 ++InsertPos;
998 U.set(new AddrSpaceCastInst(NewV, V->getType(), "", &*InsertPos));
999 } else {
1000 U.set(ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV),
1001 V->getType()));
1002 }
1003 }
1004 }
1005
1006 if (V->use_empty()) {
1007 if (Instruction *I = dyn_cast<Instruction>(V))
1008 DeadInstructions.push_back(I);
1009 }
1010 }
1011
1012 for (Instruction *I : DeadInstructions)
1013 RecursivelyDeleteTriviallyDeadInstructions(I);
1014
1015 return true;
1016 }
1017
1018 FunctionPass *llvm::createInferAddressSpacesPass() {
1019 return new InferAddressSpaces();
1020 }
1021