//===- LoopIdiomRecognize.cpp - Loop idiom recognition --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass implements an idiom recognizer that transforms simple loops into a
// non-loop form. In cases where it kicks in, it can be a significant
// performance win.
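//
// For example (an illustrative case, not an exhaustive list), a loop such as
//
//   for (i = 0; i != n; ++i)
//     A[i] = 0;
//
// can be replaced with a single library call:
//
//   memset(A, 0, n * sizeof(*A));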
//
// If compiling for code size we avoid idiom recognition if the resulting
// code could be larger than the code for the original loop. One way this could
// happen is if the loop is not removable after idiom recognition due to the
// presence of non-idiom instructions. The initial implementation of the
// heuristics applies to idioms in multi-block loops.
//
//===----------------------------------------------------------------------===//
//
// TODO List:
//
// Future loop memory idioms to recognize:
//   memcmp, memmove, strlen, etc.
// Future floating point idioms to recognize in -ffast-math mode:
//   fpowi
// Future integer operation idioms to recognize:
//   ctpop, ctlz, cttz
//
// Beware that isel's default lowering for ctpop is highly inefficient for
// i64 and larger types when i64 is legal and the value has few bits set. It
// would be good to enhance isel to emit a loop for ctpop in this case.
//
// This could recognize common matrix multiplies and dot product idioms and
// replace them with calls to BLAS (if linked in??).
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/LoopIdiomRecognize.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "loop-idiom"

STATISTIC(NumMemSet, "Number of memset's formed from loop stores");
STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores");

static cl::opt<bool> UseLIRCodeSizeHeurs(
    "use-lir-code-size-heurs",
    cl::desc("Use loop idiom recognition code size heuristics when compiling "
             "with -Os/-Oz"),
    cl::init(true), cl::Hidden);

namespace {

class LoopIdiomRecognize {
  Loop *CurLoop = nullptr;
  AliasAnalysis *AA;
  DominatorTree *DT;
  LoopInfo *LI;
  ScalarEvolution *SE;
  TargetLibraryInfo *TLI;
  const TargetTransformInfo *TTI;
  const DataLayout *DL;
  bool ApplyCodeSizeHeuristics;

public:
  explicit LoopIdiomRecognize(AliasAnalysis *AA, DominatorTree *DT,
                              LoopInfo *LI, ScalarEvolution *SE,
                              TargetLibraryInfo *TLI,
                              const TargetTransformInfo *TTI,
                              const DataLayout *DL)
      : AA(AA), DT(DT), LI(LI), SE(SE), TLI(TLI), TTI(TTI), DL(DL) {}

  bool runOnLoop(Loop *L);

private:
  using StoreList = SmallVector<StoreInst *, 8>;
  using StoreListMap = MapVector<Value *, StoreList>;

  StoreListMap StoreRefsForMemset;
  StoreListMap StoreRefsForMemsetPattern;
  StoreList StoreRefsForMemcpy;
  bool HasMemset;
  bool HasMemsetPattern;
  bool HasMemcpy;

  /// Return code for isLegalStore()
  enum LegalStoreKind {
    None = 0,
    Memset,
    MemsetPattern,
    Memcpy,
    UnorderedAtomicMemcpy,
    DontUse // Dummy retval never to be used. Allows catching errors in retval
            // handling.
  };

  /// \name Countable Loop Idiom Handling
  /// @{

  bool runOnCountableLoop();
  bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
                      SmallVectorImpl<BasicBlock *> &ExitBlocks);

  void collectStores(BasicBlock *BB);
  LegalStoreKind isLegalStore(StoreInst *SI);
  bool processLoopStores(SmallVectorImpl<StoreInst *> &SL, const SCEV *BECount,
                         bool ForMemset);
  bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount);

  bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
                               unsigned StoreAlignment, Value *StoredVal,
                               Instruction *TheStore,
                               SmallPtrSetImpl<Instruction *> &Stores,
                               const SCEVAddRecExpr *Ev, const SCEV *BECount,
                               bool NegStride, bool IsLoopMemset = false);
  bool processLoopStoreOfLoopLoad(StoreInst *SI, const SCEV *BECount);
  bool avoidLIRForMultiBlockLoop(bool IsMemset = false,
                                 bool IsLoopMemset = false);

  /// @}
  /// \name Noncountable Loop Idiom Handling
  /// @{

  bool runOnNoncountableLoop();

  bool recognizePopcount();
  void transformLoopToPopcount(BasicBlock *PreCondBB, Instruction *CntInst,
                               PHINode *CntPhi, Value *Var);
  bool recognizeAndInsertCTLZ();
  void transformLoopToCountable(BasicBlock *PreCondBB, Instruction *CntInst,
                                PHINode *CntPhi, Value *Var, Instruction *DefX,
                                const DebugLoc &DL, bool ZeroCheck,
                                bool IsCntPhiUsedOutsideLoop);

  /// @}
};
class LoopIdiomRecognizeLegacyPass : public LoopPass {
public:
  static char ID;

  explicit LoopIdiomRecognizeLegacyPass() : LoopPass(ID) {
    initializeLoopIdiomRecognizeLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;

    AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
    const TargetTransformInfo *TTI =
        &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
            *L->getHeader()->getParent());
    const DataLayout *DL = &L->getHeader()->getModule()->getDataLayout();

    LoopIdiomRecognize LIR(AA, DT, LI, SE, TLI, TTI, DL);
    return LIR.runOnLoop(L);
  }

  /// This transformation requires natural loop information & requires that
  /// loop preheaders be inserted into the CFG.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    getLoopAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char LoopIdiomRecognizeLegacyPass::ID = 0;

PreservedAnalyses LoopIdiomRecognizePass::run(Loop &L, LoopAnalysisManager &AM,
                                              LoopStandardAnalysisResults &AR,
                                              LPMUpdater &) {
  const auto *DL = &L.getHeader()->getModule()->getDataLayout();

  LoopIdiomRecognize LIR(&AR.AA, &AR.DT, &AR.LI, &AR.SE, &AR.TLI, &AR.TTI, DL);
  if (!LIR.runOnLoop(&L))
    return PreservedAnalyses::all();

  return getLoopPassPreservedAnalyses();
}

INITIALIZE_PASS_BEGIN(LoopIdiomRecognizeLegacyPass, "loop-idiom",
                      "Recognize loop idioms", false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(LoopIdiomRecognizeLegacyPass, "loop-idiom",
                    "Recognize loop idioms", false, false)

Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognizeLegacyPass(); }

static void deleteDeadInstruction(Instruction *I) {
  I->replaceAllUsesWith(UndefValue::get(I->getType()));
  I->eraseFromParent();
}

//===----------------------------------------------------------------------===//
//
//          Implementation of LoopIdiomRecognize
//
//===----------------------------------------------------------------------===//

bool LoopIdiomRecognize::runOnLoop(Loop *L) {
  CurLoop = L;
  // If the loop could not be converted to canonical form, it must have an
  // indirectbr in it; just give up.
  if (!L->getLoopPreheader())
    return false;

  // Disable loop idiom recognition if the function's name is a common idiom.
  StringRef Name = L->getHeader()->getParent()->getName();
  if (Name == "memset" || Name == "memcpy")
    return false;

  // Determine if code size heuristics need to be applied.
  ApplyCodeSizeHeuristics =
      L->getHeader()->getParent()->optForSize() && UseLIRCodeSizeHeurs;

  HasMemset = TLI->has(LibFunc_memset);
  HasMemsetPattern = TLI->has(LibFunc_memset_pattern16);
  HasMemcpy = TLI->has(LibFunc_memcpy);

  if (HasMemset || HasMemsetPattern || HasMemcpy)
    if (SE->hasLoopInvariantBackedgeTakenCount(L))
      return runOnCountableLoop();

  return runOnNoncountableLoop();
}

bool LoopIdiomRecognize::runOnCountableLoop() {
  const SCEV *BECount = SE->getBackedgeTakenCount(CurLoop);
  assert(!isa<SCEVCouldNotCompute>(BECount) &&
         "runOnCountableLoop() called on a loop without a predictable "
         "backedge-taken count");

  // If this loop executes exactly one time, then it should be peeled, not
  // optimized by this pass.
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    if (BECst->getAPInt() == 0)
      return false;

  SmallVector<BasicBlock *, 8> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);

  LLVM_DEBUG(dbgs() << "loop-idiom Scanning: F["
                    << CurLoop->getHeader()->getParent()->getName()
                    << "] Loop %" << CurLoop->getHeader()->getName() << "\n");

  bool MadeChange = false;

  // The following transforms hoist stores/memsets into the loop pre-header.
  // Give up if the loop has instructions that may throw.
  LoopSafetyInfo SafetyInfo;
  computeLoopSafetyInfo(&SafetyInfo, CurLoop);
  if (SafetyInfo.MayThrow)
    return MadeChange;

  // Scan all the blocks in the loop that are not in subloops.
  for (auto *BB : CurLoop->getBlocks()) {
    // Ignore blocks in subloops.
    if (LI->getLoopFor(BB) != CurLoop)
      continue;

    MadeChange |= runOnLoopBlock(BB, BECount, ExitBlocks);
  }
  return MadeChange;
}

static APInt getStoreStride(const SCEVAddRecExpr *StoreEv) {
  const SCEVConstant *ConstStride = cast<SCEVConstant>(StoreEv->getOperand(1));
  return ConstStride->getAPInt();
}

/// getMemSetPatternValue - If a strided store of the specified value is safe to
/// turn into a memset_pattern16, return a ConstantArray of 16 bytes that should
/// be passed in. Otherwise, return null.
///
/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
/// just replicate their input array and then pass on to memset_pattern16.
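///
/// For example (illustrative): a repeated store of the i32 constant
/// 0x01020304 cannot become a plain memset (its bytes differ), but it can
/// become a memset_pattern16 whose pattern is that 4-byte value replicated
/// four times to fill 16 bytes.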
static Constant *getMemSetPatternValue(Value *V, const DataLayout *DL) {
  // If the value isn't a constant, we can't promote it to being in a constant
  // array. We could theoretically do a store to an alloca or something, but
  // that doesn't seem worthwhile.
  Constant *C = dyn_cast<Constant>(V);
  if (!C)
    return nullptr;

  // Only handle simple values that are a power of two bytes in size.
  uint64_t Size = DL->getTypeSizeInBits(V->getType());
  if (Size == 0 || (Size & 7) || (Size & (Size - 1)))
    return nullptr;

  // Don't care enough about darwin/ppc to implement this.
  if (DL->isBigEndian())
    return nullptr;

  // Convert to size in bytes.
  Size /= 8;

  // TODO: If CI is larger than 16 bytes, we can try slicing it in half to see
  // if the top and bottom are the same (e.g. for vectors and large integers).
  if (Size > 16)
    return nullptr;

  // If the constant is exactly 16 bytes, just use it.
  if (Size == 16)
    return C;

  // Otherwise, we'll use an array of the constants.
  unsigned ArraySize = 16 / Size;
  ArrayType *AT = ArrayType::get(V->getType(), ArraySize);
  return ConstantArray::get(AT, std::vector<Constant *>(ArraySize, C));
}

LoopIdiomRecognize::LegalStoreKind
LoopIdiomRecognize::isLegalStore(StoreInst *SI) {
  // Don't touch volatile stores.
  if (SI->isVolatile())
    return LegalStoreKind::None;
  // We only want simple or unordered-atomic stores.
  if (!SI->isUnordered())
    return LegalStoreKind::None;

  // Don't convert stores of non-integral pointer types to memsets (which
  // store integers).
  if (DL->isNonIntegralPointerType(SI->getValueOperand()->getType()))
    return LegalStoreKind::None;

  // Avoid merging nontemporal stores.
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return LegalStoreKind::None;

  Value *StoredVal = SI->getValueOperand();
  Value *StorePtr = SI->getPointerOperand();

  // Reject stores that are so large that they overflow an unsigned.
  uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
  if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
    return LegalStoreKind::None;

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store. If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *StoreEv =
      dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
  if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
    return LegalStoreKind::None;

  // Check to see if we have a constant stride.
  if (!isa<SCEVConstant>(StoreEv->getOperand(1)))
    return LegalStoreKind::None;

  // See if the store can be turned into a memset.

  // If the stored value is a byte-wise value (like i32 -1), then it may be
  // turned into a memset of i8 -1, assuming that all the consecutive bytes
  // are stored. A store of i32 0x01020304 can never be turned into a memset,
  // but it can be turned into memset_pattern if the target supports it.
  Value *SplatValue = isBytewiseValue(StoredVal);
  Constant *PatternValue = nullptr;

  // Note: memset and memset_pattern on unordered-atomic stores are not yet
  // supported.
  bool UnorderedAtomic = SI->isUnordered() && !SI->isSimple();

  // If we're allowed to form a memset, and the stored value would be
  // acceptable for memset, use it.
  if (!UnorderedAtomic && HasMemset && SplatValue &&
      // Verify that the stored value is loop invariant. If not, we can't
      // promote the memset.
      CurLoop->isLoopInvariant(SplatValue)) {
    // It looks like we can use SplatValue.
    return LegalStoreKind::Memset;
  } else if (!UnorderedAtomic && HasMemsetPattern &&
             // Don't create memset_pattern16s with address spaces.
             StorePtr->getType()->getPointerAddressSpace() == 0 &&
             (PatternValue = getMemSetPatternValue(StoredVal, DL))) {
    // It looks like we can use PatternValue!
    return LegalStoreKind::MemsetPattern;
  }

  // Otherwise, see if the store can be turned into a memcpy.
  if (HasMemcpy) {
    // Check to see if the stride matches the size of the store. If so, then we
    // know that every byte is touched in the loop.
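    // For example (illustrative): an i32 store (4 bytes) advancing 4 bytes
    // per iteration writes a contiguous region, while a stride of 8 bytes
    // would leave unwritten gaps and could not become a memcpy.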
    APInt Stride = getStoreStride(StoreEv);
    unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());
    if (StoreSize != Stride && StoreSize != -Stride)
      return LegalStoreKind::None;

    // The store must be feeding a non-volatile load.
    LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand());

    // Only allow non-volatile loads.
    if (!LI || LI->isVolatile())
      return LegalStoreKind::None;
    // Only allow simple or unordered-atomic loads.
    if (!LI->isUnordered())
      return LegalStoreKind::None;

    // See if the pointer expression is an AddRec like {base,+,1} on the current
    // loop, which indicates a strided load. If we have something else, it's a
    // random load we can't handle.
    const SCEVAddRecExpr *LoadEv =
        dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));
    if (!LoadEv || LoadEv->getLoop() != CurLoop || !LoadEv->isAffine())
      return LegalStoreKind::None;

    // The store and load must share the same stride.
    if (StoreEv->getOperand(1) != LoadEv->getOperand(1))
      return LegalStoreKind::None;

    // Success. This store can be converted into a memcpy.
    UnorderedAtomic = UnorderedAtomic || LI->isAtomic();
    return UnorderedAtomic ? LegalStoreKind::UnorderedAtomicMemcpy
                           : LegalStoreKind::Memcpy;
  }
  // This store can't be transformed into a memset/memcpy.
  return LegalStoreKind::None;
}

void LoopIdiomRecognize::collectStores(BasicBlock *BB) {
  StoreRefsForMemset.clear();
  StoreRefsForMemsetPattern.clear();
  StoreRefsForMemcpy.clear();
  for (Instruction &I : *BB) {
    StoreInst *SI = dyn_cast<StoreInst>(&I);
    if (!SI)
      continue;

    // Make sure this is a strided store with a constant stride.
    switch (isLegalStore(SI)) {
    case LegalStoreKind::None:
      // Nothing to do.
      break;
    case LegalStoreKind::Memset: {
      // Find the base pointer.
      Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
      StoreRefsForMemset[Ptr].push_back(SI);
    } break;
    case LegalStoreKind::MemsetPattern: {
      // Find the base pointer.
      Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
      StoreRefsForMemsetPattern[Ptr].push_back(SI);
    } break;
    case LegalStoreKind::Memcpy:
    case LegalStoreKind::UnorderedAtomicMemcpy:
      StoreRefsForMemcpy.push_back(SI);
      break;
    default:
      assert(false && "unhandled return value");
      break;
    }
  }
}

/// runOnLoopBlock - Process the specified block, which lives in a counted loop
/// with the specified backedge count. This block is known to be in the current
/// loop and not in any subloops.
bool LoopIdiomRecognize::runOnLoopBlock(
    BasicBlock *BB, const SCEV *BECount,
    SmallVectorImpl<BasicBlock *> &ExitBlocks) {
  // We can only promote stores in this block if they are unconditionally
  // executed in the loop. For a block to be unconditionally executed, it has
  // to dominate all the exit blocks of the loop. Verify this now.
  for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
    if (!DT->dominates(BB, ExitBlocks[i]))
      return false;

  bool MadeChange = false;
  // Look for store instructions, which may be optimized to memset/memcpy.
  collectStores(BB);

  // Look for a single store or sets of stores with a common base, which can be
  // optimized into a memset (memset_pattern). The latter most commonly happens
  // with structs and hand-unrolled loops.
  for (auto &SL : StoreRefsForMemset)
    MadeChange |= processLoopStores(SL.second, BECount, true);

  for (auto &SL : StoreRefsForMemsetPattern)
    MadeChange |= processLoopStores(SL.second, BECount, false);

  // Optimize the store into a memcpy, if it feeds a similarly strided load.
  for (auto &SI : StoreRefsForMemcpy)
    MadeChange |= processLoopStoreOfLoopLoad(SI, BECount);

  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *Inst = &*I++;
    // Look for memset instructions, which may be optimized to a larger memset.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
      WeakTrackingVH InstPtr(&*I);
      if (!processLoopMemSet(MSI, BECount))
        continue;
      MadeChange = true;

      // If processing the memset invalidated our iterator, start over from the
      // top of the block.
      if (!InstPtr)
        I = BB->begin();
      continue;
    }
  }

  return MadeChange;
}

/// processLoopStores - See if the given store(s) can be promoted to a memset.
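///
/// For example (illustrative), a hand-unrolled loop such as
///   for (i = 0; i < n; i += 2) { A[i] = 0; A[i+1] = 0; }
/// has two stores whose individual strides (two elements) exceed their store
/// size, but paired together they cover every byte, so the chain can still
/// become a single memset.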
bool LoopIdiomRecognize::processLoopStores(SmallVectorImpl<StoreInst *> &SL,
                                           const SCEV *BECount,
                                           bool ForMemset) {
  // Try to find consecutive stores that can be transformed into memsets.
  SetVector<StoreInst *> Heads, Tails;
  SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;

  // Do a quadratic search on all of the given stores and find
  // all of the pairs of stores that follow each other.
  SmallVector<unsigned, 16> IndexQueue;
  for (unsigned i = 0, e = SL.size(); i < e; ++i) {
    assert(SL[i]->isSimple() && "Expected only non-volatile stores.");

    Value *FirstStoredVal = SL[i]->getValueOperand();
    Value *FirstStorePtr = SL[i]->getPointerOperand();
    const SCEVAddRecExpr *FirstStoreEv =
        cast<SCEVAddRecExpr>(SE->getSCEV(FirstStorePtr));
    APInt FirstStride = getStoreStride(FirstStoreEv);
    unsigned FirstStoreSize =
        DL->getTypeStoreSize(SL[i]->getValueOperand()->getType());

    // See if we can optimize just this store in isolation.
    if (FirstStride == FirstStoreSize || -FirstStride == FirstStoreSize) {
      Heads.insert(SL[i]);
      continue;
    }

    Value *FirstSplatValue = nullptr;
    Constant *FirstPatternValue = nullptr;

    if (ForMemset)
      FirstSplatValue = isBytewiseValue(FirstStoredVal);
    else
      FirstPatternValue = getMemSetPatternValue(FirstStoredVal, DL);

    assert((FirstSplatValue || FirstPatternValue) &&
           "Expected either splat value or pattern value.");

    IndexQueue.clear();
    // If a store has multiple consecutive store candidates, search them in
    // the order i+1 to e, then i-1 down to 0. Pairing with the immediately
    // succeeding or preceding candidate usually creates the best chance of
    // finding a memset opportunity.
    unsigned j = 0;
    for (j = i + 1; j < e; ++j)
      IndexQueue.push_back(j);
    for (j = i; j > 0; --j)
      IndexQueue.push_back(j - 1);

    for (auto &k : IndexQueue) {
      assert(SL[k]->isSimple() && "Expected only non-volatile stores.");
      Value *SecondStorePtr = SL[k]->getPointerOperand();
      const SCEVAddRecExpr *SecondStoreEv =
          cast<SCEVAddRecExpr>(SE->getSCEV(SecondStorePtr));
      APInt SecondStride = getStoreStride(SecondStoreEv);

      if (FirstStride != SecondStride)
        continue;

      Value *SecondStoredVal = SL[k]->getValueOperand();
      Value *SecondSplatValue = nullptr;
      Constant *SecondPatternValue = nullptr;

      if (ForMemset)
        SecondSplatValue = isBytewiseValue(SecondStoredVal);
      else
        SecondPatternValue = getMemSetPatternValue(SecondStoredVal, DL);

      assert((SecondSplatValue || SecondPatternValue) &&
             "Expected either splat value or pattern value.");

      if (isConsecutiveAccess(SL[i], SL[k], *DL, *SE, false)) {
        if (ForMemset) {
          if (FirstSplatValue != SecondSplatValue)
            continue;
        } else {
          if (FirstPatternValue != SecondPatternValue)
            continue;
        }
        Tails.insert(SL[k]);
        Heads.insert(SL[i]);
        ConsecutiveChain[SL[i]] = SL[k];
        break;
      }
    }
  }

  // We may run into multiple chains that merge into a single chain. We mark the
  // stores that we transformed so that we don't visit the same store twice.
  SmallPtrSet<Value *, 16> TransformedStores;
  bool Changed = false;

  // For stores that start but don't end a link in the chain:
  for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
       it != e; ++it) {
    if (Tails.count(*it))
      continue;

    // We found a store instr that starts a chain. Now follow the chain and try
    // to transform it.
    SmallPtrSet<Instruction *, 8> AdjacentStores;
    StoreInst *I = *it;

    StoreInst *HeadStore = I;
    unsigned StoreSize = 0;

    // Collect the chain into a list.
    while (Tails.count(I) || Heads.count(I)) {
      if (TransformedStores.count(I))
        break;
      AdjacentStores.insert(I);

      StoreSize += DL->getTypeStoreSize(I->getValueOperand()->getType());
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }

    Value *StoredVal = HeadStore->getValueOperand();
    Value *StorePtr = HeadStore->getPointerOperand();
    const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
    APInt Stride = getStoreStride(StoreEv);

    // Check to see if the stride matches the size of the stores. If so, then
    // we know that every byte is touched in the loop.
    if (StoreSize != Stride && StoreSize != -Stride)
      continue;

    bool NegStride = StoreSize == -Stride;

    if (processLoopStridedStore(StorePtr, StoreSize, HeadStore->getAlignment(),
                                StoredVal, HeadStore, AdjacentStores, StoreEv,
                                BECount, NegStride)) {
      TransformedStores.insert(AdjacentStores.begin(), AdjacentStores.end());
      Changed = true;
    }
  }

  return Changed;
}

/// processLoopMemSet - See if this memset can be promoted to a large memset.
bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
                                           const SCEV *BECount) {
  // We can only handle non-volatile memsets with a constant size.
  if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
    return false;

  // If we're not allowed to hack on memset, we fail.
  if (!HasMemset)
    return false;

  Value *Pointer = MSI->getDest();

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store. If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
  if (!Ev || Ev->getLoop() != CurLoop || !Ev->isAffine())
    return false;

  // Reject memsets that are so large that they overflow an unsigned.
  uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
  if ((SizeInBytes >> 32) != 0)
    return false;

  // Check to see if the stride matches the size of the memset. If so, then we
  // know that every byte is touched in the loop.
  const SCEVConstant *ConstStride = dyn_cast<SCEVConstant>(Ev->getOperand(1));
  if (!ConstStride)
    return false;

  APInt Stride = ConstStride->getAPInt();
  if (SizeInBytes != Stride && SizeInBytes != -Stride)
    return false;

  // Verify that the memset value is loop invariant. If not, we can't promote
  // the memset.
  Value *SplatValue = MSI->getValue();
  if (!SplatValue || !CurLoop->isLoopInvariant(SplatValue))
    return false;

  SmallPtrSet<Instruction *, 1> MSIs;
  MSIs.insert(MSI);
  bool NegStride = SizeInBytes == -Stride;
  return processLoopStridedStore(Pointer, (unsigned)SizeInBytes,
                                 MSI->getDestAlignment(), SplatValue, MSI, MSIs,
                                 Ev, BECount, NegStride, /*IsLoopMemset=*/true);
}

/// mayLoopAccessLocation - Return true if the specified loop might access the
/// specified pointer location, which is a loop-strided access. The 'Access'
/// argument specifies what the verboten forms of access are (read or write).
static bool
mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L,
                      const SCEV *BECount, unsigned StoreSize,
                      AliasAnalysis &AA,
                      SmallPtrSetImpl<Instruction *> &IgnoredStores) {
  // Get the location that may be stored across the loop. Since the access is
  // strided positively through memory, we say that the modified location starts
  // at the pointer and has infinite size.
  uint64_t AccessSize = MemoryLocation::UnknownSize;

  // If the loop iterates a fixed number of times, we can refine the access size
  // to be exactly the size of the memset, which is (BECount+1)*StoreSize.
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    AccessSize = (BECst->getValue()->getZExtValue() + 1) * StoreSize;

  // TODO: For this to be really effective, we have to dive into the pointer
  // operand in the store. A store to &A[i] in a loop of 100 iterations will
  // always return MayAlias with a store to &A[100]; we need StoreLoc to be
  // "A" with a size of 100, which will then no-alias a store to &A[100].
  MemoryLocation StoreLoc(Ptr, AccessSize);

  for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
       ++BI)
    for (Instruction &I : **BI)
      if (IgnoredStores.count(&I) == 0 &&
          isModOrRefSet(
              intersectModRef(AA.getModRefInfo(&I, StoreLoc), Access)))
        return true;

  return false;
}

// If we have a negative stride, Start refers to the end of the memory location
// we're trying to memset. Therefore, we need to recompute the base pointer,
// which is just Start - BECount*Size.
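//
// For example (illustrative): for a loop storing down from A[n-1] to A[0],
// the addrec starts at &A[n-1]; with BECount = n-1 and StoreSize equal to
// the element size, the base recomputes to &A[0].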
static const SCEV *getStartForNegStride(const SCEV *Start, const SCEV *BECount,
                                        Type *IntPtr, unsigned StoreSize,
                                        ScalarEvolution *SE) {
  const SCEV *Index = SE->getTruncateOrZeroExtend(BECount, IntPtr);
  if (StoreSize != 1)
    Index = SE->getMulExpr(Index, SE->getConstant(IntPtr, StoreSize),
                           SCEV::FlagNUW);
  return SE->getMinusSCEV(Start, Index);
}

/// Compute the number of bytes as a SCEV from the backedge taken count.
///
/// This also maps the SCEV into the provided type and tries to handle the
/// computation in a way that will fold cleanly.
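///
/// For example (illustrative): a loop with a backedge-taken count of n-1
/// runs n iterations, so with a 4-byte store size the result is
/// (BECount + 1) * 4, i.e. 4*n bytes.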
static const SCEV *getNumBytes(const SCEV *BECount, Type *IntPtr,
                               unsigned StoreSize, Loop *CurLoop,
                               const DataLayout *DL, ScalarEvolution *SE) {
  const SCEV *NumBytesS;
  // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
  // pointer size if it isn't already.
  //
  // If we're going to need to zero extend the BE count, check if we can add
  // one to it prior to zero extending without overflow. Provided this is safe,
  // it allows better simplification of the +1.
  if (DL->getTypeSizeInBits(BECount->getType()) <
          DL->getTypeSizeInBits(IntPtr) &&
      SE->isLoopEntryGuardedByCond(
          CurLoop, ICmpInst::ICMP_NE, BECount,
          SE->getNegativeSCEV(SE->getOne(BECount->getType())))) {
    NumBytesS = SE->getZeroExtendExpr(
        SE->getAddExpr(BECount, SE->getOne(BECount->getType()), SCEV::FlagNUW),
        IntPtr);
  } else {
    NumBytesS = SE->getAddExpr(SE->getTruncateOrZeroExtend(BECount, IntPtr),
                               SE->getOne(IntPtr), SCEV::FlagNUW);
  }

  // And scale it based on the store size.
  if (StoreSize != 1) {
    NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
                               SCEV::FlagNUW);
  }
  return NumBytesS;
}

/// processLoopStridedStore - We see a strided store of some value. If we can
/// transform this into a memset or memset_pattern in the loop preheader, do so.
bool LoopIdiomRecognize::processLoopStridedStore(
    Value *DestPtr, unsigned StoreSize, unsigned StoreAlignment,
    Value *StoredVal, Instruction *TheStore,
    SmallPtrSetImpl<Instruction *> &Stores, const SCEVAddRecExpr *Ev,
    const SCEV *BECount, bool NegStride, bool IsLoopMemset) {
  Value *SplatValue = isBytewiseValue(StoredVal);
  Constant *PatternValue = nullptr;

  if (!SplatValue)
    PatternValue = getMemSetPatternValue(StoredVal, DL);

  assert((SplatValue || PatternValue) &&
         "Expected either splat value or pattern value.");

  // The trip count of the loop and the base pointer of the addrec SCEV are
  // guaranteed to be loop invariant, which means that they should dominate the
  // header. This allows us to insert code for them in the preheader.
  unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  IRBuilder<> Builder(Preheader->getTerminator());
  SCEVExpander Expander(*SE, *DL, "loop-idiom");

  Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS);
  Type *IntPtr = Builder.getIntPtrTy(*DL, DestAS);

  const SCEV *Start = Ev->getStart();
  // Handle negative strided loops.
  if (NegStride)
    Start = getStartForNegStride(Start, BECount, IntPtr, StoreSize, SE);

  // TODO: ideally we should still be able to generate memset if SCEV expander
  // is taught to generate the dependencies at the latest point.
  if (!isSafeToExpand(Start, *SE))
    return false;

  // Okay, we have a strided store "p[i]" of a splattable value. We can turn
  // this into a memset in the loop preheader now if we want. However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write to the aliased location. Check for any overlap by generating the
  // base pointer and checking the region.
  Value *BasePtr =
      Expander.expandCodeFor(Start, DestInt8PtrTy, Preheader->getTerminator());
  if (mayLoopAccessLocation(BasePtr, ModRefInfo::ModRef, CurLoop, BECount,
                            StoreSize, *AA, Stores)) {
    Expander.clear();
    // If we generated new code for the base pointer, clean up.
    RecursivelyDeleteTriviallyDeadInstructions(BasePtr, TLI);
    return false;
  }

  if (avoidLIRForMultiBlockLoop(/*IsMemset=*/true, IsLoopMemset))
    return false;

  // Okay, everything looks good, insert the memset.

  const SCEV *NumBytesS =
      getNumBytes(BECount, IntPtr, StoreSize, CurLoop, DL, SE);

  // TODO: ideally we should still be able to generate memset if SCEV expander
  // is taught to generate the dependencies at the latest point.
  if (!isSafeToExpand(NumBytesS, *SE))
    return false;

  Value *NumBytes =
      Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());

  CallInst *NewCall;
  if (SplatValue) {
    NewCall =
        Builder.CreateMemSet(BasePtr, SplatValue, NumBytes, StoreAlignment);
  } else {
    // Everything is emitted in the default address space.
    Type *Int8PtrTy = DestInt8PtrTy;

    Module *M = TheStore->getModule();
    StringRef FuncName = "memset_pattern16";
    Value *MSP =
        M->getOrInsertFunction(FuncName, Builder.getVoidTy(),
                               Int8PtrTy, Int8PtrTy, IntPtr);
    inferLibFuncAttributes(M, FuncName, *TLI);

    // Otherwise we should form a memset_pattern16. PatternValue is known to be
    // a constant array of 16 bytes. Plop the value into a mergable global.
    GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true,
                                            GlobalValue::PrivateLinkage,
                                            PatternValue, ".memset_pattern");
    GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); // Ok to merge these.
    GV->setAlignment(16);
    Value *PatternPtr = ConstantExpr::getBitCast(GV, Int8PtrTy);
    NewCall = Builder.CreateCall(MSP, {BasePtr, PatternPtr, NumBytes});
  }

  LLVM_DEBUG(dbgs() << "  Formed memset: " << *NewCall << "\n"
                    << "    from store to: " << *Ev << " at: " << *TheStore
                    << "\n");
  NewCall->setDebugLoc(TheStore->getDebugLoc());

  // Okay, the memset has been formed. Zap the original store and anything that
  // feeds into it.
  for (auto *I : Stores)
    deleteDeadInstruction(I);
  ++NumMemSet;
  return true;
}

/// If the stored value is a strided load in the same loop with the same stride
/// this may be transformable into a memcpy. This kicks in for stuff like
///   for (i) A[i] = B[i];
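/// which, when the load and store regions provably do not overlap within the
/// loop, becomes a single call along the lines of (illustrative)
///   memcpy(A, B, n * sizeof(*A));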
bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
                                                    const SCEV *BECount) {
  assert(SI->isUnordered() && "Expected only non-volatile non-ordered stores.");

  Value *StorePtr = SI->getPointerOperand();
  const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
  APInt Stride = getStoreStride(StoreEv);
  unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());
  bool NegStride = StoreSize == -Stride;

  // The store must be feeding a non-volatile load.
  LoadInst *LI = cast<LoadInst>(SI->getValueOperand());
  assert(LI->isUnordered() && "Expected only non-volatile non-ordered loads.");

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided load. If we have something else, it's a
  // random load we can't handle.
  const SCEVAddRecExpr *LoadEv =
      cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));

  // The trip count of the loop and the base pointer of the addrec SCEV are
  // guaranteed to be loop invariant, which means that they should dominate the
  // header. This allows us to insert code for them in the preheader.
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  IRBuilder<> Builder(Preheader->getTerminator());
  SCEVExpander Expander(*SE, *DL, "loop-idiom");

  const SCEV *StrStart = StoreEv->getStart();
  unsigned StrAS = SI->getPointerAddressSpace();
  Type *IntPtrTy = Builder.getIntPtrTy(*DL, StrAS);

  // Handle negative strided loops.
  if (NegStride)
    StrStart = getStartForNegStride(StrStart, BECount, IntPtrTy, StoreSize, SE);

  // Okay, we have a strided store "p[i]" of a loaded value. We can turn
  // this into a memcpy in the loop preheader now if we want. However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write the memory region we're storing to. This includes the load that
  // feeds the stores. Check for an alias by generating the base address and
  // checking everything.
  Value *StoreBasePtr = Expander.expandCodeFor(
      StrStart, Builder.getInt8PtrTy(StrAS), Preheader->getTerminator());

  SmallPtrSet<Instruction *, 1> Stores;
  Stores.insert(SI);
  if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop, BECount,
                            StoreSize, *AA, Stores)) {
    Expander.clear();
    // If we generated new code for the base pointer, clean up.
    RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
    return false;
  }

  const SCEV *LdStart = LoadEv->getStart();
  unsigned LdAS = LI->getPointerAddressSpace();

  // Handle negative strided loops.
  if (NegStride)
    LdStart = getStartForNegStride(LdStart, BECount, IntPtrTy, StoreSize, SE);

  // For a memcpy, we have to make sure that the input array is not being
  // mutated by the loop.
  Value *LoadBasePtr = Expander.expandCodeFor(
      LdStart, Builder.getInt8PtrTy(LdAS), Preheader->getTerminator());

  if (mayLoopAccessLocation(LoadBasePtr, ModRefInfo::Mod, CurLoop, BECount,
                            StoreSize, *AA, Stores)) {
    Expander.clear();
    // If we generated new code for the base pointer, clean up.
    RecursivelyDeleteTriviallyDeadInstructions(LoadBasePtr, TLI);
    RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
    return false;
  }

  if (avoidLIRForMultiBlockLoop())
    return false;

  // Okay, everything is safe, we can transform this!

  const SCEV *NumBytesS =
      getNumBytes(BECount, IntPtrTy, StoreSize, CurLoop, DL, SE);

  Value *NumBytes =
      Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator());

  CallInst *NewCall = nullptr;
  // Check whether to generate an unordered atomic memcpy:
  //  If the load or store is atomic, then it must necessarily be unordered
  //  by previous checks.
  if (!SI->isAtomic() && !LI->isAtomic())
    NewCall = Builder.CreateMemCpy(StoreBasePtr, SI->getAlignment(),
                                   LoadBasePtr, LI->getAlignment(), NumBytes);
  else {
    // We cannot allow unaligned ops for unordered load/store, so reject
    // anything where the alignment isn't at least the element size.
    unsigned Align = std::min(SI->getAlignment(), LI->getAlignment());
    if (Align < StoreSize)
      return false;

    // If the element.atomic memcpy is not lowered into explicit
    // loads/stores later, then it will be lowered into an element-size
    // specific lib call. If the lib call doesn't exist for our store size, then
    // we shouldn't generate the memcpy.
    if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize())
      return false;

    // Create the call.
    // Note that unordered atomic loads/stores are *required* by the spec to
    // have an alignment but non-atomic loads/stores may not.
    NewCall = Builder.CreateElementUnorderedAtomicMemCpy(
        StoreBasePtr, SI->getAlignment(), LoadBasePtr, LI->getAlignment(),
        NumBytes, StoreSize);
  }
  NewCall->setDebugLoc(SI->getDebugLoc());

  LLVM_DEBUG(dbgs() << "  Formed memcpy: " << *NewCall << "\n"
                    << "    from load ptr=" << *LoadEv << " at: " << *LI << "\n"
                    << "    from store ptr=" << *StoreEv << " at: " << *SI
                    << "\n");

  // Okay, the memcpy has been formed. Zap the original store and anything that
  // feeds into it.
  deleteDeadInstruction(SI);
  ++NumMemCpy;
  return true;
}

// When compiling for code size we avoid idiom recognition for a multi-block
// loop unless it is a loop_memset idiom or a memset/memcpy idiom in a nested
// loop.
bool LoopIdiomRecognize::avoidLIRForMultiBlockLoop(bool IsMemset,
                                                   bool IsLoopMemset) {
  if (ApplyCodeSizeHeuristics && CurLoop->getNumBlocks() > 1) {
    if (!CurLoop->getParentLoop() && (!IsMemset || !IsLoopMemset)) {
      LLVM_DEBUG(dbgs() << "  " << CurLoop->getHeader()->getParent()->getName()
                        << " : LIR " << (IsMemset ? "Memset" : "Memcpy")
                        << " avoided: multi-block top-level loop\n");
      return true;
    }
  }

  return false;
}

bool LoopIdiomRecognize::runOnNoncountableLoop() {
  return recognizePopcount() || recognizeAndInsertCTLZ();
}

/// Check if the given conditional branch is based on the comparison between
/// a variable and zero, and if the variable is non-zero, the control yields to
/// the loop entry. If the branch matches the behavior, the variable involved
/// in the comparison is returned. This function will be called to see if the
/// precondition and postcondition of the loop are in desirable form.
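///
/// For example (illustrative IR), given
///   %cmp = icmp ne i32 %x, 0
///   br i1 %cmp, label %loop-entry, label %exit
/// this returns %x.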
static Value *matchCondition(BranchInst *BI, BasicBlock *LoopEntry) {
  if (!BI || !BI->isConditional())
    return nullptr;

  ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition());
  if (!Cond)
    return nullptr;

  ConstantInt *CmpZero = dyn_cast<ConstantInt>(Cond->getOperand(1));
  if (!CmpZero || !CmpZero->isZero())
    return nullptr;

  ICmpInst::Predicate Pred = Cond->getPredicate();
  if ((Pred == ICmpInst::ICMP_NE && BI->getSuccessor(0) == LoopEntry) ||
      (Pred == ICmpInst::ICMP_EQ && BI->getSuccessor(1) == LoopEntry))
    return Cond->getOperand(0);

  return nullptr;
}

// Check if the recurrence variable `VarX` is in the right form to create
// the idiom. Returns the value coerced to a PHINode if so.
static PHINode *getRecurrenceVar(Value *VarX, Instruction *DefX,
                                 BasicBlock *LoopEntry) {
  auto *PhiX = dyn_cast<PHINode>(VarX);
  if (PhiX && PhiX->getParent() == LoopEntry &&
      (PhiX->getOperand(0) == DefX || PhiX->getOperand(1) == DefX))
    return PhiX;
  return nullptr;
}

/// Return true iff the idiom is detected in the loop.
///
/// Additionally:
/// 1) \p CntInst is set to the instruction counting the population bit.
/// 2) \p CntPhi is set to the corresponding phi node.
/// 3) \p Var is set to the value whose population bits are being counted.
///
/// The core idiom we are trying to detect is:
/// \code
///    if (x0 == 0)
///      goto loop-exit // the precondition of the loop
///    cnt0 = init-val;
///    do {
///       x1 = phi (x0, x2);
///       cnt1 = phi(cnt0, cnt2);
///
///       cnt2 = cnt1 + 1;
///        ...
///       x2 = x1 & (x1 - 1);
///        ...
///    } while(x2 != 0);
///
/// loop-exit:
/// \endcode
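///
/// In C terms this is the classic bit-clearing loop (illustrative):
///   while (x) { x &= x - 1; ++cnt; }
/// which the transform below replaces with a single @llvm.ctpop call.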
static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB,
                                Instruction *&CntInst, PHINode *&CntPhi,
                                Value *&Var) {
  // step 1: Check to see if the loop-back branch matches this pattern:
  //  "if (a != 0) goto loop-entry".
  BasicBlock *LoopEntry;
  Instruction *DefX2, *CountInst;
  Value *VarX1, *VarX0;
  PHINode *PhiX, *CountPhi;

  DefX2 = CountInst = nullptr;
  VarX1 = VarX0 = nullptr;
  PhiX = CountPhi = nullptr;
  LoopEntry = *(CurLoop->block_begin());

  // step 1: Check if the loop-back branch is in desirable form.
  {
    if (Value *T = matchCondition(
            dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
      DefX2 = dyn_cast<Instruction>(T);
    else
      return false;
  }

  // step 2: detect instructions corresponding to "x2 = x1 & (x1 - 1)"
  {
    if (!DefX2 || DefX2->getOpcode() != Instruction::And)
      return false;

    BinaryOperator *SubOneOp;

    if ((SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(0))))
      VarX1 = DefX2->getOperand(1);
    else {
      VarX1 = DefX2->getOperand(0);
      SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(1));
    }
    if (!SubOneOp || SubOneOp->getOperand(0) != VarX1)
      return false;

    ConstantInt *Dec = dyn_cast<ConstantInt>(SubOneOp->getOperand(1));
    if (!Dec ||
        !((SubOneOp->getOpcode() == Instruction::Sub && Dec->isOne()) ||
          (SubOneOp->getOpcode() == Instruction::Add &&
           Dec->isMinusOne()))) {
      return false;
    }
  }

  // step 3: Check the recurrence of variable X
  PhiX = getRecurrenceVar(VarX1, DefX2, LoopEntry);
  if (!PhiX)
    return false;

  // step 4: Find the instruction which counts the population: cnt2 = cnt1 + 1
  {
    CountInst = nullptr;
    for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
                              IterE = LoopEntry->end();
         Iter != IterE; Iter++) {
      Instruction *Inst = &*Iter;
      if (Inst->getOpcode() != Instruction::Add)
        continue;

      ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
      if (!Inc || !Inc->isOne())
        continue;

      PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
      if (!Phi)
        continue;

      // Check if the result of the instruction is live out of the loop.
      bool LiveOutLoop = false;
      for (User *U : Inst->users()) {
        if ((cast<Instruction>(U))->getParent() != LoopEntry) {
          LiveOutLoop = true;
          break;
        }
      }

      if (LiveOutLoop) {
        CountInst = Inst;
        CountPhi = Phi;
        break;
      }
    }

    if (!CountInst)
      return false;
  }

  // step 5: check if the precondition is in this form:
  //   "if (x != 0) goto loop-head ; else goto somewhere-we-don't-care;"
  {
    auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
    Value *T = matchCondition(PreCondBr, CurLoop->getLoopPreheader());
    if (T != PhiX->getOperand(0) && T != PhiX->getOperand(1))
      return false;

    CntInst = CountInst;
    CntPhi = CountPhi;
    Var = T;
  }

  return true;
}

/// Return true if the idiom is detected in the loop.
///
/// Additionally:
/// 1) \p CntInst is set to the instruction counting the leading zeros (CTLZ),
///    or nullptr if there is no such instruction.
/// 2) \p CntPhi is set to the corresponding phi node,
///    or nullptr if there is no such node.
/// 3) \p Var is set to the value whose CTLZ could be used.
/// 4) \p DefX is set to the instruction calculating the loop exit condition.
///
/// The core idiom we are trying to detect is:
/// \code
///    if (x0 == 0)
///      goto loop-exit // the precondition of the loop
///    cnt0 = init-val;
///    do {
///       x = phi (x0, x.next);   //PhiX
///       cnt = phi(cnt0, cnt.next);
///
///       cnt.next = cnt + 1;
///        ...
///       x.next = x >> 1;   // DefX
///        ...
///    } while(x.next != 0);
///
/// loop-exit:
/// \endcode
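///
/// In C terms the loop is roughly (illustrative):
///   do { ++cnt; x >>= 1; } while (x);
/// whose trip count, for non-zero x0, is BitWidth(x0) - CTLZ(x0); this is
/// what the transform below materializes.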
static bool detectCTLZIdiom(Loop *CurLoop, PHINode *&PhiX,
                            Instruction *&CntInst, PHINode *&CntPhi,
                            Instruction *&DefX) {
  BasicBlock *LoopEntry;
  Value *VarX = nullptr;

  DefX = nullptr;
  PhiX = nullptr;
  CntInst = nullptr;
  CntPhi = nullptr;
  LoopEntry = *(CurLoop->block_begin());

  // step 1: Check if the loop-back branch is in desirable form.
  if (Value *T = matchCondition(
          dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
    DefX = dyn_cast<Instruction>(T);
  else
    return false;

  // step 2: detect instructions corresponding to "x.next = x >> 1"
  if (!DefX || (DefX->getOpcode() != Instruction::AShr &&
                DefX->getOpcode() != Instruction::LShr))
    return false;
  ConstantInt *Shft = dyn_cast<ConstantInt>(DefX->getOperand(1));
  if (!Shft || !Shft->isOne())
    return false;
  VarX = DefX->getOperand(0);

  // step 3: Check the recurrence of variable X
  PhiX = getRecurrenceVar(VarX, DefX, LoopEntry);
  if (!PhiX)
    return false;

  // step 4: Find the instruction which counts the CTLZ: cnt.next = cnt + 1
  // TODO: We can skip this step. If the loop trip count is known (CTLZ),
  //       then all uses of "cnt.next" could be optimized to the trip count
  //       plus "cnt0". Currently it is not optimized.
  //       This step could be used to detect POPCNT instruction:
  //       cnt.next = cnt + (x.next & 1)
  for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
                            IterE = LoopEntry->end();
       Iter != IterE; Iter++) {
    Instruction *Inst = &*Iter;
    if (Inst->getOpcode() != Instruction::Add)
      continue;

    ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
    if (!Inc || !Inc->isOne())
      continue;

    PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
    if (!Phi)
      continue;

    CntInst = Inst;
    CntPhi = Phi;
    break;
  }
  if (!CntInst)
    return false;

  return true;
}

/// Recognize CTLZ idiom in a non-countable loop and convert the loop
/// to countable (with a CTLZ-based trip count).
/// Returns true if a CTLZ trip count was inserted; otherwise, returns false.
bool LoopIdiomRecognize::recognizeAndInsertCTLZ() {
  // Give up if the loop has multiple blocks or multiple backedges.
  if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
    return false;

  Instruction *CntInst, *DefX;
  PHINode *CntPhi, *PhiX;
  if (!detectCTLZIdiom(CurLoop, PhiX, CntInst, CntPhi, DefX))
    return false;

  bool IsCntPhiUsedOutsideLoop = false;
  for (User *U : CntPhi->users())
    if (!CurLoop->contains(cast<Instruction>(U))) {
      IsCntPhiUsedOutsideLoop = true;
      break;
    }
  bool IsCntInstUsedOutsideLoop = false;
  for (User *U : CntInst->users())
    if (!CurLoop->contains(cast<Instruction>(U))) {
      IsCntInstUsedOutsideLoop = true;
      break;
    }
  // If both CntInst and CntPhi are used outside the loop the profitability
  // is questionable.
  if (IsCntInstUsedOutsideLoop && IsCntPhiUsedOutsideLoop)
    return false;

  // For some CPUs the result of the CTLZ(X) intrinsic is undefined when X is
  // 0. If we cannot guarantee X != 0, we need to check this when expanding.
  bool ZeroCheck = false;
  // It is safe to assume the preheader exists, as it was checked in the
  // caller, runOnLoop.
  BasicBlock *PH = CurLoop->getLoopPreheader();
  Value *InitX = PhiX->getIncomingValueForBlock(PH);

  // Make sure the initial value can't be negative, otherwise the ashr in the
  // loop might never reach zero, which would make the loop infinite.
  if (DefX->getOpcode() == Instruction::AShr && !isKnownNonNegative(InitX, *DL))
    return false;

  // If we are using the count instruction outside the loop, make sure we
  // have a zero check as a precondition. Without the check the loop would run
  // one iteration before any check of the input value. This means 0 and 1
  // would have identical behavior in the original loop, and thus we need the
  // zero check to distinguish them.
  if (!IsCntPhiUsedOutsideLoop) {
    auto *PreCondBB = PH->getSinglePredecessor();
    if (!PreCondBB)
      return false;
    auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
    if (!PreCondBI)
      return false;
    if (matchCondition(PreCondBI, PH) != InitX)
      return false;
    ZeroCheck = true;
  }

  // Check if the CTLZ intrinsic is profitable. Assume it is always profitable
  // if we delete the loop (the loop has only 6 instructions):
  //  %n.addr.0 = phi [ %n, %entry ], [ %shr, %while.cond ]
  //  %i.0 = phi [ %i0, %entry ], [ %inc, %while.cond ]
  //  %shr = ashr %n.addr.0, 1
  //  %tobool = icmp eq %shr, 0
  //  %inc = add nsw %i.0, 1
  //  br i1 %tobool

  const Value *Args[] =
      {InitX, ZeroCheck ? ConstantInt::getTrue(InitX->getContext())
                        : ConstantInt::getFalse(InitX->getContext())};
  if (CurLoop->getHeader()->size() != 6 &&
      TTI->getIntrinsicCost(Intrinsic::ctlz, InitX->getType(), Args) >
          TargetTransformInfo::TCC_Basic)
    return false;

  transformLoopToCountable(PH, CntInst, CntPhi, InitX, DefX,
                           DefX->getDebugLoc(), ZeroCheck,
                           IsCntPhiUsedOutsideLoop);
  return true;
}
1447
1448 /// Recognizes a population count idiom in a non-countable loop.
1449 ///
1450 /// If detected, transforms the relevant code to issue the popcount intrinsic
1451 /// function call, and returns true; otherwise, returns false.
bool LoopIdiomRecognize::recognizePopcount() {
  if (TTI->getPopcntSupport(32) != TargetTransformInfo::PSK_FastHardware)
    return false;

  // Counting population is usually done with a few arithmetic instructions.
  // Such instructions can be easily "absorbed" by vacant slots in a
  // non-compact loop. Therefore, recognizing a popcount idiom only makes
  // sense in a compact loop.

  // Give up if the loop has multiple blocks or multiple backedges.
  if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
    return false;

  BasicBlock *LoopBody = *(CurLoop->block_begin());
  if (LoopBody->size() >= 20) {
    // The loop is too big, bail out.
    return false;
  }

  // It should have a preheader containing nothing but an unconditional branch.
  BasicBlock *PH = CurLoop->getLoopPreheader();
  if (!PH || &PH->front() != PH->getTerminator())
    return false;
  auto *EntryBI = dyn_cast<BranchInst>(PH->getTerminator());
  if (!EntryBI || EntryBI->isConditional())
    return false;

  // It should have a precondition block where the generated popcount intrinsic
  // function can be inserted.
  auto *PreCondBB = PH->getSinglePredecessor();
  if (!PreCondBB)
    return false;
  auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
  if (!PreCondBI || PreCondBI->isUnconditional())
    return false;

  Instruction *CntInst;
  PHINode *CntPhi;
  Value *Val;
  if (!detectPopcountIdiom(CurLoop, PreCondBB, CntInst, CntPhi, Val))
    return false;

  transformLoopToPopcount(PreCondBB, CntInst, CntPhi, Val);
  return true;
}

/// Create a call to the ctpop intrinsic for Val at IRBuilder's current insert
/// point, tagged with the debug location DL.
static CallInst *createPopcntIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
                                       const DebugLoc &DL) {
  Value *Ops[] = {Val};
  Type *Tys[] = {Val->getType()};

  Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
  Value *Func = Intrinsic::getDeclaration(M, Intrinsic::ctpop, Tys);
  CallInst *CI = IRBuilder.CreateCall(Func, Ops);
  CI->setDebugLoc(DL);

  return CI;
}

/// Create a call to the ctlz intrinsic for Val at IRBuilder's current insert
/// point, tagged with the debug location DL. ZeroCheck is forwarded as the
/// intrinsic's is_zero_undef flag: it is true when the caller has already
/// guaranteed that Val is not zero.
static CallInst *createCTLZIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
                                     const DebugLoc &DL, bool ZeroCheck) {
  Value *Ops[] = {Val, ZeroCheck ? IRBuilder.getTrue() : IRBuilder.getFalse()};
  Type *Tys[] = {Val->getType()};

  Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
  Value *Func = Intrinsic::getDeclaration(M, Intrinsic::ctlz, Tys);
  CallInst *CI = IRBuilder.CreateCall(Func, Ops);
  CI->setDebugLoc(DL);

  return CI;
}

/// Transform the following loop:
/// loop:
///   CntPhi = PHI [Cnt0, CntInst]
///   PhiX = PHI [InitX, DefX]
///   CntInst = CntPhi + 1
///   DefX = PhiX >> 1
///   LOOP_BODY
///   Br: loop if (DefX != 0)
/// Use(CntPhi) or Use(CntInst)
///
/// Into:
/// If CntPhi used outside the loop:
///   CountPrev = BitWidth(InitX) - CTLZ(InitX >> 1)
///   Count = CountPrev + 1
/// else
///   Count = BitWidth(InitX) - CTLZ(InitX)
/// loop:
///   CntPhi = PHI [Cnt0, CntInst]
///   PhiX = PHI [InitX, DefX]
///   PhiCount = PHI [Count, Dec]
///   CntInst = CntPhi + 1
///   DefX = PhiX >> 1
///   Dec = PhiCount - 1
///   LOOP_BODY
///   Br: loop if (Dec != 0)
/// Use(CountPrev + Cnt0) // Use(CntPhi)
/// or
/// Use(Count + Cnt0) // Use(CntInst)
///
/// If LOOP_BODY is empty the loop will be deleted.
/// If CntInst and DefX are not used in LOOP_BODY they will be removed.
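///
/// A worked example with assumed values: for an i32 InitX = 0x10 and Cnt0 = 0,
/// the original loop runs five iterations. With CntPhi used outside the loop,
/// CountPrev = 32 - CTLZ(0x10 >> 1) = 32 - 28 = 4 (the final CntPhi value),
/// and Count = CountPrev + 1 = 5 becomes the new trip count.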
void LoopIdiomRecognize::transformLoopToCountable(
    BasicBlock *Preheader, Instruction *CntInst, PHINode *CntPhi, Value *InitX,
    Instruction *DefX, const DebugLoc &DL, bool ZeroCheck,
    bool IsCntPhiUsedOutsideLoop) {
  BranchInst *PreheaderBr = cast<BranchInst>(Preheader->getTerminator());

  // Step 1: Insert the CTLZ instruction at the end of the preheader block:
  //   Count = BitWidth - CTLZ(InitX);
  // If there are uses of CntPhi, create:
  //   CountPrev = BitWidth - CTLZ(InitX >> 1);
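  // As a quick sanity check with assumed values: for an i32 InitX of 0xF0,
  // CTLZ(InitX) is 24, so Count = 32 - 24 = 8, matching the eight shifts the
  // original loop needs to drive 0xF0 to zero.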
  IRBuilder<> Builder(PreheaderBr);
  Builder.SetCurrentDebugLocation(DL);
  Value *CTLZ, *Count, *CountPrev, *NewCount, *InitXNext;

  if (IsCntPhiUsedOutsideLoop) {
    if (DefX->getOpcode() == Instruction::AShr)
      InitXNext =
          Builder.CreateAShr(InitX, ConstantInt::get(InitX->getType(), 1));
    else if (DefX->getOpcode() == Instruction::LShr)
      InitXNext =
          Builder.CreateLShr(InitX, ConstantInt::get(InitX->getType(), 1));
    else
      llvm_unreachable("Unexpected opcode!");
  } else
    InitXNext = InitX;
  CTLZ = createCTLZIntrinsic(Builder, InitXNext, DL, ZeroCheck);
  Count = Builder.CreateSub(
      ConstantInt::get(CTLZ->getType(),
                       CTLZ->getType()->getIntegerBitWidth()),
      CTLZ);
  if (IsCntPhiUsedOutsideLoop) {
    CountPrev = Count;
    Count = Builder.CreateAdd(CountPrev,
                              ConstantInt::get(CountPrev->getType(), 1));
  }
  if (IsCntPhiUsedOutsideLoop)
    NewCount = Builder.CreateZExtOrTrunc(CountPrev,
                                         cast<IntegerType>(CntInst->getType()));
  else
    NewCount = Builder.CreateZExtOrTrunc(Count,
                                         cast<IntegerType>(CntInst->getType()));

  // If the CTLZ counter's initial value is not zero, insert an Add instruction.
  Value *CntInitVal = CntPhi->getIncomingValueForBlock(Preheader);
  ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
  if (!InitConst || !InitConst->isZero())
    NewCount = Builder.CreateAdd(NewCount, CntInitVal);

  // Step 2: Insert the new IV and loop condition:
  // loop:
  //   ...
  //   PhiCount = PHI [Count, Dec]
  //   ...
  //   Dec = PhiCount - 1
  //   ...
  //   Br: loop if (Dec != 0)
  BasicBlock *Body = *(CurLoop->block_begin());
  auto *LbBr = cast<BranchInst>(Body->getTerminator());
  ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
  Type *Ty = Count->getType();

  PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());

  Builder.SetInsertPoint(LbCond);
  Instruction *TcDec = cast<Instruction>(
      Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
                        "tcdec", false, true));

  TcPhi->addIncoming(Count, Preheader);
  TcPhi->addIncoming(TcDec, Body);

  CmpInst::Predicate Pred =
      (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  LbCond->setPredicate(Pred);
  LbCond->setOperand(0, TcDec);
  LbCond->setOperand(1, ConstantInt::get(Ty, 0));

  // Step 3: All the references to the original counter outside the loop are
  // replaced with NewCount -- the value computed from __builtin_ctlz(x).
  if (IsCntPhiUsedOutsideLoop)
    CntPhi->replaceUsesOutsideBlock(NewCount, Body);
  else
    CntInst->replaceUsesOutsideBlock(NewCount, Body);

  // Step 4: Forget the "non-computable" trip-count SCEV associated with the
  // loop. The loop would otherwise not be deleted even if it becomes empty.
  SE->forgetLoop(CurLoop);
}

void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB,
                                                 Instruction *CntInst,
                                                 PHINode *CntPhi, Value *Var) {
  BasicBlock *PreHead = CurLoop->getLoopPreheader();
  auto *PreCondBr = cast<BranchInst>(PreCondBB->getTerminator());
  const DebugLoc &DL = CntInst->getDebugLoc();

  // Assume that, before the transformation, the loop looks like:
  //  if (x) // the precondition
  //    do { cnt++; x &= x - 1; } while(x);

  // Step 1: Insert the ctpop instruction at the end of the precondition block.
  IRBuilder<> Builder(PreCondBr);
  Value *PopCnt, *PopCntZext, *NewCount, *TripCnt;
  {
    PopCnt = createPopcntIntrinsic(Builder, Var, DL);
    NewCount = PopCntZext =
        Builder.CreateZExtOrTrunc(PopCnt, cast<IntegerType>(CntPhi->getType()));

    if (NewCount != PopCnt)
      (cast<Instruction>(NewCount))->setDebugLoc(DL);

    // TripCnt is exactly the number of iterations the loop has.
    TripCnt = NewCount;

    // If the population counter's initial value is not zero, insert an Add
    // instruction.
    Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead);
    ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
    if (!InitConst || !InitConst->isZero()) {
      NewCount = Builder.CreateAdd(NewCount, CntInitVal);
      (cast<Instruction>(NewCount))->setDebugLoc(DL);
    }
  }

  // Step 2: Replace the precondition from "if (x == 0) goto loop-exit" to
  // "if (NewCount == 0) loop-exit". Without this change, the intrinsic
  // call would be partially dead code, and downstream passes would drag
  // it back from the precondition block to the preheader.
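  // For instance (schematic): a precondition "br (icmp eq x, 0), exit, loop"
  // becomes "br (icmp eq ctpop(x), 0), exit, loop", so the intrinsic result
  // feeds the branch directly.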
  {
    ICmpInst *PreCond = cast<ICmpInst>(PreCondBr->getCondition());

    Value *Opnd0 = PopCntZext;
    Value *Opnd1 = ConstantInt::get(PopCntZext->getType(), 0);
    if (PreCond->getOperand(0) != Var)
      std::swap(Opnd0, Opnd1);

    ICmpInst *NewPreCond = cast<ICmpInst>(
        Builder.CreateICmp(PreCond->getPredicate(), Opnd0, Opnd1));
    PreCondBr->setCondition(NewPreCond);

    RecursivelyDeleteTriviallyDeadInstructions(PreCond, TLI);
  }

  // Step 3: Note that the population count is exactly the trip count of the
  // loop in question, which enables us to convert the loop from a
  // noncountable loop into a countable one. The benefit is twofold:
  //
  //  - If the loop only counts population, the entire loop becomes dead after
  //    the transformation. It is a lot easier to prove a countable loop dead
  //    than to prove a noncountable one. (In some C dialects, an infinite loop
  //    isn't dead even if it computes nothing useful. In general, DCE needs
  //    to prove a noncountable loop finite before safely deleting it.)
  //
  //  - If the loop also performs something else, it remains alive.
  //    Since it is transformed into countable form, it can be aggressively
  //    optimized by some optimizations which are in general not applicable
  //    to a noncountable loop.
  //
  // After this step, this loop (conceptually) would look like the following:
  //   newcnt = __builtin_ctpop(x);
  //   t = newcnt;
  //   if (x)
  //     do { cnt++; x &= x - 1; t--; } while (t > 0);
  BasicBlock *Body = *(CurLoop->block_begin());
  {
    auto *LbBr = cast<BranchInst>(Body->getTerminator());
    ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
    Type *Ty = TripCnt->getType();

    PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());

    Builder.SetInsertPoint(LbCond);
    Instruction *TcDec = cast<Instruction>(
        Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
                          "tcdec", false, true));

    TcPhi->addIncoming(TripCnt, PreHead);
    TcPhi->addIncoming(TcDec, Body);

    CmpInst::Predicate Pred =
        (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_UGT : CmpInst::ICMP_SLE;
    LbCond->setPredicate(Pred);
    LbCond->setOperand(0, TcDec);
    LbCond->setOperand(1, ConstantInt::get(Ty, 0));
  }

  // Step 4: All the references to the original population counter outside
  // the loop are replaced with NewCount -- the value computed from
  // __builtin_ctpop().
  CntInst->replaceUsesOutsideBlock(NewCount, Body);

  // Step 5: Forget the "non-computable" trip-count SCEV associated with the
  // loop. The loop would otherwise not be deleted even if it becomes empty.
  SE->forgetLoop(CurLoop);
}
