//===-- AtomicExpandPass.cpp - Expand atomic instructions -------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// either (intrinsic-based) load-linked/store-conditional loops or AtomicCmpXchg.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

#define DEBUG_TYPE "atomic-expand"

namespace {
  class AtomicExpand: public FunctionPass {
    const TargetMachine *TM;
    const TargetLowering *TLI;
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit AtomicExpand(const TargetMachine *TM = nullptr)
      : FunctionPass(ID), TM(TM), TLI(nullptr) {
      initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override;

  private:
    bool bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                               bool IsStore, bool IsLoad);
    bool expandAtomicLoad(LoadInst *LI);
    bool expandAtomicLoadToLL(LoadInst *LI);
    bool expandAtomicLoadToCmpXchg(LoadInst *LI);
    bool expandAtomicStore(StoreInst *SI);
    bool tryExpandAtomicRMW(AtomicRMWInst *AI);
    bool expandAtomicRMWToLLSC(AtomicRMWInst *AI);
    bool expandAtomicRMWToCmpXchg(AtomicRMWInst *AI);
    bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
    bool isIdempotentRMW(AtomicRMWInst *AI);
    bool simplifyIdempotentRMW(AtomicRMWInst *AI);
  };
}

char AtomicExpand::ID = 0;
char &llvm::AtomicExpandID = AtomicExpand::ID;
INITIALIZE_TM_PASS(AtomicExpand, "atomic-expand",
                   "Expand Atomic calls in terms of either load-linked & "
                   "store-conditional or cmpxchg",
                   false, false)

FunctionPass *llvm::createAtomicExpandPass(const TargetMachine *TM) {
  return new AtomicExpand(TM);
}

bool AtomicExpand::runOnFunction(Function &F) {
  if (!TM || !TM->getSubtargetImpl(F)->enableAtomicExpand())
    return false;
  TLI = TM->getSubtargetImpl(F)->getTargetLowering();

  SmallVector<Instruction *, 1> AtomicInsts;

  // Changing control-flow while iterating through it is a bad idea, so gather a
  // list of all atomic instructions before we start.
  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
    if (I->isAtomic())
      AtomicInsts.push_back(&*I);
  }

  bool MadeChange = false;
  for (auto I : AtomicInsts) {
    auto LI = dyn_cast<LoadInst>(I);
    auto SI = dyn_cast<StoreInst>(I);
    auto RMWI = dyn_cast<AtomicRMWInst>(I);
    auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
    assert((LI || SI || RMWI || CASI || isa<FenceInst>(I)) &&
           "Unknown atomic instruction");

    auto FenceOrdering = Monotonic;
    bool IsStore, IsLoad;
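    // On targets that want explicit fences, strip the ordering off the atomic
    // operation itself and reinstate it as explicit fences around the
    // now-monotonic operation.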
    if (TLI->getInsertFencesForAtomic()) {
      if (LI && isAtLeastAcquire(LI->getOrdering())) {
        FenceOrdering = LI->getOrdering();
        LI->setOrdering(Monotonic);
        IsStore = false;
        IsLoad = true;
      } else if (SI && isAtLeastRelease(SI->getOrdering())) {
        FenceOrdering = SI->getOrdering();
        SI->setOrdering(Monotonic);
        IsStore = true;
        IsLoad = false;
      } else if (RMWI && (isAtLeastRelease(RMWI->getOrdering()) ||
                          isAtLeastAcquire(RMWI->getOrdering()))) {
        FenceOrdering = RMWI->getOrdering();
        RMWI->setOrdering(Monotonic);
        IsStore = IsLoad = true;
      } else if (CASI && !TLI->hasLoadLinkedStoreConditional() &&
                 (isAtLeastRelease(CASI->getSuccessOrdering()) ||
                  isAtLeastAcquire(CASI->getSuccessOrdering()))) {
        // If a compare and swap is lowered to LL/SC, we can do smarter fence
        // insertion, with a stronger one on the success path than on the
        // failure path. As a result, fence insertion is directly done by
        // expandAtomicCmpXchg in that case.
        FenceOrdering = CASI->getSuccessOrdering();
        CASI->setSuccessOrdering(Monotonic);
        CASI->setFailureOrdering(Monotonic);
        IsStore = IsLoad = true;
      }

      if (FenceOrdering != Monotonic) {
        MadeChange |= bracketInstWithFences(I, FenceOrdering, IsStore, IsLoad);
      }
    }

    if (LI && TLI->shouldExpandAtomicLoadInIR(LI)) {
      MadeChange |= expandAtomicLoad(LI);
    } else if (SI && TLI->shouldExpandAtomicStoreInIR(SI)) {
      MadeChange |= expandAtomicStore(SI);
    } else if (RMWI) {
      // There are two different ways of expanding RMW instructions:
      // - into a load if it is idempotent
      // - into a Cmpxchg/LL-SC loop otherwise
      // We try them in that order.

      if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
        MadeChange = true;
      } else {
        MadeChange |= tryExpandAtomicRMW(RMWI);
      }
    } else if (CASI && TLI->hasLoadLinkedStoreConditional()) {
      MadeChange |= expandAtomicCmpXchg(CASI);
    }
  }
  return MadeChange;
}

bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                                         bool IsStore, bool IsLoad) {
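  // For example (illustrative): bracketing a store that was downgraded from
  // seq_cst to monotonic above might yield
  //   fence seq_cst
  //   store atomic i32 %v, i32* %p monotonic
  //   fence seq_cst
  // though the exact fences emitted (if any) are entirely up to the target's
  // emitLeading/TrailingFence hooks.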
  IRBuilder<> Builder(I);

  auto LeadingFence = TLI->emitLeadingFence(Builder, Order, IsStore, IsLoad);

  auto TrailingFence = TLI->emitTrailingFence(Builder, Order, IsStore, IsLoad);
  // The trailing fence is emitted before the instruction instead of after it
  // because there is no easy way of setting the Builder's insertion point to
  // after an instruction. So we must erase the fence from the BB, and insert
  // it back in the right place.
  // We have a guard here because not every atomic operation generates a
  // trailing fence.
  if (TrailingFence) {
    TrailingFence->removeFromParent();
    TrailingFence->insertAfter(I);
  }

  return (LeadingFence || TrailingFence);
}

bool AtomicExpand::expandAtomicLoad(LoadInst *LI) {
  if (TLI->hasLoadLinkedStoreConditional())
    return expandAtomicLoadToLL(LI);
  else
    return expandAtomicLoadToCmpXchg(LI);
}

bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
  IRBuilder<> Builder(LI);

  // On some architectures, load-linked instructions are atomic for larger
  // sizes than normal loads. For example, the only 64-bit load guaranteed
  // to be single-copy atomic by ARM is an ldrexd (A3.5.3).
  Value *Val =
      TLI->emitLoadLinked(Builder, LI->getPointerOperand(), LI->getOrdering());

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}

bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  IRBuilder<> Builder(LI);
  AtomicOrdering Order = LI->getOrdering();
  Value *Addr = LI->getPointerOperand();
  Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
  Constant *DummyVal = Constant::getNullValue(Ty);
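  // Swapping DummyVal for DummyVal never changes memory: the compare either
  // succeeds (storing back the zero that was already there) or fails (storing
  // nothing), and in both cases the "loaded" field is the current value.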

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}

bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
  // This function is only called on atomic stores that are too large to be
  // atomic if implemented as a native store. So we replace them with an
  // atomic swap, which can be implemented for example as an ldrex/strex pair
  // on ARM or lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
  // It is the responsibility of the target to only signal expansion via
  // shouldExpandAtomicStoreInIR in cases where this is required and possible.
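  // For example (illustrative):
  //   store atomic i64 %v, i64* %p seq_cst, align 8
  // becomes
  //   atomicrmw xchg i64* %p, i64 %v seq_cst
  // which tryExpandAtomicRMW below then lowers as usual.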
  IRBuilder<> Builder(SI);
  AtomicRMWInst *AI =
      Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
                              SI->getValueOperand(), SI->getOrdering());
  SI->eraseFromParent();

  // Now that we have an appropriate swap instruction, lower it as usual.
  return tryExpandAtomicRMW(AI);
}

bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
  switch (TLI->shouldExpandAtomicRMWInIR(AI)) {
  case TargetLoweringBase::AtomicRMWExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicRMWExpansionKind::LLSC: {
    assert(TLI->hasLoadLinkedStoreConditional() &&
           "TargetLowering requested we expand AtomicRMW instruction into "
           "load-linked/store-conditional combos, but such instructions aren't "
           "supported");

    return expandAtomicRMWToLLSC(AI);
  }
  case TargetLoweringBase::AtomicRMWExpansionKind::CmpXChg: {
    return expandAtomicRMWToCmpXchg(AI);
  }
  }
  llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
}

/// Emit IR to implement the given atomicrmw operation on values in registers,
/// returning the new value.
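/// For example, for AtomicRMWInst::Add this emits "%new = add iN %loaded,
/// %inc" and returns %new; for Xchg it simply returns Inc unchanged.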
static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
                              Value *Loaded, Value *Inc) {
  Value *NewVal;
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return Inc;
  case AtomicRMWInst::Add:
    return Builder.CreateAdd(Loaded, Inc, "new");
  case AtomicRMWInst::Sub:
    return Builder.CreateSub(Loaded, Inc, "new");
  case AtomicRMWInst::And:
    return Builder.CreateAnd(Loaded, Inc, "new");
  case AtomicRMWInst::Nand:
    return Builder.CreateNot(Builder.CreateAnd(Loaded, Inc), "new");
  case AtomicRMWInst::Or:
    return Builder.CreateOr(Loaded, Inc, "new");
  case AtomicRMWInst::Xor:
    return Builder.CreateXor(Loaded, Inc, "new");
  case AtomicRMWInst::Max:
    NewVal = Builder.CreateICmpSGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::Min:
    NewVal = Builder.CreateICmpSLE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMax:
    NewVal = Builder.CreateICmpUGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMin:
    NewVal = Builder.CreateICmpULE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  default:
    llvm_unreachable("Unknown atomic op");
  }
}

bool AtomicExpand::expandAtomicRMWToLLSC(AtomicRMWInst *AI) {
  AtomicOrdering MemOpOrder = AI->getOrdering();
  Value *Addr = AI->getPointerOperand();
  BasicBlock *BB = AI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     fence?
  // atomicrmw.start:
  //     %loaded = @load.linked(%addr)
  //     %new = some_op iN %loaded, %incr
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp ne i32 %stored, 0
  //     br i1 %try_again, label %atomicrmw.start, label %atomicrmw.end
  // atomicrmw.end:
  //     fence?
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(AI, "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from AI.
  IRBuilder<> Builder(AI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);

  Value *NewVal =
      performAtomicOp(AI->getOperation(), Builder, Loaded, AI->getValOperand());

  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());

  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();

  return true;
}

bool AtomicExpand::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI) {
  AtomicOrdering MemOpOrder =
      AI->getOrdering() == Unordered ? Monotonic : AI->getOrdering();
  Value *Addr = AI->getPointerOperand();
  BasicBlock *BB = AI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     %init_loaded = load atomic iN* %addr
  //     br label %loop
  // loop:
  //     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
  //     %new = some_op iN %loaded, %incr
  //     %pair = cmpxchg iN* %addr, iN %loaded, iN %new
  //     %new_loaded = extractvalue { iN, i1 } %pair, 0
  //     %success = extractvalue { iN, i1 } %pair, 1
  //     br i1 %success, label %atomicrmw.end, label %loop
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(AI, "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from AI.
  IRBuilder<> Builder(AI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we want a load. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  LoadInst *InitLoaded = Builder.CreateLoad(Addr);
  // Atomics require at least natural alignment (alignment is in bytes, so
  // divide the size in bits by 8).
  InitLoaded->setAlignment(AI->getType()->getPrimitiveSizeInBits() / 8);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded = Builder.CreatePHI(AI->getType(), 2, "loaded");
  Loaded->addIncoming(InitLoaded, BB);

  Value *NewVal =
      performAtomicOp(AI->getOperation(), Builder, Loaded, AI->getValOperand());

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
  Value *NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
  Loaded->addIncoming(NewLoaded, LoopBB);

  Value *Success = Builder.CreateExtractValue(Pair, 1, "success");
  Builder.CreateCondBr(Success, ExitBB, LoopBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());

  AI->replaceAllUsesWith(NewLoaded);
  AI->eraseFromParent();

  return true;
}

bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();
  // If getInsertFencesForAtomic() returns true, then the target does not want
  // to deal with memory orders, and emitLeading/TrailingFence should take care
  // of everything. Otherwise, emitLeading/TrailingFence are no-ops and we
  // should preserve the ordering.
  AtomicOrdering MemOpOrder =
      TLI->getInsertFencesForAtomic() ? Monotonic : SuccessOrder;

  // Given: cmpxchg iN* %addr, iN %desired, iN %new success_ord fail_ord
  //
  // The full expansion we produce is:
  //     [...]
  //     fence?
  // cmpxchg.start:
  //     %loaded = @load.linked(%addr)
  //     %should_store = icmp eq %loaded, %desired
  //     br i1 %should_store, label %cmpxchg.trystore,
  //                          label %cmpxchg.failure
  // cmpxchg.trystore:
  //     %stored = @store_conditional(%new, %addr)
  //     %success = icmp eq i32 %stored, 0
  //     br i1 %success, label %cmpxchg.success,
  //                     label %cmpxchg.start/%cmpxchg.failure (strong/weak)
  // cmpxchg.success:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.failure:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.end:
  //     %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
  //     %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
  //     %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(CI, "cmpxchg.end");
  auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
  auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, FailureBB);
  auto TryStoreBB = BasicBlock::Create(Ctx, "cmpxchg.trystore", F, SuccessBB);
  auto LoopBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, TryStoreBB);

  // This grabs the DebugLoc from CI.
  IRBuilder<> Builder(CI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  TLI->emitLeadingFence(Builder, SuccessOrder, /*IsStore=*/true,
                        /*IsLoad=*/true);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
  Value *ShouldStore =
      Builder.CreateICmpEQ(Loaded, CI->getCompareOperand(), "should_store");

  // If the cmpxchg doesn't actually need any ordering when it fails, we can
  // jump straight past that fence instruction (if it exists).
  Builder.CreateCondBr(ShouldStore, TryStoreBB, FailureBB);

  Builder.SetInsertPoint(TryStoreBB);
  Value *StoreSuccess = TLI->emitStoreConditional(
      Builder, CI->getNewValOperand(), Addr, MemOpOrder);
  StoreSuccess = Builder.CreateICmpEQ(
      StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
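  // A weak cmpxchg is allowed to fail spuriously, so if the store-conditional
  // fails we can branch straight to the failure block instead of retrying.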
  Builder.CreateCondBr(StoreSuccess, SuccessBB,
                       CI->isWeak() ? FailureBB : LoopBB);

  // Make sure later instructions don't get reordered with a fence if necessary.
  Builder.SetInsertPoint(SuccessBB);
  TLI->emitTrailingFence(Builder, SuccessOrder, /*IsStore=*/true,
                         /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  Builder.SetInsertPoint(FailureBB);
  TLI->emitTrailingFence(Builder, FailureOrder, /*IsStore=*/true,
                         /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  // Finally, we have control-flow based knowledge of whether the cmpxchg
  // succeeded or not. We expose this to later passes by converting any
  // subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate PHI.
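  // For example (illustrative), a later
  //   %ok = extractvalue { i32, i1 } %pair, 1
  // is rewritten below to use the i1 PHI fed by the success/failure blocks.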

  // Setup the builder so we can create any PHIs we need.
  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2);
  Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
  Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);

  // Look for any users of the cmpxchg that are just comparing the loaded value
  // against the desired one, and replace them with the CFG-derived version.
  SmallVector<ExtractValueInst *, 2> PrunedInsts;
  for (auto User : CI->users()) {
    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
    if (!EV)
      continue;

    assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
           "weird extraction from { iN, i1 }");

    if (EV->getIndices()[0] == 0)
      EV->replaceAllUsesWith(Loaded);
    else
      EV->replaceAllUsesWith(Success);

    PrunedInsts.push_back(EV);
  }

  // We can remove the instructions now that we're no longer iterating through
  // them.
  for (auto EV : PrunedInsts)
    EV->eraseFromParent();

  if (!CI->use_empty()) {
    // Some use of the full struct return that we don't understand has
    // happened, so we've got to reconstruct it properly.
    Value *Res;
    Res = Builder.CreateInsertValue(UndefValue::get(CI->getType()), Loaded, 0);
    Res = Builder.CreateInsertValue(Res, Success, 1);

    CI->replaceAllUsesWith(Res);
  }

  CI->eraseFromParent();
  return true;
}
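/// An atomicrmw is idempotent when it can never change the value in memory:
/// for example, "atomicrmw or i32* %p, i32 0" or "atomicrmw and i32* %p,
/// i32 -1" always stores back the value that was already there.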
bool AtomicExpand::isIdempotentRMW(AtomicRMWInst *RMWI) {
  auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
  if (!C)
    return false;

  AtomicRMWInst::BinOp Op = RMWI->getOperation();
  switch (Op) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
    return C->isZero();
  case AtomicRMWInst::And:
    return C->isMinusOne();
  // FIXME: we could also treat Min/Max/UMin/UMax by the INT_MIN/INT_MAX/...
  default:
    return false;
  }
}

bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
  if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
    if (TLI->shouldExpandAtomicLoadInIR(ResultingLoad))
      expandAtomicLoad(ResultingLoad);
    return true;
  }
  return false;
}