/external/llvm/test/CodeGen/CPP/ |
D | atomic.ll |
    65   …; CHECK: AtomicCmpXchgInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicCmpXchgInst({{.*}}, SequentiallyCon…
    71   …; CHECK: AtomicCmpXchgInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicCmpXchgInst({{.*}}, AcquireRelease,…
    77   …; CHECK: AtomicCmpXchgInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicCmpXchgInst({{.*}}, SequentiallyCon…
    83   …; CHECK: AtomicCmpXchgInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicCmpXchgInst({{.*}}, AcquireRelease,…
|
/external/llvm/lib/IR/ |
D | Instruction.cpp |
    295  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))  in haveSameSpecialState()
    296  return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&  in haveSameSpecialState()
    297  CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&  in haveSameSpecialState()
    299  cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&  in haveSameSpecialState()
    301  cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&  in haveSameSpecialState()
    302  CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I2)->getSynchScope();  in haveSameSpecialState()
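The haveSameSpecialState() matches above compare everything about two cmpxchg instructions other than their operands. A minimal standalone sketch of that check, using the accessors shown in the snippets (cmpXchgStateMatches is a hypothetical helper name, not LLVM's):

    static bool cmpXchgStateMatches(const AtomicCmpXchgInst *A,
                                    const AtomicCmpXchgInst *B) {
      // Two cmpxchg instructions are interchangeable only if they agree on
      // volatility, weakness, both memory orderings, and synchronization scope.
      return A->isVolatile() == B->isVolatile() &&
             A->isWeak() == B->isWeak() &&
             A->getSuccessOrdering() == B->getSuccessOrdering() &&
             A->getFailureOrdering() == B->getFailureOrdering() &&
             A->getSynchScope() == B->getSynchScope();
    }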
|
D | Instructions.cpp |
    1117  void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,  in Init()
    1148  AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,  in AtomicCmpXchgInst() function in AtomicCmpXchgInst
    1156  AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),  in AtomicCmpXchgInst()
    1157  OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {  in AtomicCmpXchgInst()
    1161  AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,  in AtomicCmpXchgInst() function in AtomicCmpXchgInst
    1169  AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),  in AtomicCmpXchgInst()
    1170  OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {  in AtomicCmpXchgInst()
    3556  AtomicCmpXchgInst *AtomicCmpXchgInst::clone_impl() const {  in clone_impl()
    3557  AtomicCmpXchgInst *Result =  in clone_impl()
    3558  new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2),  in clone_impl()
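The two constructors above take the three operands followed by both orderings, a scope, and an insertion point (InsertBefore or InsertAtEnd). A hedged sketch of direct construction against that signature; emitSeqCstCAS is a made-up wrapper and the plain enum spellings are this era's AtomicOrdering values:

    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // A sketch: emit a strong seq_cst compare-and-swap before InsertPt.
    static AtomicCmpXchgInst *emitSeqCstCAS(Value *Ptr, Value *Cmp,
                                            Value *NewVal,
                                            Instruction *InsertPt) {
      return new AtomicCmpXchgInst(
          Ptr, Cmp, NewVal,
          SequentiallyConsistent,  // success ordering
          SequentiallyConsistent,  // failure ordering (no stronger than success)
          CrossThread,             // visible to all threads
          InsertPt);               // insert before this instruction
    }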
|
D | AsmWriter.cpp |
    2672  if (isa<AtomicCmpXchgInst>(I) && cast<AtomicCmpXchgInst>(I).isWeak())  in printInstruction()
    2678  (isa<AtomicCmpXchgInst>(I) && cast<AtomicCmpXchgInst>(I).isVolatile()) ||  in printInstruction()
    2950  } else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) {  in printInstruction()
|
/external/llvm/lib/Transforms/Scalar/ |
D | LowerAtomic.cpp |
    24   static bool LowerAtomicCmpXchgInst(AtomicCmpXchgInst *CXI) {  in LowerAtomicCmpXchgInst()
    126  else if (AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(Inst))  in runOnBasicBlock()
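LowerAtomicCmpXchgInst() replaces the cmpxchg with ordinary, non-atomic IR, which is sound only when no other thread can observe the location (the pass's contract). A condensed sketch of that lowering, assuming this era's typed-pointer IRBuilder API; lowerCmpXchg is a hypothetical name:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    static bool lowerCmpXchg(AtomicCmpXchgInst *CXI) {
      IRBuilder<> Builder(CXI);                 // insert right before the cmpxchg
      Value *Ptr = CXI->getPointerOperand();
      Value *Cmp = CXI->getCompareOperand();
      Value *Val = CXI->getNewValOperand();

      Value *Orig  = Builder.CreateLoad(Ptr);            // current contents
      Value *Equal = Builder.CreateICmpEQ(Orig, Cmp);    // matched expected?
      Value *Res   = Builder.CreateSelect(Equal, Val, Orig);
      Builder.CreateStore(Res, Ptr);                     // conditional write

      // Rebuild the { iN, i1 } result pair a cmpxchg produces since LLVM 3.5.
      Value *Pair = UndefValue::get(CXI->getType());
      Pair = Builder.CreateInsertValue(Pair, Orig, 0);
      Pair = Builder.CreateInsertValue(Pair, Equal, 1);

      CXI->replaceAllUsesWith(Pair);
      CXI->eraseFromParent();
      return true;
    }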
|
/external/llvm/lib/Transforms/Instrumentation/ |
D | BoundsChecking.cpp |
    182  if (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<AtomicCmpXchgInst>(I) ||  in runOnFunction()
    198  } else if (AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(Inst)) {  in runOnFunction()
|
D | ThreadSanitizer.cpp |
    324  if (isa<AtomicCmpXchgInst>(I))  in isAtomic()
    547  } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {  in instrumentAtomic()
|
D | MemorySanitizer.cpp |
    1242  assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));  in handleCASOrRMW()
    1254  if (isa<AtomicCmpXchgInst>(I))  in handleCASOrRMW()
    1268  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {  in visitAtomicCmpXchgInst()
|
D | AddressSanitizer.cpp | 854 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) { in isInterestingMemoryAccess()
|
/external/llvm/include/llvm/Analysis/ |
D | AliasAnalysis.h |
    144  Location getLocation(const AtomicCmpXchgInst *CXI);
    155  else if (auto *I = dyn_cast<AtomicCmpXchgInst>(Inst))  in getLocation()
    397  return getModRefInfo((const AtomicCmpXchgInst*)I, Loc);  in getModRefInfo()
    480  ModRefResult getModRefInfo(const AtomicCmpXchgInst *CX, const Location &Loc);
    483  ModRefResult getModRefInfo(const AtomicCmpXchgInst *CX,  in getModRefInfo()
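Given the overloads declared here, a cmpxchg both reads and writes its memory location, so a mod/ref query against that location reports ModRef. A hypothetical query using this era's nested Location/ModRefResult types (AA and CXI are assumed to be an AliasAnalysis reference and the instruction):

    // Hypothetical usage of the overloads declared above.
    AliasAnalysis::Location Loc = AA.getLocation(CXI);   // pointee of the cmpxchg
    AliasAnalysis::ModRefResult MR = AA.getModRefInfo(CXI, Loc);
    // MR == AliasAnalysis::ModRef: the instruction may both read and write Loc.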
|
/external/llvm/lib/CodeGen/ |
D | AtomicExpandPass.cpp |
    54   bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
    89   auto CASI = dyn_cast<AtomicCmpXchgInst>(I);  in runOnFunction()
    203  AtomicCmpXchgInst::getStrongestFailureOrdering(Order));  in expandAtomicLoadToCmpXchg()
    389  AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));  in expandAtomicRMWToCmpXchg()
    404  bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {  in expandAtomicCmpXchg()
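Both expansion helpers here derive the failure ordering from the success ordering via AtomicCmpXchgInst::getStrongestFailureOrdering(). The mapping it implements, sketched as a free function: a failure ordering may not include a release component (nothing is stored when the compare fails), so the release flavors degrade.

    #include "llvm/IR/Instructions.h"
    #include "llvm/Support/ErrorHandling.h"
    using namespace llvm;

    static AtomicOrdering strongestFailureOrdering(AtomicOrdering Success) {
      switch (Success) {
      case Release:                                   // no store on failure, so
      case Monotonic:              return Monotonic;  // release decays to monotonic
      case AcquireRelease:
      case Acquire:                return Acquire;
      case SequentiallyConsistent: return SequentiallyConsistent;
      default: llvm_unreachable("invalid cmpxchg success ordering");
      }
    }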
|
/external/llvm/lib/Transforms/IPO/ |
D | MergeFunctions.cpp |
    809  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(L)) {  in cmpOperations()
    811  cast<AtomicCmpXchgInst>(R)->isVolatile()))  in cmpOperations()
    814  cast<AtomicCmpXchgInst>(R)->isWeak()))  in cmpOperations()
    817  cast<AtomicCmpXchgInst>(R)->getSuccessOrdering()))  in cmpOperations()
    820  cast<AtomicCmpXchgInst>(R)->getFailureOrdering()))  in cmpOperations()
    823  cast<AtomicCmpXchgInst>(R)->getSynchScope());  in cmpOperations()
|
/external/llvm/lib/Analysis/ |
D | AliasAnalysis.cpp |
    294  AliasAnalysis::getLocation(const AtomicCmpXchgInst *CXI) {  in getLocation()
    397  AliasAnalysis::getModRefInfo(const AtomicCmpXchgInst *CX, const Location &Loc) {  in getModRefInfo()
|
D | CFLAliasAnalysis.cpp |
    295  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &Inst) {  in visitAtomicCmpXchgInst()
    562  Value *visitAtomicCmpXchgInst(AtomicCmpXchgInst &Inst) {  in visitAtomicCmpXchgInst()
|
D | MemoryDependenceAnalysis.cpp | 361 else if (AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(Inst)) in isVolatile()
|
/external/llvm/include/llvm/IR/ |
D | InstVisitor.h | 177 RetTy visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) { DELEGATE(Instruction);} in visitAtomicCmpXchgInst()
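DELEGATE(Instruction) means the default hook falls through to the generic visitInstruction(). Overriding the hook in a CRTP subclass routes every cmpxchg to your handler instead; a minimal sketch (CmpXchgCounter is a made-up name):

    #include "llvm/IR/InstVisitor.h"
    using namespace llvm;

    // Count the cmpxchg instructions in whatever IR we are asked to visit.
    struct CmpXchgCounter : public InstVisitor<CmpXchgCounter> {
      unsigned Count = 0;
      void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) { ++Count; }
    };

    // Usage: CmpXchgCounter C; C.visit(F);  // F is a Function&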
|
D | Instruction.def | 137 HANDLE_MEMORY_INST(31, AtomicCmpXchg , AtomicCmpXchgInst )
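Instruction.def is an X-macro table: clients define the HANDLE_* macros they care about and include the file, which supplies defaults for the rest and #undefs everything at the end. A sketch that lists the memory instructions, including opcode 31 above; dumpMemoryOpcodes is a made-up name:

    #include <cstdio>

    static void dumpMemoryOpcodes() {
    #define HANDLE_MEMORY_INST(num, opcode, Class) \
      std::printf("%2d -> %s\n", num, #opcode);
    #include "llvm/IR/Instruction.def"
    }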
|
D | Instructions.h |
    485  class AtomicCmpXchgInst : public Instruction {
    491  AtomicCmpXchgInst *clone_impl() const override;
    497  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
    502  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
    627  struct OperandTraits<AtomicCmpXchgInst> :
    628  public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
    631  DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
|
D | IRBuilder.h |
    1006  AtomicCmpXchgInst *
    1011  return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering,
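CreateAtomicCmpXchg() is the usual way to emit the instruction: it forwards to the constructor seen in Instructions.h and inserts at the builder's insertion point. A hedged sketch with placeholder Value*s (emitCAS is a made-up wrapper) that also unpacks the { iN, i1 } result pair:

    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    static Value *emitCAS(IRBuilder<> &Builder, Value *Ptr,
                          Value *Expected, Value *Desired) {
      AtomicCmpXchgInst *CAS = Builder.CreateAtomicCmpXchg(
          Ptr, Expected, Desired,
          SequentiallyConsistent,  // ordering when the exchange succeeds
          Monotonic);              // ordering when the comparison fails
      // Since LLVM 3.5 the result is a { iN, i1 } pair: the loaded value
      // plus an i1 flag that is true when the exchange happened.
      Value *Loaded  = Builder.CreateExtractValue(CAS, 0);
      Value *Success = Builder.CreateExtractValue(CAS, 1);
      (void)Loaded;
      return Success;
    }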
|
/external/llvm/lib/Bitcode/Writer/ |
D | BitcodeWriter.cpp |
    1911  Vals.push_back(cast<AtomicCmpXchgInst>(I).isVolatile());  in WriteInstruction()
    1913  cast<AtomicCmpXchgInst>(I).getSuccessOrdering()));  in WriteInstruction()
    1915  cast<AtomicCmpXchgInst>(I).getSynchScope()));  in WriteInstruction()
    1917  cast<AtomicCmpXchgInst>(I).getFailureOrdering()));  in WriteInstruction()
    1918  Vals.push_back(cast<AtomicCmpXchgInst>(I).isWeak());  in WriteInstruction()
|
/external/clang/lib/CodeGen/ |
D | CGAtomic.cpp |
    350   llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(  in emitAtomicCmpXchg()
    414   llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);  in emitAtomicCmpXchgFailureSet()
    1374  Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);  in EmitAtomicCompareExchange()
|
/external/llvm/lib/Transforms/Utils/ |
D | InlineFunction.cpp | 471 else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I)) in AddAliasScopeMetadata()
|
/external/llvm/lib/CodeGen/SelectionDAG/ |
D | SelectionDAGBuilder.h | 777 void visitAtomicCmpXchg(const AtomicCmpXchgInst &I);
|
/external/llvm/lib/Bitcode/Reader/ |
D | BitcodeReader.cpp |
    4111  AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrdering);  in ParseFunctionBody()
    4115  I = new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering, FailureOrdering,  in ParseFunctionBody()
    4117  cast<AtomicCmpXchgInst>(I)->setVolatile(Record[OpNum]);  in ParseFunctionBody()
    4126  cast<AtomicCmpXchgInst>(I)->setWeak(Record[OpNum+4]);  in ParseFunctionBody()
|
/external/llvm/lib/Target/CppBackend/ |
D | CPPBackend.cpp | 1568 const AtomicCmpXchgInst *cxi = cast<AtomicCmpXchgInst>(I); in printInstruction()
|