/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/GlobalISel/
D | select-atomicrmw.mir |
    152 … ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDEORALW [[CST]], [[COPY]] :: (load store acq_rel 4 on %ir.addr)
    156 %2:gpr(s32) = G_ATOMICRMW_XOR %0, %1 :: (load store acq_rel 4 on %ir.addr)
    172 … ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDSMINALW [[CST]], [[COPY]] :: (load store acq_rel 4 on %ir.addr)
    176 %2:gpr(s32) = G_ATOMICRMW_MIN %0, %1 :: (load store acq_rel 4 on %ir.addr)
    192 … ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDSMAXALW [[CST]], [[COPY]] :: (load store acq_rel 4 on %ir.addr)
    196 %2:gpr(s32) = G_ATOMICRMW_MAX %0, %1 :: (load store acq_rel 4 on %ir.addr)
    212 … ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDUMINALW [[CST]], [[COPY]] :: (load store acq_rel 4 on %ir.addr)
    216 %2:gpr(s32) = G_ATOMICRMW_UMIN %0, %1 :: (load store acq_rel 4 on %ir.addr)
    232 … ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDUMAXALW [[CST]], [[COPY]] :: (load store acq_rel 4 on %ir.addr)
    236 %2:gpr(s32) = G_ATOMICRMW_UMAX %0, %1 :: (load store acq_rel 4 on %ir.addr)
|
/external/llvm/test/Assembler/ |
D | atomic.ll |
    16 ; CHECK: cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel acquire
    17 cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel acquire
    18 ; CHECK: cmpxchg i32* %x, i32 42, i32 0 acq_rel monotonic
    19 cmpxchg i32* %x, i32 42, i32 0 acq_rel monotonic
|
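For reference, a minimal standalone sketch (not taken from any test listed here; the function name is made up) of the cmpxchg forms these Assembler tests exercise. The first ordering applies to the successful exchange, the second to the failed one; in the LLVM versions indexed above the failure ordering may not be stronger than the success ordering and may never be release or acq_rel.

    define void @cmpxchg_orderings(i32* %x) {
    entry:
      ; success ordering acq_rel, failure ordering acquire
      %pair0 = cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel acquire
      ; the failure ordering may also be weaker, e.g. monotonic
      %pair1 = cmpxchg i32* %x, i32 42, i32 0 acq_rel monotonic
      ret void
    }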
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/ |
D | pr30451.ll |
    8 %2 = atomicrmw min i8* %0, i8 0 acq_rel
    24 %2 = atomicrmw min i16* %0, i16 0 acq_rel
    41 %2 = atomicrmw max i8* %0, i8 0 acq_rel
    57 %2 = atomicrmw max i16* %0, i16 0 acq_rel
|
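As a hedged illustration of the atomicrmw pattern the PowerPC hits above (and several entries below) grep for, this snippet is illustrative and not the body of pr30451.ll: acq_rel gives the read-modify-write acquire semantics on the load side and release semantics on the store side, and the instruction returns the value that was in memory before the update.

    define i8 @rmw_min_acq_rel(i8* %p, i8 %v) {
    entry:
      ; atomically store min(*%p, %v) into %p with acquire+release ordering;
      ; %old receives the previous contents of %p
      %old = atomicrmw min i8* %p, i8 %v acq_rel
      ret i8 %old
    }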
/external/swiftshader/third_party/llvm-7.0/llvm/test/Assembler/ |
D | atomic.ll |
    22 ; CHECK: cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel acquire
    23 cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel acquire
    24 ; CHECK: cmpxchg i32* %x, i32 42, i32 0 acq_rel monotonic
    25 cmpxchg i32* %x, i32 42, i32 0 acq_rel monotonic
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/ |
D | memory-legalizer-atomic-fence.ll |
    39 fence acq_rel
    82 fence syncscope("singlethread") acq_rel
    130 fence syncscope("agent") acq_rel
    176 fence syncscope("workgroup") acq_rel
    217 fence syncscope("wavefront") acq_rel
|
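For context on the fence hits above, a small sketch (not copied from the AMDGPU test) of fence with and without a synchronization scope. A bare acq_rel fence is system scope, syncscope("singlethread") is a built-in scope, and names such as "agent", "workgroup" and "wavefront" are target-defined scopes.

    define void @fences() {
    entry:
      ; system-wide acquire+release fence
      fence acq_rel
      ; fence limited to a target-defined scope (AMDGPU workgroup here)
      fence syncscope("workgroup") acq_rel
      ; single-thread scope: only orders against operations on the same
      ; thread, e.g. signal handlers
      fence syncscope("singlethread") acq_rel
      ret void
    }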
D | private-memory-atomics.ll |
    15 %tmp4 = atomicrmw add i32 addrspace(5)* %tmp3, i32 7 acq_rel
    28 %tmp4 = cmpxchg i32 addrspace(5)* %tmp3, i32 0, i32 1 acq_rel monotonic
|
D | memory-legalizer-atomic-rmw.ll |
    48 %val = atomicrmw volatile xchg i32* %out, i32 %in acq_rel
    108 %val = atomicrmw volatile xchg i32* %out, i32 %in syncscope("singlethread") acq_rel
    168 %val = atomicrmw volatile xchg i32* %out, i32 %in syncscope("agent") acq_rel
    228 %val = atomicrmw volatile xchg i32* %out, i32 %in syncscope("workgroup") acq_rel
    288 %val = atomicrmw volatile xchg i32* %out, i32 %in syncscope("wavefront") acq_rel
|
D | memory-legalizer-atomic-cmpxchg.ll |
    52 %val = cmpxchg volatile i32* %gep, i32 %old, i32 %in acq_rel monotonic
    104 %val = cmpxchg volatile i32* %gep, i32 %old, i32 %in acq_rel acquire
    182 %val = cmpxchg volatile i32* %gep, i32 %old, i32 %in syncscope("singlethread") acq_rel monotonic
    234 %val = cmpxchg volatile i32* %gep, i32 %old, i32 %in syncscope("singlethread") acq_rel acquire
    312 %val = cmpxchg volatile i32* %gep, i32 %old, i32 %in syncscope("agent") acq_rel monotonic
    364 %val = cmpxchg volatile i32* %gep, i32 %old, i32 %in syncscope("agent") acq_rel acquire
    442 %val = cmpxchg volatile i32* %gep, i32 %old, i32 %in syncscope("workgroup") acq_rel monotonic
    494 %val = cmpxchg volatile i32* %gep, i32 %old, i32 %in syncscope("workgroup") acq_rel acquire
    572 %val = cmpxchg volatile i32* %gep, i32 %old, i32 %in syncscope("wavefront") acq_rel monotonic
    624 %val = cmpxchg volatile i32* %gep, i32 %old, i32 %in syncscope("wavefront") acq_rel acquire
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Instrumentation/ThreadSanitizer/ |
D | atomic.ll |
    239 atomicrmw xchg i8* %a, i8 0 acq_rel, !dbg !7
    247 atomicrmw add i8* %a, i8 0 acq_rel, !dbg !7
    255 atomicrmw sub i8* %a, i8 0 acq_rel, !dbg !7
    263 atomicrmw and i8* %a, i8 0 acq_rel, !dbg !7
    271 atomicrmw or i8* %a, i8 0 acq_rel, !dbg !7
    279 atomicrmw xor i8* %a, i8 0 acq_rel, !dbg !7
    287 atomicrmw nand i8* %a, i8 0 acq_rel, !dbg !7
    375 cmpxchg i8* %a, i8 0, i8 1 acq_rel acquire, !dbg !7
    623 atomicrmw xchg i16* %a, i16 0 acq_rel, !dbg !7
    631 atomicrmw add i16* %a, i16 0 acq_rel, !dbg !7
    [all …]
|
/external/llvm/test/Instrumentation/ThreadSanitizer/ |
D | atomic.ll |
    239 atomicrmw xchg i8* %a, i8 0 acq_rel, !dbg !7
    247 atomicrmw add i8* %a, i8 0 acq_rel, !dbg !7
    255 atomicrmw sub i8* %a, i8 0 acq_rel, !dbg !7
    263 atomicrmw and i8* %a, i8 0 acq_rel, !dbg !7
    271 atomicrmw or i8* %a, i8 0 acq_rel, !dbg !7
    279 atomicrmw xor i8* %a, i8 0 acq_rel, !dbg !7
    287 atomicrmw nand i8* %a, i8 0 acq_rel, !dbg !7
    375 cmpxchg i8* %a, i8 0, i8 1 acq_rel acquire, !dbg !7
    623 atomicrmw xchg i16* %a, i16 0 acq_rel, !dbg !7
    631 atomicrmw add i16* %a, i16 0 acq_rel, !dbg !7
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Bitcode/ |
D | cmpxchg-upgrade.ll |
    17 cmpxchg i32* %addr, i32 42, i32 0 acq_rel
    18 ; CHECK: cmpxchg i32* %addr, i32 42, i32 0 acq_rel acquire
|
D | atomic.ll |
    11 cmpxchg weak i32* %addr, i32 %desired, i32 %new acq_rel acquire
    12 ; CHECK: cmpxchg weak i32* %addr, i32 %desired, i32 %new acq_rel acquire
|
D | atomic-no-syncscope.ll |
    11 ; CHECK: cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel acquire
    12 ; CHECK: cmpxchg i32* %x, i32 42, i32 0 acq_rel monotonic
|
D | memInstructions.3.2.ll |
    278 ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
    280 %res13 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
    282 ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
    284 %res14 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
    286 …[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
    288 %res15 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
    290 …z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
    292 %res16 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
|
/external/llvm/test/Bitcode/ |
D | atomic.ll |
    11 cmpxchg weak i32* %addr, i32 %desired, i32 %new acq_rel acquire
    12 ; CHECK: cmpxchg weak i32* %addr, i32 %desired, i32 %new acq_rel acquire
|
D | cmpxchg-upgrade.ll |
    17 cmpxchg i32* %addr, i32 42, i32 0 acq_rel
    18 ; CHECK: cmpxchg i32* %addr, i32 42, i32 0 acq_rel acquire
|
D | memInstructions.3.2.ll |
    278 ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
    280 %res13 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
    282 ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
    284 %res14 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
    286 ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acqui…
    288 %res15 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
    290 …T: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
    292 %res16 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Hexagon/intrinsics/ |
D | fence.ll |
    3 ; RUN: sed -e "s/ORDER/acq_rel/" %s | llc -march=hexagon | FileCheck %s
    7 ; RUN: sed -e 's/ORDER/syncscope("singlethread") acq_rel/' %s | llc -march=hexagon | FileCheck %s
|
D | atomicrmw_bitwise_native.ll |
    4 ; RUN: sed -e "s/ORDER/acq_rel/" -e "s/BINARY_OP/and/" %s | llc -march=hexagon | FileCheck %s
    9 ; RUN: sed -e "s/ORDER/acq_rel/" -e "s/BINARY_OP/xor/" %s | llc -march=hexagon | FileCheck %s
    14 ; RUN: sed -e "s/ORDER/acq_rel/" -e "s/BINARY_OP/or/" %s | llc -march=hexagon | FileCheck %s
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/MIR/AArch64/ |
D | atomic-memoperands.mir |
    22 ; CHECK: G_STORE [[LOAD1]](s32), [[COPY]](p0) :: (store acq_rel 4)
    30 G_STORE %2(s32), %0(p0) :: (store acq_rel 4)
|
/external/swiftshader/third_party/LLVM/test/Assembler/ |
D | atomic.ll |
    15 ; CHECK: cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel
    16 cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel
|
/external/llvm/include/llvm/Support/ |
D | AtomicOrdering.h |
    34 acq_rel = 4, enumerator
    145 /* acq_rel */ AtomicOrderingCABI::acq_rel, in toCABI()
|
/external/swiftshader/third_party/llvm-7.0/llvm/include/llvm/Support/ |
D | AtomicOrdering.h |
    34 acq_rel = 4, enumerator
    144 /* acq_rel */ AtomicOrderingCABI::acq_rel, in toCABI()
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | private-memory-atomics.ll |
    15 %tmp4 = atomicrmw add i32* %tmp3, i32 7 acq_rel
    28 %tmp4 = cmpxchg i32* %tmp3, i32 0, i32 1 acq_rel monotonic
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/ |
D | select-cmpxchg.ll |
    30 ; CHECK-NEXT: %tmp0 = cmpxchg i64* %ptr, i64 %compare, i64 %new_value acq_rel monotonic
    33 %tmp0 = cmpxchg i64* %ptr, i64 %compare, i64 %new_value acq_rel monotonic
|
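The InstCombine entry above involves selects over cmpxchg results. As a hedged sketch of the shape that kind of test operates on (the function name and exact operands here are illustrative, not the test body): cmpxchg yields a {value, success} pair, and when the exchange succeeds the loaded value is known to equal the compare operand, which is what folds of this form rely on.

    define i64 @cmpxchg_select(i64* %ptr, i64 %compare, i64 %new_value) {
    entry:
      %tmp0 = cmpxchg i64* %ptr, i64 %compare, i64 %new_value acq_rel monotonic
      ; first element: value loaded from %ptr; second: i1 success flag
      %loaded = extractvalue { i64, i1 } %tmp0, 0
      %ok = extractvalue { i64, i1 } %tmp0, 1
      ; when %ok is true, %loaded == %compare, so this select can be simplified
      %sel = select i1 %ok, i64 %compare, i64 %loaded
      ret i64 %sel
    }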