
Searched refs:seq_cst (Results 1 – 25 of 706) sorted by relevance


/external/llvm-project/llvm/test/CodeGen/AMDGPU/
fp-atomic-to-s_denormmode.mir
11 …64, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst 4 on `float a…
23 …28, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst 4 on `float a…
35 …32, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst 4 on `float a…
47 …64, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst 4 on `float a…
59 …32, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst 4 on `float a…
71 …64, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst 4 on `float a…
83 …0, -1, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst 4 on `float a…
95 …0, -1, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst 4 on `float a…
107 …0, -1, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst 4 on `float a…
119 …0, -1, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst 4 on `float a…
[all …]
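The truncated MIR memory operands above ("volatile load store seq_cst seq_cst 4 on `float a…") describe sequentially consistent read-modify-write accesses to 32-bit floats. At the IR level, a floating-point atomic that the target cannot execute natively is commonly expanded into a compare-exchange loop; a minimal sketch, with illustrative names and the typed-pointer syntax these tests use:

define float @fadd_seq_cst(float* %ptr, float %val) {
entry:
  %init = load float, float* %ptr, align 4
  br label %loop
loop:
  ; %old is the value we expect to find in memory this iteration
  %old = phi float [ %init, %entry ], [ %loaded, %loop ]
  %new = fadd float %old, %val
  %old.i = bitcast float %old to i32
  %new.i = bitcast float %new to i32
  %ptr.i = bitcast float* %ptr to i32*
  ; success and failure orderings are both seq_cst, as in the MIR above
  %pair = cmpxchg i32* %ptr.i, i32 %old.i, i32 %new.i seq_cst seq_cst
  %raw = extractvalue { i32, i1 } %pair, 0
  %loaded = bitcast i32 %raw to float
  %ok = extractvalue { i32, i1 } %pair, 1
  br i1 %ok, label %done, label %loop
done:
  ret float %old
}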
flat_atomics_i64.ll
9 %tmp0 = atomicrmw volatile add i64* %gep, i64 %in seq_cst
19 %tmp0 = atomicrmw volatile add i64* %gep, i64 %in seq_cst
30 %tmp0 = atomicrmw volatile add i64* %gep, i64 %in seq_cst
41 %tmp0 = atomicrmw volatile add i64* %gep, i64 %in seq_cst
50 %tmp0 = atomicrmw volatile add i64* %out, i64 %in seq_cst
59 %tmp0 = atomicrmw volatile add i64* %out, i64 %in seq_cst
69 %tmp0 = atomicrmw volatile add i64* %ptr, i64 %in seq_cst
79 %tmp0 = atomicrmw volatile add i64* %ptr, i64 %in seq_cst
89 %tmp0 = atomicrmw volatile and i64* %gep, i64 %in seq_cst
99 %tmp0 = atomicrmw volatile and i64* %gep, i64 %in seq_cst
[all …]
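Each result line is truncated at the page's column width; a complete test function consistent with these lines looks roughly like this (kernel name and offset are illustrative):

define amdgpu_kernel void @atomic_add_i64_offset(i64* %out, i64 %in) {
entry:
  ; %gep addresses a fixed offset past %out, matching the "%gep" results above
  %gep = getelementptr i64, i64* %out, i64 4
  %tmp0 = atomicrmw volatile add i64* %gep, i64 %in seq_cst
  ret void
}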
flat_atomics.ll
11 %val = atomicrmw volatile add i32* %gep, i32 %in seq_cst
21 %val = atomicrmw volatile add i32* %gep, i32 %in seq_cst
30 %val = atomicrmw volatile add i32* %gep, i32 %in seq_cst
41 %val = atomicrmw volatile add i32* %gep, i32 %in seq_cst
53 %val = atomicrmw volatile add i32* %gep, i32 %in seq_cst
65 %val = atomicrmw volatile add i32* %gep, i32 %in seq_cst
74 %val = atomicrmw volatile add i32* %out, i32 %in seq_cst
83 %val = atomicrmw volatile add i32* %out, i32 %in seq_cst
93 %val = atomicrmw volatile add i32* %ptr, i32 %in seq_cst
103 %val = atomicrmw volatile add i32* %ptr, i32 %in seq_cst
[all …]
r600.global_atomics.ll
12 %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
22 %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
34 %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
45 %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
54 %val = atomicrmw volatile add i32 addrspace(1)* %out, i32 %in seq_cst
64 %val = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
74 %val = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
85 %val = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
94 %val = atomicrmw volatile and i32 addrspace(1)* %out, i32 %in seq_cst
104 %val = atomicrmw volatile and i32 addrspace(1)* %ptr, i32 %in seq_cst
[all …]
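The r600 variant exercises the same read-modify-write operations through explicit global-memory pointers, addrspace(1). A sketch of one full function consistent with the truncated lines (kernel name illustrative):

define amdgpu_kernel void @atomic_add_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
  %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}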
/external/clang/test/OpenMP/
atomic_ast_print.cpp
28 #pragma omp atomic seq_cst in foo()
30 #pragma omp atomic read seq_cst in foo()
32 #pragma omp atomic seq_cst write in foo()
34 #pragma omp atomic update seq_cst in foo()
36 #pragma omp atomic seq_cst capture in foo()
38 #pragma omp atomic capture seq_cst in foo()
128 #pragma omp atomic seq_cst in main()
130 #pragma omp atomic read seq_cst in main()
132 #pragma omp atomic seq_cst write in main()
134 #pragma omp atomic update seq_cst in main()
[all …]
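The seq_cst clause on #pragma omp atomic requests sequentially consistent ordering for the atomic access. As a rough, hedged sketch of what "#pragma omp atomic read seq_cst" over "v = x;" could lower to (illustrative globals @x and @v, not clang's exact OpenMP codegen):

@x = global i32 0
@v = global i32 0

define void @atomic_read_seq_cst() {
  ; the read of x is atomic and seq_cst; the store to v is ordinary
  %1 = load atomic i32, i32* @x seq_cst, align 4
  store i32 %1, i32* @v, align 4
  ret void
}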
/external/llvm-project/llvm/test/Transforms/InstCombine/
consecutive-fences.ll
6 ; CHECK-NEXT: fence seq_cst
12 fence seq_cst
13 fence seq_cst
14 fence seq_cst
22 ; CHECK-NEXT: fence seq_cst
23 ; CHECK-NEXT: fence syncscope("singlethread") seq_cst
28 fence seq_cst
29 fence syncscope("singlethread") seq_cst
35 ; CHECK-NEXT: fence seq_cst
37 ; CHECK-NEXT: fence seq_cst
[all …]
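This InstCombine test checks that adjacent identical fences collapse to one, while a fence with a different synchronization scope survives next to an unscoped one. In IR form, roughly (function names illustrative):

define void @dominating_fence() {
  fence seq_cst
  fence seq_cst   ; redundant: removed, per the single CHECK-NEXT above
  fence seq_cst   ; redundant: removed
  ret void
}

define void @scoped_fence() {
  fence seq_cst
  fence syncscope("singlethread") seq_cst   ; different scope: both kept
  ret void
}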
/external/llvm-project/llvm/test/CodeGen/X86/
nocx16.ll
5 %0 = cmpxchg i128* %a, i128 1, i128 1 seq_cst seq_cst
7 %1 = atomicrmw xchg i128* %a, i128 1 seq_cst
9 %2 = atomicrmw add i128* %a, i128 1 seq_cst
11 %3 = atomicrmw sub i128* %a, i128 1 seq_cst
13 %4 = atomicrmw and i128* %a, i128 1 seq_cst
15 %5 = atomicrmw nand i128* %a, i128 1 seq_cst
17 %6 = atomicrmw or i128* %a, i128 1 seq_cst
19 %7 = atomicrmw xor i128* %a, i128 1 seq_cst
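With cmpxchg16b disabled (as the name nocx16 suggests), x86-64 cannot inline 16-byte atomics, so operations like these are expected to be expanded to library calls rather than lock-prefixed instructions; the IR itself is ordinary. Note that cmpxchg takes two orderings, success then failure:

define void @cas128(i128* %a) {
  ; first seq_cst = success ordering, second seq_cst = failure ordering
  %pair = cmpxchg i128* %a, i128 1, i128 1 seq_cst seq_cst
  %old = atomicrmw xchg i128* %a, i128 1 seq_cst
  ret void
}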
/external/llvm/test/CodeGen/X86/
nocx16.ll
5 %0 = cmpxchg i128* %a, i128 1, i128 1 seq_cst seq_cst
7 %1 = atomicrmw xchg i128* %a, i128 1 seq_cst
9 %2 = atomicrmw add i128* %a, i128 1 seq_cst
11 %3 = atomicrmw sub i128* %a, i128 1 seq_cst
13 %4 = atomicrmw and i128* %a, i128 1 seq_cst
15 %5 = atomicrmw nand i128* %a, i128 1 seq_cst
17 %6 = atomicrmw or i128* %a, i128 1 seq_cst
19 %7 = atomicrmw xor i128* %a, i128 1 seq_cst
/external/llvm-project/llvm/test/CodeGen/AMDGPU/GlobalISel/
inst-select-store-atomic-local.mir
22 …; GFX6: DS_WRITE_B32 [[COPY1]], [[COPY]], 0, 0, implicit $m0, implicit $exec :: (store seq_cst 4, …
28 …; GFX7: DS_WRITE_B32 [[COPY1]], [[COPY]], 0, 0, implicit $m0, implicit $exec :: (store seq_cst 4, …
33 …; GFX9: DS_WRITE_B32_gfx9 [[COPY1]], [[COPY]], 0, 0, implicit $exec :: (store seq_cst 4, addrspace…
36 G_STORE %0, %1 :: (store seq_cst 4, align 4, addrspace 3)
56 ; GFX6: G_STORE [[COPY]](<2 x s16>), [[COPY1]](p3) :: (store seq_cst 4, addrspace 3)
62 ; GFX7: G_STORE [[COPY]](<2 x s16>), [[COPY1]](p3) :: (store seq_cst 4, addrspace 3)
67 ; GFX9: G_STORE [[COPY]](<2 x s16>), [[COPY1]](p3) :: (store seq_cst 4, addrspace 3)
70 G_STORE %0, %1 :: (store seq_cst 4, align 4, addrspace 3)
90 ; GFX6: G_STORE [[COPY]](p3), [[COPY1]](p3) :: (store seq_cst 4, addrspace 3)
96 ; GFX7: G_STORE [[COPY]](p3), [[COPY1]](p3) :: (store seq_cst 4, addrspace 3)
[all …]
inst-select-load-atomic-local.mir
21 …vgpr_32 = DS_READ_B32 [[COPY]], 0, 0, implicit $m0, implicit $exec :: (load seq_cst 4, addrspace 3)
27 …vgpr_32 = DS_READ_B32 [[COPY]], 0, 0, implicit $m0, implicit $exec :: (load seq_cst 4, addrspace 3)
32 …[0-9]+]]:vgpr_32 = DS_READ_B32_gfx9 [[COPY]], 0, 0, implicit $exec :: (load seq_cst 4, addrspace 3)
35 %1:vgpr(s32) = G_LOAD %0 :: (load seq_cst 4, align 4, addrspace 3)
55 … ; GFX6: [[LOAD:%[0-9]+]]:vgpr_32(<2 x s16>) = G_LOAD [[COPY]](p3) :: (load seq_cst 4, addrspace 3)
61 … ; GFX7: [[LOAD:%[0-9]+]]:vgpr_32(<2 x s16>) = G_LOAD [[COPY]](p3) :: (load seq_cst 4, addrspace 3)
66 … ; GFX9: [[LOAD:%[0-9]+]]:vgpr_32(<2 x s16>) = G_LOAD [[COPY]](p3) :: (load seq_cst 4, addrspace 3)
69 %1:vgpr(<2 x s16>) = G_LOAD %0 :: (load seq_cst 4, align 4, addrspace 3)
89 ; GFX6: [[LOAD:%[0-9]+]]:vgpr_32(p3) = G_LOAD [[COPY]](p3) :: (load seq_cst 4, addrspace 3)
95 ; GFX7: [[LOAD:%[0-9]+]]:vgpr_32(p3) = G_LOAD [[COPY]](p3) :: (load seq_cst 4, addrspace 3)
[all …]
inst-select-store-atomic-flat.mir
20 …TORE_DWORD [[COPY1]], [[COPY]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store seq_cst 4)
25 …TORE_DWORD [[COPY1]], [[COPY]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store seq_cst 4)
28 G_STORE %0, %1 :: (store seq_cst 4, align 4, addrspace 0)
47 ; GFX7: G_STORE [[COPY]](<2 x s16>), [[COPY1]](p0) :: (store seq_cst 4)
52 ; GFX9: G_STORE [[COPY]](<2 x s16>), [[COPY1]](p0) :: (store seq_cst 4)
55 G_STORE %0, %1 :: (store seq_cst 4, align 4, addrspace 0)
74 ; GFX7: G_STORE [[COPY]](p3), [[COPY1]](p0) :: (store seq_cst 4)
79 ; GFX9: G_STORE [[COPY]](p3), [[COPY1]](p0) :: (store seq_cst 4)
82 G_STORE %0, %1 :: (store seq_cst 4, align 4, addrspace 0)
101 ; GFX7: G_STORE [[COPY]](p5), [[COPY1]](p0) :: (store seq_cst 4)
[all …]
inst-select-load-atomic-flat.mir
19 …r_32 = FLAT_LOAD_DWORD [[COPY]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load seq_cst 4)
24 …r_32 = FLAT_LOAD_DWORD [[COPY]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load seq_cst 4)
27 %1:vgpr(s32) = G_LOAD %0 :: (load seq_cst 4, align 4, addrspace 0)
46 ; GFX7: [[LOAD:%[0-9]+]]:vgpr_32(<2 x s16>) = G_LOAD [[COPY]](p0) :: (load seq_cst 4)
51 ; GFX9: [[LOAD:%[0-9]+]]:vgpr_32(<2 x s16>) = G_LOAD [[COPY]](p0) :: (load seq_cst 4)
54 %1:vgpr(<2 x s16>) = G_LOAD %0 :: (load seq_cst 4, align 4, addrspace 0)
73 ; GFX7: [[LOAD:%[0-9]+]]:vgpr_32(p3) = G_LOAD [[COPY]](p0) :: (load seq_cst 4)
78 ; GFX9: [[LOAD:%[0-9]+]]:vgpr_32(p3) = G_LOAD [[COPY]](p0) :: (load seq_cst 4)
81 %1:vgpr(p3) = G_LOAD %0 :: (load seq_cst 4, align 4, addrspace 0)
100 …64 = FLAT_LOAD_DWORDX2 [[COPY]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load seq_cst 8)
[all …]
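All four GlobalISel files select IR atomic loads and stores whose memory operands carry the seq_cst ordering shown above. The source-level form is simply this (a sketch; addrspace(3) is AMDGPU local/LDS memory, matching the -local tests):

define void @atomic_copy(i32 addrspace(3)* %src, i32 addrspace(3)* %dst) {
  %v = load atomic i32, i32 addrspace(3)* %src seq_cst, align 4
  store atomic i32 %v, i32 addrspace(3)* %dst seq_cst, align 4
  ret void
}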
/external/llvm-project/llvm/test/CodeGen/NVPTX/
atomics.ll
7 %ret = atomicrmw add i32* %addr, i32 %val seq_cst
14 %ret = atomicrmw add i64* %addr, i64 %val seq_cst
22 %ret = atomicrmw sub i32* %subr, i32 %val seq_cst
30 %ret = atomicrmw sub i64* %subr, i64 %val seq_cst
37 %ret = atomicrmw and i32* %subr, i32 %val seq_cst
44 %ret = atomicrmw and i64* %subr, i64 %val seq_cst
50 ; %ret = atomicrmw nand i32* %subr, i32 %val seq_cst
55 ; %ret = atomicrmw nand i64* %subr, i64 %val seq_cst
62 %ret = atomicrmw or i32* %subr, i32 %val seq_cst
69 %ret = atomicrmw or i64* %subr, i64 %val seq_cst
[all …]
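The nand lines are commented out here: atomicrmw nand stores ~(old & val), which has no direct hardware operation on most targets, so it is typically rewritten as a compare-exchange loop. An illustrative expansion sketch:

define i32 @nand_expanded(i32* %ptr, i32 %val) {
entry:
  %init = load i32, i32* %ptr, align 4
  br label %loop
loop:
  %old = phi i32 [ %init, %entry ], [ %loaded, %loop ]
  %and = and i32 %old, %val
  %new = xor i32 %and, -1          ; nand: ~(old & val)
  %pair = cmpxchg i32* %ptr, i32 %old, i32 %new seq_cst seq_cst
  %loaded = extractvalue { i32, i1 } %pair, 0
  %ok = extractvalue { i32, i1 } %pair, 1
  br i1 %ok, label %done, label %loop
done:
  ret i32 %old
}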
/external/llvm-project/llvm/test/CodeGen/SystemZ/
cmpxchg-03.ll
10 %pair = cmpxchg i32 *%src, i32 %cmp, i32 %swap seq_cst seq_cst
21 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
32 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
43 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
56 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
67 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
78 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
91 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
104 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
118 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
[all …]
cmpxchg-04.ll
10 %pairval = cmpxchg i64 *%src, i64 %cmp, i64 %swap seq_cst seq_cst
21 %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
34 %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
45 %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
56 %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
69 %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
82 %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
93 %pairval = cmpxchg i64 *%ptr, i64 1001, i64 %swap seq_cst seq_cst
104 %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 1002 seq_cst seq_cst
117 %pairval = cmpxchg i64 *%src, i64 %cmp, i64 %swap seq_cst seq_cst
[all …]
cmpxchg-06.ll
16 %pairval = cmpxchg i128 *%src, i128 %cmp, i128 %swap seq_cst seq_cst
27 %pairval = cmpxchg i128 *%ptr, i128 %cmp, i128 %swap seq_cst seq_cst
40 %pairval = cmpxchg i128 *%ptr, i128 %cmp, i128 %swap seq_cst seq_cst
51 %pairval = cmpxchg i128 *%ptr, i128 %cmp, i128 %swap seq_cst seq_cst
62 %pairval = cmpxchg i128 *%ptr, i128 %cmp, i128 %swap seq_cst seq_cst
75 %pairval = cmpxchg i128 *%ptr, i128 %cmp, i128 %swap seq_cst seq_cst
88 %pairval = cmpxchg i128 *%ptr, i128 %cmp, i128 %swap seq_cst seq_cst
99 %pairval = cmpxchg i128 *%ptr, i128 1001, i128 %swap seq_cst seq_cst
110 %pairval = cmpxchg i128 *%ptr, i128 %cmp, i128 1002 seq_cst seq_cst
127 %pairval = cmpxchg i128 *%src, i128 %cmp, i128 %swap seq_cst seq_cst
[all …]
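In these tests %pair/%pairval bind the cmpxchg result, which is a {value, success} struct; callers take it apart with extractvalue. A sketch (function name illustrative):

define i32 @cas_old_value(i32* %ptr, i32 %cmp, i32 %swap) {
  %pair = cmpxchg i32* %ptr, i32 %cmp, i32 %swap seq_cst seq_cst
  %val = extractvalue { i32, i1 } %pair, 0   ; value that was in memory
  %ok  = extractvalue { i32, i1 } %pair, 1   ; true iff the swap happened
  ret i32 %val
}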
/external/llvm/test/CodeGen/NVPTX/
atomics.ll
7 %ret = atomicrmw add i32* %addr, i32 %val seq_cst
14 %ret = atomicrmw add i64* %addr, i64 %val seq_cst
22 %ret = atomicrmw sub i32* %subr, i32 %val seq_cst
30 %ret = atomicrmw sub i64* %subr, i64 %val seq_cst
37 %ret = atomicrmw and i32* %subr, i32 %val seq_cst
44 %ret = atomicrmw and i64* %subr, i64 %val seq_cst
50 ; %ret = atomicrmw nand i32* %subr, i32 %val seq_cst
55 ; %ret = atomicrmw nand i64* %subr, i64 %val seq_cst
62 %ret = atomicrmw or i32* %subr, i32 %val seq_cst
69 %ret = atomicrmw or i64* %subr, i64 %val seq_cst
[all …]
/external/llvm/test/CodeGen/SystemZ/
cmpxchg-03.ll
10 %pair = cmpxchg i32 *%src, i32 %cmp, i32 %swap seq_cst seq_cst
21 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
32 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
43 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
56 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
67 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
78 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
91 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
104 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
118 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
[all …]
cmpxchg-04.ll
10 %pairval = cmpxchg i64 *%src, i64 %cmp, i64 %swap seq_cst seq_cst
21 %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
34 %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
45 %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
56 %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
69 %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
82 %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
93 %pairval = cmpxchg i64 *%ptr, i64 1001, i64 %swap seq_cst seq_cst
104 %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 1002 seq_cst seq_cst
/external/llvm/test/CodeGen/AMDGPU/
local-atomics64.ll
8 %result = atomicrmw xchg i64 addrspace(3)* %ptr, i64 4 seq_cst
18 %result = atomicrmw xchg i64 addrspace(3)* %gep, i64 4 seq_cst
27 %result = atomicrmw add i64 addrspace(3)* %ptr, i64 4 seq_cst
43 %result = atomicrmw add i64 addrspace(3)* %gep, i64 9 seq_cst
55 %result = atomicrmw add i64 addrspace(3)* %ptr, i64 1 seq_cst
65 %result = atomicrmw add i64 addrspace(3)* %gep, i64 1 seq_cst
74 %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 4 seq_cst
84 %result = atomicrmw sub i64 addrspace(3)* %gep, i64 4 seq_cst
96 %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 1 seq_cst
106 %result = atomicrmw sub i64 addrspace(3)* %gep, i64 1 seq_cst
[all …]
flat_atomics_i64.ll
9 %tmp0 = atomicrmw volatile add i64 addrspace(4)* %gep, i64 %in seq_cst
19 %tmp0 = atomicrmw volatile add i64 addrspace(4)* %gep, i64 %in seq_cst
30 %tmp0 = atomicrmw volatile add i64 addrspace(4)* %gep, i64 %in seq_cst
41 %tmp0 = atomicrmw volatile add i64 addrspace(4)* %gep, i64 %in seq_cst
50 %tmp0 = atomicrmw volatile add i64 addrspace(4)* %out, i64 %in seq_cst
59 %tmp0 = atomicrmw volatile add i64 addrspace(4)* %out, i64 %in seq_cst
69 %tmp0 = atomicrmw volatile add i64 addrspace(4)* %ptr, i64 %in seq_cst
79 %tmp0 = atomicrmw volatile add i64 addrspace(4)* %ptr, i64 %in seq_cst
89 %tmp0 = atomicrmw volatile and i64 addrspace(4)* %gep, i64 %in seq_cst
99 %tmp0 = atomicrmw volatile and i64 addrspace(4)* %gep, i64 %in seq_cst
[all …]
flat_atomics.ll
9 %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
19 %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
30 %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
41 %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
50 %val = atomicrmw volatile add i32 addrspace(4)* %out, i32 %in seq_cst
59 %val = atomicrmw volatile add i32 addrspace(4)* %out, i32 %in seq_cst
69 %val = atomicrmw volatile add i32 addrspace(4)* %ptr, i32 %in seq_cst
79 %val = atomicrmw volatile add i32 addrspace(4)* %ptr, i32 %in seq_cst
89 %val = atomicrmw volatile and i32 addrspace(4)* %gep, i32 %in seq_cst
99 %val = atomicrmw volatile and i32 addrspace(4)* %gep, i32 %in seq_cst
[all …]
local-atomics.ll
15 %result = atomicrmw xchg i32 addrspace(3)* %ptr, i32 4 seq_cst
26 %result = atomicrmw xchg i32 addrspace(3)* %gep, i32 4 seq_cst
41 %result = atomicrmw add i32 addrspace(3)* %ptr, i32 4 seq_cst
52 %result = atomicrmw add i32 addrspace(3)* %gep, i32 4 seq_cst
66 %result = atomicrmw add i32 addrspace(3)* %gep, i32 4 seq_cst
77 %result = atomicrmw add i32 addrspace(3)* %ptr, i32 1 seq_cst
89 %result = atomicrmw add i32 addrspace(3)* %gep, i32 1 seq_cst
103 %result = atomicrmw add i32 addrspace(3)* %gep, i32 1 seq_cst
113 %result = atomicrmw sub i32 addrspace(3)* %ptr, i32 4 seq_cst
124 %result = atomicrmw sub i32 addrspace(3)* %gep, i32 4 seq_cst
[all …]
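The local-atomics tests operate on LDS pointers, addrspace(3), and store the returned old value to a global output buffer. A representative complete function (kernel name illustrative):

define amdgpu_kernel void @lds_atomic_add_ret(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) {
  %result = atomicrmw add i32 addrspace(3)* %ptr, i32 4 seq_cst
  store i32 %result, i32 addrspace(1)* %out, align 4
  ret void
}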
/external/llvm/test/Transforms/AtomicExpand/X86/
expand-atomic-non-integer.ll
20 ; CHECK: %2 = load atomic i32, i32* %1 seq_cst, align 4
23 %res = load atomic float, float* %ptr seq_cst, align 4
60 ; CHECK: store atomic i32 %1, i32* %2 seq_cst, align 4
61 store atomic float %v, float* %ptr seq_cst, align 4
87 ; CHECK: %3 = cmpxchg i64* %1, i64 0, i64 %2 seq_cst monotonic
93 cmpxchg i8** %ptr, i8* null, i8* %v seq_cst monotonic
115 ; CHECK: %3 = cmpxchg i64* %1, i64 0, i64 %2 seq_cst seq_cst
121 cmpxchg i8** %ptr, i8* null, i8* %v seq_cst seq_cst
129 ; CHECK: %3 = cmpxchg weak i64* %1, i64 0, i64 %2 seq_cst seq_cst
135 cmpxchg weak i8** %ptr, i8* null, i8* %v seq_cst seq_cst
[all …]
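The CHECK lines show AtomicExpand's strategy for non-integer atomics: bitcast to an integer type of the same width, perform the integer atomic access, and bitcast the value back. Side by side (fragment; %ptr assumed to be float*):

; before expansion
%res = load atomic float, float* %ptr seq_cst, align 4

; after expansion, matching the CHECK lines above
%1 = bitcast float* %ptr to i32*
%2 = load atomic i32, i32* %1 seq_cst, align 4
%res.f = bitcast i32 %2 to float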
/external/llvm-project/llvm/test/Transforms/AtomicExpand/X86/
expand-atomic-non-integer.ll
20 ; CHECK: %2 = load atomic i32, i32* %1 seq_cst, align 4
23 %res = load atomic float, float* %ptr seq_cst, align 4
60 ; CHECK: store atomic i32 %1, i32* %2 seq_cst, align 4
61 store atomic float %v, float* %ptr seq_cst, align 4
87 ; CHECK: %3 = cmpxchg i64* %1, i64 0, i64 %2 seq_cst monotonic
93 cmpxchg i8** %ptr, i8* null, i8* %v seq_cst monotonic
115 ; CHECK: %3 = cmpxchg i64* %1, i64 0, i64 %2 seq_cst seq_cst
121 cmpxchg i8** %ptr, i8* null, i8* %v seq_cst seq_cst
129 ; CHECK: %3 = cmpxchg weak i64* %1, i64 0, i64 %2 seq_cst seq_cst
135 cmpxchg weak i8** %ptr, i8* null, i8* %v seq_cst seq_cst
[all …]
