/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/ |
D | element-atomic-memintrins.ll |
    11   call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %dest, i8 1, i32 0, i32 1)
    17   ; CHECK-NEXT: store atomic i8 1, i8* [[DEST:%.*]] unordered, align 1
    19   ; CHECK-NEXT: store atomic i16 257, i16* [[TMP1]] unordered, align 1
    21   ; CHECK-NEXT: store atomic i32 16843009, i32* [[TMP2]] unordered, align 1
    23   ; CHECK-NEXT: store atomic i64 72340172838076673, i64* [[TMP3]] unordered, align 1
    24   ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 [[DEST]], i8 …
    27   call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %dest, i8 1, i32 1, i32 1)
    28   call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %dest, i8 1, i32 2, i32 1)
    29   call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %dest, i8 1, i32 4, i32 1)
    30   call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %dest, i8 1, i32 8, i32 1)
    [all …]
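Note: the CHECK lines above capture the whole transform. When the length argument is a small constant, InstCombine replaces the element-wise atomic memset with a single unordered store of the fill byte splatted across a wider integer: 0x01 splatted into i16 is 257, into i32 is 16843009, and into i64 is 72340172838076673. A minimal standalone sketch; the function name and the align-4 destination are illustrative, not taken from the test:

    declare void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nocapture writeonly, i8, i32, i32)

    ; With a constant length of 4 and element size 1, `opt -instcombine` (LLVM 7
    ; pass syntax) should rewrite this call into one unordered i32 store of the
    ; splatted byte, 0x01010101 = 16843009.
    define void @memset_4_bytes(i8* %dest) {
      call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %dest, i8 1, i32 4, i32 1)
      ret void
    }
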
D | atomic.ll |
    46   %y = load atomic i32, i32* %p unordered, align 4
    56   ; CHECK: %x = load atomic i32, i32* %p unordered, align 4
    57   %x = load atomic i32, i32* %p unordered, align 4
    66   ; CHECK: %x = load atomic i32, i32* %p unordered, align 4
    68   %x = load atomic i32, i32* %p unordered, align 4
    69   %y = load atomic i32, i32* %p unordered, align 4
    96   ; An unordered access to null is still unreachable. There's no
    101  %x = load atomic i32, i32* null unordered, align 4
    107  ; CHECK: load atomic i32, i32* null unordered
    108  %x = load atomic i32, i32* null unordered, align 4
    [all …]
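Note: the null-pointer tests quoted at lines 96-108 rely on the fact that ordering does not rescue an invalid access: in the default address space a load from null is undefined whatever its atomic ordering, so the optimizer may treat the path as unreachable. A hedged sketch of the pattern; the function name is made up:

    ; An unordered atomic load from null in address space 0 is still undefined
    ; behavior; InstCombine is free to treat this code path as unreachable
    ; rather than preserve the access.
    define i32 @load_from_null() {
      %x = load atomic i32, i32* null unordered, align 4
      ret i32 %x
    }
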
D | store.ll |
    152  store atomic i32 0, i32* %p unordered, align 4
    159  ; CHECK-NEXT: store atomic i32 0, i32* %p unordered, align 4
    162  store atomic i32 0, i32* %p unordered, align 4
    168  ; CHECK-NEXT: store atomic i32 0, i32* %p unordered, align 4
    170  store atomic i32 0, i32* %p unordered, align 4
    171  store atomic i32 0, i32* %p unordered, align 4
    175  ; Implementation limit - could remove unordered store here, but
    182  store atomic i32 0, i32* %p unordered, align 4
    198  %v = load atomic i32, i32* %p unordered, align 4
    207  store atomic i32 %v, i32* %p unordered, align 4
    [all …]
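Note: lines 170-171 pair with the single store in the CHECK at line 168: of two back-to-back unordered stores of the same value to the same pointer, the first is dead. Lines 198 and 207 show the companion pattern, storing back a value just loaded from the same location. A sketch of the first case, assuming nothing else touches %p in between:

    ; InstCombine may delete the first of these two stores; unordered atomics
    ; forbid tearing and invented values, not dead-store removal.
    define void @store_same_twice(i32* %p) {
      store atomic i32 0, i32* %p unordered, align 4
      store atomic i32 0, i32* %p unordered, align 4
      ret void
    }
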
/external/swiftshader/third_party/llvm-7.0/llvm/test/Verifier/ |
D | element-wise-atomic-memory-intrinsics.ll |
    4    ; CHECK: element size of the element-wise unordered atomic memory intrinsic must be a constant int
    5    …call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 …
    7    …call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 …
    10   …call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 …
    13   …call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* %P, i8* align 4 %Q, i32 1, i32 1)
    15   …call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %P, i8* align 4 %Q, i32 …
    18   …call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* %Q, i32 1, i32 1)
    20   …call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 1 %Q, i32 …
    24   declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32,…
    27   ; CHECK: element size of the element-wise unordered atomic memory intrinsic must be a constant int
    [all …]
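Note: the diagnostics quoted above come from the IR verifier, which rejects element-wise atomic intrinsic calls whose trailing element-size operand is not a constant integer, or whose pointer arguments lack an align attribute at least as large as the element size (the lines at 13 and 18 drop the align on one side each). A call the verifier accepts looks like the sketch below; the LangRef additionally requires the length to be a multiple of the element size, a semantic rule rather than a verifier check:

    declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32)

    ; Constant element size (i32 4), both pointers aligned to at least 4, and a
    ; length (16) that is a multiple of the element size.
    define void @well_formed_copy(i8* %P, i8* %Q) {
      call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 16, i32 4)
      ret void
    }
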
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/DeadStoreElimination/ |
D | OverwriteStoreBegin.ll |
    30   ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 [[TMP0]], i8 …
    32   ; CHECK-NEXT: store atomic i32 1, i32* [[ARRAYIDX1]] unordered, align 4
    38   call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 %p3, i8 0, i64 28, i32 4)
    40   store atomic i32 1, i32* %arrayidx1 unordered, align 4
    65   ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 [[TMP0]], i8 …
    66   ; CHECK-NEXT: store atomic i32 1, i32* [[P]] unordered, align 4
    71   call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 %p3, i8 0, i64 28, i32 4)
    72   store atomic i32 1, i32* %p unordered, align 4
    82   ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 [[TMP0]], i8 …
    88   call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 %p3, i8 0, i64 28, i32 4)
    [all …]
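Note: the pattern here is store shortening rather than removal: the later unordered store fully covers the leading bytes of the 28-byte atomic memset, so DSE may advance the memset's start pointer (the [[TMP0]] in the CHECK lines) and shrink its length; OverwriteStoreEnd.ll below does the symmetric trim at the tail. A sketch with illustrative names, mirroring the lines 71-72 case where the store hits the very front of the range:

    declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture writeonly, i8, i64, i32)

    ; The store rewrites the first 4 bytes of the memset's range, so DSE may
    ; shorten the memset to 24 bytes starting one i32 later.
    define void @memset_then_write_front(i32* %p) {
      %p3 = bitcast i32* %p to i8*
      call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 %p3, i8 0, i64 28, i32 4)
      store atomic i32 1, i32* %p unordered, align 4
      ret void
    }
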
D | memintrinsics.ll |
    50   declare void @llvm.memcpy.element.unordered.atomic.p0i16.p0i16.i16(i16* nocapture, i16* nocapture, …
    51   declare void @llvm.memmove.element.unordered.atomic.p0i16.p0i16.i16(i16* nocapture, i16* nocapture,…
    52   declare void @llvm.memset.element.unordered.atomic.p0i16.i16(i16* nocapture, i8, i16, i32) nounwind
    62   store atomic i16 0, i16* %A unordered, align 2 ;; Written to by memcpy
    63   store atomic i16 0, i16* %B unordered, align 2 ;; Read by memcpy
    65   …call void @llvm.memcpy.element.unordered.atomic.p0i16.p0i16.i16(i16* align 2 %A, i16* align 2 %B, …
    77   store atomic i16 0, i16* %A unordered, align 2 ;; Written to by memmove
    78   store atomic i16 0, i16* %B unordered, align 2 ;; Read by memmove
    80   …call void @llvm.memmove.element.unordered.atomic.p0i16.p0i16.i16(i16* align 2 %A, i16* align 2 %B,…
    92   store atomic i16 0, i16* %A unordered, align 2 ;; Written to by memset
    [all …]
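Note: the ;; annotations quoted from the test state the expected outcome: a store that the atomic memcpy/memmove/memset later overwrites is dead, while a store the intrinsic reads must survive. A sketch of the memcpy case; the noalias attributes are added here so the aliasing is unambiguous:

    declare void @llvm.memcpy.element.unordered.atomic.p0i16.p0i16.i16(i16* nocapture, i16* nocapture, i16, i32) nounwind

    define void @dead_then_live(i16* noalias %A, i16* noalias %B) {
      store atomic i16 0, i16* %A unordered, align 2   ;; dead: the copy overwrites %A
      store atomic i16 0, i16* %B unordered, align 2   ;; live: the copy reads %B
      call void @llvm.memcpy.element.unordered.atomic.p0i16.p0i16.i16(i16* align 2 %A, i16* align 2 %B, i16 1024, i32 2)
      ret void
    }
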
D | OverwriteStoreEnd.ll |
    35   ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 [[P3]], i8 0,…
    37   ; CHECK-NEXT: store atomic i32 1, i32* [[ARRAYIDX1]] unordered, align 4
    43   call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 %p3, i8 0, i64 28, i32 4)
    45   store atomic i32 1, i32* %arrayidx1 unordered, align 4
    55   ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 [[P3]], i8 0,…
    63   call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 %p3, i8 0, i64 28, i32 4)
    90   ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 [[P3]], i8 0,…
    92   ; CHECK-NEXT: store atomic i32 1, i32* [[ARRAYIDX1]] unordered, align 4
    97   call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 %p3, i8 0, i64 32, i32 4)
    99   store atomic i32 1, i32* %arrayidx1 unordered, align 4
    [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | element-wise-atomic-memory-intrinsics.ll |
    5    …call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 …
    14   …call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 …
    23   …call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 …
    32   …call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %P, i8* align 8 %Q, i32 …
    41   …call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %P, i8* align 16 %Q, i3…
    62   …call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %Dst, i8* align 4 %Src, …
    68   …call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32…
    77   …call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32…
    86   …call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32…
    95   …call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %P, i8* align 8 %Q, i32…
    [all …]
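Note: X86 has no inline expansion for these intrinsics, so the backend lowers each call to a runtime helper chosen by element size (e.g. __llvm_memcpy_element_unordered_atomic_4 for element size 4); the alignment variants above exercise the 1/2/4/8/16-byte helpers. A sketch of an input that should become such a libcall; the function name is illustrative and the exact codegen depends on target options:

    declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32)

    ; Expected to compile to a call to __llvm_memcpy_element_unordered_atomic_4,
    ; selected by the i32 4 element-size operand.
    define void @copy_1024_by_4(i8* %P, i8* %Q) {
      call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1024, i32 4)
      ret void
    }
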
/external/llvm/test/Transforms/GVN/ |
D | atomic.ll |
    9    ; GVN across unordered store (allowed)
    15   store atomic i32 %x, i32* @x unordered, align 4
    21   ; GVN across unordered load (allowed)
    27   %y = load atomic i32, i32* @x unordered, align 4
    34   ; GVN load to unordered load (allowed)
    39   %x = load atomic i32, i32* @x unordered, align 4
    45   ; GVN unordered load to load (unordered load must not be removed)
    48   ; CHECK: load atomic i32, i32* @x unordered
    51   %x2 = load atomic i32, i32* @x unordered, align 4
    81   ; GVN of an unordered across monotonic load (not allowed)
    [all …]
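Note: the test comments summarize the legality rules: GVN may look through unordered loads and stores, and may replace a later plain load with an earlier unordered one, but it must not delete the unordered load itself, and monotonic (or stronger) operations block the forwarding. A sketch of the first case, reusing the store shape from line 15; the function name is made up:

    @x = global i32 0

    ; GVN may forward %x to %y: the intervening store is unordered and writes
    ; back the same value, so the second load is redundant.
    define i32 @across_unordered_store() {
      %x = load i32, i32* @x
      store atomic i32 %x, i32* @x unordered, align 4
      %y = load i32, i32* @x
      %sum = add i32 %x, %y
      ret i32 %sum
    }
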
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/GVN/PRE/ |
D | atomic.ll |
    9    ; GVN across unordered store (allowed)
    15   store atomic i32 %x, i32* @x unordered, align 4
    21   ; GVN across unordered load (allowed)
    27   %y = load atomic i32, i32* @x unordered, align 4
    34   ; GVN load to unordered load (allowed)
    39   %x = load atomic i32, i32* @x unordered, align 4
    45   ; GVN unordered load to load (unordered load must not be removed)
    48   ; CHECK: load atomic i32, i32* @x unordered
    51   %x2 = load atomic i32, i32* @x unordered, align 4
    81   ; GVN of an unordered across monotonic load (not allowed)
    [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/LoopIdiom/X86/ |
D | unordered-atomic-memcpy.ll |
    8    ; CHECK: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %Dest, i8* align…
    20   %V = load atomic i8, i8* %I.0.014 unordered, align 1
    21   store atomic i8 %V, i8* %DestI unordered, align 1
    33   ; CHECK: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %Dest, i8* align…
    46   store atomic i8 %V, i8* %DestI unordered, align 1
    58   ; CHECK-NOT: call void @llvm.memcpy.element.unordered.atomic
    71   store atomic i8 %V, i8* %DestI unordered, align 1
    83   ; CHECK-NOT: call void @llvm.memcpy.element.unordered.atomic
    96   store atomic i32 %V, i32* %DestI unordered, align 4
    108  ; CHECK-NOT: call void @llvm.memcpy.element.unordered.atomic
    [all …]
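Note: this is the LoopIdiom recognizer turning a hand-written copy loop into the element-wise atomic memcpy intrinsic; the CHECK-NOT cases cover loops it must reject, such as when the access width is unsupported for the target. A sketch of the accepted shape, assuming a target whose TTI enables unordered-atomic memcpy formation (hence the X86 subdirectory); all names are illustrative:

    ; -loop-idiom (LLVM 7 pass syntax) can collapse this loop into
    ; @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64 with element size 1,
    ; provided %Size >= 1 and the pointers do not alias.
    define void @byte_copy(i8* noalias %Dest, i8* noalias %Src, i64 %Size) {
    entry:
      br label %loop
    loop:
      %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
      %SrcI = getelementptr i8, i8* %Src, i64 %i
      %DestI = getelementptr i8, i8* %Dest, i64 %i
      %V = load atomic i8, i8* %SrcI unordered, align 1
      store atomic i8 %V, i8* %DestI unordered, align 1
      %i.next = add nuw i64 %i, 1
      %done = icmp eq i64 %i.next, %Size
      br i1 %done, label %exit, label %loop
    exit:
      ret void
    }
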
/external/swiftshader/third_party/LLVM/test/Transforms/GVN/ |
D | atomic.ll |
    9    ; GVN across unordered store (allowed)
    15   store atomic i32 %x, i32* @x unordered, align 4
    33   ; GVN across unordered load (allowed)
    39   %y = load atomic i32* @x unordered, align 4
    60   ; GVN load to unordered load (allowed)
    65   %x = load atomic i32* @x unordered, align 4
    71   ; GVN unordered load to load (unordered load must not be removed)
    74   ; CHECK: load atomic i32* @x unordered
    77   %x2 = load atomic i32* @x unordered, align 4
/external/swiftshader/third_party/llvm-7.0/llvm/test/Instrumentation/DataFlowSanitizer/ |
D | unordered_atomic_mem_intrins.ll |
    11   declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture writeonly, i8, i64, i32) …
    12   declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* noca…
    13   declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocap…
    17   …; CHECK: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %0, i8* align 1…
    19   …call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %0, i8* align 1 %1, i64 …
    25   …; CHECK: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %0, i8* align …
    27   …call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %0, i8* align 1 %1, i64…
    33   …; CHECK: call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %0, i8 88, i64 16, i…
    35   call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %0, i8 88, i64 16, i32 1)
/external/llvm/test/Transforms/InstCombine/ |
D | atomic.ll |
    46   %y = load atomic i32, i32* %p unordered, align 4
    56   ; CHECK: %x = load atomic i32, i32* %p unordered, align 4
    57   %x = load atomic i32, i32* %p unordered, align 4
    66   ; CHECK: %x = load atomic i32, i32* %p unordered, align 4
    68   %x = load atomic i32, i32* %p unordered, align 4
    69   %y = load atomic i32, i32* %p unordered, align 4
    96   ; An unordered access to null is still unreachable. There's no
    101  %x = load atomic i32, i32* null unordered, align 4
    121  ; An unordered access to null is still unreachable. There's no
    126  store atomic i32 0, i32* null unordered, align 4
    [all …]
D | store.ll |
    133  store atomic i32 0, i32* %p unordered, align 4
    140  ; CHECK-NEXT: store atomic i32 0, i32* %p unordered, align 4
    143  store atomic i32 0, i32* %p unordered, align 4
    149  ; CHECK-NEXT: store atomic i32 0, i32* %p unordered, align 4
    151  store atomic i32 0, i32* %p unordered, align 4
    152  store atomic i32 0, i32* %p unordered, align 4
    156  ; Implementation limit - could remove unordered store here, but
    163  store atomic i32 0, i32* %p unordered, align 4
    179  %v = load atomic i32, i32* %p unordered, align 4
    188  store atomic i32 %v, i32* %p unordered, align 4
    [all …]
/external/llvm/test/Transforms/EarlyCSE/ |
D | atomics.ll |
    26   ; atomic to unordered atomic forwarding is legal
    29   %b = load atomic i32, i32* %P1 unordered, align 4
    38   ; than unordered
    55   %b = load atomic i32, i32* %P1 unordered, align 4
    175  ; Can DSE a normal store in favor of a unordered one
    180  store atomic i32 3, i32* %P1 unordered, align 4
    184  ; Can also DSE a unordered store in favor of a normal one
    188  store atomic i32 3, i32* %P1 unordered, align 4
    216  ; Can DSE a unordered store in favor of a unordered one
    219  ; CHECK-NEXT: store atomic i32 3, i32* %P1 unordered, align 4
    [all …]
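Note: the quoted comments spell out EarlyCSE's policy: values may be forwarded between unordered atomic accesses of the same location, and a store (plain or unordered) may be DSE'd in favor of a following store of the same location, but anything ordered more strongly than unordered blocks both. A sketch of the load-forwarding case; the function name is made up:

    ; EarlyCSE may replace %b with %a: reusing a value already observed from
    ; %P1 is legal for unordered atomics, so %diff folds to 0.
    define i32 @forward_unordered(i32* %P1) {
      %a = load atomic i32, i32* %P1 unordered, align 4
      %b = load atomic i32, i32* %P1 unordered, align 4
      %diff = sub i32 %a, %b
      ret i32 %diff
    }
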
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/EarlyCSE/ |
D | atomics.ll |
    27   ; atomic to unordered atomic forwarding is legal
    30   %b = load atomic i32, i32* %P1 unordered, align 4
    39   ; than unordered
    56   %b = load atomic i32, i32* %P1 unordered, align 4
    176  ; Can DSE a normal store in favor of a unordered one
    181  store atomic i32 3, i32* %P1 unordered, align 4
    185  ; Can also DSE a unordered store in favor of a normal one
    189  store atomic i32 3, i32* %P1 unordered, align 4
    217  ; Can DSE a unordered store in favor of a unordered one
    220  ; CHECK-NEXT: store atomic i32 3, i32* %P1 unordered, align 4
    [all …]
/external/swiftshader/third_party/LLVM/test/Transforms/DeadStoreElimination/ |
D | atomic.ll |
    15   ; DSE across unordered store (allowed)
    22   store atomic i32 0, i32* @y unordered, align 4
    51   ; DSE remove unordered store (allowed)
    57   store atomic i32 0, i32* @x unordered, align 4
    62   ; DSE unordered store overwriting non-atomic store (allowed)
    68   store atomic i32 1, i32* @x unordered, align 4
    72   ; DSE no-op unordered atomic store (allowed)
    78   %x = load atomic i32* @x unordered, align 4
    79   store atomic i32 %x, i32* @x unordered, align 4
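Note: this older copy of the test predates the two-type load syntax, hence `load atomic i32* @x`. The last case quoted (lines 78-79) is the no-op store: writing back a value just loaded from the same location changes nothing, so DSE may drop the store despite its atomicity. The same pattern in the newer syntax, with an illustrative function name:

    @x = global i32 0

    ; DSE may remove the store: it writes back the exact value the unordered
    ; load just observed.
    define void @noop_unordered_store() {
      %x = load atomic i32, i32* @x unordered, align 4
      store atomic i32 %x, i32* @x unordered, align 4
      ret void
    }
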
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/AliasSet/ |
D | memtransfer.ll |
    32   store atomic i8 1, i8* %a unordered, align 1
    33   …call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %d, i8* align 1 %s, i64 …
    34   store atomic i8 1, i8* %b unordered, align 1
    80   store atomic i8 1, i8* %a unordered, align 1
    81   …call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %d, i8* align 1 %s, i64…
    82   store atomic i8 1, i8* %b unordered, align 1
    124  store atomic i8 1, i8* %a unordered, align 1
    125  …call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 …
    126  store atomic i8 1, i8* %b unordered, align 1
    152  store atomic i8 1, i8* %a unordered, align 1
    [all …]
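Note: this test checks how the AliasSetTracker files element-wise atomic transfers: the intrinsic is modeled as a modification of the destination range and a read of the source range, so it joins the alias sets of the surrounding unordered stores accordingly. A sketch of the kind of input involved; printing the sets with `opt -basicaa -print-alias-sets` is an assumption about the test's RUN line, not a quote from it:

    declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32)

    ; The copy should appear as Mod of %d and Ref of %s in the printed sets,
    ; alongside the two unordered byte stores.
    define void @sets(i8* %d, i8* %s, i8* %a, i8* %b) {
      store atomic i8 1, i8* %a unordered, align 1
      call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %d, i8* align 1 %s, i64 1, i32 1)
      store atomic i8 1, i8* %b unordered, align 1
      ret void
    }
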
/external/swiftshader/third_party/llvm-7.0/llvm/test/Instrumentation/ThreadSanitizer/ |
D | atomic-non-integer.ll |
    6    %v = load atomic float, float* %fptr unordered, align 4
    14   %v = load atomic double, double* %fptr unordered, align 8
    22   %v = load atomic fp128, fp128* %fptr unordered, align 16
    30   store atomic float %v, float* %fptr unordered, align 4
    38   store atomic double %v, double* %fptr unordered, align 8
    46   store atomic fp128 %v, fp128* %fptr unordered, align 16
/external/llvm/test/Transforms/AtomicExpand/X86/ |
D | expand-atomic-non-integer.ll |
    10   ; CHECK: %2 = load atomic i32, i32* %1 unordered, align 4
    13   %res = load atomic float, float* %ptr unordered, align 4
    30   ; CHECK: %2 = load atomic volatile i32, i32* %1 unordered, align 4
    33   %res = load atomic volatile float, float* %ptr unordered, align 4
    40   ; CHECK: %2 = load atomic i32, i32 addrspace(1)* %1 unordered, align 4
    43   %res = load atomic float, float addrspace(1)* %ptr unordered, align 4
    51   ; CHECK: store atomic i32 %1, i32* %2 unordered, align 4
    52   store atomic float %v, float* %ptr unordered, align 4
    69   ; CHECK: store atomic volatile i32 %1, i32* %2 unordered, align 4
    70   store atomic volatile float %v, float* %ptr unordered, align 4
    [all …]
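Note: the CHECK lines show the whole rewrite: AtomicExpand turns an atomic access of a floating-point type into an integer atomic of the same width plus bitcasts, since backends implement atomics on integers; volatility, ordering, and address space carry over unchanged. Input and expected output together, with the output reproduced in comments from the CHECKs above:

    define float @float_load_expand(float* %ptr) {
      ; before: one atomic float load
      %res = load atomic float, float* %ptr unordered, align 4
      ; after the atomic-expand pass (sketch):
      ;   %1 = bitcast float* %ptr to i32*
      ;   %2 = load atomic i32, i32* %1 unordered, align 4
      ;   %3 = bitcast i32 %2 to float
      ret float %res
    }
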
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/AtomicExpand/X86/ |
D | expand-atomic-non-integer.ll |
    10   ; CHECK: %2 = load atomic i32, i32* %1 unordered, align 4
    13   %res = load atomic float, float* %ptr unordered, align 4
    30   ; CHECK: %2 = load atomic volatile i32, i32* %1 unordered, align 4
    33   %res = load atomic volatile float, float* %ptr unordered, align 4
    40   ; CHECK: %2 = load atomic i32, i32 addrspace(1)* %1 unordered, align 4
    43   %res = load atomic float, float addrspace(1)* %ptr unordered, align 4
    51   ; CHECK: store atomic i32 %1, i32* %2 unordered, align 4
    52   store atomic float %v, float* %ptr unordered, align 4
    69   ; CHECK: store atomic volatile i32 %1, i32* %2 unordered, align 4
    70   store atomic volatile float %v, float* %ptr unordered, align 4
    [all …]
/external/swiftshader/third_party/LLVM/test/Transforms/LICM/ |
D | atomics.ll |
    3    ; Check that we can hoist unordered loads
    10   %val = load atomic i32* %y unordered, align 4
    42   ; Check that we hoist unordered around monotonic.
    51   %valb = load atomic i32* %x unordered, align 4
    58   ; CHECK: load atomic i32* %x unordered
    62   ; Don't try to "sink" unordered stores yet; it is legal, but the machinery
    70   store atomic i32 %vala, i32* %x unordered, align 4
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/LICM/ |
D | atomics.ll |
    4    ; Check that we can hoist unordered loads
    11   %val = load atomic i32, i32* %y unordered, align 4
    43   ; Check that we hoist unordered around monotonic.
    52   %valb = load atomic i32, i32* %x unordered, align 4
    59   ; CHECK: load atomic i32, i32* %x unordered
    63   ; We can sink an unordered store
    70   store atomic i32 %vala, i32* %x unordered, align 4
    82   ; CHECK: store atomic i32 %[[LCSSAPHI]], i32* %x unordered, align 4
    128  store atomic volatile i32 %vala, i32* %x unordered, align 4
    148  store atomic i32 %vala, i32* %x unordered, align 4
    [all …]
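Note: relative to the older copy of this test above (which says "Don't try to sink unordered stores yet"), the llvm-7.0 version also covers sinking: an unordered store can be sunk out of the loop through an LCSSA phi (line 82), while the volatile variant at line 128 must stay put. A sketch of the basic hoisting case, assuming nothing in the loop clobbers %y; names are illustrative:

    ; LICM may hoist the unordered load into the preheader: %y is loop-invariant,
    ; the load executes on every iteration, and unordered imposes no ordering
    ; that pins it inside the loop.
    define i32 @hoist_unordered(i32* noalias %y, i32 %n) {
    entry:
      br label %loop
    loop:
      %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
      %sum = phi i32 [ 0, %entry ], [ %sum.next, %loop ]
      %val = load atomic i32, i32* %y unordered, align 4
      %sum.next = add i32 %sum, %val
      %i.next = add nuw i32 %i, 1
      %again = icmp slt i32 %i.next, %n
      br i1 %again, label %loop, label %exit
    exit:
      ret i32 %sum.next
    }
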
/external/llvm/test/CodeGen/X86/ |
D | atomic-non-integer.ll |
    15   store atomic half %v, half* %fptr unordered, align 2
    23   store atomic float %v, float* %fptr unordered, align 4
    31   store atomic double %v, double* %fptr unordered, align 8
    38   store atomic fp128 %v, fp128* %fptr unordered, align 16
    47   %v = load atomic half, half* %fptr unordered, align 2
    55   %v = load atomic float, float* %fptr unordered, align 4
    63   %v = load atomic double, double* %fptr unordered, align 8
    70   %v = load atomic fp128, fp128* %fptr unordered, align 16
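Note: here the non-integer atomics reach actual x86 codegen. Aligned loads and stores up to 64 bits are already atomic on x86, so the half/float/double cases can move the bits through an integer register as ordinary loads and stores, while fp128 needs a wider special-case sequence or a library call. A minimal sketch of the float case; the described lowering is an assumption about typical x86-64 output, not a quote from the test:

    ; Expected to lower to a plain 32-bit integer store (value transferred
    ; through a GPR), with no lock prefix or fence needed for unordered.
    define void @store_float(float* %fptr, float %v) {
      store atomic float %v, float* %fptr unordered, align 4
      ret void
    }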