/external/llvm-project/llvm/test/Transforms/InferAddressSpaces/AMDGPU/ |
D | icmp.ll |
  15  %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32*
  16  %cmp = icmp eq i32* %cast0, %cast1
  22  ; CHECK: %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32*
  23  ; CHECK: %cmp = icmp eq i32* %cast0, %cast1
  26  %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32*
  27  %cmp = icmp eq i32* %cast0, %cast1
  41  ; CHECK: %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32*
  42  ; CHECK: %cmp = icmp eq i32* %flat.ptr.0, %cast1
  44  %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32*
  45  %cmp = icmp eq i32* %flat.ptr.0, %cast1
  [all …]
|
D | select.ll |
  8   ; CHECK-NEXT: %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32*
  9   ; CHECK-NEXT: %select = select i1 %c, i32* %cast0, i32* %cast1
  13  %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32*
  14  %select = select i1 %c, i32* %cast0, i32* %cast1
  23  %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32*
  24  %select = select i1 %c, i32* %cast0, i32* %cast1
  35  %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32*
  36  %select = select i1 %c, i32* %cast0, i32* %cast1, !prof !0
  43  ; CHECK: %cast1 = addrspacecast i32 addrspace(5)* %private.ptr.1 to i32*
  44  ; CHECK: %select = select i1 %c, i32* %cast0, i32* %cast1
  [all …]
|
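For context, a minimal standalone sketch of the pattern the two InferAddressSpaces tests above exercise; the function and value names are hypothetical and typed-pointer (pre-opaque-pointer) IR syntax is assumed to match the listed lines. Both icmp operands are flat views of addrspace(3) pointers, which the pass may rewrite into a compare of the group pointers themselves when both sides can be inferred to the same address space (the CHECK lines above also cover cases where it cannot).

    define i1 @icmp_group_sketch(i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* %group.ptr.1) {
      ; Flat (addrspace 0) views of two group pointers.
      %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
      %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32*
      %cmp = icmp eq i32* %cast0, %cast1
      ret i1 %cmp
    }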
/external/llvm-project/llvm/test/Transforms/InstCombine/ |
D | icmp-custom-dl.ll |
  19  %cast1 = bitcast i32* %gep1 to i8*
  20  %cmp = icmp ult i8* %cast1, %gep2
  21  %use = ptrtoint i8* %cast1 to i64
  37  %cast1 = bitcast i32 addrspace(1)* %gep1 to i8 addrspace(1)*
  38  %cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
  39  %use = ptrtoint i8 addrspace(1)* %cast1 to i64
  55  %cast1 = bitcast i32* %gep1 to i8*
  56  %cmp = icmp ult i8* %cast1, %gep2
  71  %cast1 = bitcast i32 addrspace(1)* %gep1 to i8 addrspace(1)*
  72  %cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
  [all …]
|
D | sub-gep.ll |
  307  %cast1 = ptrtoint i32* %gep1 to i64
  309  %sub = sub i64 %cast1, %cast2
  322  %cast1 = ptrtoint i32 addrspace(1)* %gep1 to i16
  324  %sub = sub i16 %cast1, %cast2
  337  %cast1 = ptrtoint i8* %gep1 to i64
  339  %sub = sub i64 %cast1, %cast2
  352  %cast1 = ptrtoint i8* %gep1 to i64
  354  %sub = sub i64 %cast1, %cast2
  367  %cast1 = ptrtoint i8* %gep1 to i64
  369  %sub = sub i64 %cast1, %cast2
|
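As an illustration of the sub-gep shape listed above, here is a reduced sketch (hypothetical function, not taken from either test, typed-pointer syntax assumed): the difference of two ptrtoint values of GEPs off the same base is the kind of expression these InstCombine tests cover.

    define i64 @gep_diff_sketch(i32* %base, i64 %i, i64 %j) {
      %gep1 = getelementptr inbounds i32, i32* %base, i64 %i
      %gep2 = getelementptr inbounds i32, i32* %base, i64 %j
      ; sub of two ptrtoint casts of GEPs sharing a base pointer.
      %cast1 = ptrtoint i32* %gep1 to i64
      %cast2 = ptrtoint i32* %gep2 to i64
      %sub = sub i64 %cast1, %cast2
      ret i64 %sub
    }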
/external/llvm/test/CodeGen/X86/ |
D | avx512vl-nontemporal.ll |
  9   %cast1 = bitcast i8* %B to <4 x i64>*
  11  store <4 x i64> %E2, <4 x i64>* %cast1, align 64, !nontemporal !0
  25  %cast1 = bitcast i8* %B to <2 x i64>*
  27  store <2 x i64> %E2, <2 x i64>* %cast1, align 64, !nontemporal !0
|
D | avx512-nontemporal.ll |
  9   %cast1 = bitcast i8* %B to <8 x i64>*
  11  store <8 x i64> %E2, <8 x i64>* %cast1, align 64, !nontemporal !0
|
D | avx2-nontemporal.ll |
  52  %cast1 = bitcast i8* %B to <4 x i64>*
  54  store <4 x i64> %E2, <4 x i64>* %cast1, align 32, !nontemporal !0
|
D | nontemporal.ll |
  114  %cast1 = bitcast i8* %B to <2 x i64>*
  116  store <2 x i64> %E2, <2 x i64>* %cast1, align 16, !nontemporal !0
|
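The four X86 CodeGen tests above share one shape; a minimal sketch of it follows (hypothetical function name, SSE-width vector chosen arbitrarily): the i8* argument is bitcast to a vector pointer and the store carries !nontemporal metadata so it can be selected to a non-temporal store instruction.

    define void @nt_store_sketch(i8* %B, <2 x i64> %E2) {
      %cast1 = bitcast i8* %B to <2 x i64>*
      ; !nontemporal marks the store as non-temporal (e.g. movntdq on x86).
      store <2 x i64> %E2, <2 x i64>* %cast1, align 16, !nontemporal !0
      ret void
    }

    !0 = !{i32 1}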
/external/llvm-project/mlir/test/mlir-rocm-runner/ |
D | vector-transferops.mlir |
  65  %cast1 = memref_cast %23 : memref<?xf32> to memref<*xf32>
  68  gpu.host_register %cast1 : memref<*xf32>
  75  call @print_memref_f32(%cast1) : (memref<*xf32>) -> ()
  79  call @print_memref_f32(%cast1) : (memref<*xf32>) -> ()
|
/external/llvm-project/llvm/test/CodeGen/AArch64/ |
D | aarch64-bf16-dotprod-intrinsics.ll |
  33  %.cast1 = bitcast <2 x float> %lane to <4 x bfloat>
  34  …float> @llvm.aarch64.neon.bfdot.v2f32.v4bf16(<2 x float> %r, <4 x bfloat> %a, <4 x bfloat> %.cast1)
  46  %.cast1 = bitcast <4 x float> %lane to <8 x bfloat>
  47  …float> @llvm.aarch64.neon.bfdot.v4f32.v8bf16(<4 x float> %r, <8 x bfloat> %a, <8 x bfloat> %.cast1)
  59  %.cast1 = bitcast <2 x float> %lane to <4 x bfloat>
  60  …float> @llvm.aarch64.neon.bfdot.v2f32.v4bf16(<2 x float> %r, <4 x bfloat> %a, <4 x bfloat> %.cast1)
  73  %.cast1 = bitcast <4 x float> %lane to <8 x bfloat>
  74  …float> @llvm.aarch64.neon.bfdot.v4f32.v8bf16(<4 x float> %r, <8 x bfloat> %a, <8 x bfloat> %.cast1)
|
/external/llvm-project/llvm/test/CodeGen/ARM/ |
D | arm-bf16-dotprod-intrinsics.ll |
  32  %.cast1 = bitcast <2 x float> %lane to <4 x bfloat>
  33  … float> @llvm.arm.neon.bfdot.v2f32.v4bf16(<2 x float> %r, <4 x bfloat> %a, <4 x bfloat> %.cast1) #3
  46  %.cast1 = bitcast <4 x float> %lane to <8 x bfloat>
  47  … float> @llvm.arm.neon.bfdot.v4f32.v8bf16(<4 x float> %r, <8 x bfloat> %a, <8 x bfloat> %.cast1) #3
  59  %.cast1 = bitcast <2 x float> %lane to <4 x bfloat>
  60  … float> @llvm.arm.neon.bfdot.v2f32.v4bf16(<2 x float> %r, <4 x bfloat> %a, <4 x bfloat> %.cast1) #3
  73  %.cast1 = bitcast <4 x float> %lane to <8 x bfloat>
  74  … float> @llvm.arm.neon.bfdot.v4f32.v8bf16(<4 x float> %r, <8 x bfloat> %a, <8 x bfloat> %.cast1) #3
|
/external/llvm-project/clang/test/CodeGenOpenCL/ |
D | as_type.cl |
  92   //CHECK: %[[cast1:.*]] = ptrtoint i32* %[[x]] to i32
  93   //CHECK: %[[cast2:.*]] = bitcast i32 %[[cast1]] to <4 x i8>
  102  //CHECK: %[[cast1:.*]] = bitcast <4 x i8> %[[astype]] to i32
  103  //CHECK: %[[cast2:.*]] = inttoptr i32 %[[cast1]] to i32*
|
/external/llvm-project/llvm/test/CodeGen/X86/ |
D | avx512vl-nontemporal.ll |
  24  %cast1 = bitcast i8* %B to <4 x i64>*
  26  store <4 x i64> %E2, <4 x i64>* %cast1, align 64, !nontemporal !0
  57  %cast1 = bitcast i8* %B to <2 x i64>*
  59  store <2 x i64> %E2, <2 x i64>* %cast1, align 64, !nontemporal !0
|
D | avx512-nontemporal.ll |
  45  %cast1 = bitcast i8* %B to <8 x i64>*
  47  store <8 x i64> %E2, <8 x i64>* %cast1, align 64, !nontemporal !0
|
D | avx2-nontemporal.ll |
  67  %cast1 = bitcast i8* %B to <4 x i64>*
  69  store <4 x i64> %E2, <4 x i64>* %cast1, align 32, !nontemporal !0
|
/external/llvm-project/llvm/test/Transforms/ObjCARC/ |
D | contract-catchswitch.ll |
  45  %cast1 = bitcast i8* %tmp6 to %0*
  59  %tmp8 = phi %0* [ %cast, %catch.dispatch1 ], [ %cast1, %invoke.cont.split ]
|
/external/llvm-project/clang/test/Analysis/ |
D | globals.cpp |
  41  void cast1()    (in cast1() function)
|
/external/llvm/test/Transforms/ConstantHoisting/ARM/ |
D | bad-cases.ll |
  106  %cast1 = trunc i96 %lshr1 to i32
  107  %ret = add i32 %cast0, %cast1
|
/external/llvm-project/llvm/test/Transforms/SROA/ |
D | addrspacecast.ll |
  110  %cast1 = addrspacecast [32 x i8]* %x to i8 addrspace(1)*
  112  call void @llvm.memcpy.p0i8.p1i8.i32(i8* %cast2, i8 addrspace(1)* %cast1, i32 16, i1 false)
  134  %cast1 = addrspacecast i8* %gep.x to i8 addrspace(1)*
  137  call void @llvm.memcpy.p0i8.p1i8.i32(i8* %cast2, i8 addrspace(1)* %cast1, i32 16, i1 false)
  160  %cast1 = addrspacecast i8* %gep0.x to i8 addrspace(1)*
  161  %gep1.x = getelementptr i8, i8 addrspace(1)* %cast1, i32 8
|
D | vector-promotion.ll |
  277  %a.cast1 = bitcast i32* %a.gep1 to <2 x i32>*
  278  store <2 x i32> <i32 1, i32 1>, <2 x i32>* %a.cast1
  311  %a.cast1 = bitcast i32* %a.gep1 to <2 x i32>*
  312  %second = load <2 x i32>, <2 x i32>* %a.cast1
  344  %a.cast1 = bitcast float* %a.gep1 to i8*
  345  call void @llvm.memset.p0i8.i32(i8* %a.cast1, i8 1, i32 8, i1 false)
  379  %a.cast1 = bitcast float* %a.gep1 to i8*
  380  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.cast1, i8* %y, i32 8, i1 false)
  613  %cast1 = bitcast <3 x i32>* %a to <4 x i32>*
  614  store <4 x i32> undef, <4 x i32>* %cast1, align 16
|
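A reduced, hypothetical form of the SROA addrspacecast case listed above: a private alloca is copied out of through an addrspace(1) view of it. Names are illustrative, typed-pointer syntax is assumed, and the memcpy intrinsic is declared in the four-argument form used by the llvm-project copy of the test.

    declare void @llvm.memcpy.p0i8.p1i8.i32(i8*, i8 addrspace(1)*, i32, i1)

    define void @sroa_addrspacecast_sketch(i8* %dst) {
      %x = alloca [32 x i8]
      ; addrspace(1) view of the private alloca, as in addrspacecast.ll above.
      %cast1 = addrspacecast [32 x i8]* %x to i8 addrspace(1)*
      call void @llvm.memcpy.p0i8.p1i8.i32(i8* %dst, i8 addrspace(1)* %cast1, i32 16, i1 false)
      ret void
    }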
/external/llvm-project/llvm/test/Transforms/ConstantHoisting/ARM/ |
D | bad-cases.ll |
  106  %cast1 = trunc i96 %lshr1 to i32
  107  %ret = add i32 %cast0, %cast1
|
/external/llvm/test/Transforms/SROA/ |
D | vector-promotion.ll |
  277  %a.cast1 = bitcast i32* %a.gep1 to <2 x i32>*
  278  store <2 x i32> <i32 1, i32 1>, <2 x i32>* %a.cast1
  311  %a.cast1 = bitcast i32* %a.gep1 to <2 x i32>*
  312  %second = load <2 x i32>, <2 x i32>* %a.cast1
  344  %a.cast1 = bitcast float* %a.gep1 to i8*
  345  call void @llvm.memset.p0i8.i32(i8* %a.cast1, i8 1, i32 8, i32 0, i1 false)
  379  %a.cast1 = bitcast float* %a.gep1 to i8*
  380  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.cast1, i8* %y, i32 8, i32 0, i1 false)
  613  %cast1 = bitcast <3 x i32>* %a to <4 x i32>*
  614  store <4 x i32> undef, <4 x i32>* %cast1, align 16
|
D | alignment.ll |
  41  %cast1 = bitcast i8* %gep1 to i16*
  42  store volatile i16 0, i16* %cast1
|
/external/llvm/test/Transforms/InstCombine/ |
D | icmp.ll |
  709  %cast1 = bitcast i32* %gep1 to i8*
  710  %cmp = icmp ult i8* %cast1, %gep2
  711  %use = ptrtoint i8* %cast1 to i64
  727  %cast1 = bitcast i32 addrspace(1)* %gep1 to i8 addrspace(1)*
  728  %cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
  729  %use = ptrtoint i8 addrspace(1)* %cast1 to i64
  743  %cast1 = bitcast i32* %gep1 to i8*
  744  %cmp = icmp ult i8* %cast1, %gep2
  759  %cast1 = bitcast i32 addrspace(1)* %gep1 to i8 addrspace(1)*
  760  %cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
  [all …]
|
D | sub.ll |
  358  %cast1 = ptrtoint i8* %gep1 to i64
  360  %sub = sub i64 %cast1, %cast2
  371  %cast1 = ptrtoint i32* %gep1 to i64
  373  %sub = sub i64 %cast1, %cast2
  389  %cast1 = ptrtoint i32 addrspace(1)* %gep1 to i16
  391  %sub = sub i16 %cast1, %cast2
|