/external/llvm/test/CodeGen/ARM/

D | vfloatintrinsics.ll |
    131  %v4f32 = type <4 x float>
    133  define %v4f32 @test_v4f32.sqrt(%v4f32 %a) {
    135  %1 = call %v4f32 @llvm.sqrt.v4f32(%v4f32 %a)
    136  ret %v4f32 %1
    139  define %v4f32 @test_v4f32.powi(%v4f32 %a, i32 %b) {
    141  %1 = call %v4f32 @llvm.powi.v4f32(%v4f32 %a, i32 %b)
    142  ret %v4f32 %1
    145  define %v4f32 @test_v4f32.sin(%v4f32 %a) {
    147  %1 = call %v4f32 @llvm.sin.v4f32(%v4f32 %a)
    148  ret %v4f32 %1
    [all …]
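These tests pin down how the target-independent vector math intrinsics lower on ARM. For reference, a minimal module in the same spirit (the function name @sqrt_v4f32 is illustrative) is valid input to llc on its own:

    ; One sqrt over a <4 x float> value; a command such as
    ; llc -mtriple=armv7-- -mattr=+neon can consume this module directly.
    define <4 x float> @sqrt_v4f32(<4 x float> %a) {
      %r = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %a)
      ret <4 x float> %r
    }

    declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)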
D | 2011-11-29-128bitArithmetics.ll |
    20   %1 = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %0)
    25   declare <4 x float> @llvm.sqrt.v4f32(<4 x float>) nounwind readonly
    52   %1 = call <4 x float> @llvm.cos.v4f32(<4 x float> %0)
    57   declare <4 x float> @llvm.cos.v4f32(<4 x float>) nounwind readonly
    83   %1 = call <4 x float> @llvm.exp.v4f32(<4 x float> %0)
    88   declare <4 x float> @llvm.exp.v4f32(<4 x float>) nounwind readonly
    114  %1 = call <4 x float> @llvm.exp2.v4f32(<4 x float> %0)
    119  declare <4 x float> @llvm.exp2.v4f32(<4 x float>) nounwind readonly
    145  %1 = call <4 x float> @llvm.log10.v4f32(<4 x float> %0)
    150  declare <4 x float> @llvm.log10.v4f32(<4 x float>) nounwind readonly
    [all …]
D | vcvt-v8.ll |
    6    %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtas.v4i32.v4f32(<4 x float> %tmp1)
    22   %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtns.v4i32.v4f32(<4 x float> %tmp1)
    38   %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtps.v4i32.v4f32(<4 x float> %tmp1)
    54   %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtms.v4i32.v4f32(<4 x float> %tmp1)
    70   %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtau.v4i32.v4f32(<4 x float> %tmp1)
    86   %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtnu.v4i32.v4f32(<4 x float> %tmp1)
    102  %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtpu.v4i32.v4f32(<4 x float> %tmp1)
    118  %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtmu.v4i32.v4f32(<4 x float> %tmp1)
    130  declare <4 x i32> @llvm.arm.neon.vcvtas.v4i32.v4f32(<4 x float>) nounwind readnone
    132  declare <4 x i32> @llvm.arm.neon.vcvtns.v4i32.v4f32(<4 x float>) nounwind readnone
    [all …]
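The a/n/p/m letter in each name selects the ARMv8 rounding mode (ties away from zero, ties to even, toward +inf, toward -inf), and the s/u suffix selects signed versus unsigned results. A sketch of one complete caller (the function name @cvt_away is illustrative):

    ; vcvtas: float -> signed i32, rounding to nearest, ties away from zero.
    define <4 x i32> @cvt_away(<4 x float> %in) {
      %r = call <4 x i32> @llvm.arm.neon.vcvtas.v4i32.v4f32(<4 x float> %in)
      ret <4 x i32> %r
    }

    declare <4 x i32> @llvm.arm.neon.vcvtas.v4i32.v4f32(<4 x float>)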
D | spill-q.ll |
    10   declare <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8*, i32) nounwind readonly
    21   …%0 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* %vecptr, i32 1) nounwind ; <<4 x float>> …
    23   …%1 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind ; <<4 x float>> [#…
    25   …%2 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind ; <<4 x float>> [#…
    26   %ld3 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
    28   %ld4 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
    30   %ld5 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
    32   %ld6 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
    34   %ld7 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
    36   %ld8 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
    [all …]
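The spill tests issue more live 128-bit loads than there are Q registers, forcing the register allocator to spill and reload quad vectors. Every load goes through the same intrinsic; one call in isolation, as a sketch (the function name @one_load is illustrative):

    ; A single aligned NEON load via the intrinsic the spill test calls
    ; repeatedly; the i32 operand is the alignment (1 byte here).
    define <4 x float> @one_load(i8* %p) {
      %v = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* %p, i32 1)
      ret <4 x float> %v
    }

    declare <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8*, i32) nounwind readonly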
/external/llvm/test/CodeGen/AArch64/

D | arm64-vfloatintrinsics.ll |
    129  %v4f32 = type <4 x float>
    131  define %v4f32 @test_v4f32.sqrt(%v4f32 %a) {
    133  %1 = call %v4f32 @llvm.sqrt.v4f32(%v4f32 %a)
    134  ret %v4f32 %1
    137  define %v4f32 @test_v4f32.powi(%v4f32 %a, i32 %b) {
    139  %1 = call %v4f32 @llvm.powi.v4f32(%v4f32 %a, i32 %b)
    140  ret %v4f32 %1
    143  define %v4f32 @test_v4f32.sin(%v4f32 %a) {
    145  %1 = call %v4f32 @llvm.sin.v4f32(%v4f32 %a)
    146  ret %v4f32 %1
    [all …]
D | arm64-fminv.ll |
    13   %min = call float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float> %in)
    25   declare float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float>)
    38   %max = call float @llvm.aarch64.neon.fmaxv.f32.v4f32(<4 x float> %in)
    50   declare float @llvm.aarch64.neon.fmaxv.f32.v4f32(<4 x float>)
    63   %minnm = call float @llvm.aarch64.neon.fminnmv.f32.v4f32(<4 x float> %in)
    75   declare float @llvm.aarch64.neon.fminnmv.f32.v4f32(<4 x float>)
    88   %maxnm = call float @llvm.aarch64.neon.fmaxnmv.f32.v4f32(<4 x float> %in)
    100  declare float @llvm.aarch64.neon.fmaxnmv.f32.v4f32(<4 x float>)
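These are horizontal (across-lane) reductions: each collapses one <4 x float> vector to a scalar, with the nm variants using the IEEE minNum/maxNum treatment of NaN inputs. One complete caller as a sketch (the name @hmin is illustrative):

    ; Horizontal minimum of four lanes; on AArch64 this can lower to a
    ; single fminv instruction.
    define float @hmin(<4 x float> %in) {
      %m = call float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float> %in)
      ret float %m
    }

    declare float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float>)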
D | arm64-vcvt.ll |
    17   %tmp3 = call <4 x i32> @llvm.aarch64.neon.fcvtas.v4i32.v4f32(<4 x float> %A)
    31   declare <4 x i32> @llvm.aarch64.neon.fcvtas.v4i32.v4f32(<4 x float>) nounwind readnone
    48   %tmp3 = call <4 x i32> @llvm.aarch64.neon.fcvtau.v4i32.v4f32(<4 x float> %A)
    62   declare <4 x i32> @llvm.aarch64.neon.fcvtau.v4i32.v4f32(<4 x float>) nounwind readnone
    79   %tmp3 = call <4 x i32> @llvm.aarch64.neon.fcvtms.v4i32.v4f32(<4 x float> %A)
    93   declare <4 x i32> @llvm.aarch64.neon.fcvtms.v4i32.v4f32(<4 x float>) nounwind readnone
    110  %tmp3 = call <4 x i32> @llvm.aarch64.neon.fcvtmu.v4i32.v4f32(<4 x float> %A)
    124  declare <4 x i32> @llvm.aarch64.neon.fcvtmu.v4i32.v4f32(<4 x float>) nounwind readnone
    141  %tmp3 = call <4 x i32> @llvm.aarch64.neon.fcvtps.v4i32.v4f32(<4 x float> %A)
    155  declare <4 x i32> @llvm.aarch64.neon.fcvtps.v4i32.v4f32(<4 x float>) nounwind readnone
    [all …]
/external/swiftshader/third_party/LLVM/test/CodeGen/CellSPU/useful-harnesses/

D | vecoperations.c |
    6    typedef float v4f32 __attribute__((ext_vector_type(4)));
    54   void print_v4f32(const char *str, v4f32 v) {
    101  v4f32 v4f32_shuffle_1(v4f32 a) {
    102  v4f32 c2 = a.yzwx;
    106  v4f32 v4f32_shuffle_2(v4f32 a) {
    107  v4f32 c2 = a.zwxy;
    111  v4f32 v4f32_shuffle_3(v4f32 a) {
    112  v4f32 c2 = a.wxyz;
    116  v4f32 v4f32_shuffle_4(v4f32 a) {
    117  v4f32 c2 = a.xyzw;
    [all …]
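The .yzwx style swizzles on an ext_vector_type come out of clang as single shufflevector instructions. A sketch of the IR equivalent of v4f32_shuffle_1 (the name @shuffle_yzwx is illustrative):

    ; Rotate lanes left by one: yzwx corresponds to mask indices 1,2,3,0.
    define <4 x float> @shuffle_yzwx(<4 x float> %a) {
      %r = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0>
      ret <4 x float> %r
    }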
/external/llvm/lib/Target/PowerPC/

D | PPCInstrQPX.td |
    78   return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v4f32;
    83   return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v4f32;
    88   return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v4f32;
    121  [(set v4f32:$FRT, (fadd v4f32:$FRA, v4f32:$FRB))]>;
    132  [(set v4f32:$FRT, (fsub v4f32:$FRA, v4f32:$FRB))]>;
    142  [(set v4f32:$FRT, (PPCfre v4f32:$FRB))]>;
    151  [(set v4f32:$FRT, (PPCfrsqrte v4f32:$FRB))]>;
    164  [(set v4f32:$FRT, (fmul v4f32:$FRA, v4f32:$FRC))]>;
    179  [(set v4f32:$FRT, (fma v4f32:$FRA, v4f32:$FRC, v4f32:$FRB))]>;
    190  [(set v4f32:$FRT, (fneg (fma v4f32:$FRA, v4f32:$FRC,
    [all …]
D | PPCInstrVSX.td |
    161  [(set v4f32:$XT, (fadd v4f32:$XA, v4f32:$XB))]>;
    171  [(set v4f32:$XT, (fmul v4f32:$XA, v4f32:$XB))]>;
    187  [(set v4f32:$XT, (fsub v4f32:$XA, v4f32:$XB))]>;
    275  [(set v4f32:$XT, (fma v4f32:$XA, v4f32:$XB, v4f32:$XTi))]>,
    307  [(set v4f32:$XT, (fma v4f32:$XA, v4f32:$XB, (fneg v4f32:$XTi)))]>,
    339  [(set v4f32:$XT, (fneg (fma v4f32:$XA, v4f32:$XB, v4f32:$XTi)))]>,
    371  [(set v4f32:$XT, (fneg (fma v4f32:$XA, v4f32:$XB, (fneg v4f32:$XTi))))]>,
    415  [(set v4f32:$XT, (fdiv v4f32:$XA, v4f32:$XB))]>;
    424  [(set v4f32:$XT, (fsqrt v4f32:$XB))]>;
    447  [(set v4f32:$XT, (PPCfre v4f32:$XB))]>;
    [all …]
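Each bracketed TableGen pattern names the generic SelectionDAG node it matches and the instruction operands it binds. As a sketch, IR like the following is what the fadd pattern at line 161 selects into a single VSX add (the name @vadd is illustrative):

    ; A plain IR fadd over <4 x float>; on a VSX-enabled target the pattern
    ; [(set v4f32:$XT, (fadd v4f32:$XA, v4f32:$XB))] matches this node.
    define <4 x float> @vadd(<4 x float> %a, <4 x float> %b) {
      %r = fadd <4 x float> %a, %b
      ret <4 x float> %r
    }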
/external/clang/test/Sema/

D | arm_vfma.c |
    6    void func(float32x2_t v2f32, float32x4_t v4f32) {
    8    vfmaq_f32(v4f32, v4f32, v4f32);
    11   vfmsq_f32(v4f32, v4f32, v4f32);
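vfmaq_f32(acc, x, y) computes acc + x*y in one fused step; in IR this becomes the llvm.fma intrinsic with the accumulator moved to the last operand. A sketch (the name @fma4 is illustrative):

    ; Fused multiply-add: result = %b * %c + %acc, with no intermediate rounding.
    define <4 x float> @fma4(<4 x float> %acc, <4 x float> %b, <4 x float> %c) {
      %r = call <4 x float> @llvm.fma.v4f32(<4 x float> %b, <4 x float> %c, <4 x float> %acc)
      ret <4 x float> %r
    }

    declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)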
/external/clang/test/CodeGen/

D | vectorcall.c |
    56   typedef float __attribute__((vector_size(16))) v4f32;
    57   struct HVA2 { v4f32 x, y; };
    58   struct HVA4 { v4f32 w, x, y, z; };
    64   void __vectorcall hva2(struct HVA4 a, struct HVA4 b, v4f32 c) {}
    68   void __vectorcall hva3(v4f32 a, v4f32 b, v4f32 c, v4f32 d, v4f32 e, struct HVA2 f) {}
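These exercise homogeneous vector aggregate (HVA) passing under __vectorcall, where structs made purely of vector members travel in vector registers until those run out. At the IR level the convention is visible as a calling-convention marker on the function; a sketch (the name @pass_through is illustrative):

    ; __vectorcall surfaces in IR as the x86_vectorcallcc convention.
    define x86_vectorcallcc <4 x float> @pass_through(<4 x float> %a) {
      ret <4 x float> %a
    }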
/external/swiftshader/third_party/LLVM/lib/Target/CellSPU/

D | SPUMathInstr.td |
    60   // f32, v4f32 divide instruction sequence:
    80   def Interpv4f32: CodeFrag<(FIv4f32 (v4f32 VECREG:$rB), (FRESTv4f32 (v4f32 VECREG:$rB)))>;
    82   def DivEstv4f32: CodeFrag<(FMv4f32 (v4f32 VECREG:$rA), Interpv4f32.Fragment)>;
    85   (v4f32 VECREG:$rB),
    86   (v4f32 VECREG:$rA)),
    92   def : Pat<(fdiv (v4f32 VECREG:$rA), (v4f32 VECREG:$rB)),
    95   (CGTIv4f32 (FNMSv4f32 (v4f32 VECREG:$rB),
    97   (v4f32 VECREG:$rA)), -1))>;
D | CellSDKIntrinsics.td |
    345  [(set (v4f32 VECREG:$rT), (int_spu_si_fa (v4f32 VECREG:$rA),
    346  (v4f32 VECREG:$rB)))]>;
    351  [(set (v4f32 VECREG:$rT), (int_spu_si_fs (v4f32 VECREG:$rA),
    352  (v4f32 VECREG:$rB)))]>;
    357  [(set (v4f32 VECREG:$rT), (int_spu_si_fm (v4f32 VECREG:$rA),
    358  (v4f32 VECREG:$rB)))]>;
    363  [(set (v4f32 VECREG:$rT), (int_spu_si_fceq (v4f32 VECREG:$rA),
    364  (v4f32 VECREG:$rB)))]>;
    369  [(set (v4f32 VECREG:$rT), (int_spu_si_fcgt (v4f32 VECREG:$rA),
    370  (v4f32 VECREG:$rB)))]>;
    [all …]
/external/llvm/test/CodeGen/AMDGPU/

D | llvm.amdgcn.buffer.store.ll |
    10   call void @llvm.amdgcn.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 0, i32 0, i1 0, i1 0)
    11   call void @llvm.amdgcn.buffer.store.v4f32(<4 x float> %2, <4 x i32> %0, i32 0, i32 0, i1 1, i1 0)
    12   call void @llvm.amdgcn.buffer.store.v4f32(<4 x float> %3, <4 x i32> %0, i32 0, i32 0, i1 0, i1 1)
    20   call void @llvm.amdgcn.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 0, i32 42, i1 0, i1 0)
    28   call void @llvm.amdgcn.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 %2, i32 0, i1 0, i1 0)
    36   call void @llvm.amdgcn.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 0, i32 %2, i1 0, i1 0)
    44   …call void @llvm.amdgcn.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 %2, i32 %3, i1 0, i1 0)
    53   …call void @llvm.amdgcn.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 %3, i32 %2, i1 0, i1 0)
    67   call void @llvm.amdgcn.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 %2, i32 0, i1 0, i1 0)
    68   %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %0, i32 %3, i32 0, i1 0, i1 0)
    [all …]
D | llvm.amdgcn.buffer.store.format.ll |
    10   …call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %1, <4 x i32> %0, i32 0, i32 0, i1 0,…
    11   …call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %2, <4 x i32> %0, i32 0, i32 0, i1 1,…
    12   …call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %3, <4 x i32> %0, i32 0, i32 0, i1 0,…
    20   …call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %1, <4 x i32> %0, i32 0, i32 42, i1 0…
    28   …call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %1, <4 x i32> %0, i32 %2, i32 0, i1 0…
    36   …call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %1, <4 x i32> %0, i32 0, i32 %2, i1 0…
    44   …call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %1, <4 x i32> %0, i32 %2, i32 %3, i1 …
    53   …call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %1, <4 x i32> %0, i32 %3, i32 %2, i1 …
    67   …call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %1, <4 x i32> %0, i32 %2, i32 0, i1 0…
    68   …%data = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %0, i32 %3, i32 0, i1 0, …
    [all …]
D | llvm.amdgcn.buffer.load.format.ll |
    11   …%data = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %0, i32 0, i32 0, i1 0, i…
    12   …%data_glc = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %0, i32 0, i32 0, i1 …
    13   …%data_slc = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %0, i32 0, i32 0, i1 …
    25   …%data = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %0, i32 0, i32 42, i1 0, …
    42   …%d.0 = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %0, i32 0, i32 4156, i1 0,…
    43   …%d.1 = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %0, i32 0, i32 36860, i1 0…
    44   …%d.2 = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %0, i32 0, i32 36864, i1 0…
    58   …%d.0 = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %0, i32 0, i32 4160, i1 0,…
    59   …%d.1 = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %0, i32 0, i32 4176, i1 0,…
    69   …%data = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %0, i32 %1, i32 0, i1 0, …
    [all …]
D | llvm.amdgcn.buffer.load.ll |
    11   %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %0, i32 0, i32 0, i1 0, i1 0)
    12   …%data_glc = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %0, i32 0, i32 0, i1 1, i1 0)
    13   …%data_slc = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %0, i32 0, i32 0, i1 0, i1 1)
    25   %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %0, i32 0, i32 42, i1 0, i1 0)
    36   %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %0, i32 0, i32 8192, i1 0, i1 0)
    45   %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %0, i32 %1, i32 0, i1 0, i1 0)
    54   %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %0, i32 0, i32 %1, i1 0, i1 0)
    64   %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %0, i32 0, i32 %ofs, i1 0, i1 0)
    73   %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %0, i32 %1, i32 %2, i1 0, i1 0)
    83   %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %0, i32 %2, i32 %1, i1 0, i1 0)
    [all …]
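Across these four files the operand order is the same: the <4 x i32> buffer resource descriptor, a vector index, a byte offset, then two i1 cache-policy flags (glc for coherent, slc for streaming). A minimal sketch of one load with glc set (the name @load_glc is illustrative):

    ; Signature as used in the tests above:
    ; (rsrc, vindex, voffset, i1 glc, i1 slc).
    define <4 x float> @load_glc(<4 x i32> %rsrc) {
      %d = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %rsrc, i32 0, i32 0, i1 1, i1 0)
      ret <4 x float> %d
    }

    declare <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32>, i32, i32, i1, i1)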
/external/swiftshader/third_party/LLVM/test/CodeGen/ARM/

D | spill-q.ll |
    10   declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32) nounwind readonly
    21   …%0 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %vecptr, i32 1) nounwind ; <<4 x float>> [#use…
    23   …%1 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind ; <<4 x float>> [#uses=…
    25   …%2 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind ; <<4 x float>> [#uses=…
    26   %ld3 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    28   %ld4 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    30   %ld5 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    32   %ld6 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    34   %ld7 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    36   %ld8 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    [all …]
/external/swiftshader/third_party/LLVM/test/CodeGen/Thumb2/

D | thumb2-spill-q.ll |
    10   declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32) nounwind readonly
    21   %0 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %vecptr, i32 1) nounwind
    23   …%1 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind ; <<4 x float>> [#uses=…
    25   …%2 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind ; <<4 x float>> [#uses=…
    26   %ld3 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    28   %ld4 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    30   %ld5 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    32   %ld6 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    34   %ld7 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    36   %ld8 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    [all …]
/external/llvm/test/CodeGen/Thumb2/

D | thumb2-spill-q.ll |
    10   declare <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8*, i32) nounwind readonly
    21   %0 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* %vecptr, i32 1) nounwind
    23   …%1 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind ; <<4 x float>> [#…
    25   …%2 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind ; <<4 x float>> [#…
    26   %ld3 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
    28   %ld4 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
    30   %ld5 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
    32   %ld6 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
    34   %ld7 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
    36   %ld8 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
    [all …]
/external/llvm/test/Analysis/CostModel/X86/

D | intrinsic-cost.ll |
    15   %2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.load)
    25   … estimated cost of 46 for instruction: %2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.…
    28   …n estimated cost of 1 for instruction: %2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.…
    32   declare <4 x float> @llvm.ceil.v4f32(<4 x float>) nounwind readnone
    43   %2 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %wide.load)
    53   …mated cost of 46 for instruction: %2 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %wide.…
    56   …imated cost of 1 for instruction: %2 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %wide.…
    60   declare <4 x float> @llvm.nearbyint.v4f32(<4 x float>) nounwind readnone
    71   %2 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %wide.load, <4 x float> %b, <4 x float> %c)
    81   … estimated cost of 4 for instruction: %2 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %wide.…
    [all …]
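The two CHECK lines for the same call reflect two subtargets: without a native vector rounding instruction a <4 x float> ceil is scalarized into library calls (estimated cost 46 here), while with SSE4.1 it typically maps to one roundps (cost 1). A sketch of the kind of function being costed (the name @ceil4 is illustrative):

    ; The cost model query target: one ceil over a <4 x float> value.
    define <4 x float> @ceil4(<4 x float> %v) {
      %r = call <4 x float> @llvm.ceil.v4f32(<4 x float> %v)
      ret <4 x float> %r
    }

    declare <4 x float> @llvm.ceil.v4f32(<4 x float>) nounwind readnone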
/external/llvm/test/CodeGen/Mips/msa/

D | basic_operations_float.ll |
    8    @v4f32 = global <4 x float> <float 0.0, float 0.0, float 0.0, float 0.0>
    17   store volatile <4 x float> <float 0.0, float 0.0, float 0.0, float 0.0>, <4 x float>*@v4f32
    20   store volatile <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, <4 x float>*@v4f32
    24   store volatile <4 x float> <float 1.0, float 1.0, float 1.0, float 31.0>, <4 x float>*@v4f32
    30   store volatile <4 x float> <float 65537.0, float 65537.0, float 65537.0, float 65537.0>, <4 x float>*@v4f32
    35   store volatile <4 x float> <float 1.0, float 2.0, float 1.0, float 2.0>, <4 x float>*@v4f32
    41   store volatile <4 x float> <float 3.0, float 4.0, float 5.0, float 6.0>, <4 x float>*@v4f32
    103  store volatile <4 x float> %5, <4 x float>*@v4f32
    130  %1 = load <4 x float>, <4 x float>* @v4f32
    143  %1 = load <4 x float>, <4 x float>* @v4f32
    [all …]
/external/llvm/test/CodeGen/PowerPC/

D | qpx-rounding-ops.ll |
    7    %call = tail call <4 x float> @llvm.floor.v4f32(<4 x float> %x) nounwind readnone
    17   declare <4 x float> @llvm.floor.v4f32(<4 x float>) nounwind readnone
    33   %call = tail call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %x) nounwind readnone
    43   declare <4 x float> @llvm.nearbyint.v4f32(<4 x float>) nounwind readnone
    59   %call = tail call <4 x float> @llvm.ceil.v4f32(<4 x float> %x) nounwind readnone
    69   declare <4 x float> @llvm.ceil.v4f32(<4 x float>) nounwind readnone
    85   %call = tail call <4 x float> @llvm.trunc.v4f32(<4 x float> %x) nounwind readnone
    95   declare <4 x float> @llvm.trunc.v4f32(<4 x float>) nounwind readnone
/external/llvm/test/CodeGen/Mips/

D | no-odd-spreg-msa.ll |
    8    @v4f32 = global <4 x float> zeroinitializer
    15   %0 = load volatile <4 x float>, <4 x float>* @v4f32
    28   store <4 x float> %1, <4 x float>* @v4f32
    34   ; ALL: lw $[[R0:[0-9]+]], %got(v4f32)(
    49   %0 = load volatile <4 x float>, <4 x float>* @v4f32
    62   store <4 x float> %1, <4 x float>* @v4f32
    68   ; ALL: lw $[[R0:[0-9]+]], %got(v4f32)(
    80   %0 = load volatile <4 x float>, <4 x float>* @v4f32
    97   ; ALL: lw $[[R0:[0-9]+]], %got(v4f32)(
    108  %0 = load volatile <4 x float>, <4 x float>* @v4f32
    [all …]