/external/llvm/test/CodeGen/ARM/ |
D | vfloatintrinsics.ll |
    131  %v4f32 = type <4 x float>
    133  define %v4f32 @test_v4f32.sqrt(%v4f32 %a) {
    135  %1 = call %v4f32 @llvm.sqrt.v4f32(%v4f32 %a)
    136  ret %v4f32 %1
    139  define %v4f32 @test_v4f32.powi(%v4f32 %a, i32 %b) {
    141  %1 = call %v4f32 @llvm.powi.v4f32(%v4f32 %a, i32 %b)
    142  ret %v4f32 %1
    145  define %v4f32 @test_v4f32.sin(%v4f32 %a) {
    147  %1 = call %v4f32 @llvm.sin.v4f32(%v4f32 %a)
    148  ret %v4f32 %1
    [all …]
|
D | 2011-11-29-128bitArithmetics.ll |
    20   %1 = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %0)
    25   declare <4 x float> @llvm.sqrt.v4f32(<4 x float>) nounwind readonly
    52   %1 = call <4 x float> @llvm.cos.v4f32(<4 x float> %0)
    57   declare <4 x float> @llvm.cos.v4f32(<4 x float>) nounwind readonly
    83   %1 = call <4 x float> @llvm.exp.v4f32(<4 x float> %0)
    88   declare <4 x float> @llvm.exp.v4f32(<4 x float>) nounwind readonly
    114  %1 = call <4 x float> @llvm.exp2.v4f32(<4 x float> %0)
    119  declare <4 x float> @llvm.exp2.v4f32(<4 x float>) nounwind readonly
    145  %1 = call <4 x float> @llvm.log10.v4f32(<4 x float> %0)
    150  declare <4 x float> @llvm.log10.v4f32(<4 x float>) nounwind readonly
    [all …]
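Note: the hits above all follow one pattern — the generic vector math intrinsics (llvm.sqrt, llvm.sin, llvm.cos, llvm.exp, llvm.log10, …) called on a <4 x float> value and declared with the matching .v4f32 suffix. A minimal self-contained sketch of that pattern, reusing the sqrt declaration exactly as it appears in the hits (the wrapper function name is invented for illustration):

      ; sketch: apply the generic sqrt intrinsic to a <4 x float> value
      define <4 x float> @example_sqrt_v4f32(<4 x float> %a) {
        %r = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %a)
        ret <4 x float> %r
      }
      declare <4 x float> @llvm.sqrt.v4f32(<4 x float>) nounwind readonly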
|
D | spill-q.ll |
    10  declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32) nounwind readonly
    21  …%0 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %vecptr, i32 1) nounwind ; <<4 x float>> [#use…
    23  …%1 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind ; <<4 x float>> [#uses=…
    25  …%2 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind ; <<4 x float>> [#uses=…
    26  %ld3 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    28  %ld4 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    30  %ld5 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    32  %ld6 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    34  %ld7 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    36  %ld8 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    [all …]
|
D | vcvt-v8.ll |
    6    %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtas.v4i32.v4f32(<4 x float> %tmp1)
    22   %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtns.v4i32.v4f32(<4 x float> %tmp1)
    38   %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtps.v4i32.v4f32(<4 x float> %tmp1)
    54   %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtms.v4i32.v4f32(<4 x float> %tmp1)
    70   %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtau.v4i32.v4f32(<4 x float> %tmp1)
    86   %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtnu.v4i32.v4f32(<4 x float> %tmp1)
    102  %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtpu.v4i32.v4f32(<4 x float> %tmp1)
    118  %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtmu.v4i32.v4f32(<4 x float> %tmp1)
    130  declare <4 x i32> @llvm.arm.neon.vcvtas.v4i32.v4f32(<4 x float>) nounwind readnone
    132  declare <4 x i32> @llvm.arm.neon.vcvtns.v4i32.v4f32(<4 x float>) nounwind readnone
    [all …]
|
D | vrec.ll |
    31   %tmp2 = call <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float> %tmp1)
    39   declare <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float>) nounwind readnone
    55   %tmp3 = call <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
    60   declare <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float>, <4 x float>) nounwind readnone
    90   %tmp2 = call <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float> %tmp1)
    98   declare <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float>) nounwind readnone
    114  %tmp3 = call <4 x float> @llvm.arm.neon.vrsqrts.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
    119  declare <4 x float> @llvm.arm.neon.vrsqrts.v4f32(<4 x float>, <4 x float>) nounwind readnone
|
D | 2012-01-23-PostRA-LICM.ll |
    32   %tmp16 = call <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float> %tmp11) nounwind
    33   …%tmp17 = call <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float> %tmp16, <4 x float> %tmp11) noun…
    35   …%tmp19 = call <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float> %tmp18, <4 x float> %tmp11) noun…
    38   …%tmp22 = call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %tmp21, <4 x float> undef) nounwi…
    51   …%tmp34 = call <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float> undef, <4 x float> %tmp28) nounw…
    73   …%tmp57 = call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %tmp56, <4 x float> %tmp55) nounw…
    97   declare <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float>, <4 x float>) nounwind readnone
    99   declare <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float>, <4 x float>) nounwind readnone
    101  declare <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float>) nounwind readnone
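Note: vrec.ll and 2012-01-23-PostRA-LICM.ll above use the ARM NEON reciprocal intrinsics in the usual estimate-then-refine sequence — vrecpe gives an initial reciprocal estimate and vrecps supplies a Newton-Raphson step that is multiplied back into the estimate. A hedged sketch of a single refinement step, assuming that standard usage (the wrapper name is invented; the intrinsic declarations match the hits above):

      ; sketch: one reciprocal refinement step on <4 x float>
      define <4 x float> @example_recip_v4f32(<4 x float> %d) {
        %est  = call <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float> %d)
        %step = call <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float> %est, <4 x float> %d)
        %ref  = fmul <4 x float> %est, %step
        ret <4 x float> %ref
      }
      declare <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float>) nounwind readnone
      declare <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float>, <4 x float>) nounwind readnone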
|
D | out-of-registers.ll |
    11  …%2 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld4.v4f32(i8…
    20  …tail call void @llvm.arm.neon.vst4.v4f32(i8* %1, <4 x float> %divp_vec, <4 x float> %div3p_vec, <4…
    30  declare void @llvm.arm.neon.vst4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32…
    31  declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld4.v4f32(i8*, i32) …
|
/external/llvm/test/CodeGen/AArch64/ |
D | arm64-vfloatintrinsics.ll |
    129  %v4f32 = type <4 x float>
    131  define %v4f32 @test_v4f32.sqrt(%v4f32 %a) {
    133  %1 = call %v4f32 @llvm.sqrt.v4f32(%v4f32 %a)
    134  ret %v4f32 %1
    137  define %v4f32 @test_v4f32.powi(%v4f32 %a, i32 %b) {
    139  %1 = call %v4f32 @llvm.powi.v4f32(%v4f32 %a, i32 %b)
    140  ret %v4f32 %1
    143  define %v4f32 @test_v4f32.sin(%v4f32 %a) {
    145  %1 = call %v4f32 @llvm.sin.v4f32(%v4f32 %a)
    146  ret %v4f32 %1
    [all …]
|
D | arm64-fminv.ll |
    13   %min = call float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float> %in)
    25   declare float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float>)
    38   %max = call float @llvm.aarch64.neon.fmaxv.f32.v4f32(<4 x float> %in)
    50   declare float @llvm.aarch64.neon.fmaxv.f32.v4f32(<4 x float>)
    63   %minnm = call float @llvm.aarch64.neon.fminnmv.f32.v4f32(<4 x float> %in)
    75   declare float @llvm.aarch64.neon.fminnmv.f32.v4f32(<4 x float>)
    88   %maxnm = call float @llvm.aarch64.neon.fmaxnmv.f32.v4f32(<4 x float> %in)
    100  declare float @llvm.aarch64.neon.fmaxnmv.f32.v4f32(<4 x float>)
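Note: arm64-fminv.ll above covers the AArch64 across-lanes reductions, which take a whole <4 x float> vector and return a single scalar float. A minimal sketch of one such call, with the fminv declaration copied from the hits (the wrapper name is invented for illustration):

      ; sketch: across-lanes minimum of a <4 x float> vector
      define float @example_fminv_v4f32(<4 x float> %v) {
        %m = call float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float> %v)
        ret float %m
      }
      declare float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float>)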
|
D | arm64-vcvt.ll |
    17   %tmp3 = call <4 x i32> @llvm.aarch64.neon.fcvtas.v4i32.v4f32(<4 x float> %A)
    31   declare <4 x i32> @llvm.aarch64.neon.fcvtas.v4i32.v4f32(<4 x float>) nounwind readnone
    48   %tmp3 = call <4 x i32> @llvm.aarch64.neon.fcvtau.v4i32.v4f32(<4 x float> %A)
    62   declare <4 x i32> @llvm.aarch64.neon.fcvtau.v4i32.v4f32(<4 x float>) nounwind readnone
    79   %tmp3 = call <4 x i32> @llvm.aarch64.neon.fcvtms.v4i32.v4f32(<4 x float> %A)
    93   declare <4 x i32> @llvm.aarch64.neon.fcvtms.v4i32.v4f32(<4 x float>) nounwind readnone
    110  %tmp3 = call <4 x i32> @llvm.aarch64.neon.fcvtmu.v4i32.v4f32(<4 x float> %A)
    124  declare <4 x i32> @llvm.aarch64.neon.fcvtmu.v4i32.v4f32(<4 x float>) nounwind readnone
    141  %tmp3 = call <4 x i32> @llvm.aarch64.neon.fcvtps.v4i32.v4f32(<4 x float> %A)
    155  declare <4 x i32> @llvm.aarch64.neon.fcvtps.v4i32.v4f32(<4 x float>) nounwind readnone
    [all …]
|
D | sincospow-vector-expansion.ll |
    69  %1 = call <4 x float> @llvm.cos.v4f32(<4 x float> %v1)
    79  %1 = call <4 x float> @llvm.sin.v4f32(<4 x float> %v1)
    89  %1 = call <4 x float> @llvm.pow.v4f32(<4 x float> %v1, <4 x float> %v2)
    93  declare <4 x float> @llvm.cos.v4f32(<4 x float>)
    94  declare <4 x float> @llvm.sin.v4f32(<4 x float>)
    95  declare <4 x float> @llvm.pow.v4f32(<4 x float>, <4 x float>)
|
/external/llvm/lib/Target/PowerPC/ |
D | PPCInstrQPX.td |
    78   return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v4f32;
    83   return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v4f32;
    88   return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v4f32;
    121  [(set v4f32:$FRT, (fadd v4f32:$FRA, v4f32:$FRB))]>;
    132  [(set v4f32:$FRT, (fsub v4f32:$FRA, v4f32:$FRB))]>;
    142  [(set v4f32:$FRT, (PPCfre v4f32:$FRB))]>;
    151  [(set v4f32:$FRT, (PPCfrsqrte v4f32:$FRB))]>;
    164  [(set v4f32:$FRT, (fmul v4f32:$FRA, v4f32:$FRC))]>;
    179  [(set v4f32:$FRT, (fma v4f32:$FRA, v4f32:$FRC, v4f32:$FRB))]>;
    190  [(set v4f32:$FRT, (fneg (fma v4f32:$FRA, v4f32:$FRC,
    [all …]
|
D | PPCInstrVSX.td |
    130  [(set v4f32:$XT, (fadd v4f32:$XA, v4f32:$XB))]>;
    140  [(set v4f32:$XT, (fmul v4f32:$XA, v4f32:$XB))]>;
    156  [(set v4f32:$XT, (fsub v4f32:$XA, v4f32:$XB))]>;
    244  [(set v4f32:$XT, (fma v4f32:$XA, v4f32:$XB, v4f32:$XTi))]>,
    276  [(set v4f32:$XT, (fma v4f32:$XA, v4f32:$XB, (fneg v4f32:$XTi)))]>,
    308  [(set v4f32:$XT, (fneg (fma v4f32:$XA, v4f32:$XB, v4f32:$XTi)))]>,
    340  [(set v4f32:$XT, (fneg (fma v4f32:$XA, v4f32:$XB, (fneg v4f32:$XTi))))]>,
    384  [(set v4f32:$XT, (fdiv v4f32:$XA, v4f32:$XB))]>;
    393  [(set v4f32:$XT, (fsqrt v4f32:$XB))]>;
    416  [(set v4f32:$XT, (PPCfre v4f32:$XB))]>;
    [all …]
|
D | PPCInstrAltivec.td |
    262  [(set v4f32:$vD, (IntID v4f32:$vB))]>;
    405  [(set v4f32:$vD,
    406  (fma v4f32:$vA, v4f32:$vC, v4f32:$vB))]>;
    411  [(set v4f32:$vD, (fneg (fma v4f32:$vA, v4f32:$vC,
    412  (fneg v4f32:$vB))))]>;
    434  [(set v4f32:$vD, (fadd v4f32:$vA, v4f32:$vB))]>;
    466  [(set v4f32:$vD,
    470  [(set v4f32:$vD,
    475  (int_ppc_altivec_vctsxs v4f32:$vB, imm:$UIMM))]>;
    479  (int_ppc_altivec_vctuxs v4f32:$vB, imm:$UIMM))]>;
    [all …]
|
/external/clang/test/CodeGen/ |
D | vectorcall.c |
    56  typedef float __attribute__((vector_size(16))) v4f32; typedef
    57  struct HVA2 { v4f32 x, y; };
    58  struct HVA4 { v4f32 w, x, y, z; };
    64  void __vectorcall hva2(struct HVA4 a, struct HVA4 b, v4f32 c) {} in hva2()
    68  void __vectorcall hva3(v4f32 a, v4f32 b, v4f32 c, v4f32 d, v4f32 e, struct HVA2 f) {} in hva3()
|
/external/llvm/test/CodeGen/Thumb2/ |
D | thumb2-spill-q.ll |
    10  declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32) nounwind readonly
    21  %0 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %vecptr, i32 1) nounwind
    23  …%1 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind ; <<4 x float>> [#uses=…
    25  …%2 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind ; <<4 x float>> [#uses=…
    26  %ld3 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    28  %ld4 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    30  %ld5 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    32  %ld6 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    34  %ld7 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    36  %ld8 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    [all …]
|
/external/llvm/test/Analysis/CostModel/X86/ |
D | intrinsic-cost.ll |
    15  %2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.load)
    25  … estimated cost of 46 for instruction: %2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.…
    28  …n estimated cost of 1 for instruction: %2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.…
    32  declare <4 x float> @llvm.ceil.v4f32(<4 x float>) nounwind readnone
    43  %2 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %wide.load)
    53  …mated cost of 46 for instruction: %2 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %wide.…
    56  …imated cost of 1 for instruction: %2 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %wide.…
    60  declare <4 x float> @llvm.nearbyint.v4f32(<4 x float>) nounwind readnone
    71  %2 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %wide.load, <4 x float> %b, <4 x float> %c)
    81  … estimated cost of 4 for instruction: %2 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %wide.…
    [all …]
|
/external/llvm/test/CodeGen/Mips/msa/ |
D | basic_operations_float.ll |
    4    @v4f32 = global <4 x float> <float 0.0, float 0.0, float 0.0, float 0.0>
    13   store volatile <4 x float> <float 0.0, float 0.0, float 0.0, float 0.0>, <4 x float>*@v4f32
    16   store volatile <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, <4 x float>*@v4f32
    20   store volatile <4 x float> <float 1.0, float 1.0, float 1.0, float 31.0>, <4 x float>*@v4f32
    24   …latile <4 x float> <float 65537.0, float 65537.0, float 65537.0, float 65537.0>, <4 x float>*@v4f32
    29   store volatile <4 x float> <float 1.0, float 2.0, float 1.0, float 2.0>, <4 x float>*@v4f32
    33   store volatile <4 x float> <float 3.0, float 4.0, float 5.0, float 6.0>, <4 x float>*@v4f32
    83   store volatile <4 x float> %5, <4 x float>*@v4f32
    108  %1 = load <4 x float>, <4 x float>* @v4f32
    126  %1 = load <4 x float>, <4 x float>* @v4f32
    [all …]
|
/external/llvm/test/CodeGen/PowerPC/ |
D | qpx-rounding-ops.ll |
    7   %call = tail call <4 x float> @llvm.floor.v4f32(<4 x float> %x) nounwind readnone
    17  declare <4 x float> @llvm.floor.v4f32(<4 x float>) nounwind readnone
    33  %call = tail call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %x) nounwind readnone
    43  declare <4 x float> @llvm.nearbyint.v4f32(<4 x float>) nounwind readnone
    59  %call = tail call <4 x float> @llvm.ceil.v4f32(<4 x float> %x) nounwind readnone
    69  declare <4 x float> @llvm.ceil.v4f32(<4 x float>) nounwind readnone
    85  %call = tail call <4 x float> @llvm.trunc.v4f32(<4 x float> %x) nounwind readnone
    95  declare <4 x float> @llvm.trunc.v4f32(<4 x float>) nounwind readnone
|
D | vec_rounding.ll |
    98   declare <4 x float> @llvm.floor.v4f32(<4 x float> %p)
    101  %t = call <4 x float> @llvm.floor.v4f32(<4 x float> %p)
    117  declare <4 x float> @llvm.ceil.v4f32(<4 x float> %p)
    120  %t = call <4 x float> @llvm.ceil.v4f32(<4 x float> %p)
    136  declare <4 x float> @llvm.trunc.v4f32(<4 x float> %p)
    139  %t = call <4 x float> @llvm.trunc.v4f32(<4 x float> %p)
    155  declare <4 x float> @llvm.nearbyint.v4f32(<4 x float> %p)
    158  %t = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %p)
|
/external/llvm/test/CodeGen/X86/ |
D | vec_floor.ll |
    17   %t = call <4 x float> @llvm.floor.v4f32(<4 x float> %p)
    20   declare <4 x float> @llvm.floor.v4f32(<4 x float> %p)
    53   %t = call <4 x float> @llvm.ceil.v4f32(<4 x float> %p)
    56   declare <4 x float> @llvm.ceil.v4f32(<4 x float> %p)
    89   %t = call <4 x float> @llvm.trunc.v4f32(<4 x float> %p)
    92   declare <4 x float> @llvm.trunc.v4f32(<4 x float> %p)
    125  %t = call <4 x float> @llvm.rint.v4f32(<4 x float> %p)
    128  declare <4 x float> @llvm.rint.v4f32(<4 x float> %p)
    161  %t = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %p)
    164  declare <4 x float> @llvm.nearbyint.v4f32(<4 x float> %p)
    [all …]
|
/external/llvm/test/CodeGen/Mips/ |
D | no-odd-spreg-msa.ll |
    4    @v4f32 = global <4 x float> zeroinitializer
    11   %0 = load volatile <4 x float>, <4 x float>* @v4f32
    24   store <4 x float> %1, <4 x float>* @v4f32
    30   ; ALL: lw $[[R0:[0-9]+]], %got(v4f32)(
    45   %0 = load volatile <4 x float>, <4 x float>* @v4f32
    58   store <4 x float> %1, <4 x float>* @v4f32
    64   ; ALL: lw $[[R0:[0-9]+]], %got(v4f32)(
    76   %0 = load volatile <4 x float>, <4 x float>* @v4f32
    93   ; ALL: lw $[[R0:[0-9]+]], %got(v4f32)(
    104  %0 = load volatile <4 x float>, <4 x float>* @v4f32
    [all …]
|
/external/llvm/lib/Target/ARM/ |
D | ARMTargetTransformInfo.cpp |
    59   { ISD::FP_EXTEND, MVT::v4f32, 4 } in getCastInstrCost()
    105  { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, in getCastInstrCost()
    106  { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, in getCastInstrCost()
    114  { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 }, in getCastInstrCost()
    115  { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 }, in getCastInstrCost()
    116  { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 }, in getCastInstrCost()
    117  { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 }, in getCastInstrCost()
    118  { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, in getCastInstrCost()
    119  { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, in getCastInstrCost()
    129  { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 }, in getCastInstrCost()
    [all …]
|
/external/llvm/lib/Target/AArch64/ |
D | AArch64TargetTransformInfo.cpp |
    193  { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, in getCastInstrCost()
    196  { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, in getCastInstrCost()
    208  { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 4 }, in getCastInstrCost()
    209  { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, in getCastInstrCost()
    210  { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 }, in getCastInstrCost()
    211  { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, in getCastInstrCost()
    224  { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 }, in getCastInstrCost()
    227  { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 }, in getCastInstrCost()
    239  { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 }, in getCastInstrCost()
    240  { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 2 }, in getCastInstrCost()
    [all …]
|
/external/llvm/lib/Target/X86/ |
D | X86TargetTransformInfo.cpp |
    398  {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1}, in getShuffleCost()
    421  {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2}, in getShuffleCost()
    438  {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2}, // shufps + pshufd in getShuffleCost()
    478  { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 }, in getCastInstrCost()
    479  { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 }, in getCastInstrCost()
    480  { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 }, in getCastInstrCost()
    481  { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 }, in getCastInstrCost()
    482  { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 }, in getCastInstrCost()
    483  { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 }, in getCastInstrCost()
    484  { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 }, in getCastInstrCost()
    [all …]
|