Lines Matching refs:f64

declare double @llvm.sqrt.f64(double %Val)
; HARD: vsqrt.f64 d0, d0
%1 = call double @llvm.sqrt.f64(double %a)
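; The matches above and below are fragments of FileCheck-style codegen tests:
; the enclosing define/ret lines contain no "f64" and were dropped by the
; search. A minimal sketch of the full sqrt test, assuming a hypothetical
; function name sqrt_d and an assumed CHECK-LABEL line (neither appears in
; the matched output):

declare double @llvm.sqrt.f64(double %Val)

define double @sqrt_d(double %a) {
; CHECK-LABEL: sqrt_d:
; HARD: vsqrt.f64 d0, d0
  %1 = call double @llvm.sqrt.f64(double %a)
  ret double %1
}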
declare double @llvm.powi.f64(double %Val, i32 %power)
%1 = call double @llvm.powi.f64(double %a, i32 %b)
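; llvm.powi.f64 takes an i32 exponent rather than a double. There is no VFP
; instruction for it, so the expected lowering is a runtime call (typically
; __powidf2); that is an assumption here, since no check line for it appears
; in the matched output. Sketch with a hypothetical function name:

declare double @llvm.powi.f64(double %Val, i32 %power)

define double @powi_d(double %a, i32 %b) {
; CHECK-LABEL: powi_d:
  %1 = call double @llvm.powi.f64(double %a, i32 %b)
  ret double %1
}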
declare double @llvm.sin.f64(double %Val)
%1 = call double @llvm.sin.f64(double %a)

declare double @llvm.cos.f64(double %Val)
%1 = call double @llvm.cos.f64(double %a)

declare double @llvm.pow.f64(double %Val, double %power)
%1 = call double @llvm.pow.f64(double %a, double %b)

declare double @llvm.exp.f64(double %Val)
%1 = call double @llvm.exp.f64(double %a)

declare double @llvm.exp2.f64(double %Val)
%1 = call double @llvm.exp2.f64(double %a)

declare double @llvm.log.f64(double %Val)
%1 = call double @llvm.log.f64(double %a)

declare double @llvm.log10.f64(double %Val)
%1 = call double @llvm.log10.f64(double %a)

declare double @llvm.log2.f64(double %Val)
%1 = call double @llvm.log2.f64(double %a)
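; None of the sin/cos/pow/exp/exp2/log/log10/log2 intrinsics map to a single
; VFP or NEON instruction, which is presumably why no instruction check lines
; accompany them above: each is expanded into a call to the corresponding libm
; routine. Representative sketch for sin, with a hypothetical function name and
; an assumed library-call expectation:

declare double @llvm.sin.f64(double %Val)

define double @sin_d(double %a) {
; CHECK-LABEL: sin_d:
; Expected lowering is a library call, e.g. "bl sin" (assumption).
  %1 = call double @llvm.sin.f64(double %a)
  ret double %1
}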
declare double @llvm.fma.f64(double %a, double %b, double %c)
; HARD: vfma.f64
%1 = call double @llvm.fma.f64(double %a, double %b, double %c)
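; llvm.fma.f64 requires a genuinely fused multiply-add with a single rounding,
; matching C's fma(); the HARD prefix therefore expects the vfma.f64
; instruction. Sketch with a hypothetical function name:

declare double @llvm.fma.f64(double %a, double %b, double %c)

define double @fma_d(double %a, double %b, double %c) {
; CHECK-LABEL: fma_d:
; HARD: vfma.f64
  %1 = call double @llvm.fma.f64(double %a, double %b, double %c)
  ret double %1
}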
declare double @llvm.fabs.f64(double %Val)
; DP: vabs.f64 d0, d0
%1 = call double @llvm.fabs.f64(double %a)

declare double @llvm.copysign.f64(double %Mag, double %Sgn)
%1 = call double @llvm.copysign.f64(double %a, double %b)
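; fabs maps directly onto vabs.f64, but there is no single VFP instruction for
; copysign: it is normally lowered to bit operations that insert the sign bit
; of the second operand into the first, which would explain the absence of an
; instruction check next to the copysign lines above (an assumption). Sketch
; with a hypothetical function name:

declare double @llvm.copysign.f64(double %Mag, double %Sgn)

define double @copysign_d(double %a, double %b) {
; CHECK-LABEL: copysign_d:
; Expected lowering combines the magnitude of %a with the sign bit of %b via
; integer bit inserts rather than one dedicated instruction (assumption).
  %1 = call double @llvm.copysign.f64(double %a, double %b)
  ret double %1
}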
declare double @llvm.floor.f64(double %Val)
; FP-ARMv8: vrintm.f64
%1 = call double @llvm.floor.f64(double %a)

declare double @llvm.ceil.f64(double %Val)
; FP-ARMv8: vrintp.f64
%1 = call double @llvm.ceil.f64(double %a)

declare double @llvm.trunc.f64(double %Val)
; FP-ARMv8: vrintz.f64
%1 = call double @llvm.trunc.f64(double %a)

declare double @llvm.rint.f64(double %Val)
; FP-ARMv8: vrintx.f64
%1 = call double @llvm.rint.f64(double %a)

declare double @llvm.nearbyint.f64(double %Val)
; FP-ARMv8: vrintr.f64
%1 = call double @llvm.nearbyint.f64(double %a)

declare double @llvm.round.f64(double %Val)
; FP-ARMv8: vrinta.f64
%1 = call double @llvm.round.f64(double %a)
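; The six rounding intrinsics above map one-for-one onto the ARMv8-A VRINT
; family, which is why every check uses the FP-ARMv8 prefix: vrintm rounds
; toward minus infinity (floor), vrintp toward plus infinity (ceil), vrintz
; toward zero (trunc), vrintx uses the current rounding mode and raises the
; inexact exception (rint), vrintr uses the current rounding mode without
; raising inexact (nearbyint), and vrinta rounds to nearest with ties away
; from zero (round). On targets without FP-ARMv8 these presumably fall back
; to libm calls. Sketch for floor, with a hypothetical function name:

declare double @llvm.floor.f64(double %Val)

define double @floor_d(double %a) {
; CHECK-LABEL: floor_d:
; FP-ARMv8: vrintm.f64
  %1 = call double @llvm.floor.f64(double %a)
  ret double %1
}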
declare double @llvm.fmuladd.f64(double %a, double %b, double %c)
; VFP4: vmul.f64
; VFP4: vadd.f64
; FP-ARMv8: vmla.f64
%1 = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
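; Unlike llvm.fma, llvm.fmuladd lets the backend choose between fusing the
; operation and splitting it into a separate multiply and add; the matched
; checks show both outcomes (vmul.f64 plus vadd.f64 under the VFP4 prefix,
; a single vmla.f64 under FP-ARMv8). Sketch with a hypothetical function name:

declare double @llvm.fmuladd.f64(double %a, double %b, double %c)

define double @fmuladd_d(double %a, double %b, double %c) {
; CHECK-LABEL: fmuladd_d:
; VFP4: vmul.f64
; VFP4: vadd.f64
; FP-ARMv8: vmla.f64
  %1 = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
  ret double %1
}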
declare i16 @llvm.convert.to.fp16.f64(double %a)
; FP-ARMv8: vcvt{{[bt]}}.f16.f64
%1 = call i16 @llvm.convert.to.fp16.f64(double %a)

declare double @llvm.convert.from.fp16.f64(i16 %a)
; VFPv4: vcvt.f64.f32
; FP-ARMv8: vcvt{{[bt]}}.f64.f16
%1 = call double @llvm.convert.from.fp16.f64(i16 %a)
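; llvm.convert.to.fp16/.from.fp16 convert between a double and an i16 holding
; a half-precision bit pattern. FP-ARMv8 can convert f64 and f16 directly
; (vcvtb/vcvtt with a .f64.f16 or .f16.f64 suffix), whereas VFPv4's
; half-precision conversions only go via single precision, which is why the
; VFPv4 path above shows an extra vcvt.f64.f32 step. Sketch for the from-fp16
; direction, with a hypothetical function name:

declare double @llvm.convert.from.fp16.f64(i16 %a)

define double @from_fp16(i16 %a) {
; CHECK-LABEL: from_fp16:
; VFPv4: vcvt.f64.f32
; FP-ARMv8: vcvt{{[bt]}}.f64.f16
  %1 = call double @llvm.convert.from.fp16.f64(i16 %a)
  ret double %1
}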