Lines Matching full:experimental
Matches in foo():

// CHECK: call double @llvm.experimental.constrained.frem.f64(double %{{.*}}, double %{{.*}}, metad…
// CHECK: call float @llvm.experimental.constrained.frem.f32(float %{{.*}}, float %{{.*}}, metadata…
// CHECK: call x86_fp80 @llvm.experimental.constrained.frem.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}},…
// CHECK: call fp128 @llvm.experimental.constrained.frem.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadat…

// CHECK: call double @llvm.experimental.constrained.pow.f64(double %{{.*}}, double %{{.*}}, metada…
// CHECK: call float @llvm.experimental.constrained.pow.f32(float %{{.*}}, float %{{.*}}, metadata …
// CHECK: call x86_fp80 @llvm.experimental.constrained.pow.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, …
// CHECK: call fp128 @llvm.experimental.constrained.pow.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata…

// CHECK: call double @llvm.experimental.constrained.powi.f64(double %{{.*}}, metadata !"round.tone…
// CHECK: call float @llvm.experimental.constrained.powi.f32(float %{{.*}}, metadata !"round.tonear…
// CHECK: call x86_fp80 @llvm.experimental.constrained.powi.f80(x86_fp80 %{{.*}}, metadata !"round.…

// CHECK: call double @llvm.experimental.constrained.ceil.f64(double %{{.*}}, metadata !"fpexcept.s…
// CHECK: call float @llvm.experimental.constrained.ceil.f32(float %{{.*}}, metadata !"fpexcept.str…
// CHECK: call x86_fp80 @llvm.experimental.constrained.ceil.f80(x86_fp80 %{{.*}}, metadata !"fpexce…
// CHECK: call fp128 @llvm.experimental.constrained.ceil.f128(fp128 %{{.*}}, metadata !"fpexcept.st…

// CHECK: call double @llvm.experimental.constrained.cos.f64(double %{{.*}}, metadata !"round.tonea…
// CHECK: call float @llvm.experimental.constrained.cos.f32(float %{{.*}}, metadata !"round.toneare…
// CHECK: call x86_fp80 @llvm.experimental.constrained.cos.f80(x86_fp80 %{{.*}}, metadata !"round.t…
// CHECK: call fp128 @llvm.experimental.constrained.cos.f128(fp128 %{{.*}}, metadata !"round.tonear…

// CHECK: call double @llvm.experimental.constrained.exp.f64(double %{{.*}}, metadata !"round.tonea…
// CHECK: call float @llvm.experimental.constrained.exp.f32(float %{{.*}}, metadata !"round.toneare…
// CHECK: call x86_fp80 @llvm.experimental.constrained.exp.f80(x86_fp80 %{{.*}}, metadata !"round.t…
// CHECK: call fp128 @llvm.experimental.constrained.exp.f128(fp128 %{{.*}}, metadata !"round.tonear…

// CHECK: call double @llvm.experimental.constrained.exp2.f64(double %{{.*}}, metadata !"round.tone…
// CHECK: call float @llvm.experimental.constrained.exp2.f32(float %{{.*}}, metadata !"round.tonear…
// CHECK: call x86_fp80 @llvm.experimental.constrained.exp2.f80(x86_fp80 %{{.*}}, metadata !"round.…
// CHECK: call fp128 @llvm.experimental.constrained.exp2.f128(fp128 %{{.*}}, metadata !"round.tonea…

// CHECK: call double @llvm.experimental.constrained.floor.f64(double %{{.*}}, metadata !"fpexcept.…
// CHECK: call float @llvm.experimental.constrained.floor.f32(float %{{.*}}, metadata !"fpexcept.st…
// CHECK: call x86_fp80 @llvm.experimental.constrained.floor.f80(x86_fp80 %{{.*}}, metadata !"fpexc…
// CHECK: call fp128 @llvm.experimental.constrained.floor.f128(fp128 %{{.*}}, metadata !"fpexcept.s…

// CHECK: call double @llvm.experimental.constrained.fma.f64(double %{{.*}}, double %{{.*}}, double…
// CHECK: call float @llvm.experimental.constrained.fma.f32(float %{{.*}}, float %{{.*}}, float %{{…
// CHECK: call x86_fp80 @llvm.experimental.constrained.fma.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, …
// CHECK: call fp128 @llvm.experimental.constrained.fma.f128(fp128 %{{.*}}, fp128 %{{.*}}, fp128 %{…

// CHECK: call double @llvm.experimental.constrained.maxnum.f64(double %{{.*}}, double %{{.*}}, met…
// CHECK: call float @llvm.experimental.constrained.maxnum.f32(float %{{.*}}, float %{{.*}}, metada…
// CHECK: call x86_fp80 @llvm.experimental.constrained.maxnum.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}…
// CHECK: call fp128 @llvm.experimental.constrained.maxnum.f128(fp128 %{{.*}}, fp128 %{{.*}}, metad…

// CHECK: call double @llvm.experimental.constrained.minnum.f64(double %{{.*}}, double %{{.*}}, met…
// CHECK: call float @llvm.experimental.constrained.minnum.f32(float %{{.*}}, float %{{.*}}, metada…
// CHECK: call x86_fp80 @llvm.experimental.constrained.minnum.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}…
// CHECK: call fp128 @llvm.experimental.constrained.minnum.f128(fp128 %{{.*}}, fp128 %{{.*}}, metad…

// CHECK: call i64 @llvm.experimental.constrained.llrint.i64.f64(double %{{.*}}, metadata !"round.t…
// CHECK: call i64 @llvm.experimental.constrained.llrint.i64.f32(float %{{.*}}, metadata !"round.to…
// CHECK: call i64 @llvm.experimental.constrained.llrint.i64.f80(x86_fp80 %{{.*}}, metadata !"round…
// CHECK: call i64 @llvm.experimental.constrained.llrint.i64.f128(fp128 %{{.*}}, metadata !"round.t…

// CHECK: call i64 @llvm.experimental.constrained.llround.i64.f64(double %{{.*}}, metadata !"fpexce…
// CHECK: call i64 @llvm.experimental.constrained.llround.i64.f32(float %{{.*}}, metadata !"fpexcep…
// CHECK: call i64 @llvm.experimental.constrained.llround.i64.f80(x86_fp80 %{{.*}}, metadata !"fpex…
// CHECK: call i64 @llvm.experimental.constrained.llround.i64.f128(fp128 %{{.*}}, metadata !"fpexce…

// CHECK: call double @llvm.experimental.constrained.log.f64(double %{{.*}}, metadata !"round.tonea…
// CHECK: call float @llvm.experimental.constrained.log.f32(float %{{.*}}, metadata !"round.toneare…
// CHECK: call x86_fp80 @llvm.experimental.constrained.log.f80(x86_fp80 %{{.*}}, metadata !"round.t…
// CHECK: call fp128 @llvm.experimental.constrained.log.f128(fp128 %{{.*}}, metadata !"round.tonear…

// CHECK: call double @llvm.experimental.constrained.log10.f64(double %{{.*}}, metadata !"round.ton…
// CHECK: call float @llvm.experimental.constrained.log10.f32(float %{{.*}}, metadata !"round.tonea…
// CHECK: call x86_fp80 @llvm.experimental.constrained.log10.f80(x86_fp80 %{{.*}}, metadata !"round…
// CHECK: call fp128 @llvm.experimental.constrained.log10.f128(fp128 %{{.*}}, metadata !"round.tone…

// CHECK: call double @llvm.experimental.constrained.log2.f64(double %{{.*}}, metadata !"round.tone…
// CHECK: call float @llvm.experimental.constrained.log2.f32(float %{{.*}}, metadata !"round.tonear…
// CHECK: call x86_fp80 @llvm.experimental.constrained.log2.f80(x86_fp80 %{{.*}}, metadata !"round.…
// CHECK: call fp128 @llvm.experimental.constrained.log2.f128(fp128 %{{.*}}, metadata !"round.tonea…

// CHECK: call i64 @llvm.experimental.constrained.lrint.i64.f64(double %{{.*}}, metadata !"round.to…
// CHECK: call i64 @llvm.experimental.constrained.lrint.i64.f32(float %{{.*}}, metadata !"round.ton…
// CHECK: call i64 @llvm.experimental.constrained.lrint.i64.f80(x86_fp80 %{{.*}}, metadata !"round.…
// CHECK: call i64 @llvm.experimental.constrained.lrint.i64.f128(fp128 %{{.*}}, metadata !"round.to…

// CHECK: call i64 @llvm.experimental.constrained.lround.i64.f64(double %{{.*}}, metadata !"fpexcep…
// CHECK: call i64 @llvm.experimental.constrained.lround.i64.f32(float %{{.*}}, metadata !"fpexcept…
// CHECK: call i64 @llvm.experimental.constrained.lround.i64.f80(x86_fp80 %{{.*}}, metadata !"fpexc…
// CHECK: call i64 @llvm.experimental.constrained.lround.i64.f128(fp128 %{{.*}}, metadata !"fpexcep…

// CHECK: call double @llvm.experimental.constrained.nearbyint.f64(double %{{.*}}, metadata !"round…
// CHECK: call float @llvm.experimental.constrained.nearbyint.f32(float %{{.*}}, metadata !"round.t…
// CHECK: call x86_fp80 @llvm.experimental.constrained.nearbyint.f80(x86_fp80 %{{.*}}, metadata !"r…
// CHECK: call fp128 @llvm.experimental.constrained.nearbyint.f128(fp128 %{{.*}}, metadata !"round.…

// CHECK: call double @llvm.experimental.constrained.rint.f64(double %{{.*}}, metadata !"fpexcept.s…
// CHECK: call float @llvm.experimental.constrained.rint.f32(float %{{.*}}, metadata !"fpexcept.str…
// CHECK: call x86_fp80 @llvm.experimental.constrained.rint.f80(x86_fp80 %{{.*}}, metadata !"fpexce…
// CHECK: call fp128 @llvm.experimental.constrained.rint.f128(fp128 %{{.*}}, metadata !"fpexcept.st…

// CHECK: call double @llvm.experimental.constrained.round.f64(double %{{.*}}, metadata !"fpexcept.…
// CHECK: call float @llvm.experimental.constrained.round.f32(float %{{.*}}, metadata !"fpexcept.st…
// CHECK: call x86_fp80 @llvm.experimental.constrained.round.f80(x86_fp80 %{{.*}}, metadata !"fpexc…
// CHECK: call fp128 @llvm.experimental.constrained.round.f128(fp128 %{{.*}}, metadata !"fpexcept.s…

// CHECK: call double @llvm.experimental.constrained.sin.f64(double %{{.*}}, metadata !"round.tonea…
// CHECK: call float @llvm.experimental.constrained.sin.f32(float %{{.*}}, metadata !"round.toneare…
// CHECK: call x86_fp80 @llvm.experimental.constrained.sin.f80(x86_fp80 %{{.*}}, metadata !"round.t…
// CHECK: call fp128 @llvm.experimental.constrained.sin.f128(fp128 %{{.*}}, metadata !"round.tonear…

// CHECK: call double @llvm.experimental.constrained.sqrt.f64(double %{{.*}}, metadata !"round.tone…
// CHECK: call float @llvm.experimental.constrained.sqrt.f32(float %{{.*}}, metadata !"round.tonear…
// CHECK: call x86_fp80 @llvm.experimental.constrained.sqrt.f80(x86_fp80 %{{.*}}, metadata !"round.…
// CHECK: call fp128 @llvm.experimental.constrained.sqrt.f128(fp128 %{{.*}}, metadata !"round.tonea…

// CHECK: call double @llvm.experimental.constrained.trunc.f64(double %{{.*}}, metadata !"fpexcept.…
// CHECK: call float @llvm.experimental.constrained.trunc.f32(float %{{.*}}, metadata !"fpexcept.st…
// CHECK: call x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80 %{{.*}}, metadata !"fpexc…
// CHECK: call fp128 @llvm.experimental.constrained.trunc.f128(fp128 %{{.*}}, metadata !"fpexcept.s…
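
The call-site checks above (and the module-level declare checks below) are what FileCheck matches when Clang lowers the standard math builtins under a strict floating-point exception environment: each builtin becomes an llvm.experimental.constrained.* intrinsic carrying !"round.tonearest" and !"fpexcept.strict" metadata instead of a plain libm call or FP instruction. The following is only a minimal sketch of source that would exercise this lowering; the RUN line, signature, and argument choices are illustrative assumptions, not the original test's contents.

// Assumed sketch, not the original test: compiled with strict FP exception
// behavior, each builtin call below is emitted as the matching
// llvm.experimental.constrained.* call checked above.
// RUN: %clang_cc1 -triple x86_64-linux-gnu -ffp-exception-behavior=strict \
// RUN:   -emit-llvm -o - %s | FileCheck %s
void foo(double d, float f, long double ld, int i) {
  __builtin_fmod(d, d);    // constrained.frem.f64
  __builtin_fmodf(f, f);   // constrained.frem.f32
  __builtin_fmodl(ld, ld); // constrained.frem.f80 (x86 long double)
  __builtin_pow(d, d);     // constrained.pow.f64
  __builtin_powi(d, i);    // constrained.powi.f64
  __builtin_ceil(d);       // constrained.ceil.f64
  __builtin_sqrt(d);       // constrained.sqrt.f64
  __builtin_fma(d, d, d);  // constrained.fma.f64
  __builtin_llrint(d);     // constrained.llrint.i64.f64
  __builtin_lround(d);     // constrained.lround.i64.f64
}

The f128 variants checked above would come from the corresponding __builtin_*f128 calls, omitted here for brevity.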
// CHECK: declare double @llvm.experimental.constrained.frem.f64(double, double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.frem.f32(float, float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.frem.f80(x86_fp80, x86_fp80, metadata, me…
// CHECK: declare fp128 @llvm.experimental.constrained.frem.f128(fp128, fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.pow.f64(double, double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.pow.f32(float, float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.pow.f80(x86_fp80, x86_fp80, metadata, met…
// CHECK: declare fp128 @llvm.experimental.constrained.pow.f128(fp128, fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.powi.f64(double, i32, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.powi.f32(float, i32, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.powi.f80(x86_fp80, i32, metadata, metadat…

// CHECK: declare double @llvm.experimental.constrained.ceil.f64(double, metadata)
// CHECK: declare float @llvm.experimental.constrained.ceil.f32(float, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.ceil.f80(x86_fp80, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.ceil.f128(fp128, metadata)

// CHECK: declare double @llvm.experimental.constrained.cos.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.cos.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.cos.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.cos.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.exp.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.exp.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.exp.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.exp.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.exp2.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.exp2.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.exp2.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.exp2.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.floor.f64(double, metadata)
// CHECK: declare float @llvm.experimental.constrained.floor.f32(float, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.floor.f80(x86_fp80, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.floor.f128(fp128, metadata)

// CHECK: declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, m…
// CHECK: declare float @llvm.experimental.constrained.fma.f32(float, float, float, metadata, metad…
// CHECK: declare x86_fp80 @llvm.experimental.constrained.fma.f80(x86_fp80, x86_fp80, x86_fp80, met…
// CHECK: declare fp128 @llvm.experimental.constrained.fma.f128(fp128, fp128, fp128, metadata, meta…

// CHECK: declare double @llvm.experimental.constrained.maxnum.f64(double, double, metadata)
// CHECK: declare float @llvm.experimental.constrained.maxnum.f32(float, float, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.maxnum.f80(x86_fp80, x86_fp80, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.maxnum.f128(fp128, fp128, metadata)

// CHECK: declare double @llvm.experimental.constrained.minnum.f64(double, double, metadata)
// CHECK: declare float @llvm.experimental.constrained.minnum.f32(float, float, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.minnum.f80(x86_fp80, x86_fp80, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.minnum.f128(fp128, fp128, metadata)

// CHECK: declare i64 @llvm.experimental.constrained.llrint.i64.f64(double, metadata, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.llrint.i64.f32(float, metadata, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.llrint.i64.f80(x86_fp80, metadata, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.llrint.i64.f128(fp128, metadata, metadata)

// CHECK: declare i64 @llvm.experimental.constrained.llround.i64.f64(double, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.llround.i64.f32(float, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.llround.i64.f80(x86_fp80, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.llround.i64.f128(fp128, metadata)

// CHECK: declare double @llvm.experimental.constrained.log.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.log.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.log.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.log.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.log10.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.log10.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.log10.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.log10.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.log2.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.log2.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.log2.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.log2.f128(fp128, metadata, metadata)

// CHECK: declare i64 @llvm.experimental.constrained.lrint.i64.f64(double, metadata, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.lrint.i64.f32(float, metadata, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.lrint.i64.f80(x86_fp80, metadata, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.lrint.i64.f128(fp128, metadata, metadata)

// CHECK: declare i64 @llvm.experimental.constrained.lround.i64.f64(double, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.lround.i64.f32(float, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.lround.i64.f80(x86_fp80, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.lround.i64.f128(fp128, metadata)

// CHECK: declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.nearbyint.f80(x86_fp80, metadata, metadat…
// CHECK: declare fp128 @llvm.experimental.constrained.nearbyint.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.rint.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.rint.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.round.f64(double, metadata)
// CHECK: declare float @llvm.experimental.constrained.round.f32(float, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.round.f80(x86_fp80, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.round.f128(fp128, metadata)

// CHECK: declare double @llvm.experimental.constrained.sin.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.sin.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.sin.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.sin.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.sqrt.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.sqrt.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
// CHECK: declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata)

Matches in bar():

…// CHECK: call float @llvm.experimental.constrained.fmuladd.f32(float %{{.*}}, float %{{.*}}, floa…
…// CHECK: call double @llvm.experimental.constrained.fmuladd.f64(double %{{.*}}, double %{{.*}}, d…
…// CHECK: call x86_fp80 @llvm.experimental.constrained.fmuladd.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.…
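
Unlike the groups above, the constrained fmuladd calls checked in bar() do not come from a builtin: llvm.experimental.constrained.fmuladd is produced when a separate multiply and add are contracted while the strict FP environment is in effect. The sketch below is a hedged illustration of code that would trigger it; the real bar() in the test may use different types or structure, as the truncated lines suggest.

// Assumed illustration: with FP contraction enabled and strict exception
// behavior, a*b + c lowers to llvm.experimental.constrained.fmuladd.* rather
// than separate constrained fmul/fadd calls.
#pragma STDC FP_CONTRACT ON
void bar(float f, double d, long double ld) {
  float r32 = f * f + f;           // constrained.fmuladd.f32
  double r64 = d * d + d;          // constrained.fmuladd.f64
  long double r80 = ld * ld + ld;  // constrained.fmuladd.f80
  (void)r32; (void)r64; (void)r80; // silence unused-variable warnings
}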