1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2; RUN: opt < %s -instcombine -S -mtriple "i386-pc-linux"     | FileCheck %s
3; RUN: opt < %s -instcombine -S -mtriple "i386-pc-win32"     | FileCheck %s
4; RUN: opt < %s -instcombine -S -mtriple "x86_64-pc-win32"   | FileCheck %s
5; RUN: opt < %s -instcombine -S -mtriple "i386-pc-mingw32"   | FileCheck %s
6; RUN: opt < %s -instcombine -S -mtriple "x86_64-pc-mingw32" | FileCheck %s
7; RUN: opt < %s -instcombine -S -mtriple "sparc-sun-solaris" | FileCheck %s
8; RUN: opt < %s -instcombine -S -mtriple "x86_64-pc-win32" -enable-debugify 2>&1 | FileCheck --check-prefix=DBG-VALID %s
9
10declare double @floor(double)
11declare double @ceil(double)
12declare double @round(double)
13declare double @roundeven(double)
14declare double @nearbyint(double)
15declare double @trunc(double)
16declare double @fabs(double)
17
18declare double @llvm.ceil.f64(double)
19declare <2 x double> @llvm.ceil.v2f64(<2 x double>)
20
21declare double @llvm.fabs.f64(double)
22declare <2 x double> @llvm.fabs.v2f64(<2 x double>)
23
24declare double @llvm.floor.f64(double)
25declare <2 x double> @llvm.floor.v2f64(<2 x double>)
26
27declare double @llvm.nearbyint.f64(double)
28declare <2 x double> @llvm.nearbyint.v2f64(<2 x double>)
29
30declare float @llvm.rint.f32(float)
31declare <2 x float> @llvm.rint.v2f32(<2 x float>)
32
33declare double @llvm.round.f64(double)
34declare <2 x double> @llvm.round.v2f64(<2 x double>)
35
36declare double @llvm.roundeven.f64(double)
37declare <2 x double> @llvm.roundeven.v2f64(<2 x double>)
38
39declare double @llvm.trunc.f64(double)
40declare <2 x double> @llvm.trunc.v2f64(<2 x double>)
41
; InstCombine should shrink a double rounding libcall to the equivalent
; float intrinsic when its argument is an fpext from float and the only
; use of its result is an fptrunc back to float, i.e. the wide call is a
; float-precision round trip.

define float @test_shrink_libcall_floor(float %C) {
; CHECK-LABEL: @test_shrink_libcall_floor(
; CHECK-NEXT:    [[F:%.*]] = call float @llvm.floor.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[F]]
;
  %D = fpext float %C to double
  ; --> floorf
  %E = call double @floor(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_libcall_ceil(float %C) {
; CHECK-LABEL: @test_shrink_libcall_ceil(
; CHECK-NEXT:    [[F:%.*]] = call float @llvm.ceil.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[F]]
;
  %D = fpext float %C to double
  ; --> ceilf
  %E = call double @ceil(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_libcall_round(float %C) {
; CHECK-LABEL: @test_shrink_libcall_round(
; CHECK-NEXT:    [[F:%.*]] = call float @llvm.round.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[F]]
;
  %D = fpext float %C to double
  ; --> roundf
  %E = call double @round(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_libcall_roundeven(float %C) {
; CHECK-LABEL: @test_shrink_libcall_roundeven(
; CHECK-NEXT:    [[F:%.*]] = call float @llvm.roundeven.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[F]]
;
  %D = fpext float %C to double
  ; --> roundevenf
  %E = call double @roundeven(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_libcall_nearbyint(float %C) {
; CHECK-LABEL: @test_shrink_libcall_nearbyint(
; CHECK-NEXT:    [[F:%.*]] = call float @llvm.nearbyint.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[F]]
;
  %D = fpext float %C to double
  ; --> nearbyintf
  %E = call double @nearbyint(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_libcall_trunc(float %C) {
; CHECK-LABEL: @test_shrink_libcall_trunc(
; CHECK-NEXT:    [[F:%.*]] = call float @llvm.trunc.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[F]]
;
  %D = fpext float %C to double
  ; --> truncf
  %E = call double @trunc(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
113
; This is replaced with the intrinsic, which does the right thing on
; all platforms.
define float @test_shrink_libcall_fabs(float %C) {
; CHECK-LABEL: @test_shrink_libcall_fabs(
; CHECK-NEXT:    [[F:%.*]] = call float @llvm.fabs.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[F]]
;
  %D = fpext float %C to double
  %E = call double @fabs(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

; Make sure fast math flags are preserved
define float @test_shrink_libcall_fabs_fast(float %C) {
; CHECK-LABEL: @test_shrink_libcall_fabs_fast(
; CHECK-NEXT:    [[F:%.*]] = call fast float @llvm.fabs.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[F]]
;
  %D = fpext float %C to double
  %E = call fast double @fabs(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
138
; The same shrinking applies when the wide call is already an intrinsic:
; fpext + wide intrinsic + fptrunc folds to the narrow intrinsic.

define float @test_shrink_intrin_ceil(float %C) {
; CHECK-LABEL: @test_shrink_intrin_ceil(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.ceil.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
;
  %D = fpext float %C to double
  %E = call double @llvm.ceil.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_intrin_fabs(float %C) {
; CHECK-LABEL: @test_shrink_intrin_fabs(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.fabs.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
;
  %D = fpext float %C to double
  %E = call double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_intrin_floor(float %C) {
; CHECK-LABEL: @test_shrink_intrin_floor(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.floor.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
;
  %D = fpext float %C to double
  %E = call double @llvm.floor.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_intrin_nearbyint(float %C) {
; CHECK-LABEL: @test_shrink_intrin_nearbyint(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.nearbyint.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
;
  %D = fpext float %C to double
  %E = call double @llvm.nearbyint.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

; f32 -> f16 variant of the same fold, using rint.
define half @test_shrink_intrin_rint(half %C) {
; CHECK-LABEL: @test_shrink_intrin_rint(
; CHECK-NEXT:    [[TMP1:%.*]] = call half @llvm.rint.f16(half [[C:%.*]])
; CHECK-NEXT:    ret half [[TMP1]]
;
  %D = fpext half %C to float
  %E = call float @llvm.rint.f32(float %D)
  %F = fptrunc float %E to half
  ret half %F
}

define float @test_shrink_intrin_round(float %C) {
; CHECK-LABEL: @test_shrink_intrin_round(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.round.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
;
  %D = fpext float %C to double
  %E = call double @llvm.round.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_intrin_roundeven(float %C) {
; CHECK-LABEL: @test_shrink_intrin_roundeven(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.roundeven.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
;
  %D = fpext float %C to double
  %E = call double @llvm.roundeven.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_intrin_trunc(float %C) {
; CHECK-LABEL: @test_shrink_intrin_trunc(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.trunc.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
;
  %D = fpext float %C to double
  %E = call double @llvm.trunc.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
226
227declare void @use_v2f64(<2 x double>)
228declare void @use_v2f32(<2 x float>)
229
; Multi-use tests (vector forms). Per the CHECK lines below, only fabs and
; rint are still shrunk when the wide value has an extra use (their narrow
; result is re-extended for that use); the rounding intrinsics are left in
; the wide type.

define <2 x float> @test_shrink_intrin_ceil_multi_use(<2 x float> %C) {
; CHECK-LABEL: @test_shrink_intrin_ceil_multi_use(
; CHECK-NEXT:    [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
; CHECK-NEXT:    [[E:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[D]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[D]])
; CHECK-NEXT:    ret <2 x float> [[F]]
;
  %D = fpext <2 x float> %C to <2 x double>
  %E = call <2 x double> @llvm.ceil.v2f64(<2 x double> %D)
  %F = fptrunc <2 x double> %E to <2 x float>
  call void @use_v2f64(<2 x double> %D)
  ret <2 x float> %F
}

define <2 x float> @test_shrink_intrin_fabs_multi_use(<2 x float> %C) {
; CHECK-LABEL: @test_shrink_intrin_fabs_multi_use(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x float> @llvm.fabs.v2f32(<2 x float> [[C:%.*]])
; CHECK-NEXT:    [[E:%.*]] = fpext <2 x float> [[TMP1]] to <2 x double>
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[E]])
; CHECK-NEXT:    ret <2 x float> [[TMP1]]
;
  %D = fpext <2 x float> %C to <2 x double>
  %E = call <2 x double> @llvm.fabs.v2f64(<2 x double> %D)
  %F = fptrunc <2 x double> %E to <2 x float>
  call void @use_v2f64(<2 x double> %E)
  ret <2 x float> %F
}

define <2 x float> @test_shrink_intrin_floor_multi_use(<2 x float> %C) {
; CHECK-LABEL: @test_shrink_intrin_floor_multi_use(
; CHECK-NEXT:    [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
; CHECK-NEXT:    [[E:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[D]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[D]])
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[E]])
; CHECK-NEXT:    ret <2 x float> [[F]]
;
  %D = fpext <2 x float> %C to <2 x double>
  %E = call <2 x double> @llvm.floor.v2f64(<2 x double> %D)
  %F = fptrunc <2 x double> %E to <2 x float>
  call void @use_v2f64(<2 x double> %D)
  call void @use_v2f64(<2 x double> %E)
  ret <2 x float> %F
}

define <2 x float> @test_shrink_intrin_nearbyint_multi_use(<2 x float> %C) {
; CHECK-LABEL: @test_shrink_intrin_nearbyint_multi_use(
; CHECK-NEXT:    [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
; CHECK-NEXT:    [[E:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> [[D]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[D]])
; CHECK-NEXT:    ret <2 x float> [[F]]
;
  %D = fpext <2 x float> %C to <2 x double>
  %E = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %D)
  %F = fptrunc <2 x double> %E to <2 x float>
  call void @use_v2f64(<2 x double> %D)
  ret <2 x float> %F
}

define <2 x half> @test_shrink_intrin_rint_multi_use(<2 x half> %C) {
; CHECK-LABEL: @test_shrink_intrin_rint_multi_use(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x half> @llvm.rint.v2f16(<2 x half> [[C:%.*]])
; CHECK-NEXT:    [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x float>
; CHECK-NEXT:    call void @use_v2f32(<2 x float> [[E]])
; CHECK-NEXT:    ret <2 x half> [[TMP1]]
;
  %D = fpext <2 x half> %C to <2 x float>
  %E = call <2 x float> @llvm.rint.v2f32(<2 x float> %D)
  %F = fptrunc <2 x float> %E to <2 x half>
  call void @use_v2f32(<2 x float> %E)
  ret <2 x half> %F
}

define <2 x float> @test_shrink_intrin_round_multi_use(<2 x float> %C) {
; CHECK-LABEL: @test_shrink_intrin_round_multi_use(
; CHECK-NEXT:    [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
; CHECK-NEXT:    [[E:%.*]] = call <2 x double> @llvm.round.v2f64(<2 x double> [[D]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[D]])
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[E]])
; CHECK-NEXT:    ret <2 x float> [[F]]
;
  %D = fpext <2 x float> %C to <2 x double>
  %E = call <2 x double> @llvm.round.v2f64(<2 x double> %D)
  %F = fptrunc <2 x double> %E to <2 x float>
  call void @use_v2f64(<2 x double> %D)
  call void @use_v2f64(<2 x double> %E)
  ret <2 x float> %F
}

define <2 x float> @test_shrink_intrin_roundeven_multi_use(<2 x float> %C) {
; CHECK-LABEL: @test_shrink_intrin_roundeven_multi_use(
; CHECK-NEXT:    [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
; CHECK-NEXT:    [[E:%.*]] = call <2 x double> @llvm.roundeven.v2f64(<2 x double> [[D]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[D]])
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[E]])
; CHECK-NEXT:    ret <2 x float> [[F]]
;
  %D = fpext <2 x float> %C to <2 x double>
  %E = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %D)
  %F = fptrunc <2 x double> %E to <2 x float>
  call void @use_v2f64(<2 x double> %D)
  call void @use_v2f64(<2 x double> %E)
  ret <2 x float> %F
}

define <2 x float> @test_shrink_intrin_trunc_multi_use(<2 x float> %C) {
; CHECK-LABEL: @test_shrink_intrin_trunc_multi_use(
; CHECK-NEXT:    [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
; CHECK-NEXT:    [[E:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[D]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[D]])
; CHECK-NEXT:    ret <2 x float> [[F]]
;
  %D = fpext <2 x float> %C to <2 x double>
  %E = call <2 x double> @llvm.trunc.v2f64(<2 x double> %D)
  %F = fptrunc <2 x double> %E to <2 x float>
  call void @use_v2f64(<2 x double> %D)
  ret <2 x float> %F
}
353
; Make sure fast math flags are preserved when the fabs intrinsic is shrunk.
define float @test_shrink_intrin_fabs_fast(float %C) {
; CHECK-LABEL: @test_shrink_intrin_fabs_fast(
; CHECK-NEXT:    [[TMP1:%.*]] = call fast float @llvm.fabs.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
;
  %D = fpext float %C to double
  %E = call fast double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
365
; Negative tests: the operand is a plain double rather than a value
; extended from float, so the rounding intrinsics are not shrunk.

define float @test_no_shrink_intrin_floor(double %D) {
; CHECK-LABEL: @test_no_shrink_intrin_floor(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.floor.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; CHECK-NEXT:    ret float [[F]]
;
  %E = call double @llvm.floor.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_no_shrink_intrin_ceil(double %D) {
; CHECK-LABEL: @test_no_shrink_intrin_ceil(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.ceil.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; CHECK-NEXT:    ret float [[F]]
;
  %E = call double @llvm.ceil.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_no_shrink_intrin_round(double %D) {
; CHECK-LABEL: @test_no_shrink_intrin_round(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.round.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; CHECK-NEXT:    ret float [[F]]
;
  %E = call double @llvm.round.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_no_shrink_intrin_roundeven(double %D) {
; CHECK-LABEL: @test_no_shrink_intrin_roundeven(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.roundeven.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; CHECK-NEXT:    ret float [[F]]
;
  %E = call double @llvm.roundeven.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_no_shrink_intrin_nearbyint(double %D) {
; CHECK-LABEL: @test_no_shrink_intrin_nearbyint(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.nearbyint.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; CHECK-NEXT:    ret float [[F]]
;
  %E = call double @llvm.nearbyint.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_no_shrink_intrin_trunc(double %D) {
; CHECK-LABEL: @test_no_shrink_intrin_trunc(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.trunc.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; CHECK-NEXT:    ret float [[F]]
;
  %E = call double @llvm.trunc.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
431
; Unlike the rounding intrinsics above, fabs with a plain double source is
; still shrunk: the fptrunc is hoisted ahead of the call and the fabs is
; performed on float (see the CHECK lines).
define float @test_shrink_intrin_fabs_double_src(double %D) {
; CHECK-LABEL: @test_shrink_intrin_fabs_double_src(
; CHECK-NEXT:    [[TMP1:%.*]] = fptrunc double [[D:%.*]] to float
; CHECK-NEXT:    [[F:%.*]] = call float @llvm.fabs.f32(float [[TMP1]])
; CHECK-NEXT:    ret float [[F]]
;
  %E = call double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

; Make sure fast math flags are preserved
define float @test_shrink_intrin_fabs_fast_double_src(double %D) {
; CHECK-LABEL: @test_shrink_intrin_fabs_fast_double_src(
; CHECK-NEXT:    [[TMP1:%.*]] = fptrunc double [[D:%.*]] to float
; CHECK-NEXT:    [[F:%.*]] = call fast float @llvm.fabs.f32(float [[TMP1]])
; CHECK-NEXT:    ret float [[F]]
;
  %E = call fast double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
454
; Intrinsic calls on a constant operand are constant-folded through the
; fptrunc to a float constant.

define float @test_shrink_float_convertible_constant_intrin_floor() {
; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_floor(
; CHECK-NEXT:    ret float 2.000000e+00
;
  %E = call double @llvm.floor.f64(double 2.1)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_float_convertible_constant_intrin_ceil() {
; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_ceil(
; CHECK-NEXT:    ret float 3.000000e+00
;
  %E = call double @llvm.ceil.f64(double 2.1)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_float_convertible_constant_intrin_round() {
; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_round(
; CHECK-NEXT:    ret float 2.000000e+00
;
  %E = call double @llvm.round.f64(double 2.1)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_float_convertible_constant_intrin_roundeven() {
; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_roundeven(
; CHECK-NEXT:    ret float 2.000000e+00
;
  %E = call double @llvm.roundeven.f64(double 2.1)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_float_convertible_constant_intrin_nearbyint() {
; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_nearbyint(
; CHECK-NEXT:    ret float 2.000000e+00
;
  %E = call double @llvm.nearbyint.f64(double 2.1)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_float_convertible_constant_intrin_trunc() {
; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_trunc(
; CHECK-NEXT:    ret float 2.000000e+00
;
  %E = call double @llvm.trunc.f64(double 2.1)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_float_convertible_constant_intrin_fabs() {
; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_fabs(
; CHECK-NEXT:    ret float 0x4000CCCCC0000000
;
  %E = call double @llvm.fabs.f64(double 2.1)
  %F = fptrunc double %E to float
  ret float %F
}

; Make sure fast math flags are preserved
define float @test_shrink_float_convertible_constant_intrin_fabs_fast() {
; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_fabs_fast(
; CHECK-NEXT:    ret float 0x4000CCCCC0000000
;
  %E = call fast double @llvm.fabs.f64(double 2.1)
  %F = fptrunc double %E to float
  ret float %F
}
527
; fptrunc-to-half of an f64 intrinsic on a plain double operand: the
; rounding intrinsics are not shrunk, but fabs still is (down to f16).

define half @test_no_shrink_mismatched_type_intrin_floor(double %D) {
; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_floor(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.floor.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to half
; CHECK-NEXT:    ret half [[F]]
;
  %E = call double @llvm.floor.f64(double %D)
  %F = fptrunc double %E to half
  ret half %F
}

define half @test_no_shrink_mismatched_type_intrin_ceil(double %D) {
; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_ceil(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.ceil.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to half
; CHECK-NEXT:    ret half [[F]]
;
  %E = call double @llvm.ceil.f64(double %D)
  %F = fptrunc double %E to half
  ret half %F
}

define half @test_no_shrink_mismatched_type_intrin_round(double %D) {
; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_round(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.round.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to half
; CHECK-NEXT:    ret half [[F]]
;
  %E = call double @llvm.round.f64(double %D)
  %F = fptrunc double %E to half
  ret half %F
}

define half @test_no_shrink_mismatched_type_intrin_roundeven(double %D) {
; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_roundeven(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.roundeven.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to half
; CHECK-NEXT:    ret half [[F]]
;
  %E = call double @llvm.roundeven.f64(double %D)
  %F = fptrunc double %E to half
  ret half %F
}

define half @test_no_shrink_mismatched_type_intrin_nearbyint(double %D) {
; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_nearbyint(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.nearbyint.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to half
; CHECK-NEXT:    ret half [[F]]
;
  %E = call double @llvm.nearbyint.f64(double %D)
  %F = fptrunc double %E to half
  ret half %F
}

define half @test_no_shrink_mismatched_type_intrin_trunc(double %D) {
; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_trunc(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.trunc.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to half
; CHECK-NEXT:    ret half [[F]]
;
  %E = call double @llvm.trunc.f64(double %D)
  %F = fptrunc double %E to half
  ret half %F
}

define half @test_shrink_mismatched_type_intrin_fabs_double_src(double %D) {
; CHECK-LABEL: @test_shrink_mismatched_type_intrin_fabs_double_src(
; CHECK-NEXT:    [[TMP1:%.*]] = fptrunc double [[D:%.*]] to half
; CHECK-NEXT:    [[F:%.*]] = call half @llvm.fabs.f16(half [[TMP1]])
; CHECK-NEXT:    ret half [[F]]
;
  %E = call double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to half
  ret half %F
}

; Make sure fast math flags are preserved
define half @test_mismatched_type_intrin_fabs_fast_double_src(double %D) {
; CHECK-LABEL: @test_mismatched_type_intrin_fabs_fast_double_src(
; CHECK-NEXT:    [[TMP1:%.*]] = fptrunc double [[D:%.*]] to half
; CHECK-NEXT:    [[F:%.*]] = call fast half @llvm.fabs.f16(half [[TMP1]])
; CHECK-NEXT:    ret half [[F]]
;
  %E = call fast double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to half
  ret half %F
}
616
; Shrinking with an f16 (half) source: the operation is performed on half
; and the result re-extended or truncated as needed, preserving any
; fast-math flags (arcp, fast) on the call.

define <2 x double> @test_shrink_intrin_floor_fp16_vec(<2 x half> %C) {
; CHECK-LABEL: @test_shrink_intrin_floor_fp16_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = call arcp <2 x half> @llvm.floor.v2f16(<2 x half> [[C:%.*]])
; CHECK-NEXT:    [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x double>
; CHECK-NEXT:    ret <2 x double> [[E]]
;
  %D = fpext <2 x half> %C to <2 x double>
  %E = call arcp <2 x double> @llvm.floor.v2f64(<2 x double> %D)
  ret <2 x double> %E
}

define float @test_shrink_intrin_ceil_fp16_src(half %C) {
; CHECK-LABEL: @test_shrink_intrin_ceil_fp16_src(
; CHECK-NEXT:    [[TMP1:%.*]] = call half @llvm.ceil.f16(half [[C:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fpext half [[TMP1]] to float
; CHECK-NEXT:    ret float [[F]]
;
  %D = fpext half %C to double
  %E = call double @llvm.ceil.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define <2 x double> @test_shrink_intrin_round_fp16_vec(<2 x half> %C) {
; CHECK-LABEL: @test_shrink_intrin_round_fp16_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x half> @llvm.round.v2f16(<2 x half> [[C:%.*]])
; CHECK-NEXT:    [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x double>
; CHECK-NEXT:    ret <2 x double> [[E]]
;
  %D = fpext <2 x  half> %C to <2 x double>
  %E = call <2 x double> @llvm.round.v2f64(<2 x double> %D)
  ret <2 x double> %E
}

define <2 x double> @test_shrink_intrin_roundeven_fp16_vec(<2 x half> %C) {
; CHECK-LABEL: @test_shrink_intrin_roundeven_fp16_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x half> @llvm.roundeven.v2f16(<2 x half> [[C:%.*]])
; CHECK-NEXT:    [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x double>
; CHECK-NEXT:    ret <2 x double> [[E]]
;
  %D = fpext <2 x  half> %C to <2 x double>
  %E = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %D)
  ret <2 x double> %E
}

define float @test_shrink_intrin_nearbyint_fp16_src(half %C) {
; CHECK-LABEL: @test_shrink_intrin_nearbyint_fp16_src(
; CHECK-NEXT:    [[TMP1:%.*]] = call half @llvm.nearbyint.f16(half [[C:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fpext half [[TMP1]] to float
; CHECK-NEXT:    ret float [[F]]
;
  %D = fpext half %C to double
  %E = call double @llvm.nearbyint.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define <2 x double> @test_shrink_intrin_trunc_fp16_src(<2 x half> %C) {
; CHECK-LABEL: @test_shrink_intrin_trunc_fp16_src(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x half> @llvm.trunc.v2f16(<2 x half> [[C:%.*]])
; CHECK-NEXT:    [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x double>
; CHECK-NEXT:    ret <2 x double> [[E]]
;
  %D = fpext <2 x half> %C to <2 x double>
  %E = call <2 x double> @llvm.trunc.v2f64(<2 x double> %D)
  ret <2 x double> %E
}

define float @test_shrink_intrin_fabs_fp16_src(half %C) {
; CHECK-LABEL: @test_shrink_intrin_fabs_fp16_src(
; CHECK-NEXT:    [[TMP1:%.*]] = call half @llvm.fabs.f16(half [[C:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fpext half [[TMP1]] to float
; CHECK-NEXT:    ret float [[F]]
;
  %D = fpext half %C to double
  %E = call double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

; Make sure fast math flags are preserved
define float @test_shrink_intrin_fabs_fast_fp16_src(half %C) {
; CHECK-LABEL: @test_shrink_intrin_fabs_fast_fp16_src(
; CHECK-NEXT:    [[TMP1:%.*]] = call fast half @llvm.fabs.f16(half [[C:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fpext half [[TMP1]] to float
; CHECK-NEXT:    ret float [[F]]
;
  %D = fpext half %C to double
  %E = call fast double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
709
; The volatile store is a second use of the wide fpext, so neither floor
; nor fabs is shrunk here (see the CHECK lines).
define float @test_no_shrink_intrin_floor_multi_use_fpext(half %C) {
; CHECK-LABEL: @test_no_shrink_intrin_floor_multi_use_fpext(
; CHECK-NEXT:    [[D:%.*]] = fpext half [[C:%.*]] to double
; CHECK-NEXT:    store volatile double [[D]], double* undef, align 8
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.floor.f64(double [[D]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; CHECK-NEXT:    ret float [[F]]
;
  %D = fpext half %C to double
  store volatile double %D, double* undef
  %E = call double @llvm.floor.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_no_shrink_intrin_fabs_multi_use_fpext(half %C) {
; CHECK-LABEL: @test_no_shrink_intrin_fabs_multi_use_fpext(
; CHECK-NEXT:    [[D:%.*]] = fpext half [[C:%.*]] to double
; CHECK-NEXT:    store volatile double [[D]], double* undef, align 8
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.fabs.f64(double [[D]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; CHECK-NEXT:    ret float [[F]]
;
  %D = fpext half %C to double
  store volatile double %D, double* undef
  %E = call double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
739
740; DBG-VALID: CheckModuleDebugify: PASS
741