; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -instcombine < %s | FileCheck %s

declare half @llvm.fabs.f16(half)
declare double @llvm.fabs.f64(double)
declare <2 x float> @llvm.fabs.v2f32(<2 x float>)

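; fcmp (fpext X), (fpext Y) --> fcmp X, Y
; The extension is lossless, so the compare is equivalent at the narrower type.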
define i1 @fpext_fpext(float %x, float %y) {
; CHECK-LABEL: @fpext_fpext(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp nnan ogt float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %ext1 = fpext float %x to double
  %ext2 = fpext float %y to double
  %cmp = fcmp nnan ogt double %ext1, %ext2
  ret i1 %cmp
}

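; A compared constant is shrunk along with the fpext when it is exactly
; representable at the narrower type.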
define i1 @fpext_constant(float %a) {
; CHECK-LABEL: @fpext_constant(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp ninf ogt float [[A:%.*]], 1.000000e+00
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %ext = fpext float %a to double
  %cmp = fcmp ninf ogt double %ext, 1.000000e+00
  ret i1 %cmp
}

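; Splat vector constants shrink too (0xH5140 is 42.0 as a half).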
define <2 x i1> @fpext_constant_vec_splat(<2 x half> %a) {
; CHECK-LABEL: @fpext_constant_vec_splat(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp nnan ole <2 x half> [[A:%.*]], <half 0xH5140, half 0xH5140>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %ext = fpext <2 x half> %a to <2 x double>
  %cmp = fcmp nnan ole <2 x double> %ext, <double 42.0, double 42.0>
  ret <2 x i1> %cmp
}

define i1 @fpext_constant_lossy(float %a) {
; CHECK-LABEL: @fpext_constant_lossy(
; CHECK-NEXT:    [[EXT:%.*]] = fpext float [[A:%.*]] to double
; CHECK-NEXT:    [[CMP:%.*]] = fcmp ogt double [[EXT]], 0x3FF0000000000001
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %ext = fpext float %a to double
  %cmp = fcmp ogt double %ext, 0x3FF0000000000001 ; more precision than float.
  ret i1 %cmp
}

define i1 @fpext_constant_denorm(float %a) {
; CHECK-LABEL: @fpext_constant_denorm(
; CHECK-NEXT:    [[EXT:%.*]] = fpext float [[A:%.*]] to double
; CHECK-NEXT:    [[CMP:%.*]] = fcmp ogt double [[EXT]], 0x36A0000000000000
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %ext = fpext float %a to double
  %cmp = fcmp ogt double %ext, 0x36A0000000000000 ; denormal in float.
  ret i1 %cmp
}

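; fcmp (fneg X), C --> fcmp X, -C with the predicate swapped.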
define i1 @fneg_constant_swap_pred(float %x) {
; CHECK-LABEL: @fneg_constant_swap_pred(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp olt float [[X:%.*]], -1.000000e+00
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %neg = fsub float -0.0, %x
  %cmp = fcmp ogt float %neg, 1.0
  ret i1 %cmp
}

define i1 @unary_fneg_constant_swap_pred(float %x) {
; CHECK-LABEL: @unary_fneg_constant_swap_pred(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp olt float [[X:%.*]], -1.000000e+00
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %neg = fneg float %x
  %cmp = fcmp ogt float %neg, 1.0
  ret i1 %cmp
}

define <2 x i1> @fneg_constant_swap_pred_vec(<2 x float> %x) {
; CHECK-LABEL: @fneg_constant_swap_pred_vec(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp olt <2 x float> [[X:%.*]], <float -1.000000e+00, float -2.000000e+00>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %neg = fsub <2 x float> <float -0.0, float -0.0>, %x
  %cmp = fcmp ogt <2 x float> %neg, <float 1.0, float 2.0>
  ret <2 x i1> %cmp
}

define <2 x i1> @unary_fneg_constant_swap_pred_vec(<2 x float> %x) {
; CHECK-LABEL: @unary_fneg_constant_swap_pred_vec(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp olt <2 x float> [[X:%.*]], <float -1.000000e+00, float -2.000000e+00>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %neg = fneg <2 x float> %x
  %cmp = fcmp ogt <2 x float> %neg, <float 1.0, float 2.0>
  ret <2 x i1> %cmp
}

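; An undef element in the -0.0 constant does not block the fold.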
define <2 x i1> @fneg_constant_swap_pred_vec_undef(<2 x float> %x) {
; CHECK-LABEL: @fneg_constant_swap_pred_vec_undef(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp olt <2 x float> [[X:%.*]], <float -1.000000e+00, float -2.000000e+00>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %neg = fsub <2 x float> <float undef, float -0.0>, %x
  %cmp = fcmp ogt <2 x float> %neg, <float 1.0, float 2.0>
  ret <2 x i1> %cmp
}

; The new fcmp should have the same FMF as the original.

define i1 @fneg_fmf(float %x) {
; CHECK-LABEL: @fneg_fmf(
; CHECK-NEXT:    [[R:%.*]] = fcmp fast oeq float [[X:%.*]], -4.200000e+01
; CHECK-NEXT:    ret i1 [[R]]
;
  %n = fsub fast float -0.0, %x
  %r = fcmp fast oeq float %n, 42.0
  ret i1 %r
}

define i1 @unary_fneg_fmf(float %x) {
; CHECK-LABEL: @unary_fneg_fmf(
; CHECK-NEXT:    [[R:%.*]] = fcmp fast oeq float [[X:%.*]], -4.200000e+01
; CHECK-NEXT:    ret i1 [[R]]
;
  %n = fneg fast float %x
  %r = fcmp fast oeq float %n, 42.0
  ret i1 %r
}

; The new fcmp should have the same FMF as the original, vector edition.

define <2 x i1> @fcmp_fneg_fmf_vec(<2 x float> %x) {
; CHECK-LABEL: @fcmp_fneg_fmf_vec(
; CHECK-NEXT:    [[R:%.*]] = fcmp reassoc nnan ule <2 x float> [[X:%.*]], <float -4.200000e+01, float 1.900000e+01>
; CHECK-NEXT:    ret <2 x i1> [[R]]
;
  %n = fsub nsz <2 x float> zeroinitializer, %x
  %r = fcmp nnan reassoc uge <2 x float> %n, <float 42.0, float -19.0>
  ret <2 x i1> %r
}

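; fcmp (fneg X), (fneg Y) --> fcmp X, Y with the predicate swapped.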
define i1 @fneg_fneg_swap_pred(float %x, float %y) {
; CHECK-LABEL: @fneg_fneg_swap_pred(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp nnan ogt float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %neg1 = fsub float -0.0, %x
  %neg2 = fsub float -0.0, %y
  %cmp = fcmp nnan olt float %neg1, %neg2
  ret i1 %cmp
}

define i1 @unary_fneg_unary_fneg_swap_pred(float %x, float %y) {
; CHECK-LABEL: @unary_fneg_unary_fneg_swap_pred(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp nnan ogt float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %neg1 = fneg float %x
  %neg2 = fneg float %y
  %cmp = fcmp nnan olt float %neg1, %neg2
  ret i1 %cmp
}

define i1 @unary_fneg_fneg_swap_pred(float %x, float %y) {
; CHECK-LABEL: @unary_fneg_fneg_swap_pred(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp nnan ogt float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %neg1 = fneg float %x
  %neg2 = fsub float -0.0, %y
  %cmp = fcmp nnan olt float %neg1, %neg2
  ret i1 %cmp
}

define i1 @fneg_unary_fneg_swap_pred(float %x, float %y) {
; CHECK-LABEL: @fneg_unary_fneg_swap_pred(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp nnan ogt float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %neg1 = fsub float -0.0, %x
  %neg2 = fneg float %y
  %cmp = fcmp nnan olt float %neg1, %neg2
  ret i1 %cmp
}

define <2 x i1> @fneg_fneg_swap_pred_vec(<2 x float> %x, <2 x float> %y) {
; CHECK-LABEL: @fneg_fneg_swap_pred_vec(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp ninf ogt <2 x float> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %neg1 = fsub <2 x float> <float -0.0, float -0.0>, %x
  %neg2 = fsub <2 x float> <float -0.0, float -0.0>, %y
  %cmp = fcmp ninf olt <2 x float> %neg1, %neg2
  ret <2 x i1> %cmp
}

define <2 x i1> @unary_fneg_unary_fneg_swap_pred_vec(<2 x float> %x, <2 x float> %y) {
; CHECK-LABEL: @unary_fneg_unary_fneg_swap_pred_vec(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp ninf ogt <2 x float> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %neg1 = fneg <2 x float> %x
  %neg2 = fneg <2 x float> %y
  %cmp = fcmp ninf olt <2 x float> %neg1, %neg2
  ret <2 x i1> %cmp
}

define <2 x i1> @unary_fneg_fneg_swap_pred_vec(<2 x float> %x, <2 x float> %y) {
; CHECK-LABEL: @unary_fneg_fneg_swap_pred_vec(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp ninf ogt <2 x float> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %neg1 = fneg <2 x float> %x
  %neg2 = fsub <2 x float> <float -0.0, float -0.0>, %y
  %cmp = fcmp ninf olt <2 x float> %neg1, %neg2
  ret <2 x i1> %cmp
}

define <2 x i1> @fneg_unary_fneg_swap_pred_vec(<2 x float> %x, <2 x float> %y) {
; CHECK-LABEL: @fneg_unary_fneg_swap_pred_vec(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp ninf ogt <2 x float> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %neg1 = fsub <2 x float> <float -0.0, float -0.0>, %x
  %neg2 = fneg <2 x float> %y
  %cmp = fcmp ninf olt <2 x float> %neg1, %neg2
  ret <2 x i1> %cmp
}

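; Undef elements in the -0.0 constants do not block the fold.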
define <2 x i1> @fneg_fneg_swap_pred_vec_undef(<2 x float> %x, <2 x float> %y) {
; CHECK-LABEL: @fneg_fneg_swap_pred_vec_undef(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp ogt <2 x float> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %neg1 = fsub <2 x float> <float -0.0, float undef>, %x
  %neg2 = fsub <2 x float> <float undef, float -0.0>, %y
  %cmp = fcmp olt <2 x float> %neg1, %neg2
  ret <2 x i1> %cmp
}

define <2 x i1> @unary_fneg_fneg_swap_pred_vec_undef(<2 x float> %x, <2 x float> %y) {
; CHECK-LABEL: @unary_fneg_fneg_swap_pred_vec_undef(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp ogt <2 x float> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %neg1 = fneg <2 x float> %x
  %neg2 = fsub <2 x float> <float undef, float -0.0>, %y
  %cmp = fcmp olt <2 x float> %neg1, %neg2
  ret <2 x i1> %cmp
}

define <2 x i1> @fneg_unary_fneg_swap_pred_vec_undef(<2 x float> %x, <2 x float> %y) {
; CHECK-LABEL: @fneg_unary_fneg_swap_pred_vec_undef(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp ogt <2 x float> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %neg1 = fsub <2 x float> <float -0.0, float undef>, %x
  %neg2 = fneg <2 x float> %y
  %cmp = fcmp olt <2 x float> %neg1, %neg2
  ret <2 x i1> %cmp
}

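; The fpext fold also applies when extending to ppc_fp128.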
define i1 @test7(float %x) {
; CHECK-LABEL: @test7(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp ogt float [[X:%.*]], 0.000000e+00
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %ext = fpext float %x to ppc_fp128
  %cmp = fcmp ogt ppc_fp128 %ext, 0xM00000000000000000000000000000000
  ret i1 %cmp
}

define float @test8(float %x) {
; CHECK-LABEL: @test8(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp olt float [[X:%.*]], 0.000000e+00
; CHECK-NEXT:    [[CONV2:%.*]] = uitofp i1 [[CMP]] to float
; CHECK-NEXT:    ret float [[CONV2]]
;
  %conv = fpext float %x to double
  %cmp = fcmp olt double %conv, 0.000000e+00
  %conv1 = zext i1 %cmp to i32
  %conv2 = sitofp i32 %conv1 to float
  ret float %conv2
; Float comparison to zero shouldn't cast to double.
}

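; Compares of fabs(X) against zero: fabs is non-negative or NaN, so each case
; either folds to a constant or simplifies to a compare of X itself (FMF are preserved).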
define i1 @fabs_uge(double %a) {
; CHECK-LABEL: @fabs_uge(
; CHECK-NEXT:    ret i1 true
;
  %call = call double @llvm.fabs.f64(double %a)
  %cmp = fcmp uge double %call, 0.0
  ret i1 %cmp
}

define i1 @fabs_olt(half %a) {
; CHECK-LABEL: @fabs_olt(
; CHECK-NEXT:    ret i1 false
;
  %call = call half @llvm.fabs.f16(half %a)
  %cmp = fcmp olt half %call, 0.0
  ret i1 %cmp
}

define <2 x i1> @fabs_ole(<2 x float> %a) {
; CHECK-LABEL: @fabs_ole(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp ninf oeq <2 x float> [[A:%.*]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %call = call <2 x float> @llvm.fabs.v2f32(<2 x float> %a)
  %cmp = fcmp ninf ole <2 x float> %call, zeroinitializer
  ret <2 x i1> %cmp
}

define <2 x i1> @fabs_ule(<2 x float> %a) {
; CHECK-LABEL: @fabs_ule(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp ninf arcp ueq <2 x float> [[A:%.*]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %call = call <2 x float> @llvm.fabs.v2f32(<2 x float> %a)
  %cmp = fcmp ninf arcp ule <2 x float> %call, zeroinitializer
  ret <2 x i1> %cmp
}

define i1 @fabs_ogt(double %a) {
; CHECK-LABEL: @fabs_ogt(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp reassoc one double [[A:%.*]], 0.000000e+00
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %call = call double @llvm.fabs.f64(double %a)
  %cmp = fcmp reassoc ogt double %call, 0.0
  ret i1 %cmp
}

define i1 @fabs_ugt(double %a) {
; CHECK-LABEL: @fabs_ugt(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp reassoc ninf une double [[A:%.*]], 0.000000e+00
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %call = call double @llvm.fabs.f64(double %a)
  %cmp = fcmp ninf reassoc ugt double %call, 0.0
  ret i1 %cmp
}

define i1 @fabs_oge(double %a) {
; CHECK-LABEL: @fabs_oge(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp afn ord double [[A:%.*]], 0.000000e+00
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %call = call double @llvm.fabs.f64(double %a)
  %cmp = fcmp afn oge double %call, 0.0
  ret i1 %cmp
}

define i1 @fabs_ult(double %a) {
; CHECK-LABEL: @fabs_ult(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp reassoc arcp uno double [[A:%.*]], 0.000000e+00
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %call = call double @llvm.fabs.f64(double %a)
  %cmp = fcmp reassoc arcp ult double %call, 0.0
  ret i1 %cmp
}

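; With nnan, fabs(X) ult 0.0 folds to false.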
define <2 x i1> @fabs_ult_nnan(<2 x float> %a) {
; CHECK-LABEL: @fabs_ult_nnan(
; CHECK-NEXT:    ret <2 x i1> zeroinitializer
;
  %call = call <2 x float> @llvm.fabs.v2f32(<2 x float> %a)
  %cmp = fcmp nnan reassoc arcp ult <2 x float> %call, zeroinitializer
  ret <2 x i1> %cmp
}

define i1 @fabs_une(half %a) {
; CHECK-LABEL: @fabs_une(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp ninf une half [[A:%.*]], 0xH0000
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %call = call half @llvm.fabs.f16(half %a)
  %cmp = fcmp ninf une half %call, 0.0
  ret i1 %cmp
}

define i1 @fabs_oeq(double %a) {
; CHECK-LABEL: @fabs_oeq(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp reassoc ninf oeq double [[A:%.*]], 0.000000e+00
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %call = call double @llvm.fabs.f64(double %a)
  %cmp = fcmp ninf reassoc oeq double %call, 0.0
  ret i1 %cmp
}

define i1 @fabs_one(double %a) {
; CHECK-LABEL: @fabs_one(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp fast one double [[A:%.*]], 0.000000e+00
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %call = call double @llvm.fabs.f64(double %a)
  %cmp = fcmp fast one double %call, 0.0
  ret i1 %cmp
}

define <2 x i1> @fabs_ueq(<2 x float> %a) {
; CHECK-LABEL: @fabs_ueq(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp arcp ueq <2 x float> [[A:%.*]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %call = call <2 x float> @llvm.fabs.v2f32(<2 x float> %a)
  %cmp = fcmp arcp ueq <2 x float> %call, zeroinitializer
  ret <2 x i1> %cmp
}

define <2 x i1> @fabs_ord(<2 x float> %a) {
; CHECK-LABEL: @fabs_ord(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp arcp ord <2 x float> [[A:%.*]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %call = call <2 x float> @llvm.fabs.v2f32(<2 x float> %a)
  %cmp = fcmp arcp ord <2 x float> %call, zeroinitializer
  ret <2 x i1> %cmp
}

define <2 x i1> @fabs_uno(<2 x float> %a) {
; CHECK-LABEL: @fabs_uno(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp arcp uno <2 x float> [[A:%.*]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %call = call <2 x float> @llvm.fabs.v2f32(<2 x float> %a)
  %cmp = fcmp arcp uno <2 x float> %call, zeroinitializer
  ret <2 x i1> %cmp
}

; Don't crash.
define i32 @test17(double %a, double (double)* %p) {
; CHECK-LABEL: @test17(
; CHECK-NEXT:    [[CALL:%.*]] = tail call double [[P:%.*]](double [[A:%.*]])
; CHECK-NEXT:    [[CMP:%.*]] = fcmp ueq double [[CALL]], 0.000000e+00
; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT:    ret i32 [[CONV]]
;
  %call = tail call double %p(double %a)
  %cmp = fcmp ueq double %call, 0.000000e+00
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

; Can fold fcmp with undef on one side by choosing NaN for the undef
define i32 @test18_undef_unordered(float %a) {
; CHECK-LABEL: @test18_undef_unordered(
; CHECK-NEXT:    ret i32 1
;
  %cmp = fcmp ueq float %a, undef
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}
; Can fold fcmp with undef on one side by choosing NaN for the undef
define i32 @test18_undef_ordered(float %a) {
; CHECK-LABEL: @test18_undef_ordered(
; CHECK-NEXT:    ret i32 0
;
  %cmp = fcmp oeq float %a, undef
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

; Can fold fcmp with undef on both sides:
;   fcmp u_pred undef, undef -> true
;   fcmp o_pred undef, undef -> false
; because whatever you choose for the first undef,
; you can choose NaN for the other undef.
define i1 @test19_undef_unordered() {
; CHECK-LABEL: @test19_undef_unordered(
; CHECK-NEXT:    ret i1 true
;
  %cmp = fcmp ueq float undef, undef
  ret i1 %cmp
}

define i1 @test19_undef_ordered() {
; CHECK-LABEL: @test19_undef_ordered(
; CHECK-NEXT:    ret i1 false
;
  %cmp = fcmp oeq float undef, undef
  ret i1 %cmp
}

; Can fold 1.0 / X < 0.0 --> X < 0 with ninf
define i1 @test20_recipX_olt_0(float %X) {
; CHECK-LABEL: @test20_recipX_olt_0(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp ninf olt float [[X:%.*]], 0.000000e+00
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %div = fdiv ninf float 1.0, %X
  %cmp = fcmp ninf olt float %div, 0.0
  ret i1 %cmp
}

; Can fold -2.0 / X <= 0.0 --> X >= 0 with ninf
define i1 @test21_recipX_ole_0(float %X) {
; CHECK-LABEL: @test21_recipX_ole_0(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp ninf oge float [[X:%.*]], 0.000000e+00
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %div = fdiv ninf float -2.0, %X
  %cmp = fcmp ninf ole float %div, 0.0
  ret i1 %cmp
}

; Can fold 2.0 / X > 0.0 --> X > 0 with ninf
define i1 @test22_recipX_ogt_0(float %X) {
; CHECK-LABEL: @test22_recipX_ogt_0(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp ninf ogt float [[X:%.*]], 0.000000e+00
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %div = fdiv ninf float 2.0, %X
  %cmp = fcmp ninf ogt float %div, 0.0
  ret i1 %cmp
}

; Can fold -1.0 / X >= 0.0 --> X <= 0 with ninf
define i1 @test23_recipX_oge_0(float %X) {
; CHECK-LABEL: @test23_recipX_oge_0(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp ninf ole float [[X:%.*]], 0.000000e+00
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %div = fdiv ninf float -1.0, %X
  %cmp = fcmp ninf oge float %div, 0.0
  ret i1 %cmp
}

; Do not fold 2.0 / X > 0.0 when ninf is missing from the fcmp
define i1 @test24_recipX_noninf_cmp(float %X) {
; CHECK-LABEL: @test24_recipX_noninf_cmp(
; CHECK-NEXT:    [[DIV:%.*]] = fdiv ninf float 2.000000e+00, [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = fcmp ogt float [[DIV]], 0.000000e+00
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %div = fdiv ninf float 2.0, %X
  %cmp = fcmp ogt float %div, 0.0
  ret i1 %cmp
}

; Do not fold 2.0 / X > 0.0 when ninf is missing from the fdiv
define i1 @test25_recipX_noninf_div(float %X) {
; CHECK-LABEL: @test25_recipX_noninf_div(
; CHECK-NEXT:    [[DIV:%.*]] = fdiv float 2.000000e+00, [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = fcmp ninf ogt float [[DIV]], 0.000000e+00
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %div = fdiv float 2.0, %X
  %cmp = fcmp ninf ogt float %div, 0.0
  ret i1 %cmp
}

; Do not fold 2.0 / X > 0.0 with unordered predicates
define i1 @test26_recipX_unordered(float %X) {
; CHECK-LABEL: @test26_recipX_unordered(
; CHECK-NEXT:    [[DIV:%.*]] = fdiv ninf float 2.000000e+00, [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = fcmp ninf ugt float [[DIV]], 0.000000e+00
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %div = fdiv ninf float 2.0, %X
  %cmp = fcmp ninf ugt float %div, 0.0
  ret i1 %cmp
}

; Fold <-1.0, -1.0> / X > <-0.0, -0.0>
define <2 x i1> @test27_recipX_gt_vecsplat(<2 x float> %X) {
; CHECK-LABEL: @test27_recipX_gt_vecsplat(
; CHECK-NEXT:    [[CMP:%.*]] = fcmp ninf olt <2 x float> [[X:%.*]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %div = fdiv ninf <2 x float> <float -1.0, float -1.0>, %X
  %cmp = fcmp ninf ogt <2 x float> %div, <float -0.0, float -0.0>
  ret <2 x i1> %cmp
}