1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2; RUN: opt < %s -instcombine -S | FileCheck %s
3
4declare void @use(i32)
5
6; PR1949
7
; (%a + 4) u< 4 only holds when the add wrapped, i.e. %a u> UINT_MAX-4 (-5).
define i1 @test1(i32 %a) {
; CHECK-LABEL: @test1(
; CHECK-NEXT:    [[C:%.*]] = icmp ugt i32 [[A:%.*]], -5
; CHECK-NEXT:    ret i1 [[C]]
;
  %b = add i32 %a, 4
  %c = icmp ult i32 %b, 4
  ret i1 %c
}

; Splat-vector version of the fold above.
define <2 x i1> @test1vec(<2 x i32> %a) {
; CHECK-LABEL: @test1vec(
; CHECK-NEXT:    [[C:%.*]] = icmp ugt <2 x i32> [[A:%.*]], <i32 -5, i32 -5>
; CHECK-NEXT:    ret <2 x i1> [[C]]
;
  %b = add <2 x i32> %a, <i32 4, i32 4>
  %c = icmp ult <2 x i32> %b, <i32 4, i32 4>
  ret <2 x i1> %c
}
27
; Mirror of test1 using sub: (%a - 4) u> -5 folds to %a u< 4.
define i1 @test2(i32 %a) {
; CHECK-LABEL: @test2(
; CHECK-NEXT:    [[C:%.*]] = icmp ult i32 [[A:%.*]], 4
; CHECK-NEXT:    ret i1 [[C]]
;
  %b = sub i32 %a, 4
  %c = icmp ugt i32 %b, -5
  ret i1 %c
}

; Splat-vector version of the fold above.
define <2 x i1> @test2vec(<2 x i32> %a) {
; CHECK-LABEL: @test2vec(
; CHECK-NEXT:    [[C:%.*]] = icmp ult <2 x i32> [[A:%.*]], <i32 4, i32 4>
; CHECK-NEXT:    ret <2 x i1> [[C]]
;
  %b = sub <2 x i32> %a, <i32 4, i32 4>
  %c = icmp ugt <2 x i32> %b, <i32 -5, i32 -5>
  ret <2 x i1> %c
}
47
; Signed variant: the icmp constant 2147483652 wraps to a negative i32, and the
; add+icmp pair still folds to a single signed compare against %a.
define i1 @test3(i32 %a) {
; CHECK-LABEL: @test3(
; CHECK-NEXT:    [[C:%.*]] = icmp sgt i32 [[A:%.*]], 2147483643
; CHECK-NEXT:    ret i1 [[C]]
;
  %b = add i32 %a, 4
  %c = icmp slt i32 %b, 2147483652
  ret i1 %c
}

; Splat-vector version of the fold above.
define <2 x i1> @test3vec(<2 x i32> %a) {
; CHECK-LABEL: @test3vec(
; CHECK-NEXT:    [[C:%.*]] = icmp sgt <2 x i32> [[A:%.*]], <i32 2147483643, i32 2147483643>
; CHECK-NEXT:    ret <2 x i1> [[C]]
;
  %b = add <2 x i32> %a, <i32 4, i32 4>
  %c = icmp slt <2 x i32> %b, <i32 2147483652, i32 2147483652>
  ret <2 x i1> %c
}
67
; Signed add with a wrapping constant plus sge folds to a single slt on %a.
define i1 @test4(i32 %a) {
; CHECK-LABEL: @test4(
; CHECK-NEXT:    [[C:%.*]] = icmp slt i32 [[A:%.*]], -4
; CHECK-NEXT:    ret i1 [[C]]
;
  %b = add i32 %a, 2147483652
  %c = icmp sge i32 %b, 4
  ret i1 %c
}

; The add has a second use (the insertvalue), so it must be kept,
; but the icmp is still rewritten to compare %a directly.
define { i32, i1 } @test4multiuse(i32 %a) {
; CHECK-LABEL: @test4multiuse(
; CHECK-NEXT:    [[B:%.*]] = add nsw i32 [[A:%.*]], -2147483644
; CHECK-NEXT:    [[C:%.*]] = icmp slt i32 [[A]], 2147483640
; CHECK-NEXT:    [[TMP:%.*]] = insertvalue { i32, i1 } undef, i32 [[B]], 0
; CHECK-NEXT:    [[RES:%.*]] = insertvalue { i32, i1 } [[TMP]], i1 [[C]], 1
; CHECK-NEXT:    ret { i32, i1 } [[RES]]
;

  %b = add nsw i32 %a, -2147483644
  %c = icmp slt i32 %b, -4

  %tmp = insertvalue { i32, i1 } undef, i32 %b, 0
  %res = insertvalue { i32, i1 } %tmp, i1 %c, 1

  ret { i32, i1 } %res
}

; Splat-vector version of test4.
define <2 x i1> @test4vec(<2 x i32> %a) {
; CHECK-LABEL: @test4vec(
; CHECK-NEXT:    [[C:%.*]] = icmp slt <2 x i32> [[A:%.*]], <i32 -4, i32 -4>
; CHECK-NEXT:    ret <2 x i1> [[C]]
;
  %b = add <2 x i32> %a, <i32 2147483652, i32 2147483652>
  %c = icmp sge <2 x i32> %b, <i32 4, i32 4>
  ret <2 x i1> %c
}
105
106; icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2), when C - C2 does not overflow.
107; This becomes equality because it's at the limit.
108
; With nsw, (%a + 100) s< -27 means %a s< -127; the only such i8 is -128,
; so the compare becomes an equality with -128.
define i1 @nsw_slt1(i8 %a) {
; CHECK-LABEL: @nsw_slt1(
; CHECK-NEXT:    [[C:%.*]] = icmp eq i8 [[A:%.*]], -128
; CHECK-NEXT:    ret i1 [[C]]
;
  %b = add nsw i8 %a, 100
  %c = icmp slt i8 %b, -27
  ret i1 %c
}

; Splat-vector version of the fold above.
define <2 x i1> @nsw_slt1_splat_vec(<2 x i8> %a) {
; CHECK-LABEL: @nsw_slt1_splat_vec(
; CHECK-NEXT:    [[C:%.*]] = icmp eq <2 x i8> [[A:%.*]], <i8 -128, i8 -128>
; CHECK-NEXT:    ret <2 x i1> [[C]]
;
  %b = add nsw <2 x i8> %a, <i8 100, i8 100>
  %c = icmp slt <2 x i8> %b, <i8 -27, i8 -27>
  ret <2 x i1> %c
}
128
129; icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2), when C - C2 does not overflow.
; This becomes an equality-style comparison (ne, not eq) because it's at the limit.
131
; With nsw, (%a - 100) s< 27 means %a s< 127, i.e. every i8 except 127,
; so the compare becomes an inequality (ne) with 127.
define i1 @nsw_slt2(i8 %a) {
; CHECK-LABEL: @nsw_slt2(
; CHECK-NEXT:    [[C:%.*]] = icmp ne i8 [[A:%.*]], 127
; CHECK-NEXT:    ret i1 [[C]]
;
  %b = add nsw i8 %a, -100
  %c = icmp slt i8 %b, 27
  ret i1 %c
}

; Splat-vector version of the fold above.
define <2 x i1> @nsw_slt2_splat_vec(<2 x i8> %a) {
; CHECK-LABEL: @nsw_slt2_splat_vec(
; CHECK-NEXT:    [[C:%.*]] = icmp ne <2 x i8> [[A:%.*]], <i8 127, i8 127>
; CHECK-NEXT:    ret <2 x i1> [[C]]
;
  %b = add nsw <2 x i8> %a, <i8 -100, i8 -100>
  %c = icmp slt <2 x i8> %b, <i8 27, i8 27>
  ret <2 x i1> %c
}
151
152; icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2), when C - C2 does not overflow.
153; Less than the limit, so the predicate doesn't change.
154
; One away from the limit case of nsw_slt1: the constant is simply adjusted
; and the slt predicate is kept.
define i1 @nsw_slt3(i8 %a) {
; CHECK-LABEL: @nsw_slt3(
; CHECK-NEXT:    [[C:%.*]] = icmp slt i8 [[A:%.*]], -126
; CHECK-NEXT:    ret i1 [[C]]
;
  %b = add nsw i8 %a, 100
  %c = icmp slt i8 %b, -26
  ret i1 %c
}

; icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2), when C - C2 does not overflow.
; Less than the limit, so the predicate doesn't change.

define i1 @nsw_slt4(i8 %a) {
; CHECK-LABEL: @nsw_slt4(
; CHECK-NEXT:    [[C:%.*]] = icmp slt i8 [[A:%.*]], 126
; CHECK-NEXT:    ret i1 [[C]]
;
  %b = add nsw i8 %a, -100
  %c = icmp slt i8 %b, 26
  ret i1 %c
}
177
178; icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2), when C - C2 does not overflow.
179; Try sgt to make sure that works too.
180
; sgt at the limit: (%a - 100) s> 26 with nsw means %a s> 126,
; so the compare becomes an equality with 127.
define i1 @nsw_sgt1(i8 %a) {
; CHECK-LABEL: @nsw_sgt1(
; CHECK-NEXT:    [[C:%.*]] = icmp eq i8 [[A:%.*]], 127
; CHECK-NEXT:    ret i1 [[C]]
;
  %b = add nsw i8 %a, -100
  %c = icmp sgt i8 %b, 26
  ret i1 %c
}

; Splat-vector version of the fold above.
define <2 x i1> @nsw_sgt1_splat_vec(<2 x i8> %a) {
; CHECK-LABEL: @nsw_sgt1_splat_vec(
; CHECK-NEXT:    [[C:%.*]] = icmp eq <2 x i8> [[A:%.*]], <i8 127, i8 127>
; CHECK-NEXT:    ret <2 x i1> [[C]]
;
  %b = add nsw <2 x i8> %a, <i8 -100, i8 -100>
  %c = icmp sgt <2 x i8> %b, <i8 26, i8 26>
  ret <2 x i1> %c
}

; sgt away from the limit: only the constant is adjusted, sgt is kept.
define i1 @nsw_sgt2(i8 %a) {
; CHECK-LABEL: @nsw_sgt2(
; CHECK-NEXT:    [[C:%.*]] = icmp sgt i8 [[A:%.*]], -126
; CHECK-NEXT:    ret i1 [[C]]
;
  %b = add nsw i8 %a, 100
  %c = icmp sgt i8 %b, -26
  ret i1 %c
}

; Splat-vector version of the fold above.
define <2 x i1> @nsw_sgt2_splat_vec(<2 x i8> %a) {
; CHECK-LABEL: @nsw_sgt2_splat_vec(
; CHECK-NEXT:    [[C:%.*]] = icmp sgt <2 x i8> [[A:%.*]], <i8 -126, i8 -126>
; CHECK-NEXT:    ret <2 x i1> [[C]]
;
  %b = add nsw <2 x i8> %a, <i8 100, i8 100>
  %c = icmp sgt <2 x i8> %b, <i8 -26, i8 -26>
  ret <2 x i1> %c
}
220
221; icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2), when C - C2 does not overflow.
222; Comparison with 0 doesn't need special-casing.
223
; (%a + 1) s< 0 with nsw folds to %a s< -1; zero on the RHS needs no special case.
define i1 @slt_zero_add_nsw(i32 %a) {
; CHECK-LABEL: @slt_zero_add_nsw(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[A:%.*]], -1
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = add nsw i32 %a, 1
  %cmp = icmp slt i32 %add, 0
  ret i1 %cmp
}

; The same fold should work with vectors.

define <2 x i1> @slt_zero_add_nsw_splat_vec(<2 x i8> %a) {
; CHECK-LABEL: @slt_zero_add_nsw_splat_vec(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <2 x i8> [[A:%.*]], <i8 -1, i8 -1>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %add = add nsw <2 x i8> %a, <i8 1, i8 1>
  %cmp = icmp slt <2 x i8> %add, zeroinitializer
  ret <2 x i1> %cmp
}
245
246; Test the edges - instcombine should not interfere with simplification to constants.
247; Constant subtraction does not overflow, but this is false.
248
; Just past the limit of nsw_slt1: no nsw i8 can satisfy it, so it
; simplifies to the constant false (and instcombine must not block that).
define i1 @nsw_slt3_ov_no(i8 %a) {
; CHECK-LABEL: @nsw_slt3_ov_no(
; CHECK-NEXT:    ret i1 false
;
  %b = add nsw i8 %a, 100
  %c = icmp slt i8 %b, -28
  ret i1 %c
}

; Test the edges - instcombine should not interfere with simplification to constants.
; Constant subtraction overflows. This is false.

define i1 @nsw_slt4_ov(i8 %a) {
; CHECK-LABEL: @nsw_slt4_ov(
; CHECK-NEXT:    ret i1 false
;
  %b = add nsw i8 %a, 100
  %c = icmp slt i8 %b, -29
  ret i1 %c
}

; Test the edges - instcombine should not interfere with simplification to constants.
; Constant subtraction overflows. This is true.

define i1 @nsw_slt5_ov(i8 %a) {
; CHECK-LABEL: @nsw_slt5_ov(
; CHECK-NEXT:    ret i1 true
;
  %b = add nsw i8 %a, -100
  %c = icmp slt i8 %b, 28
  ret i1 %c
}
281
282; InstCombine should not thwart this opportunity to simplify completely.
283
; Adding the sign-bit constant with nsw forces the result negative,
; so the compare simplifies to the constant true.
define i1 @slt_zero_add_nsw_signbit(i8 %x) {
; CHECK-LABEL: @slt_zero_add_nsw_signbit(
; CHECK-NEXT:    ret i1 true
;
  %y = add nsw i8 %x, -128
  %z = icmp slt i8 %y, 0
  ret i1 %z
}

; InstCombine should not thwart this opportunity to simplify completely.

define i1 @slt_zero_add_nuw_signbit(i8 %x) {
; CHECK-LABEL: @slt_zero_add_nuw_signbit(
; CHECK-NEXT:    ret i1 true
;
  %y = add nuw i8 %x, 128
  %z = icmp slt i8 %y, 0
  ret i1 %z
}
303
; With nuw, unsigned predicates fold the same way: the constant is reduced
; by the added amount. ult/ugt keep their predicate...
define i1 @reduce_add_ult(i32 %in) {
; CHECK-LABEL: @reduce_add_ult(
; CHECK-NEXT:    [[A18:%.*]] = icmp ult i32 [[IN:%.*]], 9
; CHECK-NEXT:    ret i1 [[A18]]
;
  %a6 = add nuw i32 %in, 3
  %a18 = icmp ult i32 %a6, 12
  ret i1 %a18
}

define i1 @reduce_add_ugt(i32 %in) {
; CHECK-LABEL: @reduce_add_ugt(
; CHECK-NEXT:    [[A18:%.*]] = icmp ugt i32 [[IN:%.*]], 9
; CHECK-NEXT:    ret i1 [[A18]]
;
  %a6 = add nuw i32 %in, 3
  %a18 = icmp ugt i32 %a6, 12
  ret i1 %a18
}

; ...while the non-canonical ule/uge forms are also turned into ult/ugt
; with an adjusted constant.
define i1 @reduce_add_ule(i32 %in) {
; CHECK-LABEL: @reduce_add_ule(
; CHECK-NEXT:    [[A18:%.*]] = icmp ult i32 [[IN:%.*]], 10
; CHECK-NEXT:    ret i1 [[A18]]
;
  %a6 = add nuw i32 %in, 3
  %a18 = icmp ule i32 %a6, 12
  ret i1 %a18
}

define i1 @reduce_add_uge(i32 %in) {
; CHECK-LABEL: @reduce_add_uge(
; CHECK-NEXT:    [[A18:%.*]] = icmp ugt i32 [[IN:%.*]], 8
; CHECK-NEXT:    ret i1 [[A18]]
;
  %a6 = add nuw i32 %in, 3
  %a18 = icmp uge i32 %a6, 12
  ret i1 %a18
}
343
; nuw guarantees %a6 u>= 71, so %a6 u< 3 is impossible: constant false.
define i1 @ult_add_ssubov(i32 %in) {
; CHECK-LABEL: @ult_add_ssubov(
; CHECK-NEXT:    ret i1 false
;
  %a6 = add nuw i32 %in, 71
  %a18 = icmp ult i32 %a6, 3
  ret i1 %a18
}

; Negative test: without nuw the add may wrap, so no fold happens.
define i1 @ult_add_nonuw(i8 %in) {
; CHECK-LABEL: @ult_add_nonuw(
; CHECK-NEXT:    [[A6:%.*]] = add i8 [[IN:%.*]], 71
; CHECK-NEXT:    [[A18:%.*]] = icmp ult i8 [[A6]], 12
; CHECK-NEXT:    ret i1 [[A18]]
;
  %a6 = add i8 %in, 71
  %a18 = icmp ult i8 %a6, 12
  ret i1 %a18
}

; Negative test: the add is untouched; only the uge predicate is
; canonicalized to ugt with an adjusted constant.
define i1 @uge_add_nonuw(i32 %in) {
; CHECK-LABEL: @uge_add_nonuw(
; CHECK-NEXT:    [[A6:%.*]] = add i32 [[IN:%.*]], 3
; CHECK-NEXT:    [[A18:%.*]] = icmp ugt i32 [[A6]], 11
; CHECK-NEXT:    ret i1 [[A18]]
;
  %a6 = add i32 %in, 3
  %a18 = icmp uge i32 %a6, 12
  ret i1 %a18
}
374
375; Test unsigned add overflow patterns. The div ops are only here to
376; thwart complexity based canonicalization of the operand order.
377
; x u> (x + y) is the unsigned-add overflow check; it canonicalizes to
; y u> ~x (xor with -1), eliminating the add.
define i1 @op_ugt_sum_commute1(i8 %p1, i8 %p2) {
; CHECK-LABEL: @op_ugt_sum_commute1(
; CHECK-NEXT:    [[X:%.*]] = sdiv i8 42, [[P1:%.*]]
; CHECK-NEXT:    [[Y:%.*]] = sdiv i8 42, [[P2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], -1
; CHECK-NEXT:    [[C:%.*]] = icmp ugt i8 [[Y]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[C]]
;
  %x = sdiv i8 42, %p1
  %y = sdiv i8 42, %p2
  %a = add i8 %x, %y
  %c = icmp ugt i8 %x, %a
  ret i1 %c
}

; Same fold with the add operands commuted, on vectors.
define <2 x i1> @op_ugt_sum_vec_commute2(<2 x i8> %p1, <2 x i8> %p2) {
; CHECK-LABEL: @op_ugt_sum_vec_commute2(
; CHECK-NEXT:    [[X:%.*]] = sdiv <2 x i8> <i8 42, i8 -42>, [[P1:%.*]]
; CHECK-NEXT:    [[Y:%.*]] = sdiv <2 x i8> <i8 42, i8 -42>, [[P2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i8> [[X]], <i8 -1, i8 -1>
; CHECK-NEXT:    [[C:%.*]] = icmp ugt <2 x i8> [[Y]], [[TMP1]]
; CHECK-NEXT:    ret <2 x i1> [[C]]
;
  %x = sdiv <2 x i8> <i8 42, i8 -42>, %p1
  %y = sdiv <2 x i8> <i8 42, i8 -42>, %p2
  %a = add <2 x i8> %y, %x
  %c = icmp ugt <2 x i8> %x, %a
  ret <2 x i1> %c
}
407
; Negative test: the add has a second use (the store), so the overflow-check
; fold is not applied and the compare keeps using the add.
define i1 @sum_ugt_op_uses(i8 %p1, i8 %p2, i8* %p3) {
; CHECK-LABEL: @sum_ugt_op_uses(
; CHECK-NEXT:    [[X:%.*]] = sdiv i8 42, [[P1:%.*]]
; CHECK-NEXT:    [[Y:%.*]] = sdiv i8 42, [[P2:%.*]]
; CHECK-NEXT:    [[A:%.*]] = add nsw i8 [[X]], [[Y]]
; CHECK-NEXT:    store i8 [[A]], i8* [[P3:%.*]], align 1
; CHECK-NEXT:    [[C:%.*]] = icmp ugt i8 [[X]], [[A]]
; CHECK-NEXT:    ret i1 [[C]]
;
  %x = sdiv i8 42, %p1
  %y = sdiv i8 42, %p2
  %a = add i8 %x, %y
  store i8 %a, i8* %p3
  %c = icmp ugt i8 %x, %a
  ret i1 %c
}

; (x + y) u< x is the same overflow check from the other side; it also
; canonicalizes to y u> ~x.
define <2 x i1> @sum_ult_op_vec_commute1(<2 x i8> %p1, <2 x i8> %p2) {
; CHECK-LABEL: @sum_ult_op_vec_commute1(
; CHECK-NEXT:    [[X:%.*]] = sdiv <2 x i8> <i8 42, i8 -42>, [[P1:%.*]]
; CHECK-NEXT:    [[Y:%.*]] = sdiv <2 x i8> <i8 -42, i8 42>, [[P2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i8> [[X]], <i8 -1, i8 -1>
; CHECK-NEXT:    [[C:%.*]] = icmp ugt <2 x i8> [[Y]], [[TMP1]]
; CHECK-NEXT:    ret <2 x i1> [[C]]
;
  %x = sdiv <2 x i8> <i8 42, i8 -42>, %p1
  %y = sdiv <2 x i8> <i8 -42, i8 42>, %p2
  %a = add <2 x i8> %x, %y
  %c = icmp ult <2 x i8> %a, %x
  ret <2 x i1> %c
}
439
; Same overflow-check fold with commuted add operands.
define i1 @sum_ult_op_commute2(i8 %p1, i8 %p2) {
; CHECK-LABEL: @sum_ult_op_commute2(
; CHECK-NEXT:    [[X:%.*]] = sdiv i8 42, [[P1:%.*]]
; CHECK-NEXT:    [[Y:%.*]] = sdiv i8 42, [[P2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], -1
; CHECK-NEXT:    [[C:%.*]] = icmp ugt i8 [[Y]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[C]]
;
  %x = sdiv i8 42, %p1
  %y = sdiv i8 42, %p2
  %a = add i8 %y, %x
  %c = icmp ult i8 %a, %x
  ret i1 %c
}

; Negative test: extra use of the add blocks the fold.
define i1 @sum_ult_op_uses(i8 %x, i8 %y, i8* %p) {
; CHECK-LABEL: @sum_ult_op_uses(
; CHECK-NEXT:    [[A:%.*]] = add i8 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    store i8 [[A]], i8* [[P:%.*]], align 1
; CHECK-NEXT:    [[C:%.*]] = icmp ult i8 [[A]], [[X]]
; CHECK-NEXT:    ret i1 [[C]]
;
  %a = add i8 %y, %x
  store i8 %a, i8* %p
  %c = icmp ult i8 %a, %x
  ret i1 %c
}
467
468; X + Z >s Y + Z -> X > Y if there is no overflow.
; X + Z >s Y + Z -> X > Y if there is no overflow.
define i1 @common_op_nsw(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @common_op_nsw(
; CHECK-NEXT:    [[C:%.*]] = icmp sgt i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret i1 [[C]]
;
  %lhs = add nsw i32 %x, %z
  %rhs = add nsw i32 %y, %z
  %c = icmp sgt i32 %lhs, %rhs
  ret i1 %c
}

; The adds stay alive for their other uses, but the compare still
; drops the common operand.
define i1 @common_op_nsw_extra_uses(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @common_op_nsw_extra_uses(
; CHECK-NEXT:    [[LHS:%.*]] = add nsw i32 [[X:%.*]], [[Z:%.*]]
; CHECK-NEXT:    call void @use(i32 [[LHS]])
; CHECK-NEXT:    [[RHS:%.*]] = add nsw i32 [[Y:%.*]], [[Z]]
; CHECK-NEXT:    call void @use(i32 [[RHS]])
; CHECK-NEXT:    [[C:%.*]] = icmp sgt i32 [[X]], [[Y]]
; CHECK-NEXT:    ret i1 [[C]]
;
  %lhs = add nsw i32 %x, %z
  call void @use(i32 %lhs)
  %rhs = add nsw i32 %y, %z
  call void @use(i32 %rhs)
  %c = icmp sgt i32 %lhs, %rhs
  ret i1 %c
}
496
497; X + Z >u Z + Y -> X > Y if there is no overflow.
; X + Z >u Z + Y -> X > Y if there is no overflow.
define i1 @common_op_nuw(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @common_op_nuw(
; CHECK-NEXT:    [[C:%.*]] = icmp ugt i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret i1 [[C]]
;
  %lhs = add nuw i32 %x, %z
  %rhs = add nuw i32 %z, %y
  %c = icmp ugt i32 %lhs, %rhs
  ret i1 %c
}

; Unsigned variant with extra uses: adds are kept, compare is simplified.
define i1 @common_op_nuw_extra_uses(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @common_op_nuw_extra_uses(
; CHECK-NEXT:    [[LHS:%.*]] = add nuw i32 [[X:%.*]], [[Z:%.*]]
; CHECK-NEXT:    call void @use(i32 [[LHS]])
; CHECK-NEXT:    [[RHS:%.*]] = add nuw i32 [[Z]], [[Y:%.*]]
; CHECK-NEXT:    call void @use(i32 [[RHS]])
; CHECK-NEXT:    [[C:%.*]] = icmp ugt i32 [[X]], [[Y]]
; CHECK-NEXT:    ret i1 [[C]]
;
  %lhs = add nuw i32 %x, %z
  call void @use(i32 %lhs)
  %rhs = add nuw i32 %z, %y
  call void @use(i32 %rhs)
  %c = icmp ugt i32 %lhs, %rhs
  ret i1 %c
}
525
; Commuted operand positions for the common-operand fold, signed...
define i1 @common_op_nsw_commute(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @common_op_nsw_commute(
; CHECK-NEXT:    [[C:%.*]] = icmp slt i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret i1 [[C]]
;
  %lhs = add nsw i32 %z, %x
  %rhs = add nsw i32 %y, %z
  %c = icmp slt i32 %lhs, %rhs
  ret i1 %c
}

; ...and unsigned.
define i1 @common_op_nuw_commute(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @common_op_nuw_commute(
; CHECK-NEXT:    [[C:%.*]] = icmp ult i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret i1 [[C]]
;
  %lhs = add nuw i32 %z, %x
  %rhs = add nuw i32 %z, %y
  %c = icmp ult i32 %lhs, %rhs
  ret i1 %c
}
547
548; X + Y > X -> Y > 0 if there is no overflow.
; X + Y > X -> Y > 0 if there is no overflow.
define i1 @common_op_test29(i32 %x, i32 %y) {
; CHECK-LABEL: @common_op_test29(
; CHECK-NEXT:    [[C:%.*]] = icmp sgt i32 [[Y:%.*]], 0
; CHECK-NEXT:    ret i1 [[C]]
;
  %lhs = add nsw i32 %x, %y
  %c = icmp sgt i32 %lhs, %x
  ret i1 %c
}

; Unsigned: Y u> 0 is further canonicalized to Y != 0.
define i1 @sum_nuw(i32 %x, i32 %y) {
; CHECK-LABEL: @sum_nuw(
; CHECK-NEXT:    [[C:%.*]] = icmp ne i32 [[Y:%.*]], 0
; CHECK-NEXT:    ret i1 [[C]]
;
  %lhs = add nuw i32 %x, %y
  %c = icmp ugt i32 %lhs, %x
  ret i1 %c
}
569
570; X > X + Y -> 0 > Y if there is no overflow.
; X > X + Y -> 0 > Y if there is no overflow.
define i1 @sum_nsw_commute(i32 %x, i32 %y) {
; CHECK-LABEL: @sum_nsw_commute(
; CHECK-NEXT:    [[C:%.*]] = icmp slt i32 [[Y:%.*]], 0
; CHECK-NEXT:    ret i1 [[C]]
;
  %rhs = add nsw i32 %x, %y
  %c = icmp sgt i32 %x, %rhs
  ret i1 %c
}

; Unsigned: 0 u> Y is never true, so this simplifies to false.
define i1 @sum_nuw_commute(i32 %x, i32 %y) {
; CHECK-LABEL: @sum_nuw_commute(
; CHECK-NEXT:    ret i1 false
;
  %rhs = add nuw i32 %x, %y
  %c = icmp ugt i32 %x, %rhs
  ret i1 %c
}
590
591; PR2698 - https://bugs.llvm.org/show_bug.cgi?id=2698
592
593declare void @use1(i1)
594declare void @use8(i8)
595
; Equality is wrap-agnostic: a + x == b + x folds to a == b even without
; nsw/nuw, and both adds become dead.
define void @bzip1(i8 %a, i8 %b, i8 %x) {
; CHECK-LABEL: @bzip1(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    call void @use1(i1 [[CMP]])
; CHECK-NEXT:    ret void
;
  %add1 = add i8 %a, %x
  %add2 = add i8 %b, %x
  %cmp = icmp eq i8 %add1, %add2
  call void @use1(i1 %cmp)
  ret void
}

; One add has another use and is kept; the compare is still simplified.
define void @bzip2(i8 %a, i8 %b, i8 %x) {
; CHECK-LABEL: @bzip2(
; CHECK-NEXT:    [[ADD1:%.*]] = add i8 [[A:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[A]], [[B:%.*]]
; CHECK-NEXT:    call void @use1(i1 [[CMP]])
; CHECK-NEXT:    call void @use8(i8 [[ADD1]])
; CHECK-NEXT:    ret void
;
  %add1 = add i8 %a, %x
  %add2 = add i8 %b, %x
  %cmp = icmp eq i8 %add1, %add2
  call void @use1(i1 %cmp)
  call void @use8(i8 %add1)
  ret void
}
624
; An undef lane in the add constant still allows folding the constants
; into the compare (the undef lane propagates into the new constant).
define <2 x i1> @icmp_eq_add_undef(<2 x i32> %a) {
; CHECK-LABEL: @icmp_eq_add_undef(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i32> [[A:%.*]], <i32 5, i32 undef>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %add = add <2 x i32> %a, <i32 5, i32 undef>
  %cmp = icmp eq <2 x i32> %add, <i32 10, i32 10>
  ret <2 x i1> %cmp
}

; Non-splat add constant also folds: per-lane subtraction 10-5 / 10-6.
define <2 x i1> @icmp_eq_add_non_splat(<2 x i32> %a) {
; CHECK-LABEL: @icmp_eq_add_non_splat(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i32> [[A:%.*]], <i32 5, i32 4>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %add = add <2 x i32> %a, <i32 5, i32 6>
  %cmp = icmp eq <2 x i32> %add, <i32 10, i32 10>
  ret <2 x i1> %cmp
}
644
; Negative test: undef in the COMPARE constant blocks the fold;
; the add and compare are left untouched.
define <2 x i1> @icmp_eq_add_undef2(<2 x i32> %a) {
; CHECK-LABEL: @icmp_eq_add_undef2(
; CHECK-NEXT:    [[ADD:%.*]] = add <2 x i32> [[A:%.*]], <i32 5, i32 5>
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i32> [[ADD]], <i32 10, i32 undef>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %add = add <2 x i32> %a, <i32 5, i32 5>
  %cmp = icmp eq <2 x i32> %add, <i32 10, i32 undef>
  ret <2 x i1> %cmp
}

; Negative test: non-splat COMPARE constant also blocks the fold here.
define <2 x i1> @icmp_eq_add_non_splat2(<2 x i32> %a) {
; CHECK-LABEL: @icmp_eq_add_non_splat2(
; CHECK-NEXT:    [[ADD:%.*]] = add <2 x i32> [[A:%.*]], <i32 5, i32 5>
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i32> [[ADD]], <i32 10, i32 11>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %add = add <2 x i32> %a, <i32 5, i32 5>
  %cmp = icmp eq <2 x i32> %add, <i32 10, i32 11>
  ret <2 x i1> %cmp
}
666