; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

declare void @use(i1)

define i1 @PR1817_1(i32 %X) {
; CHECK-LABEL: @PR1817_1(
; CHECK-NEXT:    [[B:%.*]] = icmp ult i32 [[X:%.*]], 10
; CHECK-NEXT:    ret i1 [[B]]
;
  %A = icmp slt i32 %X, 10
  %B = icmp ult i32 %X, 10
  %C = and i1 %A, %B
  ret i1 %C
}

define i1 @PR1817_2(i32 %X) {
; CHECK-LABEL: @PR1817_2(
; CHECK-NEXT:    [[A:%.*]] = icmp slt i32 [[X:%.*]], 10
; CHECK-NEXT:    ret i1 [[A]]
;
  %A = icmp slt i32 %X, 10
  %B = icmp ult i32 %X, 10
  %C = or i1 %A, %B
  ret i1 %C
}

define i1 @PR2330(i32 %a, i32 %b) {
; CHECK-LABEL: @PR2330(
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[B:%.*]], [[A:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i32 [[TMP1]], 8
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %cmp1 = icmp ult i32 %a, 8
  %cmp2 = icmp ult i32 %b, 8
  %and = and i1 %cmp2, %cmp1
  ret i1 %and
}

; If LHSC and RHSC differ only by one bit:
; (X == C1 || X == C2) -> (X & ~(C1 ^ C2)) == C1 (C1 has 1 less set bit)
; PR14708: https://bugs.llvm.org/show_bug.cgi?id=14708
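; Illustrative arithmetic for the constants used below: 50 and 51 differ only in
; bit 0 (50 ^ 51 == 1), so ~(C1 ^ C2) == -2 and the two equalities collapse to
; (X & -2) == 50.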

define i1 @or_eq_with_one_bit_diff_constants1(i32 %x) {
; CHECK-LABEL: @or_eq_with_one_bit_diff_constants1(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], -2
; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 50
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %cmp1 = icmp eq i32 %x, 50
  %cmp2 = icmp eq i32 %x, 51
  %or = or i1 %cmp1, %cmp2
  ret i1 %or
}

; (X != C1 && X != C2) -> (X & ~(C1 ^ C2)) != C1 (C1 has 1 less set bit)
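; Illustrative: X != 51 && X != 50 becomes (X & -2) != 50, again because 51 ^ 50 == 1.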

define i1 @and_ne_with_one_bit_diff_constants1(i32 %x) {
; CHECK-LABEL: @and_ne_with_one_bit_diff_constants1(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], -2
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 50
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %cmp1 = icmp ne i32 %x, 51
  %cmp2 = icmp ne i32 %x, 50
  %and = and i1 %cmp1, %cmp2
  ret i1 %and
}

; The constants are not necessarily off-by-one, just off-by-one-bit.
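; Illustrative: 97 and 65 differ only in bit 5 (97 ^ 65 == 32), so the mask is
; ~32 == -33 and the fold produces (X & -33) == 65.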

define i1 @or_eq_with_one_bit_diff_constants2(i32 %x) {
; CHECK-LABEL: @or_eq_with_one_bit_diff_constants2(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], -33
; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 65
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %cmp1 = icmp eq i32 %x, 97
  %cmp2 = icmp eq i32 %x, 65
  %or = or i1 %cmp1, %cmp2
  ret i1 %or
}

define i1 @and_ne_with_one_bit_diff_constants2(i19 %x) {
; CHECK-LABEL: @and_ne_with_one_bit_diff_constants2(
; CHECK-NEXT:    [[TMP1:%.*]] = and i19 [[X:%.*]], -129
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i19 [[TMP1]], 65
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %cmp1 = icmp ne i19 %x, 65
  %cmp2 = icmp ne i19 %x, 193
  %and = and i1 %cmp1, %cmp2
  ret i1 %and
}

; Make sure the constants are treated as unsigned when comparing them.
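; Illustrative i8 arithmetic: 254 and 126 differ only in bit 7 (254 ^ 126 == 128),
; so the mask is 127 and the comparison is against 126, the constant with fewer
; set bits.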

define i1 @or_eq_with_one_bit_diff_constants3(i8 %x) {
; CHECK-LABEL: @or_eq_with_one_bit_diff_constants3(
; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X:%.*]], 127
; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i8 [[TMP1]], 126
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %cmp1 = icmp eq i8 %x, 254
  %cmp2 = icmp eq i8 %x, 126
  %or = or i1 %cmp1, %cmp2
  ret i1 %or
}

define i1 @and_ne_with_one_bit_diff_constants3(i8 %x) {
; CHECK-LABEL: @and_ne_with_one_bit_diff_constants3(
; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X:%.*]], 127
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i8 [[TMP1]], 65
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %cmp1 = icmp ne i8 %x, 65
  %cmp2 = icmp ne i8 %x, 193
  %and = and i1 %cmp1, %cmp2
  ret i1 %and
}

; Use an 'add' to eliminate an icmp if the constants are off-by-one (not off-by-one-bit).
; (X == 13 | X == 14) -> X-13 <u 2
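; Illustrative: X-13 is 0 when X == 13 and 1 when X == 14, and no other X gives a
; value less than 2, so the pair of equalities becomes one unsigned range check.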

define i1 @or_eq_with_diff_one(i8 %x) {
; CHECK-LABEL: @or_eq_with_diff_one(
; CHECK-NEXT:    [[TMP1:%.*]] = add i8 [[X:%.*]], -13
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], 2
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %cmp1 = icmp eq i8 %x, 13
  %cmp2 = icmp eq i8 %x, 14
  %or = or i1 %cmp1, %cmp2
  ret i1 %or
}

; (X != 40 & X != 39) -> X-39 >u 1
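; Illustrative: X is outside {39, 40} exactly when X-39 is outside {0, 1},
; i.e. (X - 39) >u 1.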

define i1 @and_ne_with_diff_one(i32 %x) {
; CHECK-LABEL: @and_ne_with_diff_one(
; CHECK-NEXT:    [[TMP1:%.*]] = add i32 [[X:%.*]], -39
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ugt i32 [[TMP1]], 1
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %cmp1 = icmp ne i32 %x, 40
  %cmp2 = icmp ne i32 %x, 39
  %and = and i1 %cmp1, %cmp2
  ret i1 %and
}

; Make sure the constants are treated as signed when comparing them.
; PR32524: https://bugs.llvm.org/show_bug.cgi?id=32524
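; Illustrative: with constants 0 and -1, the signed ordering puts -1 first, so the
; fold offsets by +1: (X + 1) <u 2 is true exactly for X in {-1, 0}.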

define i1 @or_eq_with_diff_one_signed(i32 %x) {
; CHECK-LABEL: @or_eq_with_diff_one_signed(
; CHECK-NEXT:    [[TMP1:%.*]] = add i32 [[X:%.*]], 1
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i32 [[TMP1]], 2
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %cmp1 = icmp eq i32 %x, 0
  %cmp2 = icmp eq i32 %x, -1
  %or = or i1 %cmp1, %cmp2
  ret i1 %or
}

define i1 @and_ne_with_diff_one_signed(i64 %x) {
; CHECK-LABEL: @and_ne_with_diff_one_signed(
; CHECK-NEXT:    [[TMP1:%.*]] = add i64 [[X:%.*]], 1
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ugt i64 [[TMP1]], 1
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %cmp1 = icmp ne i64 %x, -1
  %cmp2 = icmp ne i64 %x, 0
  %and = and i1 %cmp1, %cmp2
  ret i1 %and
}

; Vectors with splat constants get the same folds.

define <2 x i1> @or_eq_with_one_bit_diff_constants2_splatvec(<2 x i32> %x) {
; CHECK-LABEL: @or_eq_with_one_bit_diff_constants2_splatvec(
; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[X:%.*]], <i32 -33, i32 -33>
; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq <2 x i32> [[TMP1]], <i32 65, i32 65>
; CHECK-NEXT:    ret <2 x i1> [[TMP2]]
;
  %cmp1 = icmp eq <2 x i32> %x, <i32 97, i32 97>
  %cmp2 = icmp eq <2 x i32> %x, <i32 65, i32 65>
  %or = or <2 x i1> %cmp1, %cmp2
  ret <2 x i1> %or
}

define <2 x i1> @and_ne_with_diff_one_splatvec(<2 x i32> %x) {
; CHECK-LABEL: @and_ne_with_diff_one_splatvec(
; CHECK-NEXT:    [[TMP1:%.*]] = add <2 x i32> [[X:%.*]], <i32 -39, i32 -39>
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ugt <2 x i32> [[TMP1]], <i32 1, i32 1>
; CHECK-NEXT:    ret <2 x i1> [[TMP2]]
;
  %cmp1 = icmp ne <2 x i32> %x, <i32 40, i32 40>
  %cmp2 = icmp ne <2 x i32> %x, <i32 39, i32 39>
  %and = and <2 x i1> %cmp1, %cmp2
  ret <2 x i1> %and
}

; This is a fuzzer-generated test that would assert because
; we'd get into foldAndOfICmps() without running InstSimplify
; on an 'and' that should have been killed. It's not obvious
; why, but removing any instruction hides the bug, hence the long test.

define void @simplify_before_foldAndOfICmps() {
; CHECK-LABEL: @simplify_before_foldAndOfICmps(
; CHECK-NEXT:    [[A8:%.*]] = alloca i16, align 2
; CHECK-NEXT:    [[L7:%.*]] = load i16, i16* [[A8]], align 2
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i16 [[L7]], -1
; CHECK-NEXT:    [[B11:%.*]] = zext i1 [[TMP1]] to i16
; CHECK-NEXT:    [[C10:%.*]] = icmp ugt i16 [[L7]], [[B11]]
; CHECK-NEXT:    [[C5:%.*]] = icmp slt i16 [[L7]], 1
; CHECK-NEXT:    [[C11:%.*]] = icmp ne i16 [[L7]], 0
; CHECK-NEXT:    [[C7:%.*]] = icmp slt i16 [[L7]], 0
; CHECK-NEXT:    [[B15:%.*]] = xor i1 [[C7]], [[C10]]
; CHECK-NEXT:    [[B19:%.*]] = xor i1 [[C11]], [[B15]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i1 [[C10]], [[C5]]
; CHECK-NEXT:    [[C3:%.*]] = and i1 [[TMP2]], [[B19]]
; CHECK-NEXT:    [[TMP3:%.*]] = xor i1 [[C10]], true
; CHECK-NEXT:    [[C18:%.*]] = or i1 [[C7]], [[TMP3]]
; CHECK-NEXT:    [[TMP4:%.*]] = sext i1 [[C3]] to i64
; CHECK-NEXT:    [[G26:%.*]] = getelementptr i1, i1* null, i64 [[TMP4]]
; CHECK-NEXT:    store i16 [[L7]], i16* undef, align 2
; CHECK-NEXT:    store i1 [[C18]], i1* undef, align 1
; CHECK-NEXT:    store i1* [[G26]], i1** undef, align 8
; CHECK-NEXT:    ret void
;
  %A8 = alloca i16
  %L7 = load i16, i16* %A8
  %G21 = getelementptr i16, i16* %A8, i8 -1
  %B11 = udiv i16 %L7, -1
  %G4 = getelementptr i16, i16* %A8, i16 %B11
  %L2 = load i16, i16* %G4
  %L = load i16, i16* %G4
  %B23 = mul i16 %B11, %B11
  %L4 = load i16, i16* %A8
  %B21 = sdiv i16 %L7, %L4
  %B7 = sub i16 0, %B21
  %B18 = mul i16 %B23, %B7
  %C10 = icmp ugt i16 %L, %B11
  %B20 = and i16 %L7, %L2
  %B1 = mul i1 %C10, true
  %C5 = icmp sle i16 %B21, %L
  %C11 = icmp ule i16 %B21, %L
  %C7 = icmp slt i16 %B20, 0
  %B29 = srem i16 %L4, %B18
  %B15 = add i1 %C7, %C10
  %B19 = add i1 %C11, %B15
  %C6 = icmp sge i1 %C11, %B19
  %B33 = or i16 %B29, %L4
  %C13 = icmp uge i1 %C5, %B1
  %C3 = icmp ult i1 %C13, %C6
  store i16 undef, i16* %G21
  %C18 = icmp ule i1 %C10, %C7
  %G26 = getelementptr i1, i1* null, i1 %C3
  store i16 %B33, i16* undef
  store i1 %C18, i1* undef
  store i1* %G26, i1** undef
  ret void
}

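; The PR42691 tests check that a relational compare combined with an eq/ne of a
; value at (or adjacent to) the range boundary folds into a single range check
; (one icmp, possibly preceded by an add offset).
; PR42691: https://bugs.llvm.org/show_bug.cgi?id=42691
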
define i1 @PR42691_1(i32 %x) {
; CHECK-LABEL: @PR42691_1(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ugt i32 [[X:%.*]], 2147483646
; CHECK-NEXT:    ret i1 [[TMP1]]
;
  %c1 = icmp slt i32 %x, 0
  %c2 = icmp eq i32 %x, 2147483647
  %c = or i1 %c1, %c2
  ret i1 %c
}

define i1 @PR42691_2(i32 %x) {
; CHECK-LABEL: @PR42691_2(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i32 [[X:%.*]], -2
; CHECK-NEXT:    ret i1 [[TMP1]]
;
  %c1 = icmp ult i32 %x, 2147483648
  %c2 = icmp eq i32 %x, 4294967295
  %c = or i1 %c1, %c2
  ret i1 %c
}

define i1 @PR42691_3(i32 %x) {
; CHECK-LABEL: @PR42691_3(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i32 [[X:%.*]], -2147483647
; CHECK-NEXT:    ret i1 [[TMP1]]
;
  %c1 = icmp sge i32 %x, 0
  %c2 = icmp eq i32 %x, -2147483648
  %c = or i1 %c1, %c2
  ret i1 %c
}

define i1 @PR42691_4(i32 %x) {
; CHECK-LABEL: @PR42691_4(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i32 [[X:%.*]], 1
; CHECK-NEXT:    ret i1 [[TMP1]]
;
  %c1 = icmp uge i32 %x, 2147483648
  %c2 = icmp eq i32 %x, 0
  %c = or i1 %c1, %c2
  ret i1 %c
}

define i1 @PR42691_5(i32 %x) {
; CHECK-LABEL: @PR42691_5(
; CHECK-NEXT:    [[X_OFF:%.*]] = add i32 [[X:%.*]], -1
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ugt i32 [[X_OFF]], 2147483645
; CHECK-NEXT:    ret i1 [[TMP1]]
;
  %c1 = icmp slt i32 %x, 1
  %c2 = icmp eq i32 %x, 2147483647
  %c = or i1 %c1, %c2
  ret i1 %c
}

define i1 @PR42691_6(i32 %x) {
; CHECK-LABEL: @PR42691_6(
; CHECK-NEXT:    [[X_OFF:%.*]] = add i32 [[X:%.*]], 2147483647
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ugt i32 [[X_OFF]], 2147483645
; CHECK-NEXT:    ret i1 [[TMP1]]
;
  %c1 = icmp ult i32 %x, 2147483649
  %c2 = icmp eq i32 %x, 4294967295
  %c = or i1 %c1, %c2
  ret i1 %c
}

define i1 @PR42691_7(i32 %x) {
; CHECK-LABEL: @PR42691_7(
; CHECK-NEXT:    [[TMP1:%.*]] = add i32 [[X:%.*]], -1
; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %c1 = icmp uge i32 %x, 2147483649
  %c2 = icmp eq i32 %x, 0
  %c = or i1 %c1, %c2
  ret i1 %c
}

define i1 @PR42691_8(i32 %x) {
; CHECK-LABEL: @PR42691_8(
; CHECK-NEXT:    [[X_OFF:%.*]] = add i32 [[X:%.*]], 2147483647
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i32 [[X_OFF]], -2147483635
; CHECK-NEXT:    ret i1 [[TMP1]]
;
  %c1 = icmp slt i32 %x, 14
  %c2 = icmp ne i32 %x, -2147483648
  %c = and i1 %c1, %c2
  ret i1 %c
}

define i1 @PR42691_9(i32 %x) {
; CHECK-LABEL: @PR42691_9(
; CHECK-NEXT:    [[X_OFF:%.*]] = add i32 [[X:%.*]], -14
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i32 [[X_OFF]], 2147483633
; CHECK-NEXT:    ret i1 [[TMP1]]
;
  %c1 = icmp sgt i32 %x, 13
  %c2 = icmp ne i32 %x, 2147483647
  %c = and i1 %c1, %c2
  ret i1 %c
}

define i1 @PR42691_10(i32 %x) {
; CHECK-LABEL: @PR42691_10(
; CHECK-NEXT:    [[X_OFF:%.*]] = add i32 [[X:%.*]], -14
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i32 [[X_OFF]], -15
; CHECK-NEXT:    ret i1 [[TMP1]]
;
  %c1 = icmp ugt i32 %x, 13
  %c2 = icmp ne i32 %x, 4294967295
  %c = and i1 %c1, %c2
  ret i1 %c
}

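; In the substitute_constant tests below, one operand of the logic op is an
; equality with a constant (X == C or X != C), so the constant can be
; substituted for X in the other compare against a variable.
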
define i1 @substitute_constant_and_eq_eq(i8 %x, i8 %y) {
; CHECK-LABEL: @substitute_constant_and_eq_eq(
; CHECK-NEXT:    [[C1:%.*]] = icmp eq i8 [[X:%.*]], 42
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT:    [[TMP2:%.*]] = and i1 [[C1]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %c1 = icmp eq i8 %x, 42
  %c2 = icmp eq i8 %x, %y
  %r = and i1 %c1, %c2
  ret i1 %r
}

define i1 @substitute_constant_and_eq_eq_commute(i8 %x, i8 %y) {
; CHECK-LABEL: @substitute_constant_and_eq_eq_commute(
; CHECK-NEXT:    [[C1:%.*]] = icmp eq i8 [[X:%.*]], 42
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT:    [[TMP2:%.*]] = and i1 [[C1]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %c1 = icmp eq i8 %x, 42
  %c2 = icmp eq i8 %x, %y
  %r = and i1 %c2, %c1
  ret i1 %r
}

define i1 @substitute_constant_and_eq_ugt_swap(i8 %x, i8 %y) {
; CHECK-LABEL: @substitute_constant_and_eq_ugt_swap(
; CHECK-NEXT:    [[C1:%.*]] = icmp eq i8 [[X:%.*]], 42
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ugt i8 [[Y:%.*]], 42
; CHECK-NEXT:    [[TMP2:%.*]] = and i1 [[C1]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %c1 = icmp eq i8 %x, 42
  %c2 = icmp ugt i8 %y, %x
  %r = and i1 %c2, %c1
  ret i1 %r
}

define <2 x i1> @substitute_constant_and_eq_ne_vec(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @substitute_constant_and_eq_ne_vec(
; CHECK-NEXT:    [[C1:%.*]] = icmp eq <2 x i8> [[X:%.*]], <i8 42, i8 97>
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ne <2 x i8> [[Y:%.*]], <i8 42, i8 97>
; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i1> [[C1]], [[TMP1]]
; CHECK-NEXT:    ret <2 x i1> [[TMP2]]
;
  %c1 = icmp eq <2 x i8> %x, <i8 42, i8 97>
  %c2 = icmp ne <2 x i8> %x, %y
  %r = and <2 x i1> %c1, %c2
  ret <2 x i1> %r
}

define i1 @substitute_constant_and_eq_sgt_use(i8 %x, i8 %y) {
; CHECK-LABEL: @substitute_constant_and_eq_sgt_use(
; CHECK-NEXT:    [[C1:%.*]] = icmp eq i8 [[X:%.*]], 42
; CHECK-NEXT:    call void @use(i1 [[C1]])
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i8 [[Y:%.*]], 42
; CHECK-NEXT:    [[TMP2:%.*]] = and i1 [[C1]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %c1 = icmp eq i8 %x, 42
  call void @use(i1 %c1)
  %c2 = icmp sgt i8 %x, %y
  %r = and i1 %c2, %c1
  ret i1 %r
}

; Negative test - extra use

define i1 @substitute_constant_and_eq_sgt_use2(i8 %x, i8 %y) {
; CHECK-LABEL: @substitute_constant_and_eq_sgt_use2(
; CHECK-NEXT:    [[C1:%.*]] = icmp eq i8 [[X:%.*]], 42
; CHECK-NEXT:    [[C2:%.*]] = icmp sgt i8 [[X]], [[Y:%.*]]
; CHECK-NEXT:    call void @use(i1 [[C2]])
; CHECK-NEXT:    [[R:%.*]] = and i1 [[C2]], [[C1]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %c1 = icmp eq i8 %x, 42
  %c2 = icmp sgt i8 %x, %y
  call void @use(i1 %c2)
  %r = and i1 %c2, %c1
  ret i1 %r
}

; Extra use does not prevent transform if the expression simplifies:
; X == MAX && X < Y --> false
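; (With i8, MAX is 127: no signed i8 value is greater than 127, so when X == 127
; the compare X < Y is false and the whole 'and' folds to false.)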

define i1 @slt_and_max(i8 %x, i8 %y)  {
; CHECK-LABEL: @slt_and_max(
; CHECK-NEXT:    [[C2:%.*]] = icmp slt i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    call void @use(i1 [[C2]])
; CHECK-NEXT:    ret i1 false
;
  %c1 = icmp eq i8 %x, 127
  %c2 = icmp slt i8 %x, %y
  call void @use(i1 %c2)
  %r = and i1 %c2, %c1
  ret i1 %r
}

; Extra use does not prevent transform if the expression simplifies:
; X == MAX && X >= Y --> X == MAX
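; (If X == 127, then X >= Y holds for every i8 Y, so the second compare adds no
; information and the result is just X == 127.)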

define i1 @sge_and_max(i8 %x, i8 %y)  {
; CHECK-LABEL: @sge_and_max(
; CHECK-NEXT:    [[C1:%.*]] = icmp eq i8 [[X:%.*]], 127
; CHECK-NEXT:    [[C2:%.*]] = icmp sge i8 [[X]], [[Y:%.*]]
; CHECK-NEXT:    call void @use(i1 [[C2]])
; CHECK-NEXT:    ret i1 [[C1]]
;
  %c1 = icmp eq i8 %x, 127
  %c2 = icmp sge i8 %x, %y
  call void @use(i1 %c2)
  %r = and i1 %c2, %c1
  ret i1 %r
}

define i1 @substitute_constant_and_ne_ugt_swap(i8 %x, i8 %y) {
; CHECK-LABEL: @substitute_constant_and_ne_ugt_swap(
; CHECK-NEXT:    [[C1:%.*]] = icmp ne i8 [[X:%.*]], 42
; CHECK-NEXT:    [[C2:%.*]] = icmp ugt i8 [[Y:%.*]], [[X]]
; CHECK-NEXT:    [[R:%.*]] = and i1 [[C2]], [[C1]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %c1 = icmp ne i8 %x, 42
  %c2 = icmp ugt i8 %y, %x
  %r = and i1 %c2, %c1
  ret i1 %r
}

define i1 @substitute_constant_or_ne_swap_sle(i8 %x, i8 %y) {
; CHECK-LABEL: @substitute_constant_or_ne_swap_sle(
; CHECK-NEXT:    [[C1:%.*]] = icmp ne i8 [[X:%.*]], 42
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i8 [[Y:%.*]], 43
; CHECK-NEXT:    [[TMP2:%.*]] = or i1 [[C1]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %c1 = icmp ne i8 %x, 42
  %c2 = icmp sle i8 %y, %x
  %r = or i1 %c1, %c2
  ret i1 %r
}

define i1 @substitute_constant_or_ne_uge_commute(i8 %x, i8 %y) {
; CHECK-LABEL: @substitute_constant_or_ne_uge_commute(
; CHECK-NEXT:    [[C1:%.*]] = icmp ne i8 [[X:%.*]], 42
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i8 [[Y:%.*]], 43
; CHECK-NEXT:    [[TMP2:%.*]] = or i1 [[C1]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %c1 = icmp ne i8 %x, 42
  %c2 = icmp uge i8 %x, %y
  %r = or i1 %c2, %c1
  ret i1 %r
}

; Negative test - not safe to substitute vector constant with undef element

define <2 x i1> @substitute_constant_or_ne_slt_swap_vec(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @substitute_constant_or_ne_slt_swap_vec(
; CHECK-NEXT:    [[C1:%.*]] = icmp ne <2 x i8> [[X:%.*]], <i8 42, i8 undef>
; CHECK-NEXT:    [[C2:%.*]] = icmp slt <2 x i8> [[Y:%.*]], [[X]]
; CHECK-NEXT:    [[R:%.*]] = or <2 x i1> [[C1]], [[C2]]
; CHECK-NEXT:    ret <2 x i1> [[R]]
;
  %c1 = icmp ne <2 x i8> %x, <i8 42, i8 undef>
  %c2 = icmp slt <2 x i8> %y, %x
  %r = or <2 x i1> %c1, %c2
  ret <2 x i1> %r
}

define i1 @substitute_constant_or_eq_swap_ne(i8 %x, i8 %y) {
; CHECK-LABEL: @substitute_constant_or_eq_swap_ne(
; CHECK-NEXT:    [[C1:%.*]] = icmp eq i8 [[X:%.*]], 42
; CHECK-NEXT:    [[C2:%.*]] = icmp ne i8 [[Y:%.*]], [[X]]
; CHECK-NEXT:    [[R:%.*]] = or i1 [[C1]], [[C2]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %c1 = icmp eq i8 %x, 42
  %c2 = icmp ne i8 %y, %x
  %r = or i1 %c1, %c2
  ret i1 %r
}

define i1 @substitute_constant_or_ne_sge_use(i8 %x, i8 %y) {
; CHECK-LABEL: @substitute_constant_or_ne_sge_use(
; CHECK-NEXT:    [[C1:%.*]] = icmp ne i8 [[X:%.*]], 42
; CHECK-NEXT:    call void @use(i1 [[C1]])
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i8 [[Y:%.*]], 43
; CHECK-NEXT:    [[TMP2:%.*]] = or i1 [[C1]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %c1 = icmp ne i8 %x, 42
  call void @use(i1 %c1)
  %c2 = icmp sge i8 %x, %y
  %r = or i1 %c2, %c1
  ret i1 %r
}

; Negative test - extra use

define i1 @substitute_constant_or_ne_ule_use2(i8 %x, i8 %y) {
; CHECK-LABEL: @substitute_constant_or_ne_ule_use2(
; CHECK-NEXT:    [[C1:%.*]] = icmp ne i8 [[X:%.*]], 42
; CHECK-NEXT:    [[C2:%.*]] = icmp ule i8 [[X]], [[Y:%.*]]
; CHECK-NEXT:    call void @use(i1 [[C2]])
; CHECK-NEXT:    [[R:%.*]] = or i1 [[C2]], [[C1]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %c1 = icmp ne i8 %x, 42
  %c2 = icmp ule i8 %x, %y
  call void @use(i1 %c2)
  %r = or i1 %c2, %c1
  ret i1 %r
}