1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2; RUN: opt < %s -instcombine -S | FileCheck %s
3
4; Should fold
5;   (%y ^ -1) u>= %x
6; to
7;   @llvm.uadd.with.overflow(%x, %y) + extractvalue + not
8;
9; All tests here have extra uses, to ensure that the pattern isn't perturbed.
10
11declare void @use8(i8)
12declare void @use2x8(<2 x i8>)
13
; Scalar base case: `(xor %y, -1) u>= %x`, with an extra use of the not so the
; xor itself must be kept alive by any fold.
; NOTE(review): the autogenerated checks show the IR unchanged, even though the
; file header says this "should fold" — presumably the checks predate the fold
; being implemented; confirm against the current InstCombine behavior.
define i1 @t0_basic(i8 %x, i8 %y) {
; CHECK-LABEL: @t0_basic(
; CHECK-NEXT:    [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT:    call void @use8(i8 [[T0]])
; CHECK-NEXT:    [[R:%.*]] = icmp uge i8 [[T0]], [[X:%.*]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = xor i8 %y, -1      ; %t0 = ~%y
  call void @use8(i8 %t0)  ; extra use (see file header)
  %r = icmp uge i8 %t0, %x ; the pattern under test: ~%y u>= %x
  ret i1 %r
}
26
; Splat-vector variant of @t0_basic: same pattern with <2 x i8> operands and a
; splat all-ones xor mask.
define <2 x i1> @t1_vec(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @t1_vec(
; CHECK-NEXT:    [[T0:%.*]] = xor <2 x i8> [[Y:%.*]], <i8 -1, i8 -1>
; CHECK-NEXT:    call void @use2x8(<2 x i8> [[T0]])
; CHECK-NEXT:    [[R:%.*]] = icmp uge <2 x i8> [[T0]], [[X:%.*]]
; CHECK-NEXT:    ret <2 x i1> [[R]]
;
  %t0 = xor <2 x i8> %y, <i8 -1, i8 -1> ; splat -1 mask; contrast @n12_vec_nonsplat
  call void @use2x8(<2 x i8> %t0)       ; extra use (see file header)
  %r = icmp uge <2 x i8> %t0, %x
  ret <2 x i1> %r
}
39
40; Commutativity
41
42declare i8 @gen8()
43
; Commuted form: %x is produced by a call so it ends up as the LHS, and the
; predicate is the swapped equivalent (`%x u<= ~%y` instead of `~%y u>= %x`).
define i1 @t2_commutative(i8 %y) {
; CHECK-LABEL: @t2_commutative(
; CHECK-NEXT:    [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT:    call void @use8(i8 [[T0]])
; CHECK-NEXT:    [[X:%.*]] = call i8 @gen8()
; CHECK-NEXT:    [[R:%.*]] = icmp ule i8 [[X]], [[T0]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = xor i8 %y, -1
  call void @use8(i8 %t0)  ; extra use (see file header)
  %x = call i8 @gen8()     ; opaque value so operand order is not canonicalized away
  %r = icmp ule i8 %x, %t0 ; swapped
  ret i1 %r
}
58
59; Extra-use tests
60
; Same pattern as @t0_basic but with no extra use of the not — the only test in
; this file where the xor is free to disappear.
define i1 @t3_no_extrause(i8 %x, i8 %y) {
; CHECK-LABEL: @t3_no_extrause(
; CHECK-NEXT:    [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT:    [[R:%.*]] = icmp uge i8 [[T0]], [[X:%.*]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = xor i8 %y, -1
  %r = icmp uge i8 %t0, %x
  ret i1 %r
}
71
72; Negative tests
73
; Negative test: `ule` with the not on the LHS is not the pattern (the swapped
; `ule` form in @t2_commutative is; here the operands are not swapped).
define i1 @n4_wrong_pred0(i8 %x, i8 %y) {
; CHECK-LABEL: @n4_wrong_pred0(
; CHECK-NEXT:    [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT:    call void @use8(i8 [[T0]])
; CHECK-NEXT:    [[R:%.*]] = icmp ule i8 [[T0]], [[X:%.*]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = xor i8 %y, -1
  call void @use8(i8 %t0)
  %r = icmp ule i8 %t0, %x ; wrong predicate for this operand order
  ret i1 %r
}
86
; Negative test: strict `ugt` instead of `uge` — not the pattern under test.
define i1 @n5_wrong_pred1(i8 %x, i8 %y) {
; CHECK-LABEL: @n5_wrong_pred1(
; CHECK-NEXT:    [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT:    call void @use8(i8 [[T0]])
; CHECK-NEXT:    [[R:%.*]] = icmp ugt i8 [[T0]], [[X:%.*]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = xor i8 %y, -1
  call void @use8(i8 %t0)
  %r = icmp ugt i8 %t0, %x ; wrong predicate: strict comparison
  ret i1 %r
}
99
; Negative test: `eq` is not an unsigned-order predicate — not the pattern.
define i1 @n6_wrong_pred2(i8 %x, i8 %y) {
; CHECK-LABEL: @n6_wrong_pred2(
; CHECK-NEXT:    [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT:    call void @use8(i8 [[T0]])
; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[T0]], [[X:%.*]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = xor i8 %y, -1
  call void @use8(i8 %t0)
  %r = icmp eq i8 %t0, %x ; wrong predicate: equality
  ret i1 %r
}
112
; Negative test: `ne` is not an unsigned-order predicate — not the pattern.
define i1 @n7_wrong_pred3(i8 %x, i8 %y) {
; CHECK-LABEL: @n7_wrong_pred3(
; CHECK-NEXT:    [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT:    call void @use8(i8 [[T0]])
; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[T0]], [[X:%.*]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = xor i8 %y, -1
  call void @use8(i8 %t0)
  %r = icmp ne i8 %t0, %x ; wrong predicate: inequality
  ret i1 %r
}
125
; Negative test: signed `slt` — the pattern requires an unsigned comparison.
define i1 @n8_wrong_pred4(i8 %x, i8 %y) {
; CHECK-LABEL: @n8_wrong_pred4(
; CHECK-NEXT:    [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT:    call void @use8(i8 [[T0]])
; CHECK-NEXT:    [[R:%.*]] = icmp slt i8 [[T0]], [[X:%.*]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = xor i8 %y, -1
  call void @use8(i8 %t0)
  %r = icmp slt i8 %t0, %x ; wrong predicate: signed
  ret i1 %r
}
138
; Negative test: signed `sle` — the pattern requires an unsigned comparison.
define i1 @n9_wrong_pred5(i8 %x, i8 %y) {
; CHECK-LABEL: @n9_wrong_pred5(
; CHECK-NEXT:    [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT:    call void @use8(i8 [[T0]])
; CHECK-NEXT:    [[R:%.*]] = icmp sle i8 [[T0]], [[X:%.*]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = xor i8 %y, -1
  call void @use8(i8 %t0)
  %r = icmp sle i8 %t0, %x ; wrong predicate: signed
  ret i1 %r
}
151
; Negative test: signed `sgt` — the pattern requires an unsigned comparison.
define i1 @n10_wrong_pred6(i8 %x, i8 %y) {
; CHECK-LABEL: @n10_wrong_pred6(
; CHECK-NEXT:    [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT:    call void @use8(i8 [[T0]])
; CHECK-NEXT:    [[R:%.*]] = icmp sgt i8 [[T0]], [[X:%.*]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = xor i8 %y, -1
  call void @use8(i8 %t0)
  %r = icmp sgt i8 %t0, %x ; wrong predicate: signed
  ret i1 %r
}
164
; Negative test: signed `sge` — the pattern requires an unsigned comparison.
define i1 @n11_wrong_pred7(i8 %x, i8 %y) {
; CHECK-LABEL: @n11_wrong_pred7(
; CHECK-NEXT:    [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT:    call void @use8(i8 [[T0]])
; CHECK-NEXT:    [[R:%.*]] = icmp sge i8 [[T0]], [[X:%.*]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = xor i8 %y, -1
  call void @use8(i8 %t0)
  %r = icmp sge i8 %t0, %x ; wrong predicate: signed
  ret i1 %r
}
177
; Negative test: the xor mask is not an all-ones splat (<-1, -2>), so the xor is
; not a `not` of %y lane-wise and the pattern must not match (contrast @t1_vec).
define <2 x i1> @n12_vec_nonsplat(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @n12_vec_nonsplat(
; CHECK-NEXT:    [[T0:%.*]] = xor <2 x i8> [[Y:%.*]], <i8 -1, i8 -2>
; CHECK-NEXT:    call void @use2x8(<2 x i8> [[T0]])
; CHECK-NEXT:    [[R:%.*]] = icmp uge <2 x i8> [[T0]], [[X:%.*]]
; CHECK-NEXT:    ret <2 x i1> [[R]]
;
  %t0 = xor <2 x i8> %y, <i8 -1, i8 -2> ; must be -1.
  call void @use2x8(<2 x i8> %t0)
  %r = icmp uge <2 x i8> %t0, %x
  ret <2 x i1> %r
}
190