1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2; RUN: opt < %s -instcombine -S | FileCheck %s
3
; Helper declarations: @use8/@use1 give values an extra opaque use so they
; are not trivially dead; @llvm.assume feeds known-bits facts to InstCombine.
declare void @use8(i8)

declare void @use1(i1)
declare void @llvm.assume(i1)
8
; Here we don't know that at least one of the values being added is non-zero,
; so the fold must not happen.
; Negative test: with no assume, neither operand of the add is known
; non-zero, so the "!= 0 && u< base" pair must stay as two icmps + and.
define i1 @t0_bad(i8 %base, i8 %offset) {
; CHECK-LABEL: @t0_bad(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %adjusted = add i8 %base, %offset
  ; extra use keeps %adjusted from being folded away entirely
  call void @use8(i8 %adjusted)
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ult i8 %adjusted, %base
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}
26
27; Ok, base is non-zero.
; The assume makes %base negative (sign bit set), hence known non-zero, so
; the two icmps fold into a single compare: (0 - %base) u< %offset.
define i1 @t1(i8 %base, i8 %offset) {
; CHECK-LABEL: @t1(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ult i8 %adjusted, %base
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}
48
49; Ok, offset is non-zero.
; Same fold, but the non-zero fact comes from %offset instead of %base;
; the folded compare is built from %offset: (0 - %offset) u< %base.
define i1 @t2(i8 %base, i8 %offset) {
; CHECK-LABEL: @t2(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %cmp = icmp slt i8 %offset, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ult i8 %adjusted, %base
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}
70
; We need to produce an extra instruction, so one of the icmps must go away.
; One-use check: %not_null has an extra use, but the fold still fires
; because the other icmp (%no_underflow) goes away, so the instruction
; count does not increase.
define i1 @t3_oneuse0(i8 %base, i8 %offset) {
; CHECK-LABEL: @t3_oneuse0(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp ne i8 %adjusted, 0
  call void @use1(i1 %not_null)
  %no_underflow = icmp ult i8 %adjusted, %base
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}
; One-use check, mirrored: here %no_underflow has the extra use; the fold
; still fires because %not_null goes away instead.
define i1 @t4_oneuse1(i8 %base, i8 %offset) {
; CHECK-LABEL: @t4_oneuse1(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ult i8 %adjusted, %base
  call void @use1(i1 %no_underflow)
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}
; Negative one-use test: BOTH icmps have extra uses, so neither can be
; removed; folding would add a net instruction, hence no transform.
define i1 @t5_oneuse2_bad(i8 %base, i8 %offset) {
; CHECK-LABEL: @t5_oneuse2_bad(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp ne i8 %adjusted, 0
  call void @use1(i1 %not_null)
  %no_underflow = icmp ult i8 %adjusted, %base
  call void @use1(i1 %no_underflow)
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}
143
; Commutativity: the operands of the final 'and' are swapped relative to
; @t1; the fold must still be recognized.
define i1 @t6_commutativity0(i8 %base, i8 %offset) {
; CHECK-LABEL: @t6_commutativity0(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ult i8 %adjusted, %base
  %r = and i1 %no_underflow, %not_null ; swapped
  ret i1 %r
}
; Commutativity: the underflow icmp is written with swapped operands
; (ugt %base, %adjusted instead of ult %adjusted, %base); same fold expected.
define i1 @t7_commutativity1(i8 %base, i8 %offset) {
; CHECK-LABEL: @t7_commutativity1(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ugt i8 %base, %adjusted ; swapped
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}
; Commutativity: both the underflow icmp operands AND the 'and' operands
; are swapped; the fold must still be recognized.
define i1 @t7_commutativity3(i8 %base, i8 %offset) {
; CHECK-LABEL: @t7_commutativity3(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ugt i8 %base, %adjusted ; swapped
  %r = and i1 %no_underflow, %not_null ; swapped
  ret i1 %r
}
204
; We could ask the opposite question: did we get null, or did underflow happen?
; Inverted form: "== 0 || u>= base" (De Morgan of the @t1 pattern) folds
; to the inverted compare: (0 - %base) u>= %offset.
define i1 @t8(i8 %base, i8 %offset) {
; CHECK-LABEL: @t8(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp uge i8 [[TMP1]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp eq i8 %adjusted, 0
  %no_underflow = icmp uge i8 %adjusted, %base
  %r = or i1 %not_null, %no_underflow
  ret i1 %r
}
226
227; The comparison can be with any of the values being added.
; The underflow icmp compares %adjusted against %offset (the other addend)
; rather than %base; the fold still applies per the CHECK lines.
define i1 @t9(i8 %base, i8 %offset) {
; CHECK-LABEL: @t9(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ult i8 %adjusted, %offset
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}
248