; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

; Here we subtract two values, check that the subtraction did not underflow AND
; that the result is non-zero. This can be simplified just to a comparison
; between the base and offset.

declare void @use8(i8)
declare void @use64(i64)
declare void @use1(i1)

declare {i8, i1} @llvm.usub.with.overflow(i8, i8)
declare void @useagg({i8, i1})

declare void @llvm.assume(i1)

; There are a number of base patterns..

; Non-canonical spelling: the no-underflow check compares %adjusted against
; %base (u<=) rather than %base against %offset; still folds to base u> offset.
define i1 @t0_noncanonical_ignoreme(i8 %base, i8 %offset) {
; CHECK-LABEL: @t0_noncanonical_ignoreme(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp uge i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ugt i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[TMP1]]
;
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %no_underflow = icmp ule i8 %adjusted, %base
  call void @use1(i1 %no_underflow)
  %not_null = icmp ne i8 %adjusted, 0
  call void @use1(i1 %not_null)
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}

; Canonical base pattern: (base u>= offset) && (base - offset != 0)
; folds to a single base u> offset comparison.
define i1 @t1(i8 %base, i8 %offset) {
; CHECK-LABEL: @t1(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp uge i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ugt i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[TMP1]]
;
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %no_underflow = icmp uge i8 %base, %offset
  call void @use1(i1 %no_underflow)
  %not_null = icmp ne i8 %adjusted, 0
  call void @use1(i1 %not_null)
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}
; With the strict predicate (base u> offset) the non-null check is implied,
; so the 'and' disappears and the existing icmp is returned directly.
define i1 @t1_strict(i8 %base, i8 %offset) {
; CHECK-LABEL: @t1_strict(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ugt i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT:    ret i1 [[NO_UNDERFLOW]]
;
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %no_underflow = icmp ugt i8 %base, %offset ; same is valid for strict predicate
  call void @use1(i1 %no_underflow)
  %not_null = icmp ne i8 %adjusted, 0
  call void @use1(i1 %not_null)
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}

; Same question asked via @llvm.usub.with.overflow; per the checks the 'and'
; is kept here (no fold fires for the intrinsic form in this configuration).
define i1 @t2(i8 %base, i8 %offset) {
; CHECK-LABEL: @t2(
; CHECK-NEXT:    [[AGG:%.*]] = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 [[BASE:%.*]], i8 [[OFFSET:%.*]])
; CHECK-NEXT:    call void @useagg({ i8, i1 } [[AGG]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = extractvalue { i8, i1 } [[AGG]], 0
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[UNDERFLOW:%.*]] = extractvalue { i8, i1 } [[AGG]], 1
; CHECK-NEXT:    call void @use1(i1 [[UNDERFLOW]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = xor i1 [[UNDERFLOW]], true
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %agg = call {i8, i1} @llvm.usub.with.overflow(i8 %base, i8 %offset)
  call void @useagg({i8, i1} %agg)
  %adjusted = extractvalue {i8, i1} %agg, 0
  call void @use8(i8 %adjusted)
  %underflow = extractvalue {i8, i1} %agg, 1
  call void @use1(i1 %underflow)
  %no_underflow = xor i1 %underflow, -1
  call void @use1(i1 %no_underflow)
  %not_null = icmp ne i8 %adjusted, 0
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}

; Commutativity

; As @t1, but the operands of the no-underflow icmp are swapped
; (offset u<= base); the fold still fires.
define i1 @t3_commutability0(i8 %base, i8 %offset) {
; CHECK-LABEL: @t3_commutability0(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp uge i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ugt i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[TMP1]]
;
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %no_underflow = icmp ule i8 %offset, %base ; swapped
  call void @use1(i1 %no_underflow)
  %not_null = icmp ne i8 %adjusted, 0
  call void @use1(i1 %not_null)
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}
; As @t1, but the operands of the final 'and' are swapped; still folds.
define i1 @t4_commutability1(i8 %base, i8 %offset) {
; CHECK-LABEL: @t4_commutability1(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp uge i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ugt i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[TMP1]]
;
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %no_underflow = icmp uge i8 %base, %offset
  call void @use1(i1 %no_underflow)
  %not_null = icmp ne i8 %adjusted, 0
  call void @use1(i1 %not_null)
  %r = and i1 %no_underflow, %not_null ; swapped
  ret i1 %r
}
; Both the icmp operands and the 'and' operands are swapped; still folds.
define i1 @t5_commutability2(i8 %base, i8 %offset) {
; CHECK-LABEL: @t5_commutability2(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp uge i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ugt i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[TMP1]]
;
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %no_underflow = icmp ule i8 %offset, %base ; swapped
  call void @use1(i1 %no_underflow)
  %not_null = icmp ne i8 %adjusted, 0
  call void @use1(i1 %not_null)
  %r = and i1 %no_underflow, %not_null ; swapped
  ret i1 %r
}

; Intrinsic variant of @t2 with the 'and' operands swapped; per the checks
; the 'and' is kept (no fold fires for this form).
define i1 @t6_commutability(i8 %base, i8 %offset) {
; CHECK-LABEL: @t6_commutability(
; CHECK-NEXT:    [[AGG:%.*]] = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 [[BASE:%.*]], i8 [[OFFSET:%.*]])
; CHECK-NEXT:    call void @useagg({ i8, i1 } [[AGG]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = extractvalue { i8, i1 } [[AGG]], 0
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[UNDERFLOW:%.*]] = extractvalue { i8, i1 } [[AGG]], 1
; CHECK-NEXT:    call void @use1(i1 [[UNDERFLOW]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = xor i1 [[UNDERFLOW]], true
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %agg = call {i8, i1} @llvm.usub.with.overflow(i8 %base, i8 %offset)
  call void @useagg({i8, i1} %agg)
  %adjusted = extractvalue {i8, i1} %agg, 0
  call void @use8(i8 %adjusted)
  %underflow = extractvalue {i8, i1} %agg, 1
  call void @use1(i1 %underflow)
  %no_underflow = xor i1 %underflow, -1
  call void @use1(i1 %no_underflow)
  %not_null = icmp ne i8 %adjusted, 0
  %r = and i1 %no_underflow, %not_null ; swapped
  ret i1 %r
}

; What if we were checking the opposite question, that we either got null,
; or underflow happened?

; Inverted question: (adjusted == 0) || (base u< offset) folds to a single
; base u<= offset comparison.
define i1 @t7(i8 %base, i8 %offset) {
; CHECK-LABEL: @t7(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[UNDERFLOW:%.*]] = icmp ult i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    call void @use1(i1 [[UNDERFLOW]])
; CHECK-NEXT:    [[NULL:%.*]] = icmp eq i8 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NULL]])
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ule i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[TMP1]]
;
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %underflow = icmp ult i8 %base, %offset
  call void @use1(i1 %underflow)
  %null = icmp eq i8 %adjusted, 0
  call void @use1(i1 %null)
  %r = or i1 %null, %underflow
  ret i1 %r
}
; With the non-strict predicate (base u<= offset) the null case is implied,
; so the 'or' disappears and the existing icmp is returned directly.
define i1 @t7_nonstrict(i8 %base, i8 %offset) {
; CHECK-LABEL: @t7_nonstrict(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[UNDERFLOW:%.*]] = icmp ule i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    call void @use1(i1 [[UNDERFLOW]])
; CHECK-NEXT:    [[NULL:%.*]] = icmp eq i8 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NULL]])
; CHECK-NEXT:    ret i1 [[UNDERFLOW]]
;
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %underflow = icmp ule i8 %base, %offset ; same is valid for non-strict predicate
  call void @use1(i1 %underflow)
  %null = icmp eq i8 %adjusted, 0
  call void @use1(i1 %null)
  %r = or i1 %null, %underflow
  ret i1 %r
}

; usub.with.overflow variant of the inverted question; per the checks the
; 'or' is kept (no fold fires for the intrinsic form here).
define i1 @t8(i8 %base, i8 %offset) {
; CHECK-LABEL: @t8(
; CHECK-NEXT:    [[AGG:%.*]] = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 [[BASE:%.*]], i8 [[OFFSET:%.*]])
; CHECK-NEXT:    call void @useagg({ i8, i1 } [[AGG]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = extractvalue { i8, i1 } [[AGG]], 0
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[UNDERFLOW:%.*]] = extractvalue { i8, i1 } [[AGG]], 1
; CHECK-NEXT:    call void @use1(i1 [[UNDERFLOW]])
; CHECK-NEXT:    [[NULL:%.*]] = icmp eq i8 [[ADJUSTED]], 0
; CHECK-NEXT:    [[R:%.*]] = or i1 [[NULL]], [[UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %agg = call {i8, i1} @llvm.usub.with.overflow(i8 %base, i8 %offset)
  call void @useagg({i8, i1} %agg)
  %adjusted = extractvalue {i8, i1} %agg, 0
  call void @use8(i8 %adjusted)
  %underflow = extractvalue {i8, i1} %agg, 1
  call void @use1(i1 %underflow)
  %null = icmp eq i8 %adjusted, 0
  %r = or i1 %null, %underflow
  ret i1 %r
}

; And these patterns also have commutative variants

; As @t7, but the underflow check is spelled as base u< adjusted
; (equivalent swapped form); still folds to base u<= offset.
define i1 @t9_commutative(i8 %base, i8 %offset) {
; CHECK-LABEL: @t9_commutative(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[UNDERFLOW:%.*]] = icmp ult i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    call void @use1(i1 [[UNDERFLOW]])
; CHECK-NEXT:    [[NULL:%.*]] = icmp eq i8 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NULL]])
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ule i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[TMP1]]
;
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %underflow = icmp ult i8 %base, %adjusted ; swapped
  call void @use1(i1 %underflow)
  %null = icmp eq i8 %adjusted, 0
  call void @use1(i1 %null)
  %r = or i1 %null, %underflow
  ret i1 %r
}

;-------------------------------------------------------------------------------

; i64 variant where the offset comes from a nonnull pointer;
; folds to offset u< base.
define i1 @t10(i64 %base, i64* nonnull %offsetptr) {
; CHECK-LABEL: @t10(
; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    call void @use64(i64 [[ADJUSTED]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ule i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i64 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT:    ret i1 [[TMP1]]
;
  %offset = ptrtoint i64* %offsetptr to i64

  %adjusted = sub i64 %base, %offset
  call void @use64(i64 %adjusted)
  %no_underflow = icmp ult i64 %adjusted, %base
  call void @use1(i1 %no_underflow)
  %not_null = icmp ne i64 %adjusted, 0
  call void @use1(i1 %not_null)
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}
; As @t10, but the no-underflow check is spelled as base u> adjusted
; (swapped form); still folds to offset u< base.
define i1 @t11_commutative(i64 %base, i64* nonnull %offsetptr) {
; CHECK-LABEL: @t11_commutative(
; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    call void @use64(i64 [[ADJUSTED]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ule i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i64 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT:    ret i1 [[TMP1]]
;
  %offset = ptrtoint i64* %offsetptr to i64

  %adjusted = sub i64 %base, %offset
  call void @use64(i64 %adjusted)
  %no_underflow = icmp ugt i64 %base, %adjusted ; swapped
  call void @use1(i1 %no_underflow)
  %not_null = icmp ne i64 %adjusted, 0
  call void @use1(i1 %not_null)
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}

; Inverted i64 variant: (adjusted == 0) || (adjusted u>= base)
; folds to offset u>= base.
define i1 @t12(i64 %base, i64* nonnull %offsetptr) {
; CHECK-LABEL: @t12(
; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    call void @use64(i64 [[ADJUSTED]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ugt i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp eq i64 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT:    [[TMP1:%.*]] = icmp uge i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT:    ret i1 [[TMP1]]
;
  %offset = ptrtoint i64* %offsetptr to i64

  %adjusted = sub i64 %base, %offset
  call void @use64(i64 %adjusted)
  %no_underflow = icmp uge i64 %adjusted, %base
  call void @use1(i1 %no_underflow)
  %not_null = icmp eq i64 %adjusted, 0
  call void @use1(i1 %not_null)
  %r = or i1 %not_null, %no_underflow
  ret i1 %r
}
; As @t12, but the underflow check is spelled as base u<= adjusted
; (swapped form); still folds to offset u>= base.
define i1 @t13(i64 %base, i64* nonnull %offsetptr) {
; CHECK-LABEL: @t13(
; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    call void @use64(i64 [[ADJUSTED]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ugt i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp eq i64 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT:    [[TMP1:%.*]] = icmp uge i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT:    ret i1 [[TMP1]]
;
  %offset = ptrtoint i64* %offsetptr to i64

  %adjusted = sub i64 %base, %offset
  call void @use64(i64 %adjusted)
  %no_underflow = icmp ule i64 %base, %adjusted ; swapped
  call void @use1(i1 %no_underflow)
  %not_null = icmp eq i64 %adjusted, 0
  call void @use1(i1 %not_null)
  %r = or i1 %not_null, %no_underflow
  ret i1 %r
}

; Negative test: the strict adjusted u< base form (plain i64 offset) does not
; combine with the non-null check; per the checks the 'and' remains.
define i1 @t14_bad(i64 %base, i64 %offset) {
; CHECK-LABEL: @t14_bad(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use64(i64 [[ADJUSTED]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i64 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i64 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %adjusted = sub i64 %base, %offset
  call void @use64(i64 %adjusted)
  %no_underflow = icmp ult i64 %adjusted, %base
  call void @use1(i1 %no_underflow)
  %not_null = icmp ne i64 %adjusted, 0
  call void @use1(i1 %not_null)
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}

; (adjusted != 0) && (base u<= offset) folds to base u< offset.
define i1 @base_ult_offset(i8 %base, i8 %offset) {
; CHECK-LABEL: @base_ult_offset(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[TMP1]]
;
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ule i8 %base, %offset
  %r = and i1 %no_underflow, %not_null
  ret i1 %r
}
; (adjusted == 0) || (base u> offset) folds to base u>= offset.
define i1 @base_uge_offset(i8 %base, i8 %offset) {
; CHECK-LABEL: @base_uge_offset(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[TMP1:%.*]] = icmp uge i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[TMP1]]
;
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp eq i8 %adjusted, 0
  %no_underflow = icmp ugt i8 %base, %offset
  %r = or i1 %no_underflow, %not_null
  ret i1 %r
}
