; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -instsimplify -S | FileCheck %s

; Here we subtract two values, check that the subtraction did not overflow AND
; that the result is non-zero. This can be simplified just to a comparison
; between the base and offset.
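; Note: %offsetptr is 'nonnull', so %offset is known to be non-zero; that is
; what makes the not-null check on %adjusted redundant here (contrast with
; @t4_bad below, where no such guarantee exists).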

define i1 @t0(i64 %base, i64* nonnull %offsetptr) {
; CHECK-LABEL: @t0(
; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp uge i64 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    ret i1 [[NO_UNDERFLOW]]
;
  %offset = ptrtoint i64* %offsetptr to i64

  %adjusted = sub i64 %base, %offset
  %no_underflow = icmp uge i64 %adjusted, %base
  %not_null = icmp ne i64 %adjusted, 0
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}

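; Same pattern as @t0, but with both conditions logically negated and combined
; with 'or' instead of 'and'.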
define i1 @t1(i64 %base, i64* nonnull %offsetptr) {
; CHECK-LABEL: @t1(
; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i64 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    ret i1 [[NO_UNDERFLOW]]
;
  %offset = ptrtoint i64* %offsetptr to i64

  %adjusted = sub i64 %base, %offset
  %no_underflow = icmp ult i64 %adjusted, %base
  %not_null = icmp eq i64 %adjusted, 0
  %r = or i1 %not_null, %no_underflow
  ret i1 %r
}

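; Same as @t0, but with the operands of the no-underflow comparison swapped.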
define i1 @t2_commutative(i64 %base, i64* nonnull %offsetptr) {
; CHECK-LABEL: @t2_commutative(
; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ule i64 [[BASE]], [[ADJUSTED]]
; CHECK-NEXT:    ret i1 [[NO_UNDERFLOW]]
;
  %offset = ptrtoint i64* %offsetptr to i64

  %adjusted = sub i64 %base, %offset
  %no_underflow = icmp ule i64 %base, %adjusted
  %not_null = icmp ne i64 %adjusted, 0
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}

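; Same as @t1, but with the operands of the no-underflow comparison swapped.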
define i1 @t3_commutative(i64 %base, i64* nonnull %offsetptr) {
; CHECK-LABEL: @t3_commutative(
; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ugt i64 [[BASE]], [[ADJUSTED]]
; CHECK-NEXT:    ret i1 [[NO_UNDERFLOW]]
;
  %offset = ptrtoint i64* %offsetptr to i64

  %adjusted = sub i64 %base, %offset
  %no_underflow = icmp ugt i64 %base, %adjusted
  %not_null = icmp eq i64 %adjusted, 0
  %r = or i1 %not_null, %no_underflow
  ret i1 %r
}

; We don't know that offset is non-zero, so we can't fold.
define i1 @t4_bad(i64 %base, i64 %offset) {
; CHECK-LABEL: @t4_bad(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp uge i64 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i64 [[ADJUSTED]], 0
; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %adjusted = sub i64 %base, %offset
  %no_underflow = icmp uge i64 %adjusted, %base
  %not_null = icmp ne i64 %adjusted, 0
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}