1; RUN: opt -instcombine -S < %s | FileCheck %s
2
3@gp = global i32* null, align 8
4
5declare i8* @malloc(i64) #1
6
7define i1 @compare_global_trivialeq() {
8  %m = call i8* @malloc(i64 4)
9  %bc = bitcast i8* %m to i32*
10  %lgp = load i32*, i32** @gp, align 8
11  %cmp = icmp eq i32* %bc, %lgp
12  ret i1 %cmp
13; CHECK-LABEL: compare_global_trivialeq
14; CHECK: ret i1 false
15}
16
17define i1 @compare_global_trivialne() {
18  %m = call i8* @malloc(i64 4)
19  %bc = bitcast i8* %m to i32*
20  %lgp = load i32*, i32** @gp, align 8
21  %cmp = icmp ne i32* %bc, %lgp
22  ret i1 %cmp
23; CHECK-LABEL: compare_global_trivialne
24; CHECK: ret i1 true
25}
26
27
28; Although the %m is marked nocapture in the deopt operand in call to function f,
29; we cannot remove the alloc site: call to malloc
30; The comparison should fold to false irrespective of whether the call to malloc can be elided or not
31declare void @f()
32define i1 @compare_and_call_with_deopt() {
33; CHECK-LABEL: compare_and_call_with_deopt
34  %m = call i8* @malloc(i64 24)
35  %bc = bitcast i8* %m to i32*
36  %lgp = load i32*, i32** @gp, align 8, !nonnull !0
37  %cmp = icmp eq i32* %lgp, %bc
38  tail call void @f() [ "deopt"(i8* %m) ]
39  ret i1 %cmp
40; CHECK: ret i1 false
41}
42
43; Same functon as above with deopt operand in function f, but comparison is NE
44define i1 @compare_ne_and_call_with_deopt() {
45; CHECK-LABEL: compare_ne_and_call_with_deopt
46  %m = call i8* @malloc(i64 24)
47  %bc = bitcast i8* %m to i32*
48  %lgp = load i32*, i32** @gp, align 8, !nonnull !0
49  %cmp = icmp ne i32* %lgp, %bc
50  tail call void @f() [ "deopt"(i8* %m) ]
51  ret i1 %cmp
52; CHECK: ret i1 true
53}
54
55; Same function as above, but global not marked nonnull, and we cannot fold the comparison
56define i1 @compare_ne_global_maybe_null() {
57; CHECK-LABEL: compare_ne_global_maybe_null
58  %m = call i8* @malloc(i64 24)
59  %bc = bitcast i8* %m to i32*
60  %lgp = load i32*, i32** @gp
61  %cmp = icmp ne i32* %lgp, %bc
62  tail call void @f() [ "deopt"(i8* %m) ]
63  ret i1 %cmp
64; CHECK: ret i1 %cmp
65}
66
67; FIXME: The comparison should fold to false since %m escapes (call to function escape)
68; after the comparison.
69declare void @escape(i8*)
70define i1 @compare_and_call_after() {
71; CHECK-LABEL: compare_and_call_after
72  %m = call i8* @malloc(i64 24)
73  %bc = bitcast i8* %m to i32*
74  %lgp = load i32*, i32** @gp, align 8, !nonnull !0
75  %cmp = icmp eq i32* %bc, %lgp
76  br i1 %cmp, label %escape_call, label %just_return
77
78escape_call:
79 call void @escape(i8* %m)
80 ret i1 true
81
82just_return:
83 ret i1 %cmp
84}
85
86define i1 @compare_distinct_mallocs() {
87  %m = call i8* @malloc(i64 4)
88  %n = call i8* @malloc(i64 4)
89  %cmp = icmp eq i8* %m, %n
90  ret i1 %cmp
91  ; CHECK-LABEL: compare_distinct_mallocs
92  ; CHECK: ret i1 false
93}
94
95; the compare is folded to true since the folding compare looks through bitcasts.
96; call to malloc and the bitcast instructions are elided after that since there are no uses of the malloc
97define i1 @compare_samepointer_under_bitcast() {
98  %m = call i8* @malloc(i64 4)
99  %bc = bitcast i8* %m to i32*
100  %bcback = bitcast i32* %bc to i8*
101  %cmp = icmp eq i8* %m, %bcback
102  ret i1 %cmp
103; CHECK-LABEL: compare_samepointer_under_bitcast
104; CHECK: ret i1 true
105}
106
107; the compare is folded to true since the folding compare looks through bitcasts.
108; The malloc call for %m cannot be elided since it is used in the call to function f.
109define i1 @compare_samepointer_escaped() {
110  %m = call i8* @malloc(i64 4)
111  %bc = bitcast i8* %m to i32*
112  %bcback = bitcast i32* %bc to i8*
113  %cmp = icmp eq i8* %m, %bcback
114  call void @f() [ "deopt"(i8* %m) ]
115  ret i1 %cmp
116; CHECK-LABEL: compare_samepointer_escaped
117; CHECK-NEXT: %m = call i8* @malloc(i64 4)
118; CHECK-NEXT: call void @f() [ "deopt"(i8* %m) ]
119; CHECK: ret i1 true
120}
121
122; Technically, we can fold the %cmp2 comparison, even though %m escapes through
123; the ret statement since `ret` terminates the function and we cannot reach from
124; the ret to cmp.
125; FIXME: Folding this %cmp2 when %m escapes through ret could be an issue with
126; cross-threading data dependencies since we do not make the distinction between
127; atomic and non-atomic loads in capture tracking.
128define i8* @compare_ret_escape(i8* %c) {
129  %m = call i8* @malloc(i64 4)
130  %n = call i8* @malloc(i64 4)
131  %cmp = icmp eq i8* %n, %c
132  br i1 %cmp, label %retst, label %chk
133
134retst:
135  ret i8* %m
136
137chk:
138  %bc = bitcast i8* %m to i32*
139  %lgp = load i32*, i32** @gp, align 8, !nonnull !0
140  %cmp2 = icmp eq i32* %bc, %lgp
141  br i1 %cmp2, label %retst,  label %chk2
142
143chk2:
144  ret i8* %n
145; CHECK-LABEL: compare_ret_escape
146; CHECK: %cmp = icmp eq i8* %n, %c
147; CHECK: %cmp2 = icmp eq i32* %lgp, %bc
148}
149
150; The malloc call for %m cannot be elided since it is used in the call to function f.
151; However, the cmp can be folded to true as %n doesnt escape and %m, %n are distinct allocations
152define i1 @compare_distinct_pointer_escape() {
153  %m = call i8* @malloc(i64 4)
154  %n = call i8* @malloc(i64 4)
155  tail call void @f() [ "deopt"(i8* %m) ]
156  %cmp = icmp ne i8* %m, %n
157  ret i1 %cmp
158; CHECK-LABEL: compare_distinct_pointer_escape
159; CHECK-NEXT: %m = call i8* @malloc(i64 4)
160; CHECK-NEXT: tail call void @f() [ "deopt"(i8* %m) ]
161; CHECK-NEXT: ret i1 true
162}
163
164!0 = !{}
165