1; RUN: opt < %s -inline -inline-threshold=20 -S | FileCheck %s
2
; Trivially small internal callee: a single sdiv. When called with the
; constant arguments from @caller1 it folds to a constant after inlining.
define internal i32 @callee1(i32 %A, i32 %B) {
  %C = sdiv i32 %A, %B
  ret i32 %C
}
7
define i32 @caller1() {
; CHECK-LABEL: define i32 @caller1(
; CHECK-NEXT: ret i32 3
;
; After @callee1 is inlined, sdiv(10, 3) constant-folds to 3 and the whole
; function reduces to a single return.
  %X = call i32 @callee1( i32 10, i32 3 )
  ret i32 %X
}
15
define i32 @caller2() {
; Check that we can constant-prop through instructions after inlining callee21
; to get constants in the inlined callsite to callee22.
; FIXME: Currently, the threshold is fixed at 20 because we don't perform
; *recursive* cost analysis to realize that the nested call site will definitely
; inline and be cheap. We should eventually do that and lower the threshold here
; to 1.
;
; Both @callee21 and @callee22 return i32, so a leftover call would be spelled
; "call i32 @callee2..."; the previous pattern "call void @callee2" could never
; match and made this CHECK-NOT vacuous.
; CHECK-LABEL: @caller2(
; CHECK-NOT: call i32 @callee2
; CHECK: ret

  %x = call i32 @callee21(i32 42, i32 48)
  ret i32 %x
}
31
; Outer callee: computes %y - %x and forwards it to @callee22. With the
; constants from @caller2 the subtraction folds to 6, which the inline cost
; analysis can propagate into the nested call site.
define i32 @callee21(i32 %x, i32 %y) {
  %sub = sub i32 %y, %x
  %result = call i32 @callee22(i32 %sub)
  ret i32 %result
}
37
38declare i8* @getptr()
39
; Inner callee: branches on %x > 42. When the inlined argument is a known
; constant (6 via @callee21), the taken branch is %bb.false and the long
; add chain in %bb.true is provably dead.
define i32 @callee22(i32 %x) {
  %icmp = icmp ugt i32 %x, 42
  br i1 %icmp, label %bb.true, label %bb.false
bb.true:
  ; This block mustn't be counted in the inline cost.
  %x1 = add i32 %x, 1
  %x2 = add i32 %x1, 1
  %x3 = add i32 %x2, 1
  %x4 = add i32 %x3, 1
  %x5 = add i32 %x4, 1
  %x6 = add i32 %x5, 1
  %x7 = add i32 %x6, 1
  %x8 = add i32 %x7, 1

  ret i32 %x8
bb.false:
  ret i32 %x
}
58
define i32 @caller3() {
; Check that even if the expensive path is hidden behind several basic blocks,
; it doesn't count toward the inline cost when constant-prop proves those paths
; dead.
;
; CHECK-LABEL: @caller3(
; CHECK-NOT: call
; CHECK: ret i32 6

; With the constants below, @callee3 computes %sub = 48 - 42 = 6, the
; ugt-42 branch is false, and only %bb.false (ret i32 %sub) survives.
entry:
  %x = call i32 @callee3(i32 42, i32 48)
  ret i32 %x
}
72
; Callee with the expensive code split across a two-level branch tree
; (%bb.true -> %bb.true.true / %bb.true.false). For a known-constant %sub
; that fails the first compare, all of it is dead and only %bb.false runs.
define i32 @callee3(i32 %x, i32 %y) {
  %sub = sub i32 %y, %x
  %icmp = icmp ugt i32 %sub, 42
  br i1 %icmp, label %bb.true, label %bb.false

bb.true:
  %icmp2 = icmp ult i32 %sub, 64
  br i1 %icmp2, label %bb.true.true, label %bb.true.false

bb.true.true:
  ; This block mustn't be counted in the inline cost.
  %x1 = add i32 %x, 1
  %x2 = add i32 %x1, 1
  %x3 = add i32 %x2, 1
  %x4 = add i32 %x3, 1
  %x5 = add i32 %x4, 1
  %x6 = add i32 %x5, 1
  %x7 = add i32 %x6, 1
  %x8 = add i32 %x7, 1
  br label %bb.merge

bb.true.false:
  ; This block mustn't be counted in the inline cost.
  %y1 = add i32 %y, 1
  %y2 = add i32 %y1, 1
  %y3 = add i32 %y2, 1
  %y4 = add i32 %y3, 1
  %y5 = add i32 %y4, 1
  %y6 = add i32 %y5, 1
  %y7 = add i32 %y6, 1
  %y8 = add i32 %y7, 1
  br label %bb.merge

bb.merge:
  %result = phi i32 [ %x8, %bb.true.true ], [ %y8, %bb.true.false ]
  ret i32 %result

bb.false:
  ret i32 %sub
}
113
114declare {i8, i1} @llvm.uadd.with.overflow.i8(i8 %a, i8 %b)
115
define i8 @caller4(i8 %z) {
; Check that we can constant fold through intrinsics such as the
; overflow-detecting arithmetic intrinsics. These are particularly important
; as they are used heavily in standard library code and generic C++ code where
; the arguments are often constant but complete generality is required.
;
; CHECK-LABEL: @caller4(
; CHECK-NOT: call
; CHECK: ret i8 -1

; 254 + 14 overflows i8, so the overflow bit is known true and @callee4
; reduces to the constant -1 path.
entry:
  %x = call i8 @callee4(i8 254, i8 14, i8 %z)
  ret i8 %x
}
130
; Callee branching on the overflow flag of llvm.uadd.with.overflow.i8.
; With constant %x/%y from @caller4 the flag folds, making %bb.false dead.
define i8 @callee4(i8 %x, i8 %y, i8 %z) {
  %uadd = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 %x, i8 %y)
  %o = extractvalue {i8, i1} %uadd, 1
  br i1 %o, label %bb.true, label %bb.false

bb.true:
  ret i8 -1

bb.false:
  ; This block mustn't be counted in the inline cost.
  %z1 = add i8 %z, 1
  %z2 = add i8 %z1, 1
  %z3 = add i8 %z2, 1
  %z4 = add i8 %z3, 1
  %z5 = add i8 %z4, 1
  %z6 = add i8 %z5, 1
  %z7 = add i8 %z6, 1
  %z8 = add i8 %z7, 1
  ret i8 %z8
}
151
define i64 @caller5(i64 %y) {
; Check that we can round trip constants through various kinds of casts etc w/o
; losing track of the constant prop in the inline cost analysis.
;
; CHECK-LABEL: @caller5(
; CHECK-NOT: call
; CHECK: ret i64 -1

; The constant 42 survives @callee5's inttoptr/bitcast/ptrtoint/trunc/zext
; chain, so the equality compare folds to true and only "ret i64 -1" remains.
entry:
  %x = call i64 @callee5(i64 42, i64 %y)
  ret i64 %x
}
164
; Callee that launders %x through a chain of casts before comparing it
; against 42. For the constant argument from @caller5 the compare folds
; to true, leaving the add chain in %bb.false dead.
define i64 @callee5(i64 %x, i64 %y) {
  %inttoptr = inttoptr i64 %x to i8*
  %bitcast = bitcast i8* %inttoptr to i32*
  %ptrtoint = ptrtoint i32* %bitcast to i64
  %trunc = trunc i64 %ptrtoint to i32
  %zext = zext i32 %trunc to i64
  %cmp = icmp eq i64 %zext, 42
  br i1 %cmp, label %bb.true, label %bb.false

bb.true:
  ret i64 -1

bb.false:
  ; This block mustn't be counted in the inline cost.
  %y1 = add i64 %y, 1
  %y2 = add i64 %y1, 1
  %y3 = add i64 %y2, 1
  %y4 = add i64 %y3, 1
  %y5 = add i64 %y4, 1
  %y6 = add i64 %y5, 1
  %y7 = add i64 %y6, 1
  %y8 = add i64 %y7, 1
  ret i64 %y8
}
189
define float @caller6() {
; Check that we can constant-prop through fcmp instructions
;
; CHECK-LABEL: @caller6(
; CHECK-NOT: call
; CHECK: ret

; fcmp ugt 42.0, 42.0 folds to false, so only @callee6's %bb.false path
; (returning the argument) survives after inlining.
  %x = call float @callee6(float 42.0)
  ret float %x
}
199
; Floating-point analogue of @callee22: branches on an fcmp whose result is
; known for the constant argument from @caller6, killing the fadd chain.
define float @callee6(float %x) {
  %icmp = fcmp ugt float %x, 42.0
  br i1 %icmp, label %bb.true, label %bb.false

bb.true:
  ; This block mustn't be counted in the inline cost.
  %x1 = fadd float %x, 1.0
  %x2 = fadd float %x1, 1.0
  %x3 = fadd float %x2, 1.0
  %x4 = fadd float %x3, 1.0
  %x5 = fadd float %x4, 1.0
  %x6 = fadd float %x5, 1.0
  %x7 = fadd float %x6, 1.0
  %x8 = fadd float %x7, 1.0
  ret float %x8

bb.false:
  ret float %x
}
219
220
221
define i32 @PR13412.main() {
; This is a somewhat complicated three layer subprogram that was reported to
; compute the wrong value for a branch due to assuming that an argument
; mid-inline couldn't be equal to another pointer.
;
; After inlining, the branch should point directly to the exit block, not to
; the intermediate block.
; CHECK: @PR13412.main
; CHECK: br i1 true, label %[[TRUE_DEST:.*]], label %[[FALSE_DEST:.*]]
; CHECK: [[FALSE_DEST]]:
; CHECK-NEXT: call void @PR13412.fail()
; CHECK: [[TRUE_DEST]]:
; CHECK-NEXT: ret i32 0

; Both pointer arguments below alias the same alloca, so after inlining
; @PR13412.first the equality compare must fold to true and the call to
; @PR13412.fail() must become unreachable.
entry:
  %i1 = alloca i64
  store i64 0, i64* %i1
  %arraydecay = bitcast i64* %i1 to i32*
  %call = call i1 @PR13412.first(i32* %arraydecay, i32* %arraydecay)
  br i1 %call, label %cond.end, label %cond.false

cond.false:
  call void @PR13412.fail()
  br label %cond.end

cond.end:
  ret i32 0
}
250
; Middle layer: returns whether @PR13412.second picked %b. When %a == %b
; (as in @PR13412.main) either phi incoming value equals %b, so this folds
; to true during inlining.
define internal i1 @PR13412.first(i32* %a, i32* %b) {
entry:
  %call = call i32* @PR13412.second(i32* %a, i32* %b)
  %cmp = icmp eq i32* %call, %b
  ret i1 %cmp
}
257
258declare void @PR13412.fail()
259
; Innermost layer: selects between %a and %b based on the pointer distance
; and element comparison. When %a == %b the distance is 0, %cmp is false,
; and the %if.end3 path returns %b unconditionally.
define internal i32* @PR13412.second(i32* %a, i32* %b) {
entry:
  %sub.ptr.lhs.cast = ptrtoint i32* %b to i64
  %sub.ptr.rhs.cast = ptrtoint i32* %a to i64
  %sub.ptr.sub = sub i64 %sub.ptr.lhs.cast, %sub.ptr.rhs.cast
  %sub.ptr.div = ashr exact i64 %sub.ptr.sub, 2
  %cmp = icmp ugt i64 %sub.ptr.div, 1
  br i1 %cmp, label %if.then, label %if.end3

if.then:
  %0 = load i32, i32* %a
  %1 = load i32, i32* %b
  %cmp1 = icmp eq i32 %0, %1
  br i1 %cmp1, label %return, label %if.end3

if.end3:
  br label %return

return:
  %retval.0 = phi i32* [ %b, %if.end3 ], [ %a, %if.then ]
  ret i32* %retval.0
}
282