; RUN: opt < %s -inline -S | FileCheck %s

; We have to apply the less restrictive TailCallKind of the call site being
; inlined and any call sites cloned into the caller.

; No tail marker after inlining, since test_capture_c captures an alloca.
; CHECK: define void @test_capture_a(
; CHECK-NOT: tail
; CHECK: call void @test_capture_c(

declare void @test_capture_c(i32*)
define internal void @test_capture_b(i32* %P) {
  tail call void @test_capture_c(i32* %P)
  ret void
}
define void @test_capture_a() {
  %A = alloca i32  		; captured by test_capture_b
  call void @test_capture_b(i32* %A)
  ret void
}

; No musttail marker after inlining, since the prototypes don't match.
; CHECK: define void @test_proto_mismatch_a(
; CHECK-NOT: musttail
; CHECK: call void @test_proto_mismatch_c(

declare void @test_proto_mismatch_c(i32*)
define internal void @test_proto_mismatch_b(i32* %p) {
  musttail call void @test_proto_mismatch_c(i32* %p)
  ret void
}
define void @test_proto_mismatch_a() {
  call void @test_proto_mismatch_b(i32* null)
  ret void
}

; After inlining through a musttail call site, we need to keep musttail markers
; to prevent unbounded stack growth.
; CHECK: define void @test_musttail_basic_a(
; CHECK: musttail call void @test_musttail_basic_c(

declare void @test_musttail_basic_c(i32* %p)
define internal void @test_musttail_basic_b(i32* %p) {
  musttail call void @test_musttail_basic_c(i32* %p)
  ret void
}
define void @test_musttail_basic_a(i32* %p) {
  musttail call void @test_musttail_basic_b(i32* %p)
  ret void
}

; Don't insert lifetime end markers here, the lifetime is trivially over due
; to the return.
; CHECK: define void @test_byval_a(
; CHECK: musttail call void @test_byval_c(
; CHECK-NEXT: ret void

declare void @test_byval_c(i32* byval %p)
define internal void @test_byval_b(i32* byval %p) {
  musttail call void @test_byval_c(i32* byval %p)
  ret void
}
define void @test_byval_a(i32* byval %p) {
  musttail call void @test_byval_b(i32* byval %p)
  ret void
}

; Don't insert a stack restore, we're about to return.
; CHECK: define void @test_dynalloca_a(
; CHECK: call i8* @llvm.stacksave(
; CHECK: alloca i8, i32 %n
; CHECK: musttail call void @test_dynalloca_c(
; CHECK-NEXT: ret void

declare void @escape(i8* %buf)
declare void @test_dynalloca_c(i32* byval %p, i32 %n)
define internal void @test_dynalloca_b(i32* byval %p, i32 %n) alwaysinline {
  %buf = alloca i8, i32 %n              ; dynamic alloca
  call void @escape(i8* %buf)           ; escape it
  musttail call void @test_dynalloca_c(i32* byval %p, i32 %n)
  ret void
}
define void @test_dynalloca_a(i32* byval %p, i32 %n) {
  musttail call void @test_dynalloca_b(i32* byval %p, i32 %n)
  ret void
}

; We can't merge the returns.
; CHECK: define void @test_multiret_a(
; CHECK: musttail call void @test_multiret_c(
; CHECK-NEXT: ret void
; CHECK: musttail call void @test_multiret_d(
; CHECK-NEXT: ret void

declare void @test_multiret_c(i1 zeroext %b)
declare void @test_multiret_d(i1 zeroext %b)
define internal void @test_multiret_b(i1 zeroext %b) {
  br i1 %b, label %c, label %d
c:
  musttail call void @test_multiret_c(i1 zeroext %b)
  ret void
d:
  musttail call void @test_multiret_d(i1 zeroext %b)
  ret void
}
define void @test_multiret_a(i1 zeroext %b) {
  musttail call void @test_multiret_b(i1 zeroext %b)
  ret void
}

; We have to avoid bitcast chains.
; CHECK: define i32* @test_retptr_a(
; CHECK: musttail call i8* @test_retptr_c(
; CHECK-NEXT: bitcast i8* {{.*}} to i32*
; CHECK-NEXT: ret i32*

declare i8* @test_retptr_c()
define internal i16* @test_retptr_b() {
  %rv = musttail call i8* @test_retptr_c()
  %v = bitcast i8* %rv to i16*
  ret i16* %v
}
define i32* @test_retptr_a() {
  %rv = musttail call i16* @test_retptr_b()
  %v = bitcast i16* %rv to i32*
  ret i32* %v
}

; Combine the last two cases: multiple returns with pointer bitcasts.
; CHECK: define i32* @test_multiptrret_a(
; CHECK: musttail call i8* @test_multiptrret_c(
; CHECK-NEXT: bitcast i8* {{.*}} to i32*
; CHECK-NEXT: ret i32*
; CHECK: musttail call i8* @test_multiptrret_d(
; CHECK-NEXT: bitcast i8* {{.*}} to i32*
; CHECK-NEXT: ret i32*

declare i8* @test_multiptrret_c(i1 zeroext %b)
declare i8* @test_multiptrret_d(i1 zeroext %b)
define internal i16* @test_multiptrret_b(i1 zeroext %b) {
  br i1 %b, label %c, label %d
c:
  %c_rv = musttail call i8* @test_multiptrret_c(i1 zeroext %b)
  %c_v = bitcast i8* %c_rv to i16*
  ret i16* %c_v
d:
  %d_rv = musttail call i8* @test_multiptrret_d(i1 zeroext %b)
  %d_v = bitcast i8* %d_rv to i16*
  ret i16* %d_v
}
define i32* @test_multiptrret_a(i1 zeroext %b) {
  %rv = musttail call i16* @test_multiptrret_b(i1 zeroext %b)
  %v = bitcast i16* %rv to i32*
  ret i32* %v
}

; Inline a musttail call site which contains a normal return and a musttail call.
; CHECK: define i32 @test_mixedret_a(
; CHECK: br i1 %b
; CHECK: musttail call i32 @test_mixedret_c(
; CHECK-NEXT: ret i32
; CHECK: call i32 @test_mixedret_d(i1 zeroext %b)
; CHECK: add i32 1,
; CHECK-NOT: br
; CHECK: ret i32

declare i32 @test_mixedret_c(i1 zeroext %b)
declare i32 @test_mixedret_d(i1 zeroext %b)
define internal i32 @test_mixedret_b(i1 zeroext %b) {
  br i1 %b, label %c, label %d
c:
  %c_rv = musttail call i32 @test_mixedret_c(i1 zeroext %b)
  ret i32 %c_rv
d:
  %d_rv = call i32 @test_mixedret_d(i1 zeroext %b)
  %d_rv1 = add i32 1, %d_rv
  ret i32 %d_rv1
}
define i32 @test_mixedret_a(i1 zeroext %b) {
  %rv = musttail call i32 @test_mixedret_b(i1 zeroext %b)
  ret i32 %rv
}
