; RUN: llc < %s -march=x86 -tailcallopt | grep TAILCALL | count 5

; With -tailcallopt, CodeGen guarantees a tail call optimization
; for all of these.
5
declare fastcc i32 @tailcallee(i32 %a1, i32 %a2, i32 %a3, i32 %a4)

; Straightforward fastcc -> fastcc tail call: the callee's four arguments are
; built from the caller's two incoming values, and the call result is returned
; directly, so this must lower to a TAILCALL.
define fastcc i32 @tailcaller(i32 %in1, i32 %in2) nounwind {
entry:
  %tmp11 = tail call fastcc i32 @tailcallee(i32 %in1, i32 %in2, i32 %in1, i32 %in2)
  ret i32 %tmp11
}
13
declare fastcc i8* @alias_callee()

; The caller adds a noalias return attribute that the callee lacks; this
; attribute mismatch must not block tail call optimization.
define fastcc noalias i8* @noalias_caller() nounwind {
  %p = tail call fastcc i8* @alias_callee()
  ret i8* %p
}
20
declare fastcc noalias i8* @noalias_callee()

; Mirror of the previous case: the callee returns noalias but the caller does
; not; dropping the attribute on return must still allow a TAILCALL.
define fastcc i8* @alias_caller() nounwind {
  %p = tail call fastcc noalias i8* @noalias_callee()
  ret i8* %p
}
27
declare fastcc i32 @i32_callee()

; The call result is ignored and undef is returned instead; returning undef is
; compatible with any callee result, so this must still become a TAILCALL.
define fastcc i32 @ret_undef() nounwind {
  %p = tail call fastcc i32 @i32_callee()
  ret i32 undef
}
34
declare fastcc void @does_not_return()

; A tail call followed by unreachable (the callee never returns) must also be
; eligible for tail call optimization.
define fastcc i32 @noret() nounwind {
  tail call fastcc void @does_not_return()
  unreachable
}
41