1; RUN: llc < %s -mcpu=generic -mtriple=x86_64-pc-linux-gnu | FileCheck %s
2
3; Verify that the var arg parameters which are passed in registers are stored
4; in home stack slots allocated by the caller and that AP is correctly
5; calculated.
; Win64 CC on a Linux triple: %count arrives in ecx, so the remaining
; variadic register args (rdx, r8, r9) must be spilled into their
; caller-allocated home slots — 24/32/40 off rsp after the single push —
; and va_start must point AP at the first unnamed slot, 24(%rsp).
; CHECK-LABEL: average_va:
define x86_64_win64cc void @average_va(i32 %count, ...) nounwind {
entry:
; CHECK: pushq
; CHECK: movq   %r9, 40(%rsp)
; CHECK: movq   %r8, 32(%rsp)
; CHECK: movq   %rdx, 24(%rsp)
; CHECK: leaq   24(%rsp), %rax

  %ap = alloca i8*, align 8                       ; <i8**> [#uses=1]
  %ap.0 = bitcast i8** %ap to i8*
  call void @llvm.va_start(i8* %ap.0)
  ret void
}
19
; Variadic intrinsics exercised by the functions in this test.
declare void @llvm.va_start(i8*) nounwind
declare void @llvm.va_copy(i8*, i8*) nounwind
declare void @llvm.va_end(i8*) nounwind
23
; Five named integer args: a0..a3 fill the four register home slots and
; a4 lands on the stack at 48(%rsp) (after the push), so va_start must
; set AP just past it, at 56(%rsp).
; CHECK-LABEL: f5:
; CHECK: pushq
; CHECK: leaq 56(%rsp),
define x86_64_win64cc i8** @f5(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, ...) nounwind {
entry:
  %ap = alloca i8*, align 8
  %ap.0 = bitcast i8** %ap to i8*
  call void @llvm.va_start(i8* %ap.0)
  ret i8** %ap
}
34
; Four named args consume exactly the four register home slots, so AP
; starts at the first stack slot past them, 48(%rsp).
; CHECK-LABEL: f4:
; CHECK: pushq
; CHECK: leaq 48(%rsp),
define x86_64_win64cc i8** @f4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
entry:
  %ap = alloca i8*, align 8
  %ap.0 = bitcast i8** %ap to i8*
  call void @llvm.va_start(i8* %ap.0)
  ret i8** %ap
}
45
; Three named args: the fourth register arg (r9) is already variadic, so
; AP starts at its home slot, 40(%rsp).
; CHECK-LABEL: f3:
; CHECK: pushq
; CHECK: leaq 40(%rsp),
define x86_64_win64cc i8** @f3(i64 %a0, i64 %a1, i64 %a2, ...) nounwind {
entry:
  %ap = alloca i8*, align 8
  %ap.0 = bitcast i8** %ap to i8*
  call void @llvm.va_start(i8* %ap.0)
  ret i8** %ap
}
56
57; WinX86_64 uses char* for va_list. Verify that the correct amount of bytes
58; are copied using va_copy.
59
; One named arg, so AP = 32(%rsp). Because the Win64 va_list is a plain
; char*, va_copy must be a single 8-byte pointer store: the same AP
; value ends up in both the %ap and %cp stack slots.
; CHECK-LABEL: copy1:
; CHECK: leaq 32(%rsp), [[REG_copy1:%[a-z]+]]
; CHECK: movq [[REG_copy1]], 8(%rsp)
; CHECK: movq [[REG_copy1]], (%rsp)
; CHECK: ret
define x86_64_win64cc void @copy1(i64 %a0, ...) nounwind {
entry:
  %ap = alloca i8*, align 8
  %cp = alloca i8*, align 8
  %ap.0 = bitcast i8** %ap to i8*
  %cp.0 = bitcast i8** %cp to i8*
  call void @llvm.va_start(i8* %ap.0)
  call void @llvm.va_copy(i8* %cp.0, i8* %ap.0)
  ret void
}
75
; Same as copy1 but with four named args, so AP = 56(%rsp); va_copy is
; still a single 8-byte pointer store into each of the two slots.
; CHECK-LABEL: copy4:
; CHECK: leaq 56(%rsp), [[REG_copy4:%[a-z]+]]
; CHECK: movq [[REG_copy4]], 8(%rsp)
; CHECK: movq [[REG_copy4]], (%rsp)
; CHECK: ret
define x86_64_win64cc void @copy4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
entry:
  %ap = alloca i8*, align 8
  %cp = alloca i8*, align 8
  %ap.0 = bitcast i8** %ap to i8*
  %cp.0 = bitcast i8** %cp to i8*
  call void @llvm.va_start(i8* %ap.0)
  call void @llvm.va_copy(i8* %cp.0, i8* %ap.0)
  ret void
}
91
; Four named args, so va_start sets AP = 48(%rsp). The i32 va_arg then
; stores the advanced pointer (52(%rsp) per the checks below) back to
; the va_list slot and loads the 32-bit value from 48(%rsp).
; CHECK-LABEL: arg4:
; va_start:
; CHECK: leaq 48(%rsp), [[REG_arg4_1:%[a-z]+]]
; CHECK: movq [[REG_arg4_1]], (%rsp)
; va_arg:
; CHECK: leaq 52(%rsp), [[REG_arg4_2:%[a-z]+]]
; CHECK: movq [[REG_arg4_2]], (%rsp)
; CHECK: movl 48(%rsp), %eax
; CHECK: ret
define x86_64_win64cc i32 @arg4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
entry:
  %ap = alloca i8*, align 8
  %ap.0 = bitcast i8** %ap to i8*
  call void @llvm.va_start(i8* %ap.0)
  %tmp = va_arg i8** %ap, i32
  ret i32 %tmp
}
109