; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-linux -mattr=-avx | FileCheck %s -check-prefix=X64
; Win64 does not support byval yet.
; RUN: llc < %s -mtriple=i686-- -mattr=-avx | FileCheck %s -check-prefix=X86

; 129-byte aggregate (16 rows of 8 x i8, plus one trailing i8) used to
; exercise byval argument copying that does not fit in registers.
%struct.s = type { i8, i8, i8, i8, i8, i8, i8, i8,
                   i8, i8, i8, i8, i8, i8, i8, i8,
                   i8, i8, i8, i8, i8, i8, i8, i8,
                   i8, i8, i8, i8, i8, i8, i8, i8,
                   i8, i8, i8, i8, i8, i8, i8, i8,
                   i8, i8, i8, i8, i8, i8, i8, i8,
                   i8, i8, i8, i8, i8, i8, i8, i8,
                   i8, i8, i8, i8, i8, i8, i8, i8,
                   i8, i8, i8, i8, i8, i8, i8, i8,
                   i8, i8, i8, i8, i8, i8, i8, i8,
                   i8, i8, i8, i8, i8, i8, i8, i8,
                   i8, i8, i8, i8, i8, i8, i8, i8,
                   i8, i8, i8, i8, i8, i8, i8, i8,
                   i8, i8, i8, i8, i8, i8, i8, i8,
                   i8, i8, i8, i8, i8, i8, i8, i8,
                   i8, i8, i8, i8, i8, i8, i8, i8,
                   i8 }


define void @g(i8 signext  %a1, i8 signext  %a2, i8 signext  %a3, i8 signext  %a4, i8 signext  %a5, i8 signext  %a6) nounwind {
; X64-LABEL: g:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rbx
; X64-NEXT:    subq $272, %rsp # imm = 0x110
; X64-NEXT:    movb %dil, {{[0-9]+}}(%rsp)
; X64-NEXT:    movb %sil, {{[0-9]+}}(%rsp)
; X64-NEXT:    movb %dl, {{[0-9]+}}(%rsp)
; X64-NEXT:    movb %cl, {{[0-9]+}}(%rsp)
; X64-NEXT:    movb %r8b, {{[0-9]+}}(%rsp)
; X64-NEXT:    movb %r9b, {{[0-9]+}}(%rsp)
; X64-NEXT:    leaq {{[0-9]+}}(%rsp), %rbx
; X64-NEXT:    movl $16, %ecx
; X64-NEXT:    movq %rsp, %rdi
; X64-NEXT:    movq %rbx, %rsi
; X64-NEXT:    rep;movsq (%rsi), %es:(%rdi)
; X64-NEXT:    movb {{[0-9]+}}(%rsp), %al
; X64-NEXT:    movb %al, {{[0-9]+}}(%rsp)
; X64-NEXT:    callq f
; X64-NEXT:    movl $16, %ecx
; X64-NEXT:    movq %rsp, %rdi
; X64-NEXT:    movq %rbx, %rsi
; X64-NEXT:    rep;movsq (%rsi), %es:(%rdi)
; X64-NEXT:    movb {{[0-9]+}}(%rsp), %al
; X64-NEXT:    movb %al, {{[0-9]+}}(%rsp)
; X64-NEXT:    callq f
; X64-NEXT:    addq $272, %rsp # imm = 0x110
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
;
; X86-LABEL: g:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $272, %esp # imm = 0x110
; X86-NEXT:    movb 28(%ebp), %al
; X86-NEXT:    movb 24(%ebp), %cl
; X86-NEXT:    movb 20(%ebp), %dl
; X86-NEXT:    movb 16(%ebp), %ah
; X86-NEXT:    movb 12(%ebp), %ch
; X86-NEXT:    movb 8(%ebp), %dh
; X86-NEXT:    movb %dh, {{[0-9]+}}(%esp)
; X86-NEXT:    movb %ch, {{[0-9]+}}(%esp)
; X86-NEXT:    movb %ah, {{[0-9]+}}(%esp)
; X86-NEXT:    movb %dl, {{[0-9]+}}(%esp)
; X86-NEXT:    movb %cl, {{[0-9]+}}(%esp)
; X86-NEXT:    movb %al, {{[0-9]+}}(%esp)
; X86-NEXT:    leal {{[0-9]+}}(%esp), %ebx
; X86-NEXT:    movl $32, %ecx
; X86-NEXT:    movl %esp, %edi
; X86-NEXT:    movl %ebx, %esi
; X86-NEXT:    rep;movsl (%esi), %es:(%edi)
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    movb %al, {{[0-9]+}}(%esp)
; X86-NEXT:    calll f
; X86-NEXT:    movl $32, %ecx
; X86-NEXT:    movl %esp, %edi
; X86-NEXT:    movl %ebx, %esi
; X86-NEXT:    rep;movsl (%esi), %es:(%edi)
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    movb %al, {{[0-9]+}}(%esp)
; X86-NEXT:    calll f
; X86-NEXT:    leal -12(%ebp), %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
entry:
        ; Store the six register/stack args into the first six bytes of a
        ; local %struct.s, then pass it byval twice so codegen must emit the
        ; 129-byte outgoing copy (rep;movs) at each call site.
        %a = alloca %struct.s
        %tmp = getelementptr %struct.s, %struct.s* %a, i32 0, i32 0
        store i8 %a1, i8* %tmp, align 8
        %tmp2 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 1
        store i8 %a2, i8* %tmp2, align 8
        %tmp4 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 2
        store i8 %a3, i8* %tmp4, align 8
        %tmp6 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 3
        store i8 %a4, i8* %tmp6, align 8
        %tmp8 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 4
        store i8 %a5, i8* %tmp8, align 8
        %tmp10 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 5
        store i8 %a6, i8* %tmp10, align 8
        call void @f(%struct.s* byval(%struct.s) %a)
        call void @f(%struct.s* byval(%struct.s) %a)
        ret void
}
116declare void @f(%struct.s* byval(%struct.s))