; RUN: llc < %s -march=arm64 -aarch64-stp-suppress=false -verify-machineinstrs -mcpu=cyclone | FileCheck %s
; RUN: llc < %s -march=arm64 -aarch64-unscaled-mem-op=true\
; RUN:   -verify-machineinstrs -mcpu=cyclone | FileCheck -check-prefix=STUR_CHK %s

; Two adjacent 32-bit stores should be merged into a single stp.
; CHECK-LABEL: stp_int
; CHECK: stp w0, w1, [x2]
define void @stp_int(i32 %a, i32 %b, i32* nocapture %p) nounwind {
  store i32 %a, i32* %p, align 4
  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
  store i32 %b, i32* %add.ptr, align 4
  ret void
}

; Two adjacent 64-bit stores should be merged into a single stp.
; CHECK-LABEL: stp_long
; CHECK: stp x0, x1, [x2]
define void @stp_long(i64 %a, i64 %b, i64* nocapture %p) nounwind {
  store i64 %a, i64* %p, align 8
  %add.ptr = getelementptr inbounds i64, i64* %p, i64 1
  store i64 %b, i64* %add.ptr, align 8
  ret void
}

; Two adjacent float stores should be merged into a single FP-register stp.
; CHECK-LABEL: stp_float
; CHECK: stp s0, s1, [x0]
define void @stp_float(float %a, float %b, float* nocapture %p) nounwind {
  store float %a, float* %p, align 4
  %add.ptr = getelementptr inbounds float, float* %p, i64 1
  store float %b, float* %add.ptr, align 4
  ret void
}

; Two adjacent double stores should be merged into a single FP-register stp.
; CHECK-LABEL: stp_double
; CHECK: stp d0, d1, [x0]
define void @stp_double(double %a, double %b, double* nocapture %p) nounwind {
  store double %a, double* %p, align 8
  %add.ptr = getelementptr inbounds double, double* %p, i64 1
  store double %b, double* %add.ptr, align 8
  ret void
}

; Test the load/store optimizer---combine sturs into an stp, if appropriate.
; The under-aligned (align 2) stores at negative offsets are selected as
; unscaled sturs, which the optimizer should still pair into one stp.
define void @stur_int(i32 %a, i32 %b, i32* nocapture %p) nounwind {
; STUR_CHK-LABEL: stur_int
; STUR_CHK: stp w{{[0-9]+}}, {{w[0-9]+}}, [x{{[0-9]+}}, #-8]
; STUR_CHK-NEXT: ret
  %p1 = getelementptr inbounds i32, i32* %p, i32 -1
  store i32 %a, i32* %p1, align 2
  %p2 = getelementptr inbounds i32, i32* %p, i32 -2
  store i32 %b, i32* %p2, align 2
  ret void
}

; Two unscaled 64-bit sturs at negative offsets should combine into one stp.
define void @stur_long(i64 %a, i64 %b, i64* nocapture %p) nounwind {
; STUR_CHK-LABEL: stur_long
; STUR_CHK: stp x{{[0-9]+}}, {{x[0-9]+}}, [x{{[0-9]+}}, #-16]
; STUR_CHK-NEXT: ret
  %p1 = getelementptr inbounds i64, i64* %p, i32 -1
  store i64 %a, i64* %p1, align 2
  %p2 = getelementptr inbounds i64, i64* %p, i32 -2
  store i64 %b, i64* %p2, align 2
  ret void
}

; Two unscaled float sturs at negative offsets should combine into one stp.
define void @stur_float(float %a, float %b, float* nocapture %p) nounwind {
; STUR_CHK-LABEL: stur_float
; STUR_CHK: stp s{{[0-9]+}}, {{s[0-9]+}}, [x{{[0-9]+}}, #-8]
; STUR_CHK-NEXT: ret
  %p1 = getelementptr inbounds float, float* %p, i32 -1
  store float %a, float* %p1, align 2
  %p2 = getelementptr inbounds float, float* %p, i32 -2
  store float %b, float* %p2, align 2
  ret void
}

; Two unscaled double sturs at negative offsets should combine into one stp.
define void @stur_double(double %a, double %b, double* nocapture %p) nounwind {
; STUR_CHK-LABEL: stur_double
; STUR_CHK: stp d{{[0-9]+}}, {{d[0-9]+}}, [x{{[0-9]+}}, #-16]
; STUR_CHK-NEXT: ret
  %p1 = getelementptr inbounds double, double* %p, i32 -1
  store double %a, double* %p1, align 2
  %p2 = getelementptr inbounds double, double* %p, i32 -2
  store double %b, double* %p2, align 2
  ret void
}

; A 4 x i32 splat stored through an i32 pointer should lower to two stp
; instructions of the splatted GPR value (either order, hence CHECK-DAG).
define void @splat_v4i32(i32 %v, i32 *%p) {
entry:

; CHECK-LABEL: splat_v4i32
; CHECK-DAG: stp w0, w0, [x1]
; CHECK-DAG: stp w0, w0, [x1, #8]
; CHECK: ret

  %p17 = insertelement <4 x i32> undef, i32 %v, i32 0
  %p18 = insertelement <4 x i32> %p17, i32 %v, i32 1
  %p19 = insertelement <4 x i32> %p18, i32 %v, i32 2
  %p20 = insertelement <4 x i32> %p19, i32 %v, i32 3
  %p21 = bitcast i32* %p to <4 x i32>*
  store <4 x i32> %p20, <4 x i32>* %p21, align 4
  ret void
}
