; RUN: llc < %s -mtriple=arm64-eabi -aarch64-enable-stp-suppress=false -verify-machineinstrs -mcpu=cyclone | FileCheck %s

; CHECK-LABEL: stp_int
; CHECK: stp w0, w1, [x2]
define void @stp_int(i32 %a, i32 %b, i32* nocapture %p) nounwind {
  store i32 %a, i32* %p, align 4
  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
  store i32 %b, i32* %add.ptr, align 4
  ret void
}

; CHECK-LABEL: stp_long
; CHECK: stp x0, x1, [x2]
define void @stp_long(i64 %a, i64 %b, i64* nocapture %p) nounwind {
  store i64 %a, i64* %p, align 8
  %add.ptr = getelementptr inbounds i64, i64* %p, i64 1
  store i64 %b, i64* %add.ptr, align 8
  ret void
}

; CHECK-LABEL: stp_float
; CHECK: stp s0, s1, [x0]
define void @stp_float(float %a, float %b, float* nocapture %p) nounwind {
  store float %a, float* %p, align 4
  %add.ptr = getelementptr inbounds float, float* %p, i64 1
  store float %b, float* %add.ptr, align 4
  ret void
}

; CHECK-LABEL: stp_double
; CHECK: stp d0, d1, [x0]
define void @stp_double(double %a, double %b, double* nocapture %p) nounwind {
  store double %a, double* %p, align 8
  %add.ptr = getelementptr inbounds double, double* %p, i64 1
  store double %b, double* %add.ptr, align 8
  ret void
}

; CHECK-LABEL: stp_doublex2
; CHECK: stp q0, q1, [x0]
define void @stp_doublex2(<2 x double> %a, <2 x double> %b, <2 x double>* nocapture %p) nounwind {
  store <2 x double> %a, <2 x double>* %p, align 16
  %add.ptr = getelementptr inbounds <2 x double>, <2 x double>* %p, i64 1
  store <2 x double> %b, <2 x double>* %add.ptr, align 16
  ret void
}

; Test the load/store optimizer: combine sturs into an stp, if appropriate.
define void @stur_int(i32 %a, i32 %b, i32* nocapture %p) nounwind {
; CHECK-LABEL: stur_int
; CHECK: stp w{{[0-9]+}}, {{w[0-9]+}}, [x{{[0-9]+}}, #-8]
; CHECK-NEXT: ret
  %p1 = getelementptr inbounds i32, i32* %p, i32 -1
  store i32 %a, i32* %p1, align 2
  %p2 = getelementptr inbounds i32, i32* %p, i32 -2
  store i32 %b, i32* %p2, align 2
  ret void
}

define void @stur_long(i64 %a, i64 %b, i64* nocapture %p) nounwind {
; CHECK-LABEL: stur_long
; CHECK: stp x{{[0-9]+}}, {{x[0-9]+}}, [x{{[0-9]+}}, #-16]
; CHECK-NEXT: ret
  %p1 = getelementptr inbounds i64, i64* %p, i32 -1
  store i64 %a, i64* %p1, align 2
  %p2 = getelementptr inbounds i64, i64* %p, i32 -2
  store i64 %b, i64* %p2, align 2
  ret void
}

define void @stur_float(float %a, float %b, float* nocapture %p) nounwind {
; CHECK-LABEL: stur_float
; CHECK: stp s{{[0-9]+}}, {{s[0-9]+}}, [x{{[0-9]+}}, #-8]
; CHECK-NEXT: ret
  %p1 = getelementptr inbounds float, float* %p, i32 -1
  store float %a, float* %p1, align 2
  %p2 = getelementptr inbounds float, float* %p, i32 -2
  store float %b, float* %p2, align 2
  ret void
}

define void @stur_double(double %a, double %b, double* nocapture %p) nounwind {
; CHECK-LABEL: stur_double
; CHECK: stp d{{[0-9]+}}, {{d[0-9]+}}, [x{{[0-9]+}}, #-16]
; CHECK-NEXT: ret
  %p1 = getelementptr inbounds double, double* %p, i32 -1
  store double %a, double* %p1, align 2
  %p2 = getelementptr inbounds double, double* %p, i32 -2
  store double %b, double* %p2, align 2
  ret void
}

define void @stur_doublex2(<2 x double> %a, <2 x double> %b, <2 x double>* nocapture %p) nounwind {
; CHECK-LABEL: stur_doublex2
; CHECK: stp q{{[0-9]+}}, q{{[0-9]+}}, [x{{[0-9]+}}, #-32]
; CHECK-NEXT: ret
  %p1 = getelementptr inbounds <2 x double>, <2 x double>* %p, i32 -1
  store <2 x double> %a, <2 x double>* %p1, align 2
  %p2 = getelementptr inbounds <2 x double>, <2 x double>* %p, i32 -2
  store <2 x double> %b, <2 x double>* %p2, align 2
  ret void
}

define void @splat_v4i32(i32 %v, i32 *%p) {
entry:

; CHECK-LABEL: splat_v4i32
; CHECK-DAG: dup v0.4s, w0
; CHECK-DAG: str q0, [x1]
; CHECK: ret

  %p17 = insertelement <4 x i32> undef, i32 %v, i32 0
  %p18 = insertelement <4 x i32> %p17, i32 %v, i32 1
  %p19 = insertelement <4 x i32> %p18, i32 %v, i32 2
  %p20 = insertelement <4 x i32> %p19, i32 %v, i32 3
  %p21 = bitcast i32* %p to <4 x i32>*
  store <4 x i32> %p20, <4 x i32>* %p21, align 4
  ret void
}

; Check that a store of a vector built from 4 insertelements that is not a
; splat does not get split.
define void @nosplat_v4i32(i32 %v, i32 *%p) {
entry:

; CHECK-LABEL: nosplat_v4i32:
; CHECK: str w0,
; CHECK: ldr q[[REG1:[0-9]+]],
; CHECK-DAG: mov v[[REG1]].s[1], w0
; CHECK-DAG: mov v[[REG1]].s[2], w0
; CHECK-DAG: mov v[[REG1]].s[3], w0
; CHECK: str q[[REG1]], [x1]
; CHECK: ret

  %p17 = insertelement <4 x i32> undef, i32 %v, i32 %v
  %p18 = insertelement <4 x i32> %p17, i32 %v, i32 1
  %p19 = insertelement <4 x i32> %p18, i32 %v, i32 2
  %p20 = insertelement <4 x i32> %p19, i32 %v, i32 3
  %p21 = bitcast i32* %p to <4 x i32>*
  store <4 x i32> %p20, <4 x i32>* %p21, align 4
  ret void
}

; Check that a store of a vector built from 3 insertelements that is not a
; splat does not get split.
define void @nosplat2_v4i32(i32 %v, i32 *%p, <4 x i32> %vin) {
entry:

; CHECK-LABEL: nosplat2_v4i32:
; CHECK: mov v[[REG1:[0-9]+]].s[1], w0
; CHECK-DAG: mov v[[REG1]].s[2], w0
; CHECK-DAG: mov v[[REG1]].s[3], w0
; CHECK: str q[[REG1]], [x1]
; CHECK: ret

  %p18 = insertelement <4 x i32> %vin, i32 %v, i32 1
  %p19 = insertelement <4 x i32> %p18, i32 %v, i32 2
  %p20 = insertelement <4 x i32> %p19, i32 %v, i32 3
  %p21 = bitcast i32* %p to <4 x i32>*
  store <4 x i32> %p20, <4 x i32>* %p21, align 4
  ret void
}

; Read of %b to compute %tmp2 shouldn't prevent formation of stp.
; CHECK-LABEL: stp_int_rar_hazard
; CHECK: ldr [[REG:w[0-9]+]], [x2, #8]
; CHECK: add w8, [[REG]], w1
; CHECK: stp w0, w1, [x2]
; CHECK: ret
define i32 @stp_int_rar_hazard(i32 %a, i32 %b, i32* nocapture %p) nounwind {
  store i32 %a, i32* %p, align 4
  %ld.ptr = getelementptr inbounds i32, i32* %p, i64 2
  %tmp = load i32, i32* %ld.ptr, align 4
  %tmp2 = add i32 %tmp, %b
  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
  store i32 %b, i32* %add.ptr, align 4
  ret i32 %tmp2
}

; Read of %b to compute %tmp2 shouldn't prevent formation of stp.
; CHECK-LABEL: stp_int_rar_hazard_after
; CHECK: ldr [[REG:w[0-9]+]], [x3, #4]
; CHECK: add w0, [[REG]], w2
; CHECK: stp w1, w2, [x3]
; CHECK: ret
define i32 @stp_int_rar_hazard_after(i32 %w0, i32 %a, i32 %b, i32* nocapture %p) nounwind {
  store i32 %a, i32* %p, align 4
  %ld.ptr = getelementptr inbounds i32, i32* %p, i64 1
  %tmp = load i32, i32* %ld.ptr, align 4
  %tmp2 = add i32 %tmp, %b
  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
  store i32 %b, i32* %add.ptr, align 4
  ret i32 %tmp2
}