; RUN: llc < %s -mtriple=armv7-apple-ios -O0 | FileCheck %s -check-prefix=NO-REALIGN
; RUN: llc < %s -mtriple=armv7-apple-ios -O0 | FileCheck %s -check-prefix=REALIGN

; rdar://12713765
; When stack realignment is disabled (via the "no-realign-stack" function
; attribute), make sure we are not creating stack objects that are assumed to
; be 64-byte aligned.
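; test1 carries the attribute and may only rely on the natural 16-byte
; alignment of the <16 x float> spill slot; test2 relies on a realigned stack.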
@T3_retval = common global <16 x float> zeroinitializer, align 16

define void @test1(<16 x float>* noalias sret %agg.result) nounwind ssp "no-realign-stack" {
entry:
; NO-REALIGN-LABEL: test1
; NO-REALIGN: mov r[[R2:[0-9]+]], r[[R1:[0-9]+]]
; NO-REALIGN: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]!
; NO-REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; NO-REALIGN: add r[[R2:[0-9]+]], r[[R1]], #32
; NO-REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; NO-REALIGN: add r[[R2:[0-9]+]], r[[R1]], #48
; NO-REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]

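; Stores into the spill slot form the #32/#48 offsets with plain adds; an orr
; would only be safe if the base were known to be 64-byte aligned.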
; NO-REALIGN: add r[[R2:[0-9]+]], r[[R1:[0-9]+]], #48
; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; NO-REALIGN: add r[[R2:[0-9]+]], r[[R1]], #32
; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; NO-REALIGN: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]!
; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]

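; The final copy lands in %agg.result through the sret pointer, which the
; AAPCS passes in r0.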
; NO-REALIGN: add r[[R2:[0-9]+]], r[[R0:0]], #48
; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; NO-REALIGN: add r[[R2:[0-9]+]], r[[R0]], #32
; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; NO-REALIGN: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R0]]:128]!
; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R0]]:128]
  %retval = alloca <16 x float>, align 16
  %0 = load <16 x float>, <16 x float>* @T3_retval, align 16
  store <16 x float> %0, <16 x float>* %retval
  %1 = load <16 x float>, <16 x float>* %retval
  store <16 x float> %1, <16 x float>* %agg.result, align 16
  ret void
}

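; test2 does not set "no-realign-stack": the prologue realigns sp to 64 bytes
; (bfc sp, #0, #6 clears the low six bits), so the #16/#32/#48 offsets into
; the spill slot can be formed with orr.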
define void @test2(<16 x float>* noalias sret %agg.result) nounwind ssp {
entry:
; REALIGN-LABEL: test2
; REALIGN: bfc sp, #0, #6
; REALIGN: mov r[[R2:[0-9]+]], r[[R1:[0-9]+]]
; REALIGN: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]!
; REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; REALIGN: add r[[R2:[0-9]+]], r[[R1]], #32
; REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; REALIGN: add r[[R2:[0-9]+]], r[[R1]], #48
; REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]

; REALIGN: orr r[[R2:[0-9]+]], r[[R1:[0-9]+]], #48
; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; REALIGN: orr r[[R2:[0-9]+]], r[[R1]], #32
; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; REALIGN: orr r[[R2:[0-9]+]], r[[R1]], #16
; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]

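; Stores into %agg.result go through the incoming sret pointer in r0; the
; offsets here use add, since no extra alignment is assumed for the result.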
; REALIGN: add r[[R1:[0-9]+]], r[[R0:0]], #48
; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
; REALIGN: add r[[R1:[0-9]+]], r[[R0]], #32
; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
; REALIGN: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R0]]:128]!
; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R0]]:128]
  %retval = alloca <16 x float>, align 16
  %0 = load <16 x float>, <16 x float>* @T3_retval, align 16
  store <16 x float> %0, <16 x float>* %retval
  %1 = load <16 x float>, <16 x float>* %retval
  store <16 x float> %1, <16 x float>* %agg.result, align 16
  ret void
}