; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - -lower-interleaved-accesses=false | FileCheck %s

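; VPADD performs a pairwise add: adjacent element pairs from the
; concatenation of the two 64-bit source vectors are summed into a single
; 64-bit result. Each test below should select a single vpadd with the
; element type implied by the intrinsic's vector type.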
define <8 x i8> @vpaddi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vpaddi8:
;CHECK: vpadd.i8
	%tmp1 = load <8 x i8>, <8 x i8>* %A
	%tmp2 = load <8 x i8>, <8 x i8>* %B
	%tmp3 = call <8 x i8> @llvm.arm.neon.vpadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
	ret <8 x i8> %tmp3
}

define <4 x i16> @vpaddi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vpaddi16:
;CHECK: vpadd.i16
	%tmp1 = load <4 x i16>, <4 x i16>* %A
	%tmp2 = load <4 x i16>, <4 x i16>* %B
	%tmp3 = call <4 x i16> @llvm.arm.neon.vpadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
	ret <4 x i16> %tmp3
}

define <2 x i32> @vpaddi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: vpaddi32:
;CHECK: vpadd.i32
	%tmp1 = load <2 x i32>, <2 x i32>* %A
	%tmp2 = load <2 x i32>, <2 x i32>* %B
	%tmp3 = call <2 x i32> @llvm.arm.neon.vpadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
	ret <2 x i32> %tmp3
}

define <2 x float> @vpaddf32(<2 x float>* %A, <2 x float>* %B) nounwind {
;CHECK-LABEL: vpaddf32:
;CHECK: vpadd.f32
	%tmp1 = load <2 x float>, <2 x float>* %A
	%tmp2 = load <2 x float>, <2 x float>* %B
	%tmp3 = call <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
	ret <2 x float> %tmp3
}

declare <8 x i8>  @llvm.arm.neon.vpadd.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vpadd.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vpadd.v2i32(<2 x i32>, <2 x i32>) nounwind readnone

declare <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float>, <2 x float>) nounwind readnone

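; VPADDL (pairwise add long) takes a single operand, adds adjacent element
; pairs, and widens each sum to twice the original element width; the
; signed (.s) and unsigned (.u) forms differ in how the elements are
; extended. These tests cover the 64-bit (D-register) input variants.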
define <4 x i16> @vpaddls8(<8 x i8>* %A) nounwind {
;CHECK-LABEL: vpaddls8:
;CHECK: vpaddl.s8
	%tmp1 = load <8 x i8>, <8 x i8>* %A
	%tmp2 = call <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8> %tmp1)
	ret <4 x i16> %tmp2
}

define <2 x i32> @vpaddls16(<4 x i16>* %A) nounwind {
;CHECK-LABEL: vpaddls16:
;CHECK: vpaddl.s16
	%tmp1 = load <4 x i16>, <4 x i16>* %A
	%tmp2 = call <2 x i32> @llvm.arm.neon.vpaddls.v2i32.v4i16(<4 x i16> %tmp1)
	ret <2 x i32> %tmp2
}

define <1 x i64> @vpaddls32(<2 x i32>* %A) nounwind {
;CHECK-LABEL: vpaddls32:
;CHECK: vpaddl.s32
	%tmp1 = load <2 x i32>, <2 x i32>* %A
	%tmp2 = call <1 x i64> @llvm.arm.neon.vpaddls.v1i64.v2i32(<2 x i32> %tmp1)
	ret <1 x i64> %tmp2
}

define <4 x i16> @vpaddlu8(<8 x i8>* %A) nounwind {
;CHECK-LABEL: vpaddlu8:
;CHECK: vpaddl.u8
	%tmp1 = load <8 x i8>, <8 x i8>* %A
	%tmp2 = call <4 x i16> @llvm.arm.neon.vpaddlu.v4i16.v8i8(<8 x i8> %tmp1)
	ret <4 x i16> %tmp2
}

define <2 x i32> @vpaddlu16(<4 x i16>* %A) nounwind {
;CHECK-LABEL: vpaddlu16:
;CHECK: vpaddl.u16
	%tmp1 = load <4 x i16>, <4 x i16>* %A
	%tmp2 = call <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16> %tmp1)
	ret <2 x i32> %tmp2
}

define <1 x i64> @vpaddlu32(<2 x i32>* %A) nounwind {
;CHECK-LABEL: vpaddlu32:
;CHECK: vpaddl.u32
	%tmp1 = load <2 x i32>, <2 x i32>* %A
	%tmp2 = call <1 x i64> @llvm.arm.neon.vpaddlu.v1i64.v2i32(<2 x i32> %tmp1)
	ret <1 x i64> %tmp2
}

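; The same signed and unsigned vpaddl tests for 128-bit (Q-register)
; inputs, where the intrinsic both takes and returns a full quadword vector.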
define <8 x i16> @vpaddlQs8(<16 x i8>* %A) nounwind {
;CHECK-LABEL: vpaddlQs8:
;CHECK: vpaddl.s8
	%tmp1 = load <16 x i8>, <16 x i8>* %A
	%tmp2 = call <8 x i16> @llvm.arm.neon.vpaddls.v8i16.v16i8(<16 x i8> %tmp1)
	ret <8 x i16> %tmp2
}

define <4 x i32> @vpaddlQs16(<8 x i16>* %A) nounwind {
;CHECK-LABEL: vpaddlQs16:
;CHECK: vpaddl.s16
	%tmp1 = load <8 x i16>, <8 x i16>* %A
	%tmp2 = call <4 x i32> @llvm.arm.neon.vpaddls.v4i32.v8i16(<8 x i16> %tmp1)
	ret <4 x i32> %tmp2
}

define <2 x i64> @vpaddlQs32(<4 x i32>* %A) nounwind {
;CHECK-LABEL: vpaddlQs32:
;CHECK: vpaddl.s32
	%tmp1 = load <4 x i32>, <4 x i32>* %A
	%tmp2 = call <2 x i64> @llvm.arm.neon.vpaddls.v2i64.v4i32(<4 x i32> %tmp1)
	ret <2 x i64> %tmp2
}

define <8 x i16> @vpaddlQu8(<16 x i8>* %A) nounwind {
;CHECK-LABEL: vpaddlQu8:
;CHECK: vpaddl.u8
	%tmp1 = load <16 x i8>, <16 x i8>* %A
	%tmp2 = call <8 x i16> @llvm.arm.neon.vpaddlu.v8i16.v16i8(<16 x i8> %tmp1)
	ret <8 x i16> %tmp2
}

define <4 x i32> @vpaddlQu16(<8 x i16>* %A) nounwind {
;CHECK-LABEL: vpaddlQu16:
;CHECK: vpaddl.u16
	%tmp1 = load <8 x i16>, <8 x i16>* %A
	%tmp2 = call <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16> %tmp1)
	ret <4 x i32> %tmp2
}

define <2 x i64> @vpaddlQu32(<4 x i32>* %A) nounwind {
;CHECK-LABEL: vpaddlQu32:
;CHECK: vpaddl.u32
	%tmp1 = load <4 x i32>, <4 x i32>* %A
	%tmp2 = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %tmp1)
	ret <2 x i64> %tmp2
}

; Test the AddCombine DAG optimization that folds an add of the even and
; odd lanes of a vector (extracted via shufflevector) into a vpaddl.s8.
define void @addCombineToVPADDL() nounwind ssp {
; CHECK-LABEL: addCombineToVPADDL:
; CHECK: vpaddl.s8
  %cbcr = alloca <16 x i8>, align 16
  %X = alloca <8 x i8>, align 8
  %tmp = load <16 x i8>, <16 x i8>* %cbcr
  %tmp1 = shufflevector <16 x i8> %tmp, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  %tmp2 = load <16 x i8>, <16 x i8>* %cbcr
  %tmp3 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
  %add = add <8 x i8> %tmp3, %tmp1
  store <8 x i8> %add, <8 x i8>* %X, align 8
  ret void
}

; Legalization produces an EXTRACT_VECTOR_ELT DAG node which performs an
; extend from i16 to i32. In this case the input to the formed VPADDL needs
; to be a vector of i16s.
define <2 x i16> @fromExtendingExtractVectorElt(<4 x i16> %in) {
;CHECK-LABEL: fromExtendingExtractVectorElt:
;CHECK: vpaddl.s16
  %tmp1 = shufflevector <4 x i16> %in, <4 x i16> undef, <2 x i32> <i32 0, i32 2>
  %tmp2 = shufflevector <4 x i16> %in, <4 x i16> undef, <2 x i32> <i32 1, i32 3>
  %x = add <2 x i16> %tmp2, %tmp1
  ret <2 x i16> %x
}

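; Declarations for the vpaddl intrinsics used above. They are overloaded
; on both the result and operand vector types, so each input/output
; combination needs its own declaration.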
declare <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vpaddls.v2i32.v4i16(<4 x i16>) nounwind readnone
declare <1 x i64> @llvm.arm.neon.vpaddls.v1i64.v2i32(<2 x i32>) nounwind readnone

declare <4 x i16> @llvm.arm.neon.vpaddlu.v4i16.v8i8(<8 x i8>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16>) nounwind readnone
declare <1 x i64> @llvm.arm.neon.vpaddlu.v1i64.v2i32(<2 x i32>) nounwind readnone

declare <8 x i16> @llvm.arm.neon.vpaddls.v8i16.v16i8(<16 x i8>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vpaddls.v4i32.v8i16(<8 x i16>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vpaddls.v2i64.v4i32(<4 x i32>) nounwind readnone

declare <8 x i16> @llvm.arm.neon.vpaddlu.v8i16.v16i8(<16 x i8>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32>) nounwind readnone