1; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
2
; Signed saturating add, 64-bit vector of i8. Should select vqadd.s8.
; CHECK-LABEL pins the following checks to this function's output.
define <8 x i8> @vqadds8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vqadds8:
;CHECK: vqadd.s8
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = call <8 x i8> @llvm.arm.neon.vqadds.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
	ret <8 x i8> %tmp3
}
11
; Signed saturating add, 64-bit vector of i16. Should select vqadd.s16.
define <4 x i16> @vqadds16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vqadds16:
;CHECK: vqadd.s16
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = call <4 x i16> @llvm.arm.neon.vqadds.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
	ret <4 x i16> %tmp3
}
20
; Signed saturating add, 64-bit vector of i32. Should select vqadd.s32.
define <2 x i32> @vqadds32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: vqadds32:
;CHECK: vqadd.s32
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = call <2 x i32> @llvm.arm.neon.vqadds.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
	ret <2 x i32> %tmp3
}
29
; Signed saturating add, single i64 element. Should select vqadd.s64.
define <1 x i64> @vqadds64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
;CHECK-LABEL: vqadds64:
;CHECK: vqadd.s64
	%tmp1 = load <1 x i64>* %A
	%tmp2 = load <1 x i64>* %B
	%tmp3 = call <1 x i64> @llvm.arm.neon.vqadds.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
	ret <1 x i64> %tmp3
}
38
; Unsigned saturating add, 64-bit vector of i8. Should select vqadd.u8.
define <8 x i8> @vqaddu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vqaddu8:
;CHECK: vqadd.u8
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = call <8 x i8> @llvm.arm.neon.vqaddu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
	ret <8 x i8> %tmp3
}
47
; Unsigned saturating add, 64-bit vector of i16. Should select vqadd.u16.
define <4 x i16> @vqaddu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vqaddu16:
;CHECK: vqadd.u16
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = call <4 x i16> @llvm.arm.neon.vqaddu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
	ret <4 x i16> %tmp3
}
56
; Unsigned saturating add, 64-bit vector of i32. Should select vqadd.u32.
define <2 x i32> @vqaddu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: vqaddu32:
;CHECK: vqadd.u32
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = call <2 x i32> @llvm.arm.neon.vqaddu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
	ret <2 x i32> %tmp3
}
65
; Unsigned saturating add, single i64 element. Should select vqadd.u64.
define <1 x i64> @vqaddu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
;CHECK-LABEL: vqaddu64:
;CHECK: vqadd.u64
	%tmp1 = load <1 x i64>* %A
	%tmp2 = load <1 x i64>* %B
	%tmp3 = call <1 x i64> @llvm.arm.neon.vqaddu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
	ret <1 x i64> %tmp3
}
74
; Signed saturating add, 128-bit (Q register) vector of i8. Should select vqadd.s8.
define <16 x i8> @vqaddQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: vqaddQs8:
;CHECK: vqadd.s8
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
	%tmp3 = call <16 x i8> @llvm.arm.neon.vqadds.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
	ret <16 x i8> %tmp3
}
83
; Signed saturating add, 128-bit vector of i16. Should select vqadd.s16.
define <8 x i16> @vqaddQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: vqaddQs16:
;CHECK: vqadd.s16
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
	%tmp3 = call <8 x i16> @llvm.arm.neon.vqadds.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
	ret <8 x i16> %tmp3
}
92
; Signed saturating add, 128-bit vector of i32. Should select vqadd.s32.
define <4 x i32> @vqaddQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: vqaddQs32:
;CHECK: vqadd.s32
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
	%tmp3 = call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
	ret <4 x i32> %tmp3
}
101
; Signed saturating add, 128-bit vector of i64. Should select vqadd.s64.
define <2 x i64> @vqaddQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
;CHECK-LABEL: vqaddQs64:
;CHECK: vqadd.s64
	%tmp1 = load <2 x i64>* %A
	%tmp2 = load <2 x i64>* %B
	%tmp3 = call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
	ret <2 x i64> %tmp3
}
110
; Unsigned saturating add, 128-bit vector of i8. Should select vqadd.u8.
define <16 x i8> @vqaddQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: vqaddQu8:
;CHECK: vqadd.u8
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
	%tmp3 = call <16 x i8> @llvm.arm.neon.vqaddu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
	ret <16 x i8> %tmp3
}
119
; Unsigned saturating add, 128-bit vector of i16. Should select vqadd.u16.
define <8 x i16> @vqaddQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: vqaddQu16:
;CHECK: vqadd.u16
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
	%tmp3 = call <8 x i16> @llvm.arm.neon.vqaddu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
	ret <8 x i16> %tmp3
}
128
; Unsigned saturating add, 128-bit vector of i32. Should select vqadd.u32.
define <4 x i32> @vqaddQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: vqaddQu32:
;CHECK: vqadd.u32
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
	%tmp3 = call <4 x i32> @llvm.arm.neon.vqaddu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
	ret <4 x i32> %tmp3
}
137
; Unsigned saturating add, 128-bit vector of i64. Should select vqadd.u64.
define <2 x i64> @vqaddQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
;CHECK-LABEL: vqaddQu64:
;CHECK: vqadd.u64
	%tmp1 = load <2 x i64>* %A
	%tmp2 = load <2 x i64>* %B
	%tmp3 = call <2 x i64> @llvm.arm.neon.vqaddu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
	ret <2 x i64> %tmp3
}
146
; Declarations of the ARM NEON saturating-add intrinsics exercised above.
; 64-bit (D register) signed variants.
declare <8 x i8>  @llvm.arm.neon.vqadds.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vqadds.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vqadds.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
declare <1 x i64> @llvm.arm.neon.vqadds.v1i64(<1 x i64>, <1 x i64>) nounwind readnone

; 64-bit (D register) unsigned variants.
declare <8 x i8>  @llvm.arm.neon.vqaddu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vqaddu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vqaddu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
declare <1 x i64> @llvm.arm.neon.vqaddu.v1i64(<1 x i64>, <1 x i64>) nounwind readnone

; 128-bit (Q register) signed variants.
declare <16 x i8> @llvm.arm.neon.vqadds.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vqadds.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64>, <2 x i64>) nounwind readnone

; 128-bit (Q register) unsigned variants.
declare <16 x i8> @llvm.arm.neon.vqaddu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vqaddu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vqaddu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vqaddu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
166