; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s

; VSLI.8, 64-bit vector: left-shift each byte of %B by 7 and insert into %A.
define <8 x i8> @vsli8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vsli8:
;CHECK: vsli.8
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = call <8 x i8> @llvm.arm.neon.vshiftins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
	ret <8 x i8> %tmp3
}

; VSLI.16, 64-bit vector: left-shift each halfword of %B by 15 and insert into %A.
define <4 x i16> @vsli16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vsli16:
;CHECK: vsli.16
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = call <4 x i16> @llvm.arm.neon.vshiftins.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
	ret <4 x i16> %tmp3
}

; VSLI.32, 64-bit vector: left-shift each word of %B by 31 and insert into %A.
define <2 x i32> @vsli32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: vsli32:
;CHECK: vsli.32
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = call <2 x i32> @llvm.arm.neon.vshiftins.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> < i32 31, i32 31 >)
	ret <2 x i32> %tmp3
}

; VSLI.64, 64-bit vector: left-shift the doubleword of %B by 63 and insert into %A.
define <1 x i64> @vsli64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
;CHECK-LABEL: vsli64:
;CHECK: vsli.64
	%tmp1 = load <1 x i64>* %A
	%tmp2 = load <1 x i64>* %B
	%tmp3 = call <1 x i64> @llvm.arm.neon.vshiftins.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2, <1 x i64> < i64 63 >)
	ret <1 x i64> %tmp3
}

; VSLI.8, 128-bit (Q-register) vector: left-shift each byte of %B by 7 and insert into %A.
define <16 x i8> @vsliQ8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: vsliQ8:
;CHECK: vsli.8
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
	%tmp3 = call <16 x i8> @llvm.arm.neon.vshiftins.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
	ret <16 x i8> %tmp3
}

; VSLI.16, 128-bit (Q-register) vector: left-shift each halfword of %B by 15 and insert into %A.
define <8 x i16> @vsliQ16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: vsliQ16:
;CHECK: vsli.16
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
	%tmp3 = call <8 x i16> @llvm.arm.neon.vshiftins.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >)
	ret <8 x i16> %tmp3
}

; VSLI.32, 128-bit (Q-register) vector: left-shift each word of %B by 31 and insert into %A.
define <4 x i32> @vsliQ32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: vsliQ32:
;CHECK: vsli.32
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
	%tmp3 = call <4 x i32> @llvm.arm.neon.vshiftins.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> < i32 31, i32 31, i32 31, i32 31 >)
	ret <4 x i32> %tmp3
}

; VSLI.64, 128-bit (Q-register) vector: left-shift each doubleword of %B by 63 and insert into %A.
define <2 x i64> @vsliQ64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
;CHECK-LABEL: vsliQ64:
;CHECK: vsli.64
	%tmp1 = load <2 x i64>* %A
	%tmp2 = load <2 x i64>* %B
	%tmp3 = call <2 x i64> @llvm.arm.neon.vshiftins.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2, <2 x i64> < i64 63, i64 63 >)
	ret <2 x i64> %tmp3
}

; VSRI.8, 64-bit vector: a negative shift-amount vector selects shift-right-and-insert.
define <8 x i8> @vsri8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vsri8:
;CHECK: vsri.8
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = call <8 x i8> @llvm.arm.neon.vshiftins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
	ret <8 x i8> %tmp3
}

; VSRI.16, 64-bit vector: negative shift amount (-16) selects shift-right-and-insert.
define <4 x i16> @vsri16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vsri16:
;CHECK: vsri.16
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = call <4 x i16> @llvm.arm.neon.vshiftins.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
	ret <4 x i16> %tmp3
}

; VSRI.32, 64-bit vector: negative shift amount (-32) selects shift-right-and-insert.
define <2 x i32> @vsri32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: vsri32:
;CHECK: vsri.32
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = call <2 x i32> @llvm.arm.neon.vshiftins.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> < i32 -32, i32 -32 >)
	ret <2 x i32> %tmp3
}

; VSRI.64, 64-bit vector: negative shift amount (-64) selects shift-right-and-insert.
define <1 x i64> @vsri64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
;CHECK-LABEL: vsri64:
;CHECK: vsri.64
	%tmp1 = load <1 x i64>* %A
	%tmp2 = load <1 x i64>* %B
	%tmp3 = call <1 x i64> @llvm.arm.neon.vshiftins.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2, <1 x i64> < i64 -64 >)
	ret <1 x i64> %tmp3
}

; VSRI.8, 128-bit (Q-register) vector: negative shift amount (-8) selects shift-right-and-insert.
define <16 x i8> @vsriQ8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: vsriQ8:
;CHECK: vsri.8
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
	%tmp3 = call <16 x i8> @llvm.arm.neon.vshiftins.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
	ret <16 x i8> %tmp3
}

; VSRI.16, 128-bit (Q-register) vector: negative shift amount (-16) selects shift-right-and-insert.
define <8 x i16> @vsriQ16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: vsriQ16:
;CHECK: vsri.16
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
	%tmp3 = call <8 x i16> @llvm.arm.neon.vshiftins.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
	ret <8 x i16> %tmp3
}

; VSRI.32, 128-bit (Q-register) vector: negative shift amount (-32) selects shift-right-and-insert.
define <4 x i32> @vsriQ32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: vsriQ32:
;CHECK: vsri.32
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
	%tmp3 = call <4 x i32> @llvm.arm.neon.vshiftins.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
	ret <4 x i32> %tmp3
}

; VSRI.64, 128-bit (Q-register) vector: negative shift amount (-64) selects shift-right-and-insert.
define <2 x i64> @vsriQ64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
;CHECK-LABEL: vsriQ64:
;CHECK: vsri.64
	%tmp1 = load <2 x i64>* %A
	%tmp2 = load <2 x i64>* %B
	%tmp3 = call <2 x i64> @llvm.arm.neon.vshiftins.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2, <2 x i64> < i64 -64, i64 -64 >)
	ret <2 x i64> %tmp3
}

; Shift-and-insert intrinsic declarations: 64-bit (D-register) variants...
declare <8 x i8>  @llvm.arm.neon.vshiftins.v8i8(<8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vshiftins.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vshiftins.v2i32(<2 x i32>, <2 x i32>, <2 x i32>) nounwind readnone
declare <1 x i64> @llvm.arm.neon.vshiftins.v1i64(<1 x i64>, <1 x i64>, <1 x i64>) nounwind readnone

; ...and 128-bit (Q-register) variants.
declare <16 x i8> @llvm.arm.neon.vshiftins.v16i8(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vshiftins.v8i16(<8 x i16>, <8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vshiftins.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vshiftins.v2i64(<2 x i64>, <2 x i64>, <2 x i64>) nounwind readnone
