; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

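; Each test shifts a vector right by (element size - 1) and adds the result
; to another vector. The shift+add pair is expected to fold into a single
; shift-right-accumulate instruction: ssra for arithmetic (ashr) shifts and
; usra for logical (lshr) shifts.
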
define <8 x i8> @vsras8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vsras8:
;CHECK: ssra.8b
	%tmp1 = load <8 x i8>, <8 x i8>* %A
	%tmp2 = load <8 x i8>, <8 x i8>* %B
	%tmp3 = ashr <8 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
	%tmp4 = add <8 x i8> %tmp1, %tmp3
	ret <8 x i8> %tmp4
}

define <4 x i16> @vsras16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vsras16:
;CHECK: ssra.4h
	%tmp1 = load <4 x i16>, <4 x i16>* %A
	%tmp2 = load <4 x i16>, <4 x i16>* %B
	%tmp3 = ashr <4 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15 >
	%tmp4 = add <4 x i16> %tmp1, %tmp3
	ret <4 x i16> %tmp4
}

define <2 x i32> @vsras32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: vsras32:
;CHECK: ssra.2s
	%tmp1 = load <2 x i32>, <2 x i32>* %A
	%tmp2 = load <2 x i32>, <2 x i32>* %B
	%tmp3 = ashr <2 x i32> %tmp2, < i32 31, i32 31 >
	%tmp4 = add <2 x i32> %tmp1, %tmp3
	ret <2 x i32> %tmp4
}

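; Same signed pattern on 128-bit (Q-register) vectors.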
define <16 x i8> @vsraQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: vsraQs8:
;CHECK: ssra.16b
	%tmp1 = load <16 x i8>, <16 x i8>* %A
	%tmp2 = load <16 x i8>, <16 x i8>* %B
	%tmp3 = ashr <16 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
	%tmp4 = add <16 x i8> %tmp1, %tmp3
	ret <16 x i8> %tmp4
}

define <8 x i16> @vsraQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: vsraQs16:
;CHECK: ssra.8h
	%tmp1 = load <8 x i16>, <8 x i16>* %A
	%tmp2 = load <8 x i16>, <8 x i16>* %B
	%tmp3 = ashr <8 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
	%tmp4 = add <8 x i16> %tmp1, %tmp3
	ret <8 x i16> %tmp4
}

define <4 x i32> @vsraQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: vsraQs32:
;CHECK: ssra.4s
	%tmp1 = load <4 x i32>, <4 x i32>* %A
	%tmp2 = load <4 x i32>, <4 x i32>* %B
	%tmp3 = ashr <4 x i32> %tmp2, < i32 31, i32 31, i32 31, i32 31 >
	%tmp4 = add <4 x i32> %tmp1, %tmp3
	ret <4 x i32> %tmp4
}

define <2 x i64> @vsraQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
;CHECK-LABEL: vsraQs64:
;CHECK: ssra.2d
	%tmp1 = load <2 x i64>, <2 x i64>* %A
	%tmp2 = load <2 x i64>, <2 x i64>* %B
	%tmp3 = ashr <2 x i64> %tmp2, < i64 63, i64 63 >
	%tmp4 = add <2 x i64> %tmp1, %tmp3
	ret <2 x i64> %tmp4
}

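; Unsigned variants: lshr + add should select usra instead of ssra.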
define <8 x i8> @vsrau8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vsrau8:
;CHECK: usra.8b
	%tmp1 = load <8 x i8>, <8 x i8>* %A
	%tmp2 = load <8 x i8>, <8 x i8>* %B
	%tmp3 = lshr <8 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
	%tmp4 = add <8 x i8> %tmp1, %tmp3
	ret <8 x i8> %tmp4
}

define <4 x i16> @vsrau16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vsrau16:
;CHECK: usra.4h
	%tmp1 = load <4 x i16>, <4 x i16>* %A
	%tmp2 = load <4 x i16>, <4 x i16>* %B
	%tmp3 = lshr <4 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15 >
	%tmp4 = add <4 x i16> %tmp1, %tmp3
	ret <4 x i16> %tmp4
}

define <2 x i32> @vsrau32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: vsrau32:
;CHECK: usra.2s
	%tmp1 = load <2 x i32>, <2 x i32>* %A
	%tmp2 = load <2 x i32>, <2 x i32>* %B
	%tmp3 = lshr <2 x i32> %tmp2, < i32 31, i32 31 >
	%tmp4 = add <2 x i32> %tmp1, %tmp3
	ret <2 x i32> %tmp4
}

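; Unsigned 128-bit (Q-register) variants.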
define <16 x i8> @vsraQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: vsraQu8:
;CHECK: usra.16b
	%tmp1 = load <16 x i8>, <16 x i8>* %A
	%tmp2 = load <16 x i8>, <16 x i8>* %B
	%tmp3 = lshr <16 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
	%tmp4 = add <16 x i8> %tmp1, %tmp3
	ret <16 x i8> %tmp4
}

define <8 x i16> @vsraQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: vsraQu16:
;CHECK: usra.8h
	%tmp1 = load <8 x i16>, <8 x i16>* %A
	%tmp2 = load <8 x i16>, <8 x i16>* %B
	%tmp3 = lshr <8 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
	%tmp4 = add <8 x i16> %tmp1, %tmp3
	ret <8 x i16> %tmp4
}

define <4 x i32> @vsraQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: vsraQu32:
;CHECK: usra.4s
	%tmp1 = load <4 x i32>, <4 x i32>* %A
	%tmp2 = load <4 x i32>, <4 x i32>* %B
	%tmp3 = lshr <4 x i32> %tmp2, < i32 31, i32 31, i32 31, i32 31 >
	%tmp4 = add <4 x i32> %tmp1, %tmp3
	ret <4 x i32> %tmp4
}

define <2 x i64> @vsraQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
;CHECK-LABEL: vsraQu64:
;CHECK: usra.2d
	%tmp1 = load <2 x i64>, <2 x i64>* %A
	%tmp2 = load <2 x i64>, <2 x i64>* %B
	%tmp3 = lshr <2 x i64> %tmp2, < i64 63, i64 63 >
	%tmp4 = add <2 x i64> %tmp1, %tmp3
	ret <2 x i64> %tmp4
}

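; The <1 x i64> case should use the scalar (d-register) form of ssra.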
define <1 x i64> @vsra_v1i64(<1 x i64> %A, <1 x i64> %B) nounwind {
; CHECK-LABEL: vsra_v1i64:
; CHECK: ssra d0, d1, #63
	%tmp3 = ashr <1 x i64> %B, < i64 63 >
	%tmp4 = add <1 x i64> %A, %tmp3
	ret <1 x i64> %tmp4
}