; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s

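; Each function below loads two vectors and forms both halves of an unzip:
; the even-indexed lanes of the concatenated inputs and the odd-indexed
; lanes. Adding the two shuffle results keeps both of them live, so the
; backend can match the pair to a single two-result VUZP instruction.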
define <8 x i8> @vuzpi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: vuzpi8:
;CHECK: vuzp.8
;CHECK-NEXT: vadd.i8
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
	%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
	%tmp5 = add <8 x i8> %tmp3, %tmp4
	ret <8 x i8> %tmp5
}

define <4 x i16> @vuzpi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK: vuzpi16:
;CHECK: vuzp.16
;CHECK-NEXT: vadd.i16
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
	%tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
	%tmp5 = add <4 x i16> %tmp3, %tmp4
	ret <4 x i16> %tmp5
}

; VUZP.32 is equivalent to VTRN.32 for 64-bit vectors,
; so there is no 64-bit i32 unzip test here.

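; A minimal illustrative sketch (not part of the original test) of why: with
; only two 32-bit lanes per 64-bit register, the even/odd deinterleave below
; is the same permutation a transpose performs, so vtrn.32 is the expected
; instruction rather than vuzp.32. The function name is hypothetical.
define <2 x i32> @vuzpi32_sketch(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK: vuzpi32_sketch:
;CHECK: vtrn.32
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 0, i32 2>
	%tmp4 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 3>
	%tmp5 = add <2 x i32> %tmp3, %tmp4
	ret <2 x i32> %tmp5
}
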
define <16 x i8> @vuzpQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK: vuzpQi8:
;CHECK: vuzp.8
;CHECK-NEXT: vadd.i8
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
	%tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
	%tmp5 = add <16 x i8> %tmp3, %tmp4
	ret <16 x i8> %tmp5
}

define <8 x i16> @vuzpQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK: vuzpQi16:
;CHECK: vuzp.16
;CHECK-NEXT: vadd.i16
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
	%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
	%tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
	%tmp5 = add <8 x i16> %tmp3, %tmp4
	ret <8 x i16> %tmp5
}

define <4 x i32> @vuzpQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK: vuzpQi32:
;CHECK: vuzp.32
;CHECK-NEXT: vadd.i32
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
	%tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
	%tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
	%tmp5 = add <4 x i32> %tmp3, %tmp4
	ret <4 x i32> %tmp5
}

define <4 x float> @vuzpQf(<4 x float>* %A, <4 x float>* %B) nounwind {
;CHECK: vuzpQf:
;CHECK: vuzp.32
;CHECK-NEXT: vadd.f32
	%tmp1 = load <4 x float>* %A
	%tmp2 = load <4 x float>* %B
	%tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
	%tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
	%tmp5 = fadd <4 x float> %tmp3, %tmp4
	ret <4 x float> %tmp5
}

; Undef shuffle indices should not prevent matching to VUZP:

define <8 x i8> @vuzpi8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: vuzpi8_undef:
;CHECK: vuzp.8
;CHECK-NEXT: vadd.i8
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 2, i32 undef, i32 undef, i32 8, i32 10, i32 12, i32 14>
	%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 undef, i32 undef, i32 13, i32 15>
	%tmp5 = add <8 x i8> %tmp3, %tmp4
	ret <8 x i8> %tmp5
}

define <8 x i16> @vuzpQi16_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK: vuzpQi16_undef:
;CHECK: vuzp.16
;CHECK-NEXT: vadd.i16
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
	%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 undef, i32 4, i32 undef, i32 8, i32 10, i32 12, i32 14>
	%tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 undef, i32 undef, i32 11, i32 13, i32 15>
	%tmp5 = add <8 x i16> %tmp3, %tmp4
	ret <8 x i16> %tmp5
}