; Test the MSA intrinsics that are encoded with the 3R instruction format,
; use the accumulator as a third operand, and produce result elements that
; are wider than the operand elements.

; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s

; dpadd_s.h: <16 x i8> multiplicands, <8 x i16> accumulator/result (widening).
@llvm_mips_dpadd_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_dpadd_s_h_ARG2 = global <16 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23>, align 16
@llvm_mips_dpadd_s_h_ARG3 = global <16 x i8> <i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32, i8 33, i8 34, i8 35, i8 36, i8 37, i8 38, i8 39>, align 16
@llvm_mips_dpadd_s_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_dpadd_s_h_test() nounwind {
entry:
  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_s_h_ARG1
  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_s_h_ARG2
  %2 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_s_h_ARG3
  %3 = tail call <8 x i16> @llvm.mips.dpadd.s.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
  store <8 x i16> %3, <8 x i16>* @llvm_mips_dpadd_s_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.dpadd.s.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_dpadd_s_h_test:
; CHECK: ld.b
; CHECK: ld.b
; CHECK: ld.h
; CHECK: dpadd_s.h
; CHECK: st.h
; CHECK: .size llvm_mips_dpadd_s_h_test
;
; dpadd_s.w: <8 x i16> multiplicands, <4 x i32> accumulator/result (widening).
@llvm_mips_dpadd_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_dpadd_s_w_ARG2 = global <8 x i16> <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11>, align 16
@llvm_mips_dpadd_s_w_ARG3 = global <8 x i16> <i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>, align 16
@llvm_mips_dpadd_s_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_dpadd_s_w_test() nounwind {
entry:
  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_s_w_ARG1
  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_s_w_ARG2
  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_s_w_ARG3
  %3 = tail call <4 x i32> @llvm.mips.dpadd.s.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
  store <4 x i32> %3, <4 x i32>* @llvm_mips_dpadd_s_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.dpadd.s.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_dpadd_s_w_test:
; CHECK: ld.h
; CHECK: ld.h
; CHECK: ld.w
; CHECK: dpadd_s.w
; CHECK: st.w
; CHECK: .size llvm_mips_dpadd_s_w_test
;
; dpadd_s.d: <4 x i32> multiplicands, <2 x i64> accumulator/result (widening).
@llvm_mips_dpadd_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_dpadd_s_d_ARG2 = global <4 x i32> <i32 2, i32 3, i32 4, i32 5>, align 16
@llvm_mips_dpadd_s_d_ARG3 = global <4 x i32> <i32 6, i32 7, i32 8, i32 9>, align 16
@llvm_mips_dpadd_s_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_dpadd_s_d_test() nounwind {
entry:
  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_dpadd_s_d_ARG1
  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_s_d_ARG2
  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_s_d_ARG3
  %3 = tail call <2 x i64> @llvm.mips.dpadd.s.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
  store <2 x i64> %3, <2 x i64>* @llvm_mips_dpadd_s_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.dpadd.s.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_dpadd_s_d_test:
; CHECK: ld.w
; CHECK: ld.w
; CHECK: ld.d
; CHECK: dpadd_s.d
; CHECK: st.d
; CHECK: .size llvm_mips_dpadd_s_d_test
;
; dpadd_u.h: <16 x i8> multiplicands, <8 x i16> accumulator/result (widening).
@llvm_mips_dpadd_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_dpadd_u_h_ARG2 = global <16 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23>, align 16
@llvm_mips_dpadd_u_h_ARG3 = global <16 x i8> <i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32, i8 33, i8 34, i8 35, i8 36, i8 37, i8 38, i8 39>, align 16
@llvm_mips_dpadd_u_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_dpadd_u_h_test() nounwind {
entry:
  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_u_h_ARG1
  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_u_h_ARG2
  %2 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_u_h_ARG3
  %3 = tail call <8 x i16> @llvm.mips.dpadd.u.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
  store <8 x i16> %3, <8 x i16>* @llvm_mips_dpadd_u_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.dpadd.u.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_dpadd_u_h_test:
; CHECK: ld.b
; CHECK: ld.b
; CHECK: ld.h
; CHECK: dpadd_u.h
; CHECK: st.h
; CHECK: .size llvm_mips_dpadd_u_h_test
;
; dpadd_u.w: <8 x i16> multiplicands, <4 x i32> accumulator/result (widening).
@llvm_mips_dpadd_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_dpadd_u_w_ARG2 = global <8 x i16> <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11>, align 16
@llvm_mips_dpadd_u_w_ARG3 = global <8 x i16> <i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>, align 16
@llvm_mips_dpadd_u_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_dpadd_u_w_test() nounwind {
entry:
  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_u_w_ARG1
  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_u_w_ARG2
  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_u_w_ARG3
  %3 = tail call <4 x i32> @llvm.mips.dpadd.u.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
  store <4 x i32> %3, <4 x i32>* @llvm_mips_dpadd_u_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.dpadd.u.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_dpadd_u_w_test:
; CHECK: ld.h
; CHECK: ld.h
; CHECK: ld.w
; CHECK: dpadd_u.w
; CHECK: st.w
; CHECK: .size llvm_mips_dpadd_u_w_test
;
; dpadd_u.d: <4 x i32> multiplicands, <2 x i64> accumulator/result (widening).
@llvm_mips_dpadd_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_dpadd_u_d_ARG2 = global <4 x i32> <i32 2, i32 3, i32 4, i32 5>, align 16
@llvm_mips_dpadd_u_d_ARG3 = global <4 x i32> <i32 6, i32 7, i32 8, i32 9>, align 16
@llvm_mips_dpadd_u_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_dpadd_u_d_test() nounwind {
entry:
  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_dpadd_u_d_ARG1
  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_u_d_ARG2
  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_u_d_ARG3
  %3 = tail call <2 x i64> @llvm.mips.dpadd.u.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
  store <2 x i64> %3, <2 x i64>* @llvm_mips_dpadd_u_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.dpadd.u.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_dpadd_u_d_test:
; CHECK: ld.w
; CHECK: ld.w
; CHECK: ld.d
; CHECK: dpadd_u.d
; CHECK: st.d
; CHECK: .size llvm_mips_dpadd_u_d_test
;
; dpsub_s.h: <16 x i8> multiplicands, <8 x i16> accumulator/result (widening).
@llvm_mips_dpsub_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_dpsub_s_h_ARG2 = global <16 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23>, align 16
@llvm_mips_dpsub_s_h_ARG3 = global <16 x i8> <i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32, i8 33, i8 34, i8 35, i8 36, i8 37, i8 38, i8 39>, align 16
@llvm_mips_dpsub_s_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_dpsub_s_h_test() nounwind {
entry:
  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_s_h_ARG1
  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpsub_s_h_ARG2
  %2 = load <16 x i8>, <16 x i8>* @llvm_mips_dpsub_s_h_ARG3
  %3 = tail call <8 x i16> @llvm.mips.dpsub.s.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
  store <8 x i16> %3, <8 x i16>* @llvm_mips_dpsub_s_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.dpsub.s.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_dpsub_s_h_test:
; CHECK: ld.b
; CHECK: ld.b
; CHECK: ld.h
; CHECK: dpsub_s.h
; CHECK: st.h
; CHECK: .size llvm_mips_dpsub_s_h_test
;
; dpsub_s.w: <8 x i16> multiplicands, <4 x i32> accumulator/result (widening).
@llvm_mips_dpsub_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_dpsub_s_w_ARG2 = global <8 x i16> <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11>, align 16
@llvm_mips_dpsub_s_w_ARG3 = global <8 x i16> <i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>, align 16
@llvm_mips_dpsub_s_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_dpsub_s_w_test() nounwind {
entry:
  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_s_w_ARG1
  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_s_w_ARG2
  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_s_w_ARG3
  %3 = tail call <4 x i32> @llvm.mips.dpsub.s.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
  store <4 x i32> %3, <4 x i32>* @llvm_mips_dpsub_s_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.dpsub.s.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_dpsub_s_w_test:
; CHECK: ld.h
; CHECK: ld.h
; CHECK: ld.w
; CHECK: dpsub_s.w
; CHECK: st.w
; CHECK: .size llvm_mips_dpsub_s_w_test
;
; dpsub_s.d: <4 x i32> multiplicands, <2 x i64> accumulator/result (widening).
@llvm_mips_dpsub_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_dpsub_s_d_ARG2 = global <4 x i32> <i32 2, i32 3, i32 4, i32 5>, align 16
@llvm_mips_dpsub_s_d_ARG3 = global <4 x i32> <i32 6, i32 7, i32 8, i32 9>, align 16
@llvm_mips_dpsub_s_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_dpsub_s_d_test() nounwind {
entry:
  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_dpsub_s_d_ARG1
  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_s_d_ARG2
  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_s_d_ARG3
  %3 = tail call <2 x i64> @llvm.mips.dpsub.s.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
  store <2 x i64> %3, <2 x i64>* @llvm_mips_dpsub_s_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.dpsub.s.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_dpsub_s_d_test:
; CHECK: ld.w
; CHECK: ld.w
; CHECK: ld.d
; CHECK: dpsub_s.d
; CHECK: st.d
; CHECK: .size llvm_mips_dpsub_s_d_test
;
; dpsub_u.h: <16 x i8> multiplicands, <8 x i16> accumulator/result (widening).
@llvm_mips_dpsub_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_dpsub_u_h_ARG2 = global <16 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23>, align 16
@llvm_mips_dpsub_u_h_ARG3 = global <16 x i8> <i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32, i8 33, i8 34, i8 35, i8 36, i8 37, i8 38, i8 39>, align 16
@llvm_mips_dpsub_u_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_dpsub_u_h_test() nounwind {
entry:
  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_u_h_ARG1
  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpsub_u_h_ARG2
  %2 = load <16 x i8>, <16 x i8>* @llvm_mips_dpsub_u_h_ARG3
  %3 = tail call <8 x i16> @llvm.mips.dpsub.u.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
  store <8 x i16> %3, <8 x i16>* @llvm_mips_dpsub_u_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.dpsub.u.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_dpsub_u_h_test:
; CHECK: ld.b
; CHECK: ld.b
; CHECK: ld.h
; CHECK: dpsub_u.h
; CHECK: st.h
; CHECK: .size llvm_mips_dpsub_u_h_test
;
; dpsub_u.w: <8 x i16> multiplicands, <4 x i32> accumulator/result (widening).
@llvm_mips_dpsub_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_dpsub_u_w_ARG2 = global <8 x i16> <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11>, align 16
@llvm_mips_dpsub_u_w_ARG3 = global <8 x i16> <i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>, align 16
@llvm_mips_dpsub_u_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_dpsub_u_w_test() nounwind {
entry:
  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_u_w_ARG1
  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_u_w_ARG2
  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_u_w_ARG3
  %3 = tail call <4 x i32> @llvm.mips.dpsub.u.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
  store <4 x i32> %3, <4 x i32>* @llvm_mips_dpsub_u_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.dpsub.u.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_dpsub_u_w_test:
; CHECK: ld.h
; CHECK: ld.h
; CHECK: ld.w
; CHECK: dpsub_u.w
; CHECK: st.w
; CHECK: .size llvm_mips_dpsub_u_w_test
;
; dpsub_u.d: <4 x i32> multiplicands, <2 x i64> accumulator/result (widening).
@llvm_mips_dpsub_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_dpsub_u_d_ARG2 = global <4 x i32> <i32 2, i32 3, i32 4, i32 5>, align 16
@llvm_mips_dpsub_u_d_ARG3 = global <4 x i32> <i32 6, i32 7, i32 8, i32 9>, align 16
@llvm_mips_dpsub_u_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_dpsub_u_d_test() nounwind {
entry:
  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_dpsub_u_d_ARG1
  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_u_d_ARG2
  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_u_d_ARG3
  %3 = tail call <2 x i64> @llvm.mips.dpsub.u.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
  store <2 x i64> %3, <2 x i64>* @llvm_mips_dpsub_u_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.dpsub.u.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_dpsub_u_d_test:
; CHECK: ld.w
; CHECK: ld.w
; CHECK: ld.d
; CHECK: dpsub_u.d
; CHECK: st.d
; CHECK: .size llvm_mips_dpsub_u_d_test
;