; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600

; llvm.mips.addv.b: MSA vector add of sixteen i8 lanes; expect a single addv.b.
declare <16 x i8> @llvm.mips.addv.b(<16 x i8>, <16 x i8>)
define void @add_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
; P5600-LABEL: add_v16i8_builtin:
; P5600:       # %bb.0: # %entry
; P5600-NEXT:    ld.b $w0, 0($4)
; P5600-NEXT:    ld.b $w1, 0($5)
; P5600-NEXT:    addv.b $w0, $w0, $w1
; P5600-NEXT:    st.b $w0, 0($6)
; P5600-NEXT:    jr $ra
; P5600-NEXT:    nop
entry:
  %0 = load <16 x i8>, <16 x i8>* %a, align 16
  %1 = load <16 x i8>, <16 x i8>* %b, align 16
  %2 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %1)
  store <16 x i8> %2, <16 x i8>* %c, align 16
  ret void
}

; llvm.mips.addv.h: MSA vector add of eight i16 lanes; expect a single addv.h.
declare <8 x i16> @llvm.mips.addv.h(<8 x i16>, <8 x i16>)
define void @add_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
; P5600-LABEL: add_v8i16_builtin:
; P5600:       # %bb.0: # %entry
; P5600-NEXT:    ld.h $w0, 0($4)
; P5600-NEXT:    ld.h $w1, 0($5)
; P5600-NEXT:    addv.h $w0, $w0, $w1
; P5600-NEXT:    st.h $w0, 0($6)
; P5600-NEXT:    jr $ra
; P5600-NEXT:    nop
entry:
  %0 = load <8 x i16>, <8 x i16>* %a, align 16
  %1 = load <8 x i16>, <8 x i16>* %b, align 16
  %2 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %1)
  store <8 x i16> %2, <8 x i16>* %c, align 16
  ret void
}

; llvm.mips.addv.w: MSA vector add of four i32 lanes; expect a single addv.w.
declare <4 x i32> @llvm.mips.addv.w(<4 x i32>, <4 x i32>)
define void @add_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
; P5600-LABEL: add_v4i32_builtin:
; P5600:       # %bb.0: # %entry
; P5600-NEXT:    ld.w $w0, 0($4)
; P5600-NEXT:    ld.w $w1, 0($5)
; P5600-NEXT:    addv.w $w0, $w0, $w1
; P5600-NEXT:    st.w $w0, 0($6)
; P5600-NEXT:    jr $ra
; P5600-NEXT:    nop
entry:
  %0 = load <4 x i32>, <4 x i32>* %a, align 16
  %1 = load <4 x i32>, <4 x i32>* %b, align 16
  %2 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %1)
  store <4 x i32> %2, <4 x i32>* %c, align 16
  ret void
}

; llvm.mips.addv.d: MSA vector add of two i64 lanes; expect a single addv.d.
declare <2 x i64> @llvm.mips.addv.d(<2 x i64>, <2 x i64>)
define void @add_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
; P5600-LABEL: add_v2i64_builtin:
; P5600:       # %bb.0: # %entry
; P5600-NEXT:    ld.d $w0, 0($4)
; P5600-NEXT:    ld.d $w1, 0($5)
; P5600-NEXT:    addv.d $w0, $w0, $w1
; P5600-NEXT:    st.d $w0, 0($6)
; P5600-NEXT:    jr $ra
; P5600-NEXT:    nop
entry:
  %0 = load <2 x i64>, <2 x i64>* %a, align 16
  %1 = load <2 x i64>, <2 x i64>* %b, align 16
  %2 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %1)
  store <2 x i64> %2, <2 x i64>* %c, align 16
  ret void
}

; llvm.mips.addvi.b: MSA vector add with a 5-bit unsigned immediate (i8 lanes);
; expect a single addvi.b with the immediate folded in.
declare <16 x i8> @llvm.mips.addvi.b(<16 x i8>, i32 immarg)
define void @add_v16i8_builtin_imm(<16 x i8>* %a, <16 x i8>* %c) {
; P5600-LABEL: add_v16i8_builtin_imm:
; P5600:       # %bb.0: # %entry
; P5600-NEXT:    ld.b $w0, 0($4)
; P5600-NEXT:    addvi.b $w0, $w0, 3
; P5600-NEXT:    st.b $w0, 0($5)
; P5600-NEXT:    jr $ra
; P5600-NEXT:    nop
entry:
  %0 = load <16 x i8>, <16 x i8>* %a, align 16
  %1 = tail call <16 x i8> @llvm.mips.addvi.b(<16 x i8> %0, i32 3)
  store <16 x i8> %1, <16 x i8>* %c, align 16
  ret void
}

; llvm.mips.addvi.h: MSA vector add with a 5-bit unsigned immediate (i16 lanes);
; expect a single addvi.h with the immediate folded in.
declare <8 x i16> @llvm.mips.addvi.h(<8 x i16>, i32 immarg)
define void @add_v8i16_builtin_imm(<8 x i16>* %a, <8 x i16>* %c) {
; P5600-LABEL: add_v8i16_builtin_imm:
; P5600:       # %bb.0: # %entry
; P5600-NEXT:    ld.h $w0, 0($4)
; P5600-NEXT:    addvi.h $w0, $w0, 18
; P5600-NEXT:    st.h $w0, 0($5)
; P5600-NEXT:    jr $ra
; P5600-NEXT:    nop
entry:
  %0 = load <8 x i16>, <8 x i16>* %a, align 16
  %1 = tail call <8 x i16> @llvm.mips.addvi.h(<8 x i16> %0, i32 18)
  store <8 x i16> %1, <8 x i16>* %c, align 16
  ret void
}

; llvm.mips.addvi.w: MSA vector add with a 5-bit unsigned immediate (i32 lanes);
; expect a single addvi.w with the immediate folded in.
declare <4 x i32> @llvm.mips.addvi.w(<4 x i32>, i32 immarg)
define void @add_v4i32_builtin_imm(<4 x i32>* %a, <4 x i32>* %c) {
; P5600-LABEL: add_v4i32_builtin_imm:
; P5600:       # %bb.0: # %entry
; P5600-NEXT:    ld.w $w0, 0($4)
; P5600-NEXT:    addvi.w $w0, $w0, 25
; P5600-NEXT:    st.w $w0, 0($5)
; P5600-NEXT:    jr $ra
; P5600-NEXT:    nop
entry:
  %0 = load <4 x i32>, <4 x i32>* %a, align 16
  %1 = tail call <4 x i32> @llvm.mips.addvi.w(<4 x i32> %0, i32 25)
  store <4 x i32> %1, <4 x i32>* %c, align 16
  ret void
}

; llvm.mips.addvi.d: MSA vector add with a 5-bit unsigned immediate (i64 lanes);
; expect a single addvi.d with the immediate (31, the maximum) folded in.
declare <2 x i64> @llvm.mips.addvi.d(<2 x i64>, i32 immarg)
define void @add_v2i64_builtin_imm(<2 x i64>* %a, <2 x i64>* %c) {
; P5600-LABEL: add_v2i64_builtin_imm:
; P5600:       # %bb.0: # %entry
; P5600-NEXT:    ld.d $w0, 0($4)
; P5600-NEXT:    addvi.d $w0, $w0, 31
; P5600-NEXT:    st.d $w0, 0($5)
; P5600-NEXT:    jr $ra
; P5600-NEXT:    nop
entry:
  %0 = load <2 x i64>, <2 x i64>* %a, align 16
  %1 = tail call <2 x i64> @llvm.mips.addvi.d(<2 x i64> %0, i32 31)
  store <2 x i64> %1, <2 x i64>* %c, align 16
  ret void
}
