; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple riscv32 -mattr=+experimental-v %s -o - \
; RUN:     -verify-machineinstrs | FileCheck %s
; RUN: llc -mtriple riscv64 -mattr=+experimental-v %s -o - \
; RUN:     -verify-machineinstrs | FileCheck %s

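; This file checks codegen for a plain scalable-vector i8 add at every legal
; LMUL: m1, m2, m4, m8, mf2, mf4 and mf8. Each case should select a
; vle8.v/vadd.vv/vse8.v sequence; the autogenerated assertions also record
; that the current lowering emits a vsetvli before every vector instruction.

; LMUL=1: <vscale x 8 x i8> fits in a single vector register.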
define void @vadd_vint8m1(<vscale x 8 x i8>* %pc, <vscale x 8 x i8>* %pa, <vscale x 8 x i8>* %pb) nounwind {
; CHECK-LABEL: vadd_vint8m1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e8,m1,tu,mu
; CHECK-NEXT:    vle8.v v25, (a1)
; CHECK-NEXT:    vsetvli a1, zero, e8,m1,tu,mu
; CHECK-NEXT:    vle8.v v26, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e8,m1,tu,mu
; CHECK-NEXT:    vadd.vv v25, v25, v26
; CHECK-NEXT:    vsetvli a1, zero, e8,m1,tu,mu
; CHECK-NEXT:    vse8.v v25, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 8 x i8>, <vscale x 8 x i8>* %pa
  %vb = load <vscale x 8 x i8>, <vscale x 8 x i8>* %pb
  %vc = add <vscale x 8 x i8> %va, %vb
  store <vscale x 8 x i8> %vc, <vscale x 8 x i8>* %pc
  ret void
}

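; LMUL=2: <vscale x 16 x i8> needs a two-register group, so only
; even-numbered registers (v26, v28) are used.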
define void @vadd_vint8m2(<vscale x 16 x i8>* %pc, <vscale x 16 x i8>* %pa, <vscale x 16 x i8>* %pb) nounwind {
; CHECK-LABEL: vadd_vint8m2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e8,m2,tu,mu
; CHECK-NEXT:    vle8.v v26, (a1)
; CHECK-NEXT:    vsetvli a1, zero, e8,m2,tu,mu
; CHECK-NEXT:    vle8.v v28, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e8,m2,tu,mu
; CHECK-NEXT:    vadd.vv v26, v26, v28
; CHECK-NEXT:    vsetvli a1, zero, e8,m2,tu,mu
; CHECK-NEXT:    vse8.v v26, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 16 x i8>, <vscale x 16 x i8>* %pa
  %vb = load <vscale x 16 x i8>, <vscale x 16 x i8>* %pb
  %vc = add <vscale x 16 x i8> %va, %vb
  store <vscale x 16 x i8> %vc, <vscale x 16 x i8>* %pc
  ret void
}

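; LMUL=4: <vscale x 32 x i8> needs a four-register group aligned to a
; multiple of four (v28, v8).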
define void @vadd_vint8m4(<vscale x 32 x i8>* %pc, <vscale x 32 x i8>* %pa, <vscale x 32 x i8>* %pb) nounwind {
; CHECK-LABEL: vadd_vint8m4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e8,m4,tu,mu
; CHECK-NEXT:    vle8.v v28, (a1)
; CHECK-NEXT:    vsetvli a1, zero, e8,m4,tu,mu
; CHECK-NEXT:    vle8.v v8, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e8,m4,tu,mu
; CHECK-NEXT:    vadd.vv v28, v28, v8
; CHECK-NEXT:    vsetvli a1, zero, e8,m4,tu,mu
; CHECK-NEXT:    vse8.v v28, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 32 x i8>, <vscale x 32 x i8>* %pa
  %vb = load <vscale x 32 x i8>, <vscale x 32 x i8>* %pb
  %vc = add <vscale x 32 x i8> %va, %vb
  store <vscale x 32 x i8> %vc, <vscale x 32 x i8>* %pc
  ret void
}

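; LMUL=8: <vscale x 64 x i8> needs an eight-register group (v8, v16).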
define void @vadd_vint8m8(<vscale x 64 x i8>* %pc, <vscale x 64 x i8>* %pa, <vscale x 64 x i8>* %pb) nounwind {
; CHECK-LABEL: vadd_vint8m8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e8,m8,tu,mu
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vsetvli a1, zero, e8,m8,tu,mu
; CHECK-NEXT:    vle8.v v16, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e8,m8,tu,mu
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    vsetvli a1, zero, e8,m8,tu,mu
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 64 x i8>, <vscale x 64 x i8>* %pa
  %vb = load <vscale x 64 x i8>, <vscale x 64 x i8>* %pb
  %vc = add <vscale x 64 x i8> %va, %vb
  store <vscale x 64 x i8> %vc, <vscale x 64 x i8>* %pc
  ret void
}

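; Fractional LMUL=1/2: <vscale x 4 x i8> uses mf2.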
define void @vadd_vint8mf2(<vscale x 4 x i8>* %pc, <vscale x 4 x i8>* %pa, <vscale x 4 x i8>* %pb) nounwind {
; CHECK-LABEL: vadd_vint8mf2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e8,mf2,tu,mu
; CHECK-NEXT:    vle8.v v25, (a1)
; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,tu,mu
; CHECK-NEXT:    vle8.v v26, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,tu,mu
; CHECK-NEXT:    vadd.vv v25, v25, v26
; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,tu,mu
; CHECK-NEXT:    vse8.v v25, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 4 x i8>, <vscale x 4 x i8>* %pa
  %vb = load <vscale x 4 x i8>, <vscale x 4 x i8>* %pb
  %vc = add <vscale x 4 x i8> %va, %vb
  store <vscale x 4 x i8> %vc, <vscale x 4 x i8>* %pc
  ret void
}

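; Fractional LMUL=1/4: <vscale x 2 x i8> uses mf4.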
define void @vadd_vint8mf4(<vscale x 2 x i8>* %pc, <vscale x 2 x i8>* %pa, <vscale x 2 x i8>* %pb) nounwind {
; CHECK-LABEL: vadd_vint8mf4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e8,mf4,tu,mu
; CHECK-NEXT:    vle8.v v25, (a1)
; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,tu,mu
; CHECK-NEXT:    vle8.v v26, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,tu,mu
; CHECK-NEXT:    vadd.vv v25, v25, v26
; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,tu,mu
; CHECK-NEXT:    vse8.v v25, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 2 x i8>, <vscale x 2 x i8>* %pa
  %vb = load <vscale x 2 x i8>, <vscale x 2 x i8>* %pb
  %vc = add <vscale x 2 x i8> %va, %vb
  store <vscale x 2 x i8> %vc, <vscale x 2 x i8>* %pc
  ret void
}

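; Fractional LMUL=1/8: <vscale x 1 x i8> uses mf8.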
define void @vadd_vint8mf8(<vscale x 1 x i8>* %pc, <vscale x 1 x i8>* %pa, <vscale x 1 x i8>* %pb) nounwind {
; CHECK-LABEL: vadd_vint8mf8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e8,mf8,tu,mu
; CHECK-NEXT:    vle8.v v25, (a1)
; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,tu,mu
; CHECK-NEXT:    vle8.v v26, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,tu,mu
; CHECK-NEXT:    vadd.vv v25, v25, v26
; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,tu,mu
; CHECK-NEXT:    vse8.v v25, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 1 x i8>, <vscale x 1 x i8>* %pa
  %vb = load <vscale x 1 x i8>, <vscale x 1 x i8>* %pb
  %vc = add <vscale x 1 x i8> %va, %vb
  store <vscale x 1 x i8> %vc, <vscale x 1 x i8>* %pc
  ret void
}