; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple riscv32 -mattr=+experimental-v %s -o - \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: llc -mtriple riscv64 -mattr=+experimental-v %s -o - \
; RUN:   -verify-machineinstrs | FileCheck %s

; Basic codegen tests: scalable-vector i32 add at each LMUL (m1..m8, mf2)
; lowers to vle32/vadd.vv/vse32 with a vsetvli per vector operation.

define void @vadd_vint32m1(<vscale x 2 x i32> *%pc, <vscale x 2 x i32> *%pa, <vscale x 2 x i32> *%pb) nounwind {
; CHECK-LABEL: vadd_vint32m1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e32,m1,tu,mu
; CHECK-NEXT:    vle32.v v25, (a1)
; CHECK-NEXT:    vsetvli a1, zero, e32,m1,tu,mu
; CHECK-NEXT:    vle32.v v26, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e32,m1,tu,mu
; CHECK-NEXT:    vadd.vv v25, v25, v26
; CHECK-NEXT:    vsetvli a1, zero, e32,m1,tu,mu
; CHECK-NEXT:    vse32.v v25, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 2 x i32>, <vscale x 2 x i32>* %pa
  %vb = load <vscale x 2 x i32>, <vscale x 2 x i32>* %pb
  %vc = add <vscale x 2 x i32> %va, %vb
  store <vscale x 2 x i32> %vc, <vscale x 2 x i32> *%pc
  ret void
}

define void @vadd_vint32m2(<vscale x 4 x i32> *%pc, <vscale x 4 x i32> *%pa, <vscale x 4 x i32> *%pb) nounwind {
; CHECK-LABEL: vadd_vint32m2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e32,m2,tu,mu
; CHECK-NEXT:    vle32.v v26, (a1)
; CHECK-NEXT:    vsetvli a1, zero, e32,m2,tu,mu
; CHECK-NEXT:    vle32.v v28, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e32,m2,tu,mu
; CHECK-NEXT:    vadd.vv v26, v26, v28
; CHECK-NEXT:    vsetvli a1, zero, e32,m2,tu,mu
; CHECK-NEXT:    vse32.v v26, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 4 x i32>, <vscale x 4 x i32>* %pa
  %vb = load <vscale x 4 x i32>, <vscale x 4 x i32>* %pb
  %vc = add <vscale x 4 x i32> %va, %vb
  store <vscale x 4 x i32> %vc, <vscale x 4 x i32> *%pc
  ret void
}

define void @vadd_vint32m4(<vscale x 8 x i32> *%pc, <vscale x 8 x i32> *%pa, <vscale x 8 x i32> *%pb) nounwind {
; CHECK-LABEL: vadd_vint32m4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e32,m4,tu,mu
; CHECK-NEXT:    vle32.v v28, (a1)
; CHECK-NEXT:    vsetvli a1, zero, e32,m4,tu,mu
; CHECK-NEXT:    vle32.v v8, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e32,m4,tu,mu
; CHECK-NEXT:    vadd.vv v28, v28, v8
; CHECK-NEXT:    vsetvli a1, zero, e32,m4,tu,mu
; CHECK-NEXT:    vse32.v v28, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 8 x i32>, <vscale x 8 x i32>* %pa
  %vb = load <vscale x 8 x i32>, <vscale x 8 x i32>* %pb
  %vc = add <vscale x 8 x i32> %va, %vb
  store <vscale x 8 x i32> %vc, <vscale x 8 x i32> *%pc
  ret void
}

define void @vadd_vint32m8(<vscale x 16 x i32> *%pc, <vscale x 16 x i32> *%pa, <vscale x 16 x i32> *%pb) nounwind {
; CHECK-LABEL: vadd_vint32m8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e32,m8,tu,mu
; CHECK-NEXT:    vle32.v v8, (a1)
; CHECK-NEXT:    vsetvli a1, zero, e32,m8,tu,mu
; CHECK-NEXT:    vle32.v v16, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e32,m8,tu,mu
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    vsetvli a1, zero, e32,m8,tu,mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 16 x i32>, <vscale x 16 x i32>* %pa
  %vb = load <vscale x 16 x i32>, <vscale x 16 x i32>* %pb
  %vc = add <vscale x 16 x i32> %va, %vb
  store <vscale x 16 x i32> %vc, <vscale x 16 x i32> *%pc
  ret void
}

define void @vadd_vint32mf2(<vscale x 1 x i32> *%pc, <vscale x 1 x i32> *%pa, <vscale x 1 x i32> *%pb) nounwind {
; CHECK-LABEL: vadd_vint32mf2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e32,mf2,tu,mu
; CHECK-NEXT:    vle32.v v25, (a1)
; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,tu,mu
; CHECK-NEXT:    vle32.v v26, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,tu,mu
; CHECK-NEXT:    vadd.vv v25, v25, v26
; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,tu,mu
; CHECK-NEXT:    vse32.v v25, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 1 x i32>, <vscale x 1 x i32>* %pa
  %vb = load <vscale x 1 x i32>, <vscale x 1 x i32>* %pb
  %vc = add <vscale x 1 x i32> %va, %vb
  store <vscale x 1 x i32> %vc, <vscale x 1 x i32> *%pc
  ret void
}