; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple riscv32 -mattr=+experimental-v %s -o - \
; RUN:     -verify-machineinstrs | FileCheck %s
; RUN: llc -mtriple riscv64 -mattr=+experimental-v %s -o - \
; RUN:     -verify-machineinstrs | FileCheck %s

; Check that load/add/store of scalable <vscale x N x i64> vectors at
; LMUL 1, 2, 4, and 8 selects to vle64.v/vadd.vv/vse64.v under a VLMAX
; vsetvli (AVL register x0) on both riscv32 and riscv64.

define void @vadd_vint64m1(<vscale x 1 x i64> *%pc, <vscale x 1 x i64> *%pa, <vscale x 1 x i64> *%pb) nounwind {
; CHECK-LABEL: vadd_vint64m1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e64,m1,tu,mu
; CHECK-NEXT:    vle64.v v25, (a1)
; CHECK-NEXT:    vsetvli a1, zero, e64,m1,tu,mu
; CHECK-NEXT:    vle64.v v26, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e64,m1,tu,mu
; CHECK-NEXT:    vadd.vv v25, v25, v26
; CHECK-NEXT:    vsetvli a1, zero, e64,m1,tu,mu
; CHECK-NEXT:    vse64.v v25, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 1 x i64>, <vscale x 1 x i64>* %pa
  %vb = load <vscale x 1 x i64>, <vscale x 1 x i64>* %pb
  %vc = add <vscale x 1 x i64> %va, %vb
  store <vscale x 1 x i64> %vc, <vscale x 1 x i64> *%pc
  ret void
}

define void @vadd_vint64m2(<vscale x 2 x i64> *%pc, <vscale x 2 x i64> *%pa, <vscale x 2 x i64> *%pb) nounwind {
; CHECK-LABEL: vadd_vint64m2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e64,m2,tu,mu
; CHECK-NEXT:    vle64.v v26, (a1)
; CHECK-NEXT:    vsetvli a1, zero, e64,m2,tu,mu
; CHECK-NEXT:    vle64.v v28, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e64,m2,tu,mu
; CHECK-NEXT:    vadd.vv v26, v26, v28
; CHECK-NEXT:    vsetvli a1, zero, e64,m2,tu,mu
; CHECK-NEXT:    vse64.v v26, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 2 x i64>, <vscale x 2 x i64>* %pa
  %vb = load <vscale x 2 x i64>, <vscale x 2 x i64>* %pb
  %vc = add <vscale x 2 x i64> %va, %vb
  store <vscale x 2 x i64> %vc, <vscale x 2 x i64> *%pc
  ret void
}

define void @vadd_vint64m4(<vscale x 4 x i64> *%pc, <vscale x 4 x i64> *%pa, <vscale x 4 x i64> *%pb) nounwind {
; CHECK-LABEL: vadd_vint64m4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e64,m4,tu,mu
; CHECK-NEXT:    vle64.v v28, (a1)
; CHECK-NEXT:    vsetvli a1, zero, e64,m4,tu,mu
; CHECK-NEXT:    vle64.v v8, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e64,m4,tu,mu
; CHECK-NEXT:    vadd.vv v28, v28, v8
; CHECK-NEXT:    vsetvli a1, zero, e64,m4,tu,mu
; CHECK-NEXT:    vse64.v v28, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 4 x i64>, <vscale x 4 x i64>* %pa
  %vb = load <vscale x 4 x i64>, <vscale x 4 x i64>* %pb
  %vc = add <vscale x 4 x i64> %va, %vb
  store <vscale x 4 x i64> %vc, <vscale x 4 x i64> *%pc
  ret void
}

define void @vadd_vint64m8(<vscale x 8 x i64> *%pc, <vscale x 8 x i64> *%pa, <vscale x 8 x i64> *%pb) nounwind {
; CHECK-LABEL: vadd_vint64m8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e64,m8,tu,mu
; CHECK-NEXT:    vle64.v v8, (a1)
; CHECK-NEXT:    vsetvli a1, zero, e64,m8,tu,mu
; CHECK-NEXT:    vle64.v v16, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e64,m8,tu,mu
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    vsetvli a1, zero, e64,m8,tu,mu
; CHECK-NEXT:    vse64.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 8 x i64>, <vscale x 8 x i64>* %pa
  %vb = load <vscale x 8 x i64>, <vscale x 8 x i64>* %pb
  %vc = add <vscale x 8 x i64> %va, %vb
  store <vscale x 8 x i64> %vc, <vscale x 8 x i64> *%pc
  ret void
}