; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s

;;; Test load/save vector mask intrinsic instructions
;;;
;;; Note:
;;;   We test LVMir_m, LVMyir_y, SVMmi, and SVMyi instructions.
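;;;
;;;   Informal background (the VE ISA manual is authoritative): lvm writes a
;;;   64-bit scalar into one of the four 64-bit elements of a 256-bit vector
;;;   mask register, and svm reads one element back.  A <512 x i1> mask
;;;   occupies a pair of VM registers, so its element indices run from 0 to 7.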

; Function Attrs: nounwind readnone
define i64 @lvm_mmss(i8* nocapture readnone %0, i64 %1) {
; CHECK-LABEL: lvm_mmss:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lvm %vm1, 3, %s1
; CHECK-NEXT:    svm %s0, %vm1, 3
; CHECK-NEXT:    b.l.t (, %s10)
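; Writes %1 into element 3 of a 256-bit mask and reads the same element back,
; so the function simply returns %1.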
  %3 = tail call <256 x i1> @llvm.ve.vl.lvm.mmss(<256 x i1> undef, i64 3, i64 %1)
  %4 = tail call i64 @llvm.ve.vl.svm.sms(<256 x i1> %3, i64 3)
  ret i64 %4
}

; Function Attrs: nounwind readnone
declare <256 x i1> @llvm.ve.vl.lvm.mmss(<256 x i1>, i64, i64)

; Function Attrs: nounwind readnone
declare i64 @llvm.ve.vl.svm.sms(<256 x i1>, i64)

; Function Attrs: nounwind readnone
define i64 @lvml_MMss(i8* nocapture readnone %0, i64 %1) {
; CHECK-LABEL: lvml_MMss:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lvm %vm2, 1, %s1
; CHECK-NEXT:    svm %s0, %vm3, 3
; CHECK-NEXT:    svm %s1, %vm2, 2
; CHECK-NEXT:    adds.l %s0, %s1, %s0
; CHECK-NEXT:    b.l.t (, %s10)
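; Exercises the VM-pair forms: each index maps mod 4 to an element number
; (5 -> 1, 3 -> 3, 6 -> 2), and the two svm reads target the two registers of
; the pair, as reflected by the %vm2/%vm3 operands in the CHECK lines above.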
  %3 = tail call <512 x i1> @llvm.ve.vl.lvm.MMss(<512 x i1> undef, i64 5, i64 %1)
  %4 = tail call i64 @llvm.ve.vl.svm.sMs(<512 x i1> %3, i64 3)
  %5 = tail call i64 @llvm.ve.vl.svm.sMs(<512 x i1> %3, i64 6)
  %6 = add i64 %5, %4
  ret i64 %6
}

; Function Attrs: nounwind readnone
declare <512 x i1> @llvm.ve.vl.lvm.MMss(<512 x i1>, i64, i64)

; Function Attrs: nounwind readnone
declare i64 @llvm.ve.vl.svm.sMs(<512 x i1>, i64)