; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S %s -scalarize-masked-mem-intrin -mtriple=x86_64-linux-gnu | FileCheck %s

; Variable mask: each lane is guarded by a bit-test on the mask (bitcast to i2),
; and the load pointer is advanced via PHI only when the previous lane loaded
; (expandload reads consecutive memory for active lanes only).
define <2 x i64> @scalarize_v2i64(i64* %p, <2 x i1> %mask, <2 x i64> %passthru) {
; CHECK-LABEL: @scalarize_v2i64(
; CHECK-NEXT:    [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK:%.*]] to i2
; CHECK-NEXT:    [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], 1
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i2 [[TMP1]], 0
; CHECK-NEXT:    br i1 [[TMP2]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
; CHECK:       cond.load:
; CHECK-NEXT:    [[TMP3:%.*]] = load i64, i64* [[P:%.*]], align 1
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[TMP3]], i64 0
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i64, i64* [[P]], i32 1
; CHECK-NEXT:    br label [[ELSE]]
; CHECK:       else:
; CHECK-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i64> [ [[TMP4]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
; CHECK-NEXT:    [[PTR_PHI_ELSE:%.*]] = phi i64* [ [[TMP5]], [[COND_LOAD]] ], [ [[P]], [[TMP0]] ]
; CHECK-NEXT:    [[TMP6:%.*]] = and i2 [[SCALAR_MASK]], -2
; CHECK-NEXT:    [[TMP7:%.*]] = icmp ne i2 [[TMP6]], 0
; CHECK-NEXT:    br i1 [[TMP7]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
; CHECK:       cond.load1:
; CHECK-NEXT:    [[TMP8:%.*]] = load i64, i64* [[PTR_PHI_ELSE]], align 1
; CHECK-NEXT:    [[TMP9:%.*]] = insertelement <2 x i64> [[RES_PHI_ELSE]], i64 [[TMP8]], i64 1
; CHECK-NEXT:    br label [[ELSE2]]
; CHECK:       else2:
; CHECK-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i64> [ [[TMP9]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
; CHECK-NEXT:    ret <2 x i64> [[RES_PHI_ELSE3]]
;
  %ret = call <2 x i64> @llvm.masked.expandload.v2i64.p0v2i64(i64* %p, <2 x i1> %mask, <2 x i64> %passthru)
  ret <2 x i64> %ret
}

; All-ones constant mask: both lanes load unconditionally, so no branches are
; emitted; the passthru is dead and shuffled away.
define <2 x i64> @scalarize_v2i64_ones_mask(i64* %p, <2 x i64> %passthru) {
; CHECK-LABEL: @scalarize_v2i64_ones_mask(
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i64, i64* [[P:%.*]], i32 0
; CHECK-NEXT:    [[LOAD0:%.*]] = load i64, i64* [[TMP1]], align 1
; CHECK-NEXT:    [[RES0:%.*]] = insertelement <2 x i64> undef, i64 [[LOAD0]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i64, i64* [[P]], i32 1
; CHECK-NEXT:    [[LOAD1:%.*]] = load i64, i64* [[TMP2]], align 1
; CHECK-NEXT:    [[RES1:%.*]] = insertelement <2 x i64> [[RES0]], i64 [[LOAD1]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <2 x i64> [[RES1]], <2 x i64> [[PASSTHRU:%.*]], <2 x i32> <i32 0, i32 1>
; CHECK-NEXT:    ret <2 x i64> [[TMP3]]
;
  %ret = call <2 x i64> @llvm.masked.expandload.v2i64.p0v2i64(i64* %p, <2 x i1> <i1 true, i1 true>, <2 x i64> %passthru)
  ret <2 x i64> %ret
}

; All-zero constant mask: no loads are emitted; the result is just the
; passthru, selected by a shufflevector from the second operand.
define <2 x i64> @scalarize_v2i64_zero_mask(i64* %p, <2 x i64> %passthru) {
; CHECK-LABEL: @scalarize_v2i64_zero_mask(
; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <2 x i64> undef, <2 x i64> [[PASSTHRU:%.*]], <2 x i32> <i32 2, i32 3>
; CHECK-NEXT:    ret <2 x i64> [[TMP1]]
;
  %ret = call <2 x i64> @llvm.masked.expandload.v2i64.p0v2i64(i64* %p, <2 x i1> <i1 false, i1 false>, <2 x i64> %passthru)
  ret <2 x i64> %ret
}

; Mixed constant mask <false, true>: only lane 1 is active, so a single
; unconditional load from the first consecutive memory slot (gep index 0) is
; inserted at element 1; lane 0 takes the passthru value via the shuffle.
define <2 x i64> @scalarize_v2i64_const_mask(i64* %p, <2 x i64> %passthru) {
; CHECK-LABEL: @scalarize_v2i64_const_mask(
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i64, i64* [[P:%.*]], i32 0
; CHECK-NEXT:    [[LOAD1:%.*]] = load i64, i64* [[TMP1]], align 1
; CHECK-NEXT:    [[RES1:%.*]] = insertelement <2 x i64> undef, i64 [[LOAD1]], i64 1
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <2 x i64> [[RES1]], <2 x i64> [[PASSTHRU:%.*]], <2 x i32> <i32 2, i32 1>
; CHECK-NEXT:    ret <2 x i64> [[TMP2]]
;
  %ret = call <2 x i64> @llvm.masked.expandload.v2i64.p0v2i64(i64* %p, <2 x i1> <i1 false, i1 true>, <2 x i64> %passthru)
  ret <2 x i64> %ret
}

; Intrinsic under test.
declare <2 x i64> @llvm.masked.expandload.v2i64.p0v2i64(i64*,  <2 x i1>, <2 x i64>)
