1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2; RUN: opt -S %s -scalarize-masked-mem-intrin -mtriple=x86_64-linux-gnu | FileCheck %s
3
; Variable (runtime) mask: the pass must emit the full branchy expansion —
; bitcast the mask to i2, test bit 0 and conditionally load lane 0, then test
; bit 1 and conditionally load lane 1 — merging each loaded lane into
; %passthru through phi nodes in the fall-through blocks.
define <2 x i64> @scalarize_v2i64(<2 x i64>* %p, <2 x i1> %mask, <2 x i64> %passthru) {
; CHECK-LABEL: @scalarize_v2i64(
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64>* [[P:%.*]] to i64*
; CHECK-NEXT:    [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK:%.*]] to i2
; CHECK-NEXT:    [[TMP2:%.*]] = and i2 [[SCALAR_MASK]], 1
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne i2 [[TMP2]], 0
; CHECK-NEXT:    br i1 [[TMP3]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
; CHECK:       cond.load:
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 0
; CHECK-NEXT:    [[TMP5:%.*]] = load i64, i64* [[TMP4]], align 8
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[TMP5]], i64 0
; CHECK-NEXT:    br label [[ELSE]]
; CHECK:       else:
; CHECK-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i64> [ [[TMP6]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
; CHECK-NEXT:    [[TMP7:%.*]] = and i2 [[SCALAR_MASK]], -2
; CHECK-NEXT:    [[TMP8:%.*]] = icmp ne i2 [[TMP7]], 0
; CHECK-NEXT:    br i1 [[TMP8]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
; CHECK:       cond.load1:
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 1
; CHECK-NEXT:    [[TMP10:%.*]] = load i64, i64* [[TMP9]], align 8
; CHECK-NEXT:    [[TMP11:%.*]] = insertelement <2 x i64> [[RES_PHI_ELSE]], i64 [[TMP10]], i64 1
; CHECK-NEXT:    br label [[ELSE2]]
; CHECK:       else2:
; CHECK-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i64> [ [[TMP11]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
; CHECK-NEXT:    ret <2 x i64> [[RES_PHI_ELSE3]]
;
  ; The intrinsic claims alignment 128, yet the scalar loads above are checked
  ; with align 8 — presumably clamped to the i64 element size; confirm against
  ; the pass's alignment computation if this check ever changes.
  %ret = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %p, i32 128, <2 x i1> %mask, <2 x i64> %passthru)
  ret <2 x i64> %ret
}
33
; All-ones constant mask: every lane is loaded, so the intrinsic folds to a
; single unconditional vector load (align 8 carried over from the intrinsic's
; alignment argument) with no branching and no use of %passthru.
define <2 x i64> @scalarize_v2i64_ones_mask(<2 x i64>* %p, <2 x i64> %passthru) {
; CHECK-LABEL: @scalarize_v2i64_ones_mask(
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* [[P:%.*]], align 8
; CHECK-NEXT:    ret <2 x i64> [[TMP1]]
;
  %ret = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %p, i32 8, <2 x i1> <i1 true, i1 true>, <2 x i64> %passthru)
  ret <2 x i64> %ret
}
42
; All-zeros constant mask: no lane is loaded, so the result is just %passthru.
; Note the leading bitcast of %p survives as dead code — the checks pin that
; current output rather than requiring it to be cleaned up.
define <2 x i64> @scalarize_v2i64_zero_mask(<2 x i64>* %p, <2 x i64> %passthru) {
; CHECK-LABEL: @scalarize_v2i64_zero_mask(
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64>* [[P:%.*]] to i64*
; CHECK-NEXT:    ret <2 x i64> [[PASSTHRU:%.*]]
;
  %ret = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %p, i32 8, <2 x i1> <i1 false, i1 false>, <2 x i64> %passthru)
  ret <2 x i64> %ret
}
51
; Mixed constant mask <false, true>: only lane 1 is loaded — unconditionally,
; with no branches, since the mask is known at compile time — and inserted
; into %passthru; lane 0 is skipped entirely.
define <2 x i64> @scalarize_v2i64_const_mask(<2 x i64>* %p, <2 x i64> %passthru) {
; CHECK-LABEL: @scalarize_v2i64_const_mask(
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64>* [[P:%.*]] to i64*
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 1
; CHECK-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP2]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[TMP3]], i64 1
; CHECK-NEXT:    ret <2 x i64> [[TMP4]]
;
  %ret = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %p, i32 8, <2 x i1> <i1 false, i1 true>, <2 x i64> %passthru)
  ret <2 x i64> %ret
}
63
; This uses a byte-sized but non-power-of-2 element size. This used to crash due to a bad alignment calculation.
; Variable mask over i24 elements — same branchy expansion as
; @scalarize_v2i64 above. The interesting part is the alignment: i24's store
; size (3 bytes) is not a power of 2, and the checks require the scalar loads
; to use align 1 rather than the intrinsic's align 8.
define <2 x i24> @scalarize_v2i24(<2 x i24>* %p, <2 x i1> %mask, <2 x i24> %passthru) {
; CHECK-LABEL: @scalarize_v2i24(
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i24>* [[P:%.*]] to i24*
; CHECK-NEXT:    [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK:%.*]] to i2
; CHECK-NEXT:    [[TMP2:%.*]] = and i2 [[SCALAR_MASK]], 1
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne i2 [[TMP2]], 0
; CHECK-NEXT:    br i1 [[TMP3]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
; CHECK:       cond.load:
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i24, i24* [[TMP1]], i32 0
; CHECK-NEXT:    [[TMP5:%.*]] = load i24, i24* [[TMP4]], align 1
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i24> [[PASSTHRU:%.*]], i24 [[TMP5]], i64 0
; CHECK-NEXT:    br label [[ELSE]]
; CHECK:       else:
; CHECK-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i24> [ [[TMP6]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
; CHECK-NEXT:    [[TMP7:%.*]] = and i2 [[SCALAR_MASK]], -2
; CHECK-NEXT:    [[TMP8:%.*]] = icmp ne i2 [[TMP7]], 0
; CHECK-NEXT:    br i1 [[TMP8]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
; CHECK:       cond.load1:
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i24, i24* [[TMP1]], i32 1
; CHECK-NEXT:    [[TMP10:%.*]] = load i24, i24* [[TMP9]], align 1
; CHECK-NEXT:    [[TMP11:%.*]] = insertelement <2 x i24> [[RES_PHI_ELSE]], i24 [[TMP10]], i64 1
; CHECK-NEXT:    br label [[ELSE2]]
; CHECK:       else2:
; CHECK-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i24> [ [[TMP11]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
; CHECK-NEXT:    ret <2 x i24> [[RES_PHI_ELSE3]]
;
  %ret = call <2 x i24> @llvm.masked.load.v2i24.p0v2i24(<2 x i24>* %p, i32 8, <2 x i1> %mask, <2 x i24> %passthru)
  ret <2 x i24> %ret
}
94
; This uses a byte-sized but non-power-of-2 element size. This used to crash due to a bad alignment calculation.
; Variable mask over i48 elements — the companion case to @scalarize_v2i24:
; i48's store size (6 bytes) is byte-sized but not a power of 2, and the
; checks require the scalar loads to use align 2 rather than the intrinsic's
; align 16.
define <2 x i48> @scalarize_v2i48(<2 x i48>* %p, <2 x i1> %mask, <2 x i48> %passthru) {
; CHECK-LABEL: @scalarize_v2i48(
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i48>* [[P:%.*]] to i48*
; CHECK-NEXT:    [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK:%.*]] to i2
; CHECK-NEXT:    [[TMP2:%.*]] = and i2 [[SCALAR_MASK]], 1
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne i2 [[TMP2]], 0
; CHECK-NEXT:    br i1 [[TMP3]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
; CHECK:       cond.load:
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i48, i48* [[TMP1]], i32 0
; CHECK-NEXT:    [[TMP5:%.*]] = load i48, i48* [[TMP4]], align 2
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i48> [[PASSTHRU:%.*]], i48 [[TMP5]], i64 0
; CHECK-NEXT:    br label [[ELSE]]
; CHECK:       else:
; CHECK-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i48> [ [[TMP6]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
; CHECK-NEXT:    [[TMP7:%.*]] = and i2 [[SCALAR_MASK]], -2
; CHECK-NEXT:    [[TMP8:%.*]] = icmp ne i2 [[TMP7]], 0
; CHECK-NEXT:    br i1 [[TMP8]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
; CHECK:       cond.load1:
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i48, i48* [[TMP1]], i32 1
; CHECK-NEXT:    [[TMP10:%.*]] = load i48, i48* [[TMP9]], align 2
; CHECK-NEXT:    [[TMP11:%.*]] = insertelement <2 x i48> [[RES_PHI_ELSE]], i48 [[TMP10]], i64 1
; CHECK-NEXT:    br label [[ELSE2]]
; CHECK:       else2:
; CHECK-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i48> [ [[TMP11]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
; CHECK-NEXT:    ret <2 x i48> [[RES_PHI_ELSE3]]
;
  %ret = call <2 x i48> @llvm.masked.load.v2i48.p0v2i48(<2 x i48>* %p, i32 16, <2 x i1> %mask, <2 x i48> %passthru)
  ret <2 x i48> %ret
}
125
126declare <2 x i24> @llvm.masked.load.v2i24.p0v2i24(<2 x i24>*, i32, <2 x i1>, <2 x i24>)
127declare <2 x i48> @llvm.masked.load.v2i48.p0v2i48(<2 x i48>*, i32, <2 x i1>, <2 x i48>)
128declare <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>*, i32, <2 x i1>, <2 x i64>)
129