; RUN: opt < %s -instcombine -S | FileCheck %s
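;
; InstCombine folds these x86 AVX/AVX2 masked load/store intrinsics when the mask is a constant:
; an all-zero mask is a no-op (loads fold to a zero vector), a mask with every sign bit set folds
; to a plain vector load/store, and any other constant mask is converted to the generic
; llvm.masked.load / llvm.masked.store intrinsics, which the backend can optimize further.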
;; MASKED LOADS

; If the mask isn't constant, do nothing.

define <4 x float> @mload(i8* %f, <4 x i32> %mask) {
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> %mask)
  ret <4 x float> %ld

; CHECK-LABEL: @mload(
; CHECK-NEXT:  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> %mask)
; CHECK-NEXT:  ret <4 x float> %ld
}

; Zero mask returns a zero vector.

define <4 x float> @mload_zeros(i8* %f) {
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> zeroinitializer)
  ret <4 x float> %ld

; CHECK-LABEL: @mload_zeros(
; CHECK-NEXT:  ret <4 x float> zeroinitializer
}

; Only the sign bit matters.
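; (The x86 maskload instructions test only the most significant bit of each mask element, so
; elements whose sign bit is clear are treated as "off" regardless of the low bits.)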

define <4 x float> @mload_fake_ones(i8* %f) {
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> <i32 1, i32 2, i32 3, i32 2147483647>)
  ret <4 x float> %ld

; CHECK-LABEL: @mload_fake_ones(
; CHECK-NEXT:  ret <4 x float> zeroinitializer
}

; All mask sign bits are set, so this is just a vector load.

define <4 x float> @mload_real_ones(i8* %f) {
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> <i32 -1, i32 -2, i32 -3, i32 2147483648>)
  ret <4 x float> %ld

; CHECK-LABEL: @mload_real_ones(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x float>*
; CHECK-NEXT:  %unmaskedload = load <4 x float>, <4 x float>* %castvec
; CHECK-NEXT:  ret <4 x float> %unmaskedload
}

; It's a constant mask, so convert to an LLVM intrinsic. The backend should optimize further.
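; In the converted form, the pointer is bitcast to the vector type, the alignment is 1 because
; the x86 intrinsics make no alignment guarantee, and the passthrough operand is a zero vector
; because maskload writes zeros into the masked-off lanes.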

define <4 x float> @mload_one_one(i8* %f) {
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> <i32 0, i32 0, i32 0, i32 -1>)
  ret <4 x float> %ld

; CHECK-LABEL: @mload_one_one(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x float>*
; CHECK-NEXT:  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %castvec, i32 1, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x float> zeroinitializer)
; CHECK-NEXT:  ret <4 x float> %1
}

; Try doubles.

define <2 x double> @mload_one_one_double(i8* %f) {
  %ld = tail call <2 x double> @llvm.x86.avx.maskload.pd(i8* %f, <2 x i64> <i64 -1, i64 0>)
  ret <2 x double> %ld

; CHECK-LABEL: @mload_one_one_double(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <2 x double>*
; CHECK-NEXT:  %1 = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %castvec, i32 1, <2 x i1> <i1 true, i1 false>, <2 x double> zeroinitializer)
; CHECK-NEXT:  ret <2 x double> %1
}

; Try 256-bit FP ops.

define <8 x float> @mload_v8f32(i8* %f) {
  %ld = tail call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %f, <8 x i32> <i32 0, i32 0, i32 0, i32 -1, i32 0, i32 0, i32 0, i32 0>)
  ret <8 x float> %ld

; CHECK-LABEL: @mload_v8f32(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <8 x float>*
; CHECK-NEXT:  %1 = call <8 x float> @llvm.masked.load.v8f32.p0v8f32(<8 x float>* %castvec, i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false>, <8 x float> zeroinitializer)
; CHECK-NEXT:  ret <8 x float> %1
}

define <4 x double> @mload_v4f64(i8* %f) {
  %ld = tail call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %f, <4 x i64> <i64 -1, i64 0, i64 0, i64 0>)
  ret <4 x double> %ld

; CHECK-LABEL: @mload_v4f64(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x double>*
; CHECK-NEXT:  %1 = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %castvec, i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x double> zeroinitializer)
; CHECK-NEXT:  ret <4 x double> %1
}

; Try the AVX2 variants.

define <4 x i32> @mload_v4i32(i8* %f) {
  %ld = tail call <4 x i32> @llvm.x86.avx2.maskload.d(i8* %f, <4 x i32> <i32 0, i32 0, i32 0, i32 -1>)
  ret <4 x i32> %ld

; CHECK-LABEL: @mload_v4i32(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x i32>*
; CHECK-NEXT:  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %castvec, i32 1, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x i32> zeroinitializer)
; CHECK-NEXT:  ret <4 x i32> %1
}

define <2 x i64> @mload_v2i64(i8* %f) {
  %ld = tail call <2 x i64> @llvm.x86.avx2.maskload.q(i8* %f, <2 x i64> <i64 -1, i64 0>)
  ret <2 x i64> %ld

; CHECK-LABEL: @mload_v2i64(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <2 x i64>*
; CHECK-NEXT:  %1 = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %castvec, i32 1, <2 x i1> <i1 true, i1 false>, <2 x i64> zeroinitializer)
; CHECK-NEXT:  ret <2 x i64> %1
}

define <8 x i32> @mload_v8i32(i8* %f) {
  %ld = tail call <8 x i32> @llvm.x86.avx2.maskload.d.256(i8* %f, <8 x i32> <i32 0, i32 0, i32 0, i32 -1, i32 0, i32 0, i32 0, i32 0>)
  ret <8 x i32> %ld

; CHECK-LABEL: @mload_v8i32(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <8 x i32>*
; CHECK-NEXT:  %1 = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* %castvec, i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false>, <8 x i32> zeroinitializer)
; CHECK-NEXT:  ret <8 x i32> %1
}

define <4 x i64> @mload_v4i64(i8* %f) {
  %ld = tail call <4 x i64> @llvm.x86.avx2.maskload.q.256(i8* %f, <4 x i64> <i64 -1, i64 0, i64 0, i64 0>)
  ret <4 x i64> %ld

; CHECK-LABEL: @mload_v4i64(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x i64>*
; CHECK-NEXT:  %1 = call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* %castvec, i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i64> zeroinitializer)
; CHECK-NEXT:  ret <4 x i64> %1
}

;; MASKED STORES

; If the mask isn't constant, do nothing.

define void @mstore(i8* %f, <4 x i32> %mask, <4 x float> %v) {
  tail call void @llvm.x86.avx.maskstore.ps(i8* %f, <4 x i32> %mask, <4 x float> %v)
  ret void

; CHECK-LABEL: @mstore(
; CHECK-NEXT:  tail call void @llvm.x86.avx.maskstore.ps(i8* %f, <4 x i32> %mask, <4 x float> %v)
; CHECK-NEXT:  ret void
}

; Zero mask is a nop.

define void @mstore_zeros(i8* %f, <4 x float> %v) {
  tail call void @llvm.x86.avx.maskstore.ps(i8* %f, <4 x i32> zeroinitializer, <4 x float> %v)
  ret void

; CHECK-LABEL: @mstore_zeros(
; CHECK-NEXT:  ret void
}

; Only the sign bit matters.

define void @mstore_fake_ones(i8* %f, <4 x float> %v) {
  tail call void @llvm.x86.avx.maskstore.ps(i8* %f, <4 x i32> <i32 1, i32 2, i32 3, i32 2147483647>, <4 x float> %v)
  ret void

; CHECK-LABEL: @mstore_fake_ones(
; CHECK-NEXT:  ret void
}

; All mask sign bits are set, so this is just a vector store.

define void @mstore_real_ones(i8* %f, <4 x float> %v) {
  tail call void @llvm.x86.avx.maskstore.ps(i8* %f, <4 x i32> <i32 -1, i32 -2, i32 -3, i32 -2147483648>, <4 x float> %v)
  ret void

; CHECK-LABEL: @mstore_real_ones(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x float>*
; CHECK-NEXT:  store <4 x float> %v, <4 x float>* %castvec
; CHECK-NEXT:  ret void
}

; It's a constant mask, so convert to an LLVM intrinsic. The backend should optimize further.
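; Unlike a masked load, a masked store must leave the disabled lanes in memory untouched, so
; only a mask with every sign bit set can become a plain store; any other constant mask becomes
; llvm.masked.store with alignment 1.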

define void @mstore_one_one(i8* %f, <4 x float> %v) {
  tail call void @llvm.x86.avx.maskstore.ps(i8* %f, <4 x i32> <i32 0, i32 0, i32 0, i32 -1>, <4 x float> %v)
  ret void

; CHECK-LABEL: @mstore_one_one(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x float>*
; CHECK-NEXT:  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %v, <4 x float>* %castvec, i32 1, <4 x i1> <i1 false, i1 false, i1 false, i1 true>)
; CHECK-NEXT:  ret void
}

; Try doubles.

define void @mstore_one_one_double(i8* %f, <2 x double> %v) {
  tail call void @llvm.x86.avx.maskstore.pd(i8* %f, <2 x i64> <i64 -1, i64 0>, <2 x double> %v)
  ret void

; CHECK-LABEL: @mstore_one_one_double(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <2 x double>*
; CHECK-NEXT:  call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %v, <2 x double>* %castvec, i32 1, <2 x i1> <i1 true, i1 false>)
; CHECK-NEXT:  ret void
}

; Try 256-bit FP ops.

define void @mstore_v8f32(i8* %f, <8 x float> %v) {
  tail call void @llvm.x86.avx.maskstore.ps.256(i8* %f, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 -1, i32 -2, i32 -3, i32 -4>, <8 x float> %v)
  ret void

; CHECK-LABEL: @mstore_v8f32(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <8 x float>*
; CHECK-NEXT:  call void @llvm.masked.store.v8f32.p0v8f32(<8 x float> %v, <8 x float>* %castvec, i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>)
; CHECK-NEXT:  ret void
}

define void @mstore_v4f64(i8* %f, <4 x double> %v) {
  tail call void @llvm.x86.avx.maskstore.pd.256(i8* %f, <4 x i64> <i64 -1, i64 0, i64 1, i64 2>, <4 x double> %v)
  ret void

; CHECK-LABEL: @mstore_v4f64(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x double>*
; CHECK-NEXT:  call void @llvm.masked.store.v4f64.p0v4f64(<4 x double> %v, <4 x double>* %castvec, i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>)
; CHECK-NEXT:  ret void
}

; Try the AVX2 variants.

define void @mstore_v4i32(i8* %f, <4 x i32> %v) {
  tail call void @llvm.x86.avx2.maskstore.d(i8* %f, <4 x i32> <i32 0, i32 1, i32 -1, i32 -2>, <4 x i32> %v)
  ret void

; CHECK-LABEL: @mstore_v4i32(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x i32>*
; CHECK-NEXT:  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %v, <4 x i32>* %castvec, i32 1, <4 x i1> <i1 false, i1 false, i1 true, i1 true>)
; CHECK-NEXT:  ret void
}

define void @mstore_v2i64(i8* %f, <2 x i64> %v) {
  tail call void @llvm.x86.avx2.maskstore.q(i8* %f, <2 x i64> <i64 -1, i64 0>, <2 x i64> %v)
  ret void

; CHECK-LABEL: @mstore_v2i64(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <2 x i64>*
; CHECK-NEXT:  call void @llvm.masked.store.v2i64.p0v2i64(<2 x i64> %v, <2 x i64>* %castvec, i32 1, <2 x i1> <i1 true, i1 false>)
; CHECK-NEXT:  ret void
}

define void @mstore_v8i32(i8* %f, <8 x i32> %v) {
  tail call void @llvm.x86.avx2.maskstore.d.256(i8* %f, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 -1, i32 -2, i32 -3, i32 -4>, <8 x i32> %v)
  ret void

; CHECK-LABEL: @mstore_v8i32(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <8 x i32>*
; CHECK-NEXT:  call void @llvm.masked.store.v8i32.p0v8i32(<8 x i32> %v, <8 x i32>* %castvec, i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>)
; CHECK-NEXT:  ret void
}

define void @mstore_v4i64(i8* %f, <4 x i64> %v) {
  tail call void @llvm.x86.avx2.maskstore.q.256(i8* %f, <4 x i64> <i64 -1, i64 0, i64 1, i64 2>, <4 x i64> %v)
  ret void

; CHECK-LABEL: @mstore_v4i64(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x i64>*
; CHECK-NEXT:  call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> %v, <4 x i64>* %castvec, i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>)
; CHECK-NEXT:  ret void
}

; The original SSE2 masked store variant.
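; maskmovdqu selects individual bytes and carries a non-temporal hint, so this test only checks
; the trivial case: an all-zero mask folds to a no-op.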

define void @mstore_v16i8_sse2_zeros(<16 x i8> %d, i8* %p) {
  tail call void @llvm.x86.sse2.maskmov.dqu(<16 x i8> %d, <16 x i8> zeroinitializer, i8* %p)
  ret void

; CHECK-LABEL: @mstore_v16i8_sse2_zeros(
; CHECK-NEXT:  ret void
}


declare <4 x float> @llvm.x86.avx.maskload.ps(i8*, <4 x i32>)
declare <2 x double> @llvm.x86.avx.maskload.pd(i8*, <2 x i64>)
declare <8 x float> @llvm.x86.avx.maskload.ps.256(i8*, <8 x i32>)
declare <4 x double> @llvm.x86.avx.maskload.pd.256(i8*, <4 x i64>)

declare <4 x i32> @llvm.x86.avx2.maskload.d(i8*, <4 x i32>)
declare <2 x i64> @llvm.x86.avx2.maskload.q(i8*, <2 x i64>)
declare <8 x i32> @llvm.x86.avx2.maskload.d.256(i8*, <8 x i32>)
declare <4 x i64> @llvm.x86.avx2.maskload.q.256(i8*, <4 x i64>)

declare void @llvm.x86.avx.maskstore.ps(i8*, <4 x i32>, <4 x float>)
declare void @llvm.x86.avx.maskstore.pd(i8*, <2 x i64>, <2 x double>)
declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x i32>, <8 x float>)
declare void @llvm.x86.avx.maskstore.pd.256(i8*, <4 x i64>, <4 x double>)

declare void @llvm.x86.avx2.maskstore.d(i8*, <4 x i32>, <4 x i32>)
declare void @llvm.x86.avx2.maskstore.q(i8*, <2 x i64>, <2 x i64>)
declare void @llvm.x86.avx2.maskstore.d.256(i8*, <8 x i32>, <8 x i32>)
declare void @llvm.x86.avx2.maskstore.q.256(i8*, <4 x i64>, <4 x i64>)

declare void @llvm.x86.sse2.maskmov.dqu(<16 x i8>, <16 x i8>, i8*)