; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=core-avx2 -stop-after finalize-isel -o - %s | FileCheck %s

declare void @llvm.masked.store.v16f32.p0v16f32(<16 x float>, <16 x float>*, i32, <16 x i1>)
declare <16 x float> @llvm.masked.load.v16f32.p0v16f32(<16 x float>*, i32, <16 x i1>, <16 x float>)

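; AVX2 has no 512-bit masked memory operation, so the v16f32 masked
; load/store is expected to split into two 256-bit VMASKMOVPS halves,
; each using a ymm mask built by comparing the corresponding i32 lanes
; against zero with VPCMPEQD.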
define void @test_v16f(<16 x i32> %x) {
  ; CHECK-LABEL: name: test_v16f
  ; CHECK: bb.0.bb:
  ; CHECK:   liveins: $ymm0, $ymm1
  ; CHECK:   [[COPY:%[0-9]+]]:vr256 = COPY $ymm1
  ; CHECK:   [[COPY1:%[0-9]+]]:vr256 = COPY $ymm0
  ; CHECK:   [[AVX_SET0_:%[0-9]+]]:vr256 = AVX_SET0
  ; CHECK:   [[VPCMPEQDYrr:%[0-9]+]]:vr256 = VPCMPEQDYrr [[COPY]], [[AVX_SET0_]]
  ; CHECK:   [[VPCMPEQDYrr1:%[0-9]+]]:vr256 = VPCMPEQDYrr [[COPY1]], [[AVX_SET0_]]
  ; CHECK:   [[VMASKMOVPSYrm:%[0-9]+]]:vr256 = VMASKMOVPSYrm [[VPCMPEQDYrr1]], %stack.0.stack_input_vec, 1, $noreg, 0, $noreg :: (load 32 from %ir.stack_input_vec, align 4)
  ; CHECK:   [[VMASKMOVPSYrm1:%[0-9]+]]:vr256 = VMASKMOVPSYrm [[VPCMPEQDYrr]], %stack.0.stack_input_vec, 1, $noreg, 32, $noreg :: (load 32 from %ir.stack_input_vec + 32, align 4)
  ; CHECK:   VMASKMOVPSYmr %stack.1.stack_output_vec, 1, $noreg, 32, $noreg, [[VPCMPEQDYrr]], killed [[VMASKMOVPSYrm1]] :: (store 32 into %ir.stack_output_vec + 32, align 4)
  ; CHECK:   VMASKMOVPSYmr %stack.1.stack_output_vec, 1, $noreg, 0, $noreg, [[VPCMPEQDYrr1]], killed [[VMASKMOVPSYrm]] :: (store 32 into %ir.stack_output_vec, align 4)
  ; CHECK:   RET 0
bb:
  %stack_input_vec = alloca <16 x float>, align 64
  %stack_output_vec = alloca <16 x float>, align 64
  %mask = icmp eq <16 x i32> %x, zeroinitializer
  %masked_loaded_vec = call <16 x float> @llvm.masked.load.v16f32.p0v16f32(<16 x float>* nonnull %stack_input_vec, i32 4, <16 x i1> %mask, <16 x float> undef)
  call void @llvm.masked.store.v16f32.p0v16f32(<16 x float> %masked_loaded_vec, <16 x float>* nonnull %stack_output_vec, i32 4, <16 x i1> %mask)
  ret void
}

declare void @llvm.masked.store.v8f64.p0v8f64(<8 x double>, <8 x double>*, i32, <8 x i1>)
declare <8 x double> @llvm.masked.load.v8f64.p0v8f64(<8 x double>*, i32, <8 x i1>, <8 x double>)

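; Same split as above, but for v8f64: two 256-bit VMASKMOVPD halves, with
; the mask lanes widened by VPCMPEQQ against zero.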
define void @test_v8d(<8 x i64> %x) {
  ; CHECK-LABEL: name: test_v8d
  ; CHECK: bb.0.bb:
  ; CHECK:   liveins: $ymm0, $ymm1
  ; CHECK:   [[COPY:%[0-9]+]]:vr256 = COPY $ymm1
  ; CHECK:   [[COPY1:%[0-9]+]]:vr256 = COPY $ymm0
  ; CHECK:   [[AVX_SET0_:%[0-9]+]]:vr256 = AVX_SET0
  ; CHECK:   [[VPCMPEQQYrr:%[0-9]+]]:vr256 = VPCMPEQQYrr [[COPY]], [[AVX_SET0_]]
  ; CHECK:   [[VPCMPEQQYrr1:%[0-9]+]]:vr256 = VPCMPEQQYrr [[COPY1]], [[AVX_SET0_]]
  ; CHECK:   [[VMASKMOVPDYrm:%[0-9]+]]:vr256 = VMASKMOVPDYrm [[VPCMPEQQYrr1]], %stack.0.stack_input_vec, 1, $noreg, 0, $noreg :: (load 32 from %ir.stack_input_vec, align 4)
  ; CHECK:   [[VMASKMOVPDYrm1:%[0-9]+]]:vr256 = VMASKMOVPDYrm [[VPCMPEQQYrr]], %stack.0.stack_input_vec, 1, $noreg, 32, $noreg :: (load 32 from %ir.stack_input_vec + 32, align 4)
  ; CHECK:   VMASKMOVPDYmr %stack.1.stack_output_vec, 1, $noreg, 32, $noreg, [[VPCMPEQQYrr]], killed [[VMASKMOVPDYrm1]] :: (store 32 into %ir.stack_output_vec + 32, align 4)
  ; CHECK:   VMASKMOVPDYmr %stack.1.stack_output_vec, 1, $noreg, 0, $noreg, [[VPCMPEQQYrr1]], killed [[VMASKMOVPDYrm]] :: (store 32 into %ir.stack_output_vec, align 4)
  ; CHECK:   RET 0
bb:
  %stack_input_vec = alloca <8 x double>, align 64
  %stack_output_vec = alloca <8 x double>, align 64
  %mask = icmp eq <8 x i64> %x, zeroinitializer
  %masked_loaded_vec = call <8 x double> @llvm.masked.load.v8f64.p0v8f64(<8 x double>* nonnull %stack_input_vec, i32 4, <8 x i1> %mask, <8 x double> undef)
  call void @llvm.masked.store.v8f64.p0v8f64(<8 x double> %masked_loaded_vec, <8 x double>* nonnull %stack_output_vec, i32 4, <8 x i1> %mask)
  ret void
}

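; A constant mask with only the high lane set should fold the masked load
; into a single scalar VMOVHPDrm that merges 8 bytes at offset 8 into the
; high half of %dst, with no masked instruction emitted.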
define <2 x double> @mload_constmask_v2f64(<2 x double>* %addr, <2 x double> %dst) {
  ; CHECK-LABEL: name: mload_constmask_v2f64
  ; CHECK: bb.0 (%ir-block.0):
  ; CHECK:   liveins: $rdi, $xmm0
  ; CHECK:   [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
  ; CHECK:   [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
  ; CHECK:   [[VMOVHPDrm:%[0-9]+]]:vr128 = VMOVHPDrm [[COPY]], [[COPY1]], 1, $noreg, 8, $noreg :: (load 8 from %ir.addr + 8, align 4)
  ; CHECK:   $xmm0 = COPY [[VMOVHPDrm]]
  ; CHECK:   RET 0, $xmm0
  %res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %addr, i32 4, <2 x i1> <i1 0, i1 1>, <2 x double> %dst)
  ret <2 x double> %res
}

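; A masked store whose mask has exactly one bit set (lane 2) should
; scalarize into a plain 4-byte VEXTRACTPSmr store at offset 8.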
define void @one_mask_bit_set2(<4 x float>* %addr, <4 x float> %val) {
  ; CHECK-LABEL: name: one_mask_bit_set2
  ; CHECK: bb.0 (%ir-block.0):
  ; CHECK:   liveins: $rdi, $xmm0
  ; CHECK:   [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
  ; CHECK:   [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
  ; CHECK:   VEXTRACTPSmr [[COPY1]], 1, $noreg, 8, $noreg, [[COPY]], 2 :: (store 4 into %ir.addr + 8)
  ; CHECK:   RET 0
  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %val, <4 x float>* %addr, i32 4, <4 x i1><i1 false, i1 false, i1 true, i1 false>)
  ret void
}

declare <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>*, i32, <2 x i1>, <2 x double>)
declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>)