1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2; RUN: opt -instcombine -S -mtriple=arm -o - %s | FileCheck %s
3
4target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
5
; Signed i32 multiply-add-accumulate: InstCombine should fold the trailing
; scalar `add %0, %z` into the accumulator operand (4th i32 argument) of the
; vmldava intrinsic, replacing the zero accumulator with %z.
define arm_aapcs_vfpcc i32 @test_vmladavaq_s32(i32 %z, <4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @test_vmladavaq_s32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:  %0 = call i32 @llvm.arm.mve.vmldava.v4i32(i32 0, i32 0, i32 0, i32 %z, <4 x i32> %x, <4 x i32> %y)
; CHECK-NEXT:  ret i32 %0
entry:
  %0 = tail call i32 @llvm.arm.mve.vmldava.v4i32(i32 0, i32 0, i32 0, i32 0, <4 x i32> %x, <4 x i32> %y)
  %1 = add nsw i32 %0, %z
  ret i32 %1
}
16
; Same accumulator-add fold as the s32 case, for the <8 x i16> element type.
define arm_aapcs_vfpcc i32 @test_vmladavaq_s16(i32 %z, <8 x i16> %x, <8 x i16> %y) {
; CHECK-LABEL: @test_vmladavaq_s16(
; CHECK-NEXT:  entry:
; CHECK-NEXT:  %0 = call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 0, i32 0, i32 %z, <8 x i16> %x, <8 x i16> %y)
; CHECK-NEXT:  ret i32 %0
entry:
  %0 = tail call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 0, i32 0, i32 0, <8 x i16> %x, <8 x i16> %y)
  %1 = add nsw i32 %0, %z
  ret i32 %1
}
27
; Same accumulator-add fold as the s32 case, for the <16 x i8> element type.
define arm_aapcs_vfpcc i32 @test_vmladavaq_s8(i32 %z, <16 x i8> %x, <16 x i8> %y) {
; CHECK-LABEL: @test_vmladavaq_s8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:  %0 = call i32 @llvm.arm.mve.vmldava.v16i8(i32 0, i32 0, i32 0, i32 %z, <16 x i8> %x, <16 x i8> %y)
; CHECK-NEXT:  ret i32 %0
entry:
  %0 = tail call i32 @llvm.arm.mve.vmldava.v16i8(i32 0, i32 0, i32 0, i32 0, <16 x i8> %x, <16 x i8> %y)
  %1 = add nsw i32 %0, %z
  ret i32 %1
}
38
; Accumulator-add fold with the first intrinsic flag set to 1 — presumably
; the unsigned-variant flag, matching the _u suffix (NOTE(review): flag
; semantics inferred from the test names; confirm against IntrinsicsARM.td).
; The fold must preserve that flag while replacing the zero accumulator.
define arm_aapcs_vfpcc i32 @test_vmladavaq_u32(i32 %z, <4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @test_vmladavaq_u32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:  %0 = call i32 @llvm.arm.mve.vmldava.v4i32(i32 1, i32 0, i32 0, i32 %z, <4 x i32> %x, <4 x i32> %y)
; CHECK-NEXT:  ret i32 %0
entry:
  %0 = tail call i32 @llvm.arm.mve.vmldava.v4i32(i32 1, i32 0, i32 0, i32 0, <4 x i32> %x, <4 x i32> %y)
  %1 = add nsw i32 %0, %z
  ret i32 %1
}
49
; Same fold as the u32 case, for the <8 x i16> element type; the first flag
; (i32 1, the _u variant selector) must be preserved.
define arm_aapcs_vfpcc i32 @test_vmladavaq_u16(i32 %z, <8 x i16> %x, <8 x i16> %y) {
; CHECK-LABEL: @test_vmladavaq_u16(
; CHECK-NEXT:  entry:
; CHECK-NEXT:  %0 = call i32 @llvm.arm.mve.vmldava.v8i16(i32 1, i32 0, i32 0, i32 %z, <8 x i16> %x, <8 x i16> %y)
; CHECK-NEXT:  ret i32 %0
entry:
  %0 = tail call i32 @llvm.arm.mve.vmldava.v8i16(i32 1, i32 0, i32 0, i32 0, <8 x i16> %x, <8 x i16> %y)
  %1 = add nsw i32 %0, %z
  ret i32 %1
}
60
; Same fold as the u32 case, for the <16 x i8> element type; the first flag
; (i32 1, the _u variant selector) must be preserved.
define arm_aapcs_vfpcc i32 @test_vmladavaq_u8(i32 %z, <16 x i8> %x, <16 x i8> %y) {
; CHECK-LABEL: @test_vmladavaq_u8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:  %0 = call i32 @llvm.arm.mve.vmldava.v16i8(i32 1, i32 0, i32 0, i32 %z, <16 x i8> %x, <16 x i8> %y)
; CHECK-NEXT:  ret i32 %0
entry:
  %0 = tail call i32 @llvm.arm.mve.vmldava.v16i8(i32 1, i32 0, i32 0, i32 0, <16 x i8> %x, <16 x i8> %y)
  %1 = add nsw i32 %0, %z
  ret i32 %1
}
71
; Accumulator-add fold with the second intrinsic flag set to 1 — presumably
; the subtract-variant flag, matching the vmlsdav name (NOTE(review): flag
; semantics inferred from the test names; confirm against IntrinsicsARM.td).
; The fold must preserve that flag while replacing the zero accumulator.
define arm_aapcs_vfpcc i32 @test_vmlsdavaq_s32(i32 %z, <4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @test_vmlsdavaq_s32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:  %0 = call i32 @llvm.arm.mve.vmldava.v4i32(i32 0, i32 1, i32 0, i32 %z, <4 x i32> %x, <4 x i32> %y)
; CHECK-NEXT:  ret i32 %0
entry:
  %0 = tail call i32 @llvm.arm.mve.vmldava.v4i32(i32 0, i32 1, i32 0, i32 0, <4 x i32> %x, <4 x i32> %y)
  %1 = add nsw i32 %0, %z
  ret i32 %1
}
82
; Same fold as the vmlsdavaq_s32 case, for the <8 x i16> element type; the
; second flag (i32 1, the vmlsdav variant selector) must be preserved.
define arm_aapcs_vfpcc i32 @test_vmlsdavaq_s16(i32 %z, <8 x i16> %x, <8 x i16> %y) {
; CHECK-LABEL: @test_vmlsdavaq_s16(
; CHECK-NEXT:  entry:
; CHECK-NEXT:  %0 = call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 1, i32 0, i32 %z, <8 x i16> %x, <8 x i16> %y)
; CHECK-NEXT:  ret i32 %0
entry:
  %0 = tail call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 1, i32 0, i32 0, <8 x i16> %x, <8 x i16> %y)
  %1 = add nsw i32 %0, %z
  ret i32 %1
}
93
; Same fold as the vmlsdavaq_s32 case, for the <16 x i8> element type; the
; second flag (i32 1, the vmlsdav variant selector) must be preserved.
define arm_aapcs_vfpcc i32 @test_vmlsdavaq_s8(i32 %z, <16 x i8> %x, <16 x i8> %y) {
; CHECK-LABEL: @test_vmlsdavaq_s8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:  %0 = call i32 @llvm.arm.mve.vmldava.v16i8(i32 0, i32 1, i32 0, i32 %z, <16 x i8> %x, <16 x i8> %y)
; CHECK-NEXT:  ret i32 %0
entry:
  %0 = tail call i32 @llvm.arm.mve.vmldava.v16i8(i32 0, i32 1, i32 0, i32 0, <16 x i8> %x, <16 x i8> %y)
  %1 = add nsw i32 %0, %z
  ret i32 %1
}
104
105declare i32 @llvm.arm.mve.vmldava.v4i32(i32, i32, i32, i32, <4 x i32>, <4 x i32>)
106declare i32 @llvm.arm.mve.vmldava.v8i16(i32, i32, i32, i32, <8 x i16>, <8 x i16>)
107declare i32 @llvm.arm.mve.vmldava.v16i8(i32, i32, i32, i32, <16 x i8>, <16 x i8>)
108