; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s 2>%t | FileCheck %s
; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t

; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
; WARN-NOT: warning

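;
; SDIV
;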
define <vscale x 4 x i32> @sdiv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sdiv_i32:
; CHECK: sdiv z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sdiv.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sdiv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sdiv_i64:
; CHECK: sdiv z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sdiv.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

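;
; UDIV
;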
define <vscale x 4 x i32> @udiv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: udiv_i32:
; CHECK: udiv z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.udiv.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @udiv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: udiv_i64:
; CHECK: udiv z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.udiv.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

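;
; SDIVR
;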
define <vscale x 4 x i32> @sdivr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sdivr_i32:
; CHECK: sdivr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sdivr.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sdivr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sdivr_i64:
; CHECK: sdivr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sdivr.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

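;
; UDIVR
;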
define <vscale x 4 x i32> @udivr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: udivr_i32:
; CHECK: udivr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.udivr.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @udivr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: udivr_i64:
; CHECK: udivr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.udivr.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

declare <vscale x  4 x i32> @llvm.aarch64.sve.sdiv.nxv4i32(<vscale x  4 x i1>, <vscale x  4 x i32>, <vscale x  4 x i32>)
declare <vscale x  2 x i64> @llvm.aarch64.sve.sdiv.nxv2i64(<vscale x  2 x i1>, <vscale x  2 x i64>, <vscale x  2 x i64>)
declare <vscale x  4 x i32> @llvm.aarch64.sve.udiv.nxv4i32(<vscale x  4 x i1>, <vscale x  4 x i32>, <vscale x  4 x i32>)
declare <vscale x  2 x i64> @llvm.aarch64.sve.udiv.nxv2i64(<vscale x  2 x i1>, <vscale x  2 x i64>, <vscale x  2 x i64>)
declare <vscale x  4 x i32> @llvm.aarch64.sve.sdivr.nxv4i32(<vscale x  4 x i1>, <vscale x  4 x i32>, <vscale x  4 x i32>)
declare <vscale x  2 x i64> @llvm.aarch64.sve.sdivr.nxv2i64(<vscale x  2 x i1>, <vscale x  2 x i64>, <vscale x  2 x i64>)
declare <vscale x  4 x i32> @llvm.aarch64.sve.udivr.nxv4i32(<vscale x  4 x i1>, <vscale x  4 x i32>, <vscale x  4 x i32>)
declare <vscale x  2 x i64> @llvm.aarch64.sve.udivr.nxv2i64(<vscale x  2 x i1>, <vscale x  2 x i64>, <vscale x  2 x i64>)