; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s

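; The vldr.gather.base.wb intrinsic returns both the gathered lanes and the
; updated base vector. The writeback should fold into a single pre-incremented
; gather (vldrw.u32 q0, [q1, #80]!) rather than a separate vector add of the
; offset.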
define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_base_wb_s32(<4 x i32>* %addr) {
; CHECK-LABEL: test_vldrwq_gather_base_wb_s32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vldrw.u32 q1, [r0]
; CHECK-NEXT:    vldrw.u32 q0, [q1, #80]!
; CHECK-NEXT:    vstrw.32 q1, [r0]
; CHECK-NEXT:    bx lr
entry:
  %0 = load <4 x i32>, <4 x i32>* %addr, align 8
  %1 = tail call { <4 x i32>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4i32.v4i32(<4 x i32> %0, i32 80)
  %2 = extractvalue { <4 x i32>, <4 x i32> } %1, 1
  store <4 x i32> %2, <4 x i32>* %addr, align 8
  %3 = extractvalue { <4 x i32>, <4 x i32> } %1, 0
  ret <4 x i32> %3
}

declare { <4 x i32>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4i32.v4i32(<4 x i32>, i32)

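; Same pattern for the f32 variant: the gathered result is <4 x float>, while
; the updated base is still a <4 x i32> vector of addresses, stored back to
; %addr.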
define arm_aapcs_vfpcc <4 x float> @test_vldrwq_gather_base_wb_f32(<4 x i32>* %addr) {
; CHECK-LABEL: test_vldrwq_gather_base_wb_f32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vldrw.u32 q1, [r0]
; CHECK-NEXT:    vldrw.u32 q0, [q1, #64]!
; CHECK-NEXT:    vstrw.32 q1, [r0]
; CHECK-NEXT:    bx lr
entry:
  %0 = load <4 x i32>, <4 x i32>* %addr, align 8
  %1 = tail call { <4 x float>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4f32.v4i32(<4 x i32> %0, i32 64)
  %2 = extractvalue { <4 x float>, <4 x i32> } %1, 1
  store <4 x i32> %2, <4 x i32>* %addr, align 8
  %3 = extractvalue { <4 x float>, <4 x i32> } %1, 0
  ret <4 x float> %3
}

declare { <4 x float>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4f32.v4i32(<4 x i32>, i32)

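; Predicated 64-bit variant: the i16 mask in r1 is moved into P0 with vmsr and
; VPST predicates the gather, giving vldrdt.u64 with writeback. The intrinsic
; takes a <4 x i1> predicate for the <2 x i64> gather, since MVE predicates
; 64-bit lanes via their 32-bit halves.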
define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_base_wb_z_u64(<2 x i64>* %addr, i16 zeroext %p) {
; CHECK-LABEL: test_vldrdq_gather_base_wb_z_u64:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmsr p0, r1
; CHECK-NEXT:    vldrw.u32 q1, [r0]
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vldrdt.u64 q0, [q1, #656]!
; CHECK-NEXT:    vstrw.32 q1, [r0]
; CHECK-NEXT:    bx lr
entry:
  %0 = load <2 x i64>, <2 x i64>* %addr, align 8
  %1 = zext i16 %p to i32
  %2 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
  %3 = tail call { <2 x i64>, <2 x i64> } @llvm.arm.mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v4i1(<2 x i64> %0, i32 656, <4 x i1> %2)
  %4 = extractvalue { <2 x i64>, <2 x i64> } %3, 1
  store <2 x i64> %4, <2 x i64>* %addr, align 8
  %5 = extractvalue { <2 x i64>, <2 x i64> } %3, 0
  ret <2 x i64> %5
}

declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32)

declare { <2 x i64>, <2 x i64> } @llvm.arm.mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v4i1(<2 x i64>, i32, <4 x i1>)