; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -tail-predication=force-enabled-no-reductions -o - %s | FileCheck %s
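
; In this tail-predicated loop (dlstp/letp), the icmp feeding the predicated
; vorr select should fold into a single VPT block: the checks expect a
; vpt.s8 ge followed by vmovt rather than a separate vcmp and vpst.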
define arm_aapcs_vfpcc <16 x i8> @vcmp_vpst_combination(<16 x i8>* %pSrc, i16 zeroext %blockSize, i8* nocapture %pResult, i32* nocapture %pIndex) {
; CHECK-LABEL: vcmp_vpst_combination:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    vmov.i8 q0, #0x7f
; CHECK-NEXT:    dlstp.8 lr, r1
; CHECK-NEXT:  .LBB0_1: @ %do.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrb.u8 q1, [r0]
; CHECK-NEXT:    vpt.s8 ge, q0, q1
; CHECK-NEXT:    vmovt q0, q1
; CHECK-NEXT:    letp lr, .LBB0_1
; CHECK-NEXT:  @ %bb.2: @ %do.end
; CHECK-NEXT:    pop {r7, pc}
entry:
  %conv = zext i16 %blockSize to i32
  %0 = tail call { <16 x i8>, i32 } @llvm.arm.mve.vidup.v16i8(i32 0, i32 1)
  %1 = extractvalue { <16 x i8>, i32 } %0, 0
  br label %do.body

do.body:                                          ; preds = %do.body, %entry
  %indexVec.0 = phi <16 x i8> [ %1, %entry ], [ %add, %do.body ]
  %curExtremIdxVec.0 = phi <16 x i8> [ zeroinitializer, %entry ], [ %6, %do.body ]
  %curExtremValVec.0 = phi <16 x i8> [ <i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127>, %entry ], [ %6, %do.body ]
  %blkCnt.0 = phi i32 [ %conv, %entry ], [ %sub2, %do.body ]
  %2 = tail call <16 x i1> @llvm.arm.mve.vctp8(i32 %blkCnt.0)
  %3 = tail call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %pSrc, i32 1, <16 x i1> %2, <16 x i8> zeroinitializer)
  %4 = icmp sle <16 x i8> %3, %curExtremValVec.0
  %5 = and <16 x i1> %4, %2
  %6 = tail call <16 x i8> @llvm.arm.mve.orr.predicated.v16i8.v16i1(<16 x i8> %3, <16 x i8> %3, <16 x i1> %5, <16 x i8> %curExtremValVec.0)
  %add = add <16 x i8> %indexVec.0, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16>
  %sub2 = add nsw i32 %blkCnt.0, -16
  %cmp = icmp sgt i32 %blkCnt.0, 16
  br i1 %cmp, label %do.body, label %do.end

do.end:                                           ; preds = %do.body
  ret <16 x i8> %6
}

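; Here the compare is not folded into the vpst block: a vmov lands between the
; vcmp and the predicated vmovt, so the checks expect a discrete
; vcmp.i32 / vpst / vmovt sequence inside the tail-predicated loop.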
define i32 @vcmp_new_vpst_combination(i32 %len, i32* nocapture readonly %arr) {
; CHECK-LABEL: vcmp_new_vpst_combination:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    cmp r0, #1
; CHECK-NEXT:    blt .LBB1_4
; CHECK-NEXT:  @ %bb.1: @ %vector.ph
; CHECK-NEXT:    vmov.i32 q0, #0x0
; CHECK-NEXT:    vmov.i32 q1, #0x1
; CHECK-NEXT:    movs r2, #0
; CHECK-NEXT:    dlstp.32 lr, r0
; CHECK-NEXT:  .LBB1_2: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrw.u32 q2, [r1], #16
; CHECK-NEXT:    vcmp.i32 ne, q2, zr
; CHECK-NEXT:    vmov q2, q0
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vmovt q2, q1
; CHECK-NEXT:    vaddva.u32 r2, q2
; CHECK-NEXT:    letp lr, .LBB1_2
; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
; CHECK-NEXT:    mov r0, r2
; CHECK-NEXT:    pop {r7, pc}
; CHECK-NEXT:  .LBB1_4:
; CHECK-NEXT:    movs r2, #0
; CHECK-NEXT:    mov r0, r2
; CHECK-NEXT:    pop {r7, pc}
entry:
  %cmp7 = icmp sgt i32 %len, 0
  br i1 %cmp7, label %vector.ph, label %for.cond.cleanup

vector.ph:                                        ; preds = %entry
  %n.rnd.up = add i32 %len, 3
  %n.vec = and i32 %n.rnd.up, -4
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.phi = phi i32 [ 0, %vector.ph ], [ %5, %vector.body ]
  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %len)
  %0 = getelementptr inbounds i32, i32* %arr, i32 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
  %2 = icmp ne <4 x i32> %wide.masked.load, zeroinitializer
  %narrow = and <4 x i1> %active.lane.mask, %2
  %3 = zext <4 x i1> %narrow to <4 x i32>
  %4 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %3)
  %5 = add i32 %4, %vec.phi
  %index.next = add i32 %index, 4
  %6 = icmp eq i32 %index.next, %n.vec
  br i1 %6, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body, %entry
  %count.0.lcssa = phi i32 [ 0, %entry ], [ %5, %vector.body ]
  ret i32 %count.0.lcssa
}

declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)

declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)

declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)

declare { <16 x i8>, i32 } @llvm.arm.mve.vidup.v16i8(i32, i32)

declare <16 x i1> @llvm.arm.mve.vctp8(i32)

declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32 immarg, <16 x i1>, <16 x i8>)

declare <16 x i8> @llvm.arm.mve.orr.predicated.v16i8.v16i1(<16 x i8>, <16 x i8>, <16 x i1>, <16 x i8>)