Lines matching refs:sminv (the leading numbers are the matched lines' positions in the source test file)
5 ; CHECK: sminv.8b b[[REGNUM:[0-9]+]], v0
9 %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8> %a1)
16 ; CHECK: sminv.4h h[[REGNUM:[0-9]+]], v0
20 %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16> %a1)
32 %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v2i32(<2 x i32> %a1)
38 ; CHECK: sminv.16b b[[REGNUM:[0-9]+]], v0
42 %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8> %a1)
49 ; CHECK: sminv.8h h[[REGNUM:[0-9]+]], v0
53 %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> %a1)
60 ; CHECK: sminv.4s [[REGNUM:s[0-9]+]], v0
64 %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32> %a1)
70 ; CHECK: sminv.8b b[[REGNUM:[0-9]+]], v1
74 %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8> %a2)
82 ; CHECK: sminv.4h h[[REGNUM:[0-9]+]], v1
86 %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16> %a2)
98 %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v2i32(<2 x i32> %a2)
105 ; CHECK: sminv.16b b[[REGNUM:[0-9]+]], v1
109 %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8> %a2)
117 ; CHECK: sminv.8h h[[REGNUM:[0-9]+]], v1
121 %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> %a2)
129 ; CHECK: sminv.4s s[[REGNUM:[0-9]+]], v1
133 %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32> %a2)
138 declare i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32>)
139 declare i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16>)
140 declare i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8>)
141 declare i32 @llvm.aarch64.neon.sminv.i32.v2i32(<2 x i32>)
142 declare i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16>)
143 declare i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8>)
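For context, here is a minimal sketch of how two of the matched pairs above presumably sit in the full FileCheck test: one plain reduction (the .8b CHECK at file line 5 paired with the call at line 9) and one reduction whose result feeds a lane operation (the .8b CHECK at line 70 paired with the call at line 74, which is why those patterns match v1 rather than v0). The RUN line, the function names, the CHECK-NEXT lines, and the lane index are assumptions for illustration, not taken from the listing.

; RUN line assumed; the Apple asm syntax is what yields the "sminv.8b b0, v0" spelling.
; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s

; Signed-minimum reduction across the eight i8 lanes of the first argument.
; The backend should select a single sminv.8b into a B register, then a
; sign-extending smov to move the scalar result into w0.
define signext i8 @test_vminv_s8(<8 x i8> %a1) {
; CHECK: sminv.8b b[[REGNUM:[0-9]+]], v0
; CHECK-NEXT: smov.b w0, v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
  %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8> %a1)
  %0 = trunc i32 %vminv.i to i8       ; the intrinsic returns i32, so narrow back to i8
  ret i8 %0
}

; Same reduction, but over the second argument (%a2, i.e. v1), with the result
; inserted back into a lane of %a1, which is why those patterns name v1.
; The lane index 3 is an arbitrary choice for this sketch.
define <8 x i8> @test_vminv_s8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) {
; CHECK: sminv.8b b[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: mov.b v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
  %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8> %a2)
  %1 = trunc i32 %0 to i8
  %2 = insertelement <8 x i8> %a1, i8 %1, i32 3
  ret <8 x i8> %2
}

; Declaration needed to make this sketch a self-contained module (the full set
; of declarations appears in the listing above).
declare i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8>)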