Lines matching refs:uaddlv (declarations and calls of the llvm.aarch64.neon.uaddlv intrinsics)
declare i64 @llvm.aarch64.neon.uaddlv.i64.v4i32(<4 x i32>)
declare i32 @llvm.aarch64.neon.uaddlv.i32.v8i16(<8 x i16>)
declare i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8>)
declare i32 @llvm.aarch64.neon.uaddlv.i32.v4i16(<4 x i16>)
declare i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8>)
; CHECK: uaddlv h{{[0-9]+}}, {{v[0-9]+}}.8b
  %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8> %a)
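The match shows only the CHECK line and the call. A minimal sketch of the full test this pair likely sits in, assuming the file's usual llc-plus-FileCheck RUN line and a hypothetical function name test_vaddlv_u8; across .8b, UADDLV leaves a 16-bit sum in an H register, so the i32 intrinsic result is narrowed to i16:

define i16 @test_vaddlv_u8(<8 x i8> %a) {
; CHECK-LABEL: test_vaddlv_u8:
; CHECK: uaddlv h{{[0-9]+}}, {{v[0-9]+}}.8b
entry:
  ; widening unsigned add across all eight i8 lanes
  %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8> %a)
  ; only the low 16 bits (the H register) are meaningful
  %0 = trunc i32 %uaddlvv.i to i16
  ret i16 %0
}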
; CHECK: uaddlv s{{[0-9]+}}, {{v[0-9]+}}.4h
  %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v4i16(<4 x i16> %a)
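A matching sketch for the .4h case (function name again assumed); here the 32-bit S-register result is returned as-is, so no narrowing step is needed:

define i32 @test_vaddlv_u16(<4 x i16> %a) {
; CHECK-LABEL: test_vaddlv_u16:
; CHECK: uaddlv s{{[0-9]+}}, {{v[0-9]+}}.4h
entry:
  ; four i16 lanes widen and sum to a 32-bit scalar
  %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v4i16(<4 x i16> %a)
  ret i32 %uaddlvv.i
}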
; CHECK: uaddlv h{{[0-9]+}}, {{v[0-9]+}}.16b
  %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8> %a)
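The quad-register .16b form follows the same narrowing pattern as .8b; a sketch with an assumed name test_vaddlvq_u8:

define i16 @test_vaddlvq_u8(<16 x i8> %a) {
; CHECK-LABEL: test_vaddlvq_u8:
; CHECK: uaddlv h{{[0-9]+}}, {{v[0-9]+}}.16b
entry:
  ; sixteen i8 lanes still sum into a 16-bit H-register result
  %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8> %a)
  %0 = trunc i32 %uaddlvv.i to i16
  ret i16 %0
}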
; CHECK: uaddlv s{{[0-9]+}}, {{v[0-9]+}}.8h
  %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v8i16(<8 x i16> %a)
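Sketch for .8h (name assumed); like .4h, the S-register result is already i32:

define i32 @test_vaddlvq_u16(<8 x i16> %a) {
; CHECK-LABEL: test_vaddlvq_u16:
; CHECK: uaddlv s{{[0-9]+}}, {{v[0-9]+}}.8h
entry:
  %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v8i16(<8 x i16> %a)
  ret i32 %uaddlvv.i
}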
; CHECK: uaddlv d{{[0-9]+}}, {{v[0-9]+}}.4s
  %uaddlvv.i = tail call i64 @llvm.aarch64.neon.uaddlv.i64.v4i32(<4 x i32> %a)
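And for .4s, where the lane sums widen to 64 bits and land in a D register, the i64 overload is used and returned directly; the function name is again an assumption:

define i64 @test_vaddlvq_u32(<4 x i32> %a) {
; CHECK-LABEL: test_vaddlvq_u32:
; CHECK: uaddlv d{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
  ; four i32 lanes widen and sum to a 64-bit scalar
  %uaddlvv.i = tail call i64 @llvm.aarch64.neon.uaddlv.i64.v4i32(<4 x i32> %a)
  ret i64 %uaddlvv.i
}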