; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon | FileCheck %s

declare float @llvm.aarch64.neon.fminnmv.f32.v4f32(<4 x float>)
declare float @llvm.aarch64.neon.fmaxnmv.f32.v4f32(<4 x float>)
declare float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float>)
declare float @llvm.aarch64.neon.fmaxv.f32.v4f32(<4 x float>)
declare i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32>)
declare i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16>)
declare i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8>)
declare i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16>)
declare i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8>)
declare i32 @llvm.aarch64.neon.uminv.i32.v4i32(<4 x i32>)
declare i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16>)
declare i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8>)
declare i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32>)
declare i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16>)
declare i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8>)
declare i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16>)
declare i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8>)
declare i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16>)
declare i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8>)
declare i32 @llvm.aarch64.neon.umaxv.i32.v4i32(<4 x i32>)
declare i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16>)
declare i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8>)
declare i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32>)
declare i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16>)
declare i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8>)
declare i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16>)
declare i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8>)
declare i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16>)
declare i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8>)
declare i64 @llvm.aarch64.neon.uaddlv.i64.v4i32(<4 x i32>)
declare i32 @llvm.aarch64.neon.uaddlv.i32.v8i16(<8 x i16>)
declare i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8>)
declare i64 @llvm.aarch64.neon.saddlv.i64.v4i32(<4 x i32>)
declare i32 @llvm.aarch64.neon.saddlv.i32.v8i16(<8 x i16>)
declare i32 @llvm.aarch64.neon.saddlv.i32.v16i8(<16 x i8>)
declare i32 @llvm.aarch64.neon.uaddlv.i32.v4i16(<4 x i16>)
declare i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8>)
declare i32 @llvm.aarch64.neon.saddlv.i32.v4i16(<4 x i16>)
declare i32 @llvm.aarch64.neon.saddlv.i32.v8i8(<8 x i8>)
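
; The lines below are the intrinsic calls from the test bodies; the
; enclosing define/ret statements and their CHECK lines are not part of
; this listing. For illustration only, a complete test of this kind
; plausibly has the following shape (the function name and CHECK pattern
; are assumptions, not taken from this file):
define i64 @test_saddlv_example(<4 x i32> %a) {
; CHECK-LABEL: test_saddlv_example:
; CHECK: saddlv d{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
  %saddlvv.i = tail call i64 @llvm.aarch64.neon.saddlv.i64.v4i32(<4 x i32> %a)
  ret i64 %saddlvv.i
}

; Signed and unsigned long add across vector (saddlv/uaddlv).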
  %saddlvv.i = tail call i32 @llvm.aarch64.neon.saddlv.i32.v8i8(<8 x i8> %a)
  %saddlvv.i = tail call i32 @llvm.aarch64.neon.saddlv.i32.v4i16(<4 x i16> %a)
  %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8> %a)
  %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v4i16(<4 x i16> %a)
  %saddlvv.i = tail call i32 @llvm.aarch64.neon.saddlv.i32.v16i8(<16 x i8> %a)
  %saddlvv.i = tail call i32 @llvm.aarch64.neon.saddlv.i32.v8i16(<8 x i16> %a)
  %saddlvv.i = tail call i64 @llvm.aarch64.neon.saddlv.i64.v4i32(<4 x i32> %a)
  %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8> %a)
  %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v8i16(<8 x i16> %a)
  %uaddlvv.i = tail call i64 @llvm.aarch64.neon.uaddlv.i64.v4i32(<4 x i32> %a)
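
; Signed and unsigned maximum across vector (smaxv/umaxv).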
  %smaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8> %a)
  %smaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16> %a)
  %umaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8> %a)
  %umaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16> %a)
  %smaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8> %a)
  %smaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16> %a)
  %smaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32> %a)
  %umaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> %a)
  %umaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16> %a)
  %umaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v4i32(<4 x i32> %a)
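
; Signed and unsigned minimum across vector (sminv/uminv).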
  %sminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8> %a)
  %sminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16> %a)
  %uminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8> %a)
  %uminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16> %a)
  %sminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8> %a)
  %sminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> %a)
  %sminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32> %a)
  %uminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> %a)
  %uminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16> %a)
  %uminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v4i32(<4 x i32> %a)
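
; Integer add across vector (saddv).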
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8> %a)
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16> %a)
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8> %a)
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16> %a)
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8> %a)
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16> %a)
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32> %a)
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8> %a)
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16> %a)
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32> %a)
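
; Floating-point maximum/minimum across vector (fmaxv/fminv/fmaxnmv/fminnmv).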
  %0 = call float @llvm.aarch64.neon.fmaxv.f32.v4f32(<4 x float> %a)
  %0 = call float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float> %a)
  %0 = call float @llvm.aarch64.neon.fmaxnmv.f32.v4f32(<4 x float> %a)
  %0 = call float @llvm.aarch64.neon.fminnmv.f32.v4f32(<4 x float> %a)
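
; For illustration only: a complete floating-point reduction test plausibly
; looks like this (the function name and CHECK pattern are assumptions, not
; taken from this file):
define float @test_fmaxnmv_example(<4 x float> %a) {
; CHECK-LABEL: test_fmaxnmv_example:
; CHECK: fmaxnmv s{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
  %0 = call float @llvm.aarch64.neon.fmaxnmv.f32.v4f32(<4 x float> %a)
  ret float %0
}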