/external/llvm/test/CodeGen/X86/

D | ssse3-intrinsics-x86.ll
    5: %res = call <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8> %a0) ; <<16 x i8>> [#uses=1]
    8: declare <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8>) nounwind readnone
    13: %res = call <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32> %a0) ; <<4 x i32>> [#uses=1]
    16: declare <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32>) nounwind readnone
    21: %res = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %a0) ; <<8 x i16>> [#uses=1]
    24: declare <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16>) nounwind readnone

D | ssse3-intrinsics-fast-isel.ll
    17: %call = call <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8> %arg)
    21: declare <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8>) nounwind readnone
    34: %call = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %arg)
    38: declare <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16>) nounwind readnone
    51: %call = call <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32> %arg)
    55: declare <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32>) nounwind readnone
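
For context, the llvm.x86.ssse3.pabs.{b,w,d}.128 intrinsics exercised in the two tests above compute a per-element absolute value in a 128-bit register (the PABSB/PABSW/PABSD instructions). As a hedged C-level sketch, the standard SSSE3 header intrinsics express the same operation; the helper names below are invented for illustration only:

    #include <tmmintrin.h>  // SSSE3: _mm_abs_epi8 / _mm_abs_epi16 / _mm_abs_epi32

    // Illustrative helpers: packed absolute value of 16 signed bytes,
    // 8 signed words, or 4 signed dwords held in one 128-bit register.
    __m128i abs_bytes (__m128i v) { return _mm_abs_epi8(v);  }  // pabsb
    __m128i abs_words (__m128i v) { return _mm_abs_epi16(v); }  // pabsw
    __m128i abs_dwords(__m128i v) { return _mm_abs_epi32(v); }  // pabsd

Built with SSSE3 enabled (e.g. -mssse3), each helper typically lowers through IR intrinsics of the kind listed in these tests to a single pabs instruction.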

D | stack-folding-mmx.ll
    66: %2 = call x86_mmx @llvm.x86.ssse3.pabs.b(x86_mmx %a0) nounwind readnone
    69: declare x86_mmx @llvm.x86.ssse3.pabs.b(x86_mmx) nounwind readnone
    75: %2 = call x86_mmx @llvm.x86.ssse3.pabs.d(x86_mmx %a0) nounwind readnone
    78: declare x86_mmx @llvm.x86.ssse3.pabs.d(x86_mmx) nounwind readnone
    84: %2 = call x86_mmx @llvm.x86.ssse3.pabs.w(x86_mmx %a0) nounwind readnone
    87: declare x86_mmx @llvm.x86.ssse3.pabs.w(x86_mmx) nounwind readnone

D | avx2-intrinsics-x86.ll
    317: %res = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %a0) ; <<32 x i8>> [#uses=1]
    320: declare <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8>) nounwind readnone
    325: %res = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %a0) ; <<8 x i32>> [#uses=1]
    328: declare <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32>) nounwind readnone
    333: %res = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %a0) ; <<16 x i16>> [#uses=1]
    336: declare <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16>) nounwind readnone
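
The llvm.x86.avx2.pabs.{b,w,d} intrinsics are the 256-bit counterparts (VPABSB/VPABSW/VPABSD). A similar hedged sketch using the AVX2 header intrinsics; again the helper names are invented:

    #include <immintrin.h>  // AVX2: _mm256_abs_epi8 / _mm256_abs_epi16 / _mm256_abs_epi32

    // Illustrative helpers: per-element absolute value across a 256-bit register.
    __m256i abs_bytes256 (__m256i v) { return _mm256_abs_epi8(v);  }  // vpabsb
    __m256i abs_words256 (__m256i v) { return _mm256_abs_epi16(v); }  // vpabsw
    __m256i abs_dwords256(__m256i v) { return _mm256_abs_epi32(v); }  // vpabsd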

D | mmx-intrinsics.ll
    1206: declare x86_mmx @llvm.x86.ssse3.pabs.d(x86_mmx) nounwind readnone
    1214: %2 = tail call x86_mmx @llvm.x86.ssse3.pabs.d(x86_mmx %1) nounwind readnone
    1221: declare x86_mmx @llvm.x86.ssse3.pabs.w(x86_mmx) nounwind readnone
    1229: %2 = tail call x86_mmx @llvm.x86.ssse3.pabs.w(x86_mmx %1) nounwind readnone
    1236: declare x86_mmx @llvm.x86.ssse3.pabs.b(x86_mmx) nounwind readnone
    1244: %2 = tail call x86_mmx @llvm.x86.ssse3.pabs.b(x86_mmx %1) nounwind readnone

D | stack-folding-int-avx1.ll
    118: %2 = call <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8> %a0)
    121: declare <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8>) nounwind readnone
    127: %2 = call <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32> %a0)
    130: declare <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32>) nounwind readnone
    136: %2 = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %a0)
    139: declare <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16>) nounwind readnone

D | stack-folding-int-sse42.ll
    145: %2 = call <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8> %a0)
    148: declare <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8>) nounwind readnone
    154: %2 = call <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32> %a0)
    157: declare <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32>) nounwind readnone
    163: %2 = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %a0)
    166: declare <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16>) nounwind readnone

D | stack-folding-int-avx2.ll
    77: %2 = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %a0)
    80: declare <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8>) nounwind readnone
    86: %2 = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %a0)
    89: declare <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32>) nounwind readnone
    95: %2 = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %a0)
    98: declare <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16>) nounwind readnone

D | avx512bwvl-intrinsics.ll
    3714: declare <16 x i8> @llvm.x86.avx512.mask.pabs.b.128(<16 x i8>, <16 x i8>, i16)
    3721: %res = call <16 x i8> @llvm.x86.avx512.mask.pabs.b.128(<16 x i8> %x0, <16 x i8> %x1, i16 %x2)
    3722: %res1 = call <16 x i8> @llvm.x86.avx512.mask.pabs.b.128(<16 x i8> %x0, <16 x i8> %x1, i16 -1)
    3727: declare <32 x i8> @llvm.x86.avx512.mask.pabs.b.256(<32 x i8>, <32 x i8>, i32)
    3734: %res = call <32 x i8> @llvm.x86.avx512.mask.pabs.b.256(<32 x i8> %x0, <32 x i8> %x1, i32 %x2)
    3735: %res1 = call <32 x i8> @llvm.x86.avx512.mask.pabs.b.256(<32 x i8> %x0, <32 x i8> %x1, i32 -1)
    3740: declare <8 x i16> @llvm.x86.avx512.mask.pabs.w.128(<8 x i16>, <8 x i16>, i8)
    3747: %res = call <8 x i16> @llvm.x86.avx512.mask.pabs.w.128(<8 x i16> %x0, <8 x i16> %x1, i8 %x2)
    3748: %res1 = call <8 x i16> @llvm.x86.avx512.mask.pabs.w.128(<8 x i16> %x0, <8 x i16> %x1, i8 -1)
    3753: declare <16 x i16> @llvm.x86.avx512.mask.pabs.w.256(<16 x i16>, <16 x i16>, i16)
    [all …]

D | avx512bw-intrinsics.ll
    2185: declare <32 x i16> @llvm.x86.avx512.mask.pabs.w.512(<32 x i16>, <32 x i16>, i32)
    2203: %res = call <32 x i16> @llvm.x86.avx512.mask.pabs.w.512(<32 x i16> %x0, <32 x i16> %x1, i32 %x2)
    2204: %res1 = call <32 x i16> @llvm.x86.avx512.mask.pabs.w.512(<32 x i16> %x0, <32 x i16> %x1, i32 -1)
    2209: declare <64 x i8> @llvm.x86.avx512.mask.pabs.b.512(<64 x i8>, <64 x i8>, i64)
    2229: %res = call <64 x i8> @llvm.x86.avx512.mask.pabs.b.512(<64 x i8> %x0, <64 x i8> %x1, i64 %x2)
    2230: %res1 = call <64 x i8> @llvm.x86.avx512.mask.pabs.b.512(<64 x i8> %x0, <64 x i8> %x1, i64 -1)

D | avx-intrinsics-x86.ll
    2085: %res = call <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8> %a0) ; <<16 x i8>> [#uses=1]
    2088: declare <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8>) nounwind readnone
    2096: %res = call <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32> %a0) ; <<4 x i32>> [#uses=1]
    2099: declare <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32>) nounwind readnone
    2107: %res = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %a0) ; <<8 x i16>> [#uses=1]
    2110: declare <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16>) nounwind readnone

D | avx512vl-intrinsics.ll
    2905: declare <2 x i64> @llvm.x86.avx512.mask.pabs.q.128(<2 x i64>, <2 x i64>, i8)
    2912: %res = call <2 x i64> @llvm.x86.avx512.mask.pabs.q.128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2)
    2913: %res1 = call <2 x i64> @llvm.x86.avx512.mask.pabs.q.128(<2 x i64> %x0, <2 x i64> %x1, i8 -1)
    2918: declare <4 x i64> @llvm.x86.avx512.mask.pabs.q.256(<4 x i64>, <4 x i64>, i8)
    2925: %res = call <4 x i64> @llvm.x86.avx512.mask.pabs.q.256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2)
    2926: %res1 = call <4 x i64> @llvm.x86.avx512.mask.pabs.q.256(<4 x i64> %x0, <4 x i64> %x1, i8 -1)
    2931: declare <4 x i32> @llvm.x86.avx512.mask.pabs.d.128(<4 x i32>, <4 x i32>, i8)
    2938: %res = call <4 x i32> @llvm.x86.avx512.mask.pabs.d.128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2)
    2939: %res1 = call <4 x i32> @llvm.x86.avx512.mask.pabs.d.128(<4 x i32> %x0, <4 x i32> %x1, i8 -1)
    2944: declare <8 x i32> @llvm.x86.avx512.mask.pabs.d.256(<8 x i32>, <8 x i32>, i8)
    [all …]

D | avx512-intrinsics.ll
    792: declare <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32>, <16 x i32>, i16)
    802: %res = call <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32> %x0, <16 x i32> %x1, i16 %x2)
    803: %res1 = call <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32> %x0, <16 x i32> %x1, i16 -1)
    808: declare <8 x i64> @llvm.x86.avx512.mask.pabs.q.512(<8 x i64>, <8 x i64>, i8)
    819: %res = call <8 x i64> @llvm.x86.avx512.mask.pabs.q.512(<8 x i64> %x0, <8 x i64> %x1, i8 %x2)
    820: %res1 = call <8 x i64> @llvm.x86.avx512.mask.pabs.q.512(<8 x i64> %x0, <8 x i64> %x1, i8 -1)
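
Unlike the SSSE3/AVX2 forms, the llvm.x86.avx512.mask.pabs.* intrinsics take three operands: the source vector, a pass-through vector, and a lane mask (a mask of -1, as in the %res1 lines, enables every lane). A hedged sketch of the same masking behaviour through the AVX-512F header intrinsics; the helper names are invented:

    #include <immintrin.h>  // AVX-512F: _mm512_abs_epi32, _mm512_mask_abs_epi32, _mm512_mask_abs_epi64

    // Illustrative helpers: |a| per lane; lanes whose mask bit is 0 keep the
    // corresponding lane of src (the pass-through operand).
    __m512i abs_d512     (__m512i a)                             { return _mm512_abs_epi32(a); }
    __m512i abs_d512_mask(__m512i src, __mmask16 k, __m512i a)   { return _mm512_mask_abs_epi32(src, k, a); }
    __m512i abs_q512_mask(__m512i src, __mmask8 k, __m512i a)    { return _mm512_mask_abs_epi64(src, k, a); }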

/external/eigen/unsupported/Eigen/src/MoreVectorization/

D | MathFunctions.h
    40: Packet4f a = pabs(x);//got the absolute value in pasin()

/external/eigen/Eigen/src/Core/arch/SSE/

D | MathFunctions.h
    287: x = pabs(x);
    384: x = pabs(x);

D | PacketMath.h
    426: template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a)
    431: template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a)
    436: template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a)
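
These are Eigen's SSE specializations of pabs for packets of 4 floats, 2 doubles and 4 ints. Purely as an illustration of the usual technique for the float case (a sketch, not the code from this file), the sign bit can be cleared with a bitwise AND:

    #include <emmintrin.h>  // SSE2

    // Sketch only: absolute value of 4 packed floats by masking off the sign bit.
    static inline __m128 abs_ps_sketch(__m128 a) {
      const __m128 sign_mask = _mm_castsi128_ps(_mm_set1_epi32(0x7FFFFFFF));
      return _mm_and_ps(a, sign_mask);
    }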

/external/eigen/Eigen/src/Core/arch/NEON/

D | PacketMath.h
    246: template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vabsq_f32(a); }
    247: template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vabsq_s32(a); }

/external/eigen/Eigen/src/Core/arch/AltiVec/

D | PacketMath.h
    350: template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vec_abs(a); }
    351: template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vec_abs(a); }

/external/eigen/Eigen/src/Core/

D | GenericPacketMath.h
    133: pabs(const Packet& a) { using std::abs; return abs(a); } in pabs() function
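
This is the generic fallback: when the "packet" type is a plain scalar, pabs simply forwards to std::abs, and the SSE/NEON/AltiVec headers above override it with vector instructions. A small illustrative call, assuming only that the Eigen headers are on the include path:

    #include <Eigen/Core>
    #include <cassert>

    int main() {
      // Scalar "packet": resolves to the generic pabs, i.e. std::abs.
      double d = Eigen::internal::pabs(-3.5);
      assert(d == 3.5);
      return 0;
    }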

D | Functors.h
    335: { return internal::pabs(a); }

/external/eigen/bench/

D | bench_norm.cpp
    146: Packet ax = internal::pabs(v.template packet<Aligned>(j)); in pblueNorm()

/external/eigen/test/

D | packetmath.cpp
    293: CHECK_CWISE1(abs, internal::pabs); in packetmath_notcomplex()
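
The packet-math test checks internal::pabs element-wise against abs. A comparable hand-rolled check, sketched under the assumption of an SSE build where Packet4f is available (the array names are made up here):

    #include <Eigen/Core>
    #include <cassert>
    #include <cmath>

    int main() {
      using namespace Eigen::internal;
      float in[4]  = { -1.5f, 2.0f, -0.25f, 4.0f };
      float out[4];
      Packet4f p = ploadu<Packet4f>(in);   // unaligned load into a packet
      pstoreu(out, pabs(p));               // packet absolute value, stored back
      for (int i = 0; i < 4; ++i)
        assert(out[i] == std::abs(in[i]));
      return 0;
    }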
|