
Searched refs:vcgtq_f32 (Results 1 – 21 of 21) sorted by relevance

/external/XNNPACK/src/f32-argmaxpool/
9p8x-neon-c4.c:69 const uint32x4_t vm1 = vcgtq_f32(vi1, vmax); in xnn_f32_argmaxpool_ukernel_9p8x__neon_c4()
73 const uint32x4_t vm2 = vcgtq_f32(vi2, vmax); in xnn_f32_argmaxpool_ukernel_9p8x__neon_c4()
77 const uint32x4_t vm3 = vcgtq_f32(vi3, vmax); in xnn_f32_argmaxpool_ukernel_9p8x__neon_c4()
81 const uint32x4_t vm4 = vcgtq_f32(vi4, vmax); in xnn_f32_argmaxpool_ukernel_9p8x__neon_c4()
85 const uint32x4_t vm5 = vcgtq_f32(vi5, vmax); in xnn_f32_argmaxpool_ukernel_9p8x__neon_c4()
89 const uint32x4_t vm6 = vcgtq_f32(vi6, vmax); in xnn_f32_argmaxpool_ukernel_9p8x__neon_c4()
93 const uint32x4_t vm7 = vcgtq_f32(vi7, vmax); in xnn_f32_argmaxpool_ukernel_9p8x__neon_c4()
97 const uint32x4_t vm8 = vcgtq_f32(vi8, vmax); in xnn_f32_argmaxpool_ukernel_9p8x__neon_c4()
144 const uint32x4_t vm0 = vcgtq_f32(vi0, vmax); in xnn_f32_argmaxpool_ukernel_9p8x__neon_c4()
148 const uint32x4_t vm1 = vcgtq_f32(vi1, vmax); in xnn_f32_argmaxpool_ukernel_9p8x__neon_c4()
[all …]
9x-neon-c4.c:88 const uint32x4_t vm1 = vcgtq_f32(vi1, vmax); in xnn_f32_argmaxpool_ukernel_9x__neon_c4()
92 const uint32x4_t vm2 = vcgtq_f32(vi2, vmax); in xnn_f32_argmaxpool_ukernel_9x__neon_c4()
96 const uint32x4_t vm3 = vcgtq_f32(vi3, vmax); in xnn_f32_argmaxpool_ukernel_9x__neon_c4()
100 const uint32x4_t vm4 = vcgtq_f32(vi4, vmax); in xnn_f32_argmaxpool_ukernel_9x__neon_c4()
104 const uint32x4_t vm5 = vcgtq_f32(vi5, vmax); in xnn_f32_argmaxpool_ukernel_9x__neon_c4()
108 const uint32x4_t vm6 = vcgtq_f32(vi6, vmax); in xnn_f32_argmaxpool_ukernel_9x__neon_c4()
112 const uint32x4_t vm7 = vcgtq_f32(vi7, vmax); in xnn_f32_argmaxpool_ukernel_9x__neon_c4()
116 const uint32x4_t vm8 = vcgtq_f32(vi8, vmax); in xnn_f32_argmaxpool_ukernel_9x__neon_c4()
137 const uint32x4_t vm1 = vcgtq_f32(vi1, vmax); in xnn_f32_argmaxpool_ukernel_9x__neon_c4()
141 const uint32x4_t vm2 = vcgtq_f32(vi2, vmax); in xnn_f32_argmaxpool_ukernel_9x__neon_c4()
[all …]
4x-neon-c4.c:58 const uint32x4_t vm1 = vcgtq_f32(vi1, vmax); in xnn_f32_argmaxpool_ukernel_4x__neon_c4()
62 const uint32x4_t vm2 = vcgtq_f32(vi2, vmax); in xnn_f32_argmaxpool_ukernel_4x__neon_c4()
66 const uint32x4_t vm3 = vcgtq_f32(vi3, vmax); in xnn_f32_argmaxpool_ukernel_4x__neon_c4()
82 const uint32x4_t vm1 = vcgtq_f32(vi1, vmax); in xnn_f32_argmaxpool_ukernel_4x__neon_c4()
86 const uint32x4_t vm2 = vcgtq_f32(vi2, vmax); in xnn_f32_argmaxpool_ukernel_4x__neon_c4()
90 const uint32x4_t vm3 = vcgtq_f32(vi3, vmax); in xnn_f32_argmaxpool_ukernel_4x__neon_c4()
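
In these argmax-pooling kernels, vcgtq_f32 produces a per-lane all-ones/all-zeros mask that then drives a bit-select update of the running maximum and of the index that produced it. A minimal sketch of that pattern, assuming illustrative names (argmax_step, vi, vrow, vmax, vidx) rather than XNNPACK's exact locals:

    #include <arm_neon.h>

    /* One update step: compare a new input row against the running per-lane
       maximum and keep the larger value together with the row index that
       produced it. */
    static inline void argmax_step(float32x4_t vi, uint32x4_t vrow,
                                   float32x4_t* vmax, uint32x4_t* vidx) {
      const uint32x4_t vm = vcgtq_f32(vi, *vmax);  /* all-ones where vi > vmax */
      *vmax = vbslq_f32(vm, vi, *vmax);            /* select the larger value  */
      *vidx = vbslq_u32(vm, vrow, *vidx);          /* select matching index    */
    }
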
/external/XNNPACK/src/f32-vrnd/gen/
vrndd-neon-x8.c:49 const uint32x4_t vadjmask0123 = vcgtq_f32(vrndx0123, vx0123); in xnn_f32_vrndd_ukernel__neon_x8()
50 const uint32x4_t vadjmask4567 = vcgtq_f32(vrndx4567, vx4567); in xnn_f32_vrndd_ukernel__neon_x8()
68 const uint32x4_t vadjmask = vcgtq_f32(vrndx, vx); in xnn_f32_vrndd_ukernel__neon_x8()
80 const uint32x4_t vadjmask = vcgtq_f32(vrndx, vx); in xnn_f32_vrndd_ukernel__neon_x8()
vrndd-neon-x4.c:43 const uint32x4_t vadjmask0123 = vcgtq_f32(vrndx0123, vx0123); in xnn_f32_vrndd_ukernel__neon_x4()
58 const uint32x4_t vadjmask = vcgtq_f32(vrndx, vx); in xnn_f32_vrndd_ukernel__neon_x4()
/external/XNNPACK/src/f32-vrnd/
vrndd-neon.c.in:49 const uint32x4_t vadjmask${ABC[N:N+4]} = vcgtq_f32(vrndx${ABC[N:N+4]}, vx${ABC[N:N+4]});
68 const uint32x4_t vadjmask = vcgtq_f32(vrndx, vx);
80 const uint32x4_t vadjmask = vcgtq_f32(vrndx, vx);
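
The vrndd kernels use vcgtq_f32 to implement round-toward-negative-infinity: after rounding x to an integral value, they subtract 1.0f in exactly the lanes where the rounded value overshot the input. A minimal sketch of that adjustment, with illustrative names (floor_adjust, vadj) rather than the generated kernels' exact locals:

    #include <arm_neon.h>

    /* Given x and an integral rounding of x (round-to-nearest or truncation),
       correct the lanes where the rounded value ended up above x, yielding
       floor(x). */
    static inline float32x4_t floor_adjust(float32x4_t vx, float32x4_t vrndx) {
      const uint32x4_t vone = vreinterpretq_u32_f32(vdupq_n_f32(1.0f));
      const uint32x4_t vadjmask = vcgtq_f32(vrndx, vx);       /* overshoot lanes */
      const float32x4_t vadj =
          vreinterpretq_f32_u32(vandq_u32(vadjmask, vone));   /* 1.0f or 0.0f    */
      return vsubq_f32(vrndx, vadj);
    }
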
/external/XNNPACK/src/math/
roundd-neon-cvt.c:50 …const float32x4_t vy = vsubq_f32(vrndx, vreinterpretq_f32_u32(vandq_u32(vcgtq_f32(vrndx, vx), vone… in xnn_math_f32_roundd__neon_cvt()
roundd-neon-addsub.c:57 …const float32x4_t vy = vsubq_f32(vrndx, vreinterpretq_f32_u32(vandq_u32(vcgtq_f32(vrndx, vx), vone… in xnn_math_f32_roundd__neon_addsub()
roundz-neon-addsub.c:52 …const float32x4_t vadjustment = vreinterpretq_f32_u32(vandq_u32(vone, vcgtq_f32(vrndabsx, vabsx))); in xnn_math_f32_roundz__neon_addsub()
exp-neonfma-rr2-p5.c:93 vf = vbslq_f32(vcgtq_f32(vx, vinf_cutoff), vplus_inf, vf); in xnn_math_f32_exp__neonfma_rr2_p5()
exp-neonfma-rr2-lut64-p2.c:104 vf = vbslq_f32(vcgtq_f32(vx, vinf_cutoff), vplus_inf, vf); in xnn_math_f32_exp__neonfma_rr2_lut64_p2()
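
In the exp kernels the same compare feeds a bit-select that saturates overflowing inputs to +inf. A minimal sketch; the cutoff constant below (roughly ln(FLT_MAX)) is an assumption standing in for the kernels' own precomputed constants:

    #include <arm_neon.h>
    #include <math.h>

    /* Force lanes whose input exceeds the overflow cutoff to +inf instead of
       whatever the polynomial evaluation produced. */
    static inline float32x4_t saturate_exp(float32x4_t vx, float32x4_t vf) {
      const float32x4_t vinf_cutoff =
          vdupq_n_f32(0x1.62E42Ep+6f);              /* ~ln(FLT_MAX), assumed value */
      const float32x4_t vplus_inf = vdupq_n_f32(INFINITY);
      return vbslq_f32(vcgtq_f32(vx, vinf_cutoff), vplus_inf, vf);
    }
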
/external/eigen/Eigen/src/Core/arch/NEON/
MathFunctions.h:50 Packet4ui mask = vcgtq_f32(tmp, fx);
/external/llvm-project/clang/test/CodeGen/
aarch64-neon-intrinsics-constrained.c:429 return vcgtq_f32(v1, v2); in test_vcgtq_f32()
aarch64-neon-intrinsics.c:2293 return vcgtq_f32(v1, v2); in test_vcgtq_f32()
arm_neon_intrinsics.c:1615 return vcgtq_f32(a, b); in test_vcgtq_f32()
/external/skia/include/private/
SkNx_neon.h:21 auto too_big = vcgtq_f32(roundtrip, v); in emulate_vrndmq_f32()
206 AI SkNx operator >(const SkNx& o) const {return vreinterpretq_f32_u32(vcgtq_f32(fVec, o.fVec));}
/external/skqp/include/private/
SkNx_neon.h:21 auto too_big = vcgtq_f32(roundtrip, v); in emulate_vrndmq_f32()
223 AI SkNx operator >(const SkNx& o) const {return vreinterpretq_f32_u32(vcgtq_f32(fVec, o.fVec));}
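
Skia's SkNx wrapper (and its skqp copy) returns the comparison result reinterpreted as floats, so the all-ones/all-zeros mask stays in the same vector type and can feed a later select; the sse2neon mapping in the next section follows the same shape. A minimal sketch of that idea in plain NEON, with an illustrative function name:

    #include <arm_neon.h>

    /* Per-lane a > b as a float vector: 0xFFFFFFFF bits (a NaN pattern) where
       true, 0.0f where false, ready for a later vbslq_f32 select. */
    static inline float32x4_t greater_than_mask(float32x4_t a, float32x4_t b) {
      return vreinterpretq_f32_u32(vcgtq_f32(a, b));
    }
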
/external/pffft/
sse2neon.h:3840 vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); in _mm_cmpgt_ps()
4198 vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)); in _mm_comigt_ss()
/external/neon_2_sse/
NEON_2_SSE.h:771 _NEON2SSESTORAGE uint32x4_t vcgtq_f32(float32x4_t a, float32x4_t b); // VCGT.F32 q0, q0, q0
5449 _NEON2SSESTORAGE uint32x4_t vcgtq_f32(float32x4_t a, float32x4_t b); // VCGT.F32 q0, q0, q0
5450 _NEON2SSE_INLINE uint32x4_t vcgtq_f32(float32x4_t a, float32x4_t b) in vcgtq_f32() function
5524 #define vcltq_f32(a,b) vcgtq_f32(b, a) //swap the arguments!!
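
The NEON_2_SSE prototype and the vcltq_f32 macro above capture the intrinsic's contract: vcgtq_f32 yields a per-lane 0xFFFFFFFF/0x00000000 mask, and a less-than compare is the same call with the operands swapped. A minimal standalone check, with illustrative values:

    #include <arm_neon.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      const float av[4] = {1.0f, 5.0f, 3.0f, 0.0f};
      const float bv[4] = {2.0f, 4.0f, 3.0f, -1.0f};
      const float32x4_t a = vld1q_f32(av);
      const float32x4_t b = vld1q_f32(bv);
      const uint32x4_t gt = vcgtq_f32(a, b);  /* a > b                    */
      const uint32x4_t lt = vcgtq_f32(b, a);  /* a < b, via swapped args  */
      uint32_t g[4], l[4];
      vst1q_u32(g, gt);
      vst1q_u32(l, lt);
      for (int i = 0; i < 4; ++i) {
        /* All-ones masks appear only in lanes where the strict compare holds. */
        printf("lane %d: a>b 0x%08X  a<b 0x%08X\n",
               i, (unsigned) g[i], (unsigned) l[i]);
      }
      return 0;
    }
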
/external/clang/test/CodeGen/
aarch64-neon-intrinsics.c:2476 return vcgtq_f32(v1, v2); in test_vcgtq_f32()
arm_neon_intrinsics.c:1764 return vcgtq_f32(a, b); in test_vcgtq_f32()