
Searched refs: vminq_s32 (Results 1 – 25 of 29), sorted by relevance

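For context: vminq_s32 is the Advanced SIMD (NEON) intrinsic that returns the lane-wise minimum of two int32x4_t vectors. A minimal usage sketch, assuming <arm_neon.h> and a NEON-capable target (the names below are illustrative, not taken from the results):

#include <arm_neon.h>

// Minimal illustration: lane-wise minimum of two int32x4_t vectors.
static inline int32x4_t min_example(void) {
  const int32_t lhs[4] = {1, 5, -2, 7};
  int32x4_t a = vld1q_s32(lhs);       // {1, 5, -2, 7}
  int32x4_t b = vdupq_n_s32(3);       // {3, 3, 3, 3}
  return vminq_s32(a, b);             // {1, 3, -2, 3}
}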

/external/XNNPACK/src/f32-hswish/gen/
hswish-neon-x16.c:52 vacc0123 = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc0123), vsix)); in xnn_f32_hswish_ukernel__neon_x16()
53 vacc4567 = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc4567), vsix)); in xnn_f32_hswish_ukernel__neon_x16()
54 vacc89AB = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc89AB), vsix)); in xnn_f32_hswish_ukernel__neon_x16()
55 vaccCDEF = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vaccCDEF), vsix)); in xnn_f32_hswish_ukernel__neon_x16()
72 vacc = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc), vsix)); in xnn_f32_hswish_ukernel__neon_x16()
81 vacc = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc), vsix)); in xnn_f32_hswish_ukernel__neon_x16()
hswish-neon-x8.c:44 vacc0123 = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc0123), vsix)); in xnn_f32_hswish_ukernel__neon_x8()
45 vacc4567 = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc4567), vsix)); in xnn_f32_hswish_ukernel__neon_x8()
58 vacc = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc), vsix)); in xnn_f32_hswish_ukernel__neon_x8()
67 vacc = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc), vsix)); in xnn_f32_hswish_ukernel__neon_x8()
hswish-neon-x4.c:37 vacc = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc), vsix)); in xnn_f32_hswish_ukernel__neon_x4()
46 vacc = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc), vsix)); in xnn_f32_hswish_ukernel__neon_x4()
/external/XNNPACK/src/f32-hswish/
neon.c.in:44 …vacc${ABC[N:N+4]} = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc${ABC[N:N+4]}), vsix…
57 vacc = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc), vsix));
66 vacc = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc), vsix));
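
The XNNPACK hswish kernels above clamp a float accumulator at 6.0f with an integer vminq_s32 on the reinterpreted bits; this works because, for non-negative IEEE-754 floats, the int32 bit patterns order the same way as the float values. A hedged sketch of that idiom (the helper name is hypothetical, not from XNNPACK):

#include <arm_neon.h>

// Hypothetical helper mirroring the idiom above: clamp a non-negative f32
// vector at 6.0f via an integer min on the raw bits. Only valid when every
// lane of v is >= 0.0f, where float and int32 bit-pattern ordering agree.
static inline float32x4_t clamp_at_six(float32x4_t v) {
  const int32x4_t vsix = vreinterpretq_s32_f32(vdupq_n_f32(6.0f));
  return vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(v), vsix));
}
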
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/integer_ops/
mean.h:100 temp_sum.val[0] = vminq_s32(vmaxq_s32(temp_sum.val[0], min_dup), max_dup); in MeanImpl()
101 temp_sum.val[1] = vminq_s32(vmaxq_s32(temp_sum.val[1], min_dup), max_dup); in MeanImpl()
102 temp_sum.val[2] = vminq_s32(vmaxq_s32(temp_sum.val[2], min_dup), max_dup); in MeanImpl()
103 temp_sum.val[3] = vminq_s32(vmaxq_s32(temp_sum.val[3], min_dup), max_dup); in MeanImpl()
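
The TFLite lines above (and several later results) use the same saturate-to-range idiom: vmaxq_s32 against the lower bound followed by vminq_s32 against the upper bound. A generic sketch, with the helper name and parameters purely illustrative:

#include <arm_neon.h>

// Illustrative helper: clamp each signed 32-bit lane of v into [lo, hi].
static inline int32x4_t clamp_s32(int32x4_t v, int32_t lo, int32_t hi) {
  return vminq_s32(vmaxq_s32(v, vdupq_n_s32(lo)), vdupq_n_s32(hi));
}
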
/external/webrtc/common_audio/signal_processing/
min_max_operations_neon.c:261 min32x4_0 = vminq_s32(min32x4_0, in32x4_0); in WebRtcSpl_MinValueW32Neon()
262 min32x4_1 = vminq_s32(min32x4_1, in32x4_1); in WebRtcSpl_MinValueW32Neon()
265 int32x4_t min32x4 = vminq_s32(min32x4_0, min32x4_1); in WebRtcSpl_MinValueW32Neon()
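
WebRtcSpl_MinValueW32Neon above folds an array into vector accumulators with vminq_s32 and then reduces across lanes. A simplified sketch of that pattern, assuming a single accumulator and names of my own choosing (the WebRTC version uses two accumulators and different tail handling):

#include <arm_neon.h>
#include <stddef.h>
#include <stdint.h>

// Simplified min reduction over an int32 array using one vector accumulator.
static int32_t min_s32_array(const int32_t* data, size_t n) {
  int32x4_t acc = vdupq_n_s32(INT32_MAX);
  size_t i = 0;
  for (; i + 4 <= n; i += 4) {
    acc = vminq_s32(acc, vld1q_s32(data + i));  // lane-wise running minimum
  }
  // Reduce the four lanes to a scalar with pairwise minimums.
  int32x2_t acc2 = vmin_s32(vget_low_s32(acc), vget_high_s32(acc));
  acc2 = vpmin_s32(acc2, acc2);
  int32_t result = vget_lane_s32(acc2, 0);
  for (; i < n; ++i) {                          // scalar tail
    if (data[i] < result) result = data[i];
  }
  return result;
}
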
/external/libaom/libaom/av1/common/arm/
convolve_neon.h:110 sum_0 = vminq_s32(sum_0, round_vec_1); in wiener_convolve8_horiz_8x8()
111 sum_1 = vminq_s32(sum_1, round_vec_1); in wiener_convolve8_horiz_8x8()
153 sum_0 = vminq_s32(sum_0, round_vec_1); in wiener_convolve8_horiz_4x8()
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
neon_tensor_utils.cc:884 vmaxq_s32(vminq_s32(temp_val.val[0], max_val_dup), min_val_dup); in NeonMatrixBatchVectorAccumulateImpl()
886 vmaxq_s32(vminq_s32(temp_val.val[1], max_val_dup), min_val_dup); in NeonMatrixBatchVectorAccumulateImpl()
949 vmaxq_s32(vminq_s32(temp_val.val[0], max_val_dup), min_val_dup); in NeonMatrixBatchVectorAccumulateImpl()
951 vmaxq_s32(vminq_s32(temp_val.val[1], max_val_dup), min_val_dup); in NeonMatrixBatchVectorAccumulateImpl()
953 vmaxq_s32(vminq_s32(temp_val.val[2], max_val_dup), min_val_dup); in NeonMatrixBatchVectorAccumulateImpl()
955 vmaxq_s32(vminq_s32(temp_val.val[3], max_val_dup), min_val_dup); in NeonMatrixBatchVectorAccumulateImpl()
1787 vmaxq_s32(vminq_s32(temp_val.val[0], max_val_dup), min_val_dup); in NeonCwiseMul()
1789 vmaxq_s32(vminq_s32(temp_val.val[1], max_val_dup), min_val_dup); in NeonCwiseMul()
2340 int32x4_t min0_i32x4 = vminq_s32(max0_i32x4, scale_i32x4); in NeonSymmetricQuantizeFloats()
2341 int32x4_t min1_i32x4 = vminq_s32(max1_i32x4, scale_i32x4); in NeonSymmetricQuantizeFloats()
[all …]
optimized_ops.h:1046 temp_sum.val[0] = vminq_s32(vmaxq_s32(temp_sum.val[0], min_dup), max_dup); in MeanImpl()
1047 temp_sum.val[1] = vminq_s32(vmaxq_s32(temp_sum.val[1], min_dup), max_dup); in MeanImpl()
1048 temp_sum.val[2] = vminq_s32(vmaxq_s32(temp_sum.val[2], min_dup), max_dup); in MeanImpl()
1049 temp_sum.val[3] = vminq_s32(vmaxq_s32(temp_sum.val[3], min_dup), max_dup); in MeanImpl()
4041 vmaxq_s32(vminq_s32(temp_val.val[0], max_val_dup), min_val_dup); in SoftmaxInt8LUT()
4043 vmaxq_s32(vminq_s32(temp_val.val[1], max_val_dup), min_val_dup); in SoftmaxInt8LUT()
4045 vmaxq_s32(vminq_s32(temp_val.val[2], max_val_dup), min_val_dup); in SoftmaxInt8LUT()
4047 vmaxq_s32(vminq_s32(temp_val.val[3], max_val_dup), min_val_dup); in SoftmaxInt8LUT()
5813 vmaxq_s32(vminq_s32(temp_val.val[0], max_val_dup), min_val_dup); in Quantize()
5815 vmaxq_s32(vminq_s32(temp_val.val[1], max_val_dup), min_val_dup); in Quantize()
[all …]
depthwiseconv_uint8.h:1888 acc[j] = vminq_s32(acc[j], output_activation_max_vec);
1930 acc0 = vminq_s32(acc0, output_activation_max_vec);
1931 acc1 = vminq_s32(acc1, output_activation_max_vec);
1961 acc = vminq_s32(acc, output_activation_max_vec);
/external/libopus/silk/arm/
LPC_inv_pred_gain_neon_intr.c:129 min_s32x4 = vminq_s32( min_s32x4, s0_s32x4 ); in LPC_inverse_pred_gain_QA_neon()
131 min_s32x4 = vminq_s32( min_s32x4, s1_s32x4 ); in LPC_inverse_pred_gain_QA_neon()
NSQ_del_dec_neon_intr.c:698 tmp1_s32x4 = vminq_s32( tmp1_s32x4, vdupq_n_s32( 30 << 10 ) ); in silk_noise_shape_quantizer_del_dec_neon()
754 tmp1_s32x4 = vaddq_s32( tmp2_s32x4, vminq_s32( rd1_Q10_s32x4, rd2_Q10_s32x4 ) ); in silk_noise_shape_quantizer_del_dec_neon()
/external/XNNPACK/src/math/
exp-neonfma-rr2-p5.c:62 ven = vminq_s32(ven, vmax_exponent); in xnn_math_f32_exp__neonfma_rr2_p5()
exp-neonfma-rr2-lut64-p2.c:64 ven = vminq_s32(ven, vmax_exponent); in xnn_math_f32_exp__neonfma_rr2_lut64_p2()
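
In the XNNPACK exp kernels above, vminq_s32 bounds the integer exponent before it is moved into the exponent field of a float. The following is a hedged sketch of the general technique only; the bounds, bias, and names here are assumptions, not the actual XNNPACK constants:

#include <arm_neon.h>

// Illustrative: clamp an integer exponent n into a safe range, then build
// 2^n as a float by shifting the biased exponent into bits 23..30.
static inline float32x4_t pow2_from_int(int32x4_t n) {
  n = vminq_s32(n, vdupq_n_s32(127));    // avoid overflow to infinity
  n = vmaxq_s32(n, vdupq_n_s32(-126));   // stay in the normal range
  const int32x4_t biased = vaddq_s32(n, vdupq_n_s32(127));
  return vreinterpretq_f32_s32(vshlq_n_s32(biased, 23));
}
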
/external/skqp/src/core/
SkBitmapProcState_matrixProcs.cpp:326 res = vminq_s32(res, vdupq_n_s32(max)); in clamp4()
/external/gemmlowp/internal/
simd_wrappers_neon.h:100 inline Int32x4 Min(Int32x4 a, Int32x4 b) { return vminq_s32(a, b); }
/external/tensorflow/tensorflow/lite/kernels/
cpu_backend_gemm_custom_gemv.h:577 vminq_s32(multiplier_exponent, vdupq_n_s32(0));
/external/eigen/Eigen/src/Core/arch/NEON/
PacketMath.h:216 …ONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vminq_s32(a,b); }
/external/skia/include/private/
SkNx_neon.h:540 AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_s32(a.fVec, b.fVec); } in Min()
/external/libaom/libaom/aom_dsp/simd/
v128_intrinsics_arm.h:407 vminq_s32(vreinterpretq_s32_s64(x), vreinterpretq_s32_s64(y))); in v128_min_s32()
/external/skqp/include/private/
SkNx_neon.h:562 AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_s32(a.fVec, b.fVec); } in Min()
/external/libgav1/libgav1/src/dsp/arm/
inverse_transform_10bit_neon.cc:181 *a = vmaxq_s32(vminq_s32(x, *max), *min); in HadamardRotation()
182 *b = vmaxq_s32(vminq_s32(y, *max), *min); in HadamardRotation()
/external/psimd/include/
psimd.h:898 return (psimd_s32) vminq_s32((int32x4_t) a, (int32x4_t) b); in psimd_min_s32()
/external/pffft/
sse2neon.h:3548 vminq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); in _mm_min_epi32()
/external/llvm-project/clang/test/CodeGen/
aarch64-neon-intrinsics.c:3949 return vminq_s32(a, b); in test_vminq_s32()
