/external/gemmlowp/internal/

  fixedpoint_neon.h
    28: inline int32x4_t BitAnd(int32x4_t a, int32x4_t b) {  in BitAnd()
    33: inline int32x4_t BitOr(int32x4_t a, int32x4_t b) {  in BitOr()
    38: inline int32x4_t BitXor(int32x4_t a, int32x4_t b) {  in BitXor()
    43: inline int32x4_t BitNot(int32x4_t a) {  in BitNot()
    48: inline int32x4_t Add(int32x4_t a, int32x4_t b) {  in Add()
    53: inline int32x4_t Sub(int32x4_t a, int32x4_t b) {  in Sub()
    58: inline int32x4_t Neg(int32x4_t a) {  in Neg()
    63: inline int32x4_t ShiftLeft(int32x4_t a, int offset) {  in ShiftLeft()
    68: inline int32x4_t ShiftRight(int32x4_t a, int offset) {  in ShiftRight()
    73: inline int32x4_t SelectUsingMask(int32x4_t if_mask, int32x4_t then_val,  in SelectUsingMask()
    [all …]

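These wrappers give gemmlowp's fixed-point code one vocabulary over NEON registers. A minimal sketch of what they plausibly expand to, assuming a one-to-one mapping onto the standard intrinsics (the upstream definitions may differ in detail):

    #include <arm_neon.h>

    static inline int32x4_t BitAnd(int32x4_t a, int32x4_t b) { return vandq_s32(a, b); }
    static inline int32x4_t BitNot(int32x4_t a) { return veorq_s32(a, vdupq_n_s32(-1)); }
    static inline int32x4_t Add(int32x4_t a, int32x4_t b) { return vaddq_s32(a, b); }

    /* vshlq shifts by a per-lane count; a negative count shifts right. */
    static inline int32x4_t ShiftLeft(int32x4_t a, int offset) {
      return vshlq_s32(a, vdupq_n_s32(offset));
    }

    /* vbslq keeps then_val bits where the mask bit is 1, else_val bits where 0. */
    static inline int32x4_t SelectUsingMask(int32x4_t if_mask, int32x4_t then_val,
                                            int32x4_t else_val) {
      return vbslq_s32(vreinterpretq_u32_s32(if_mask), then_val, else_val);
    }
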
  unpack_neon.h
    28: int32x4_t RoundingMultiplyByConstantFraction(int32x4_t x) {  in RoundingMultiplyByConstantFraction()
    45: const int32x4_t remaining_product =  in RoundingMultiplyByConstantFraction()
    52: int32x4_t get_int32x4_t_and_inc(  in get_int32x4_t_and_inc()
    54: const int32x4_t result = vld1q_s32(iterator->get());  in get_int32x4_t_and_inc()
    60: int32x4_t get_int32x4_t_and_inc(  in get_int32x4_t_and_inc()
    62: const int32x4_t result = vdupq_n_s32(**iterator);  in get_int32x4_t_and_inc()
    107: int32x4_t raw_xx[4];
    112: int32x4_t raw_x1[4];
    114: const int32x4_t sum_x1 = vld1q_s32(sums_of_each_slice_ptr);
    118: int32x4_t raw_1x[4];
    [all …]

  output_neon.h
    27: typedef Fragment<int32x4_t, 4, 1, MapOrder::ColMajor> NEONFragmentInt32x4x1;
    80: const int32x4_t a = vaddq_s32(input, vdupq_n_s32(result_offset));
    81: const int32x4_t b =
    106: const int32x4_t result_mult_int =
    108: const int32x4_t result_offset =
    110: const int32x4_t a = vaddq_s32(input, result_offset);
    111: const int32x4_t b =
    136: const int32x4_t result_mult_int =
    138: const int32x4_t result_offset =
    140: const int32x4_t a = vaddq_s32(input, result_offset);
    [all …]

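The fragments trace gemmlowp's quantize-down output stage: add an offset, scale by a multiplier, shift back to the narrow range. A hedged sketch using the names from the listing plus an assumed result_shift parameter (helper name ours, not gemmlowp's API):

    #include <arm_neon.h>
    #include <stdint.h>

    /* illustrative helper; gemmlowp wraps this machinery in output pipeline types */
    static inline int32x4_t RequantizeInt32x4(int32x4_t input, int32_t result_offset,
                                              int32_t result_mult_int, int result_shift) {
      const int32x4_t a = vaddq_s32(input, vdupq_n_s32(result_offset));
      const int32x4_t b = vmulq_n_s32(a, result_mult_int);
      /* rounding right shift: vrshlq with a negative count shifts right */
      return vrshlq_s32(b, vdupq_n_s32(-result_shift));
    }
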
/external/clang/test/CodeGen/

  aarch64-neon-2velem.c
    39: int32x4_t test_vmlaq_lane_s32(int32x4_t a, int32x4_t b, int32x2_t v) {  in test_vmlaq_lane_s32()
    66: int32x2_t test_vmla_laneq_s32(int32x2_t a, int32x2_t b, int32x4_t v) {  in test_vmla_laneq_s32()
    75: int32x4_t test_vmlaq_laneq_s32(int32x4_t a, int32x4_t b, int32x4_t v) {  in test_vmlaq_laneq_s32()
    111: int32x4_t test_vmlsq_lane_s32(int32x4_t a, int32x4_t b, int32x2_t v) {  in test_vmlsq_lane_s32()
    138: int32x2_t test_vmls_laneq_s32(int32x2_t a, int32x2_t b, int32x4_t v) {  in test_vmls_laneq_s32()
    147: int32x4_t test_vmlsq_laneq_s32(int32x4_t a, int32x4_t b, int32x4_t v) {  in test_vmlsq_laneq_s32()
    179: int32x4_t test_vmulq_lane_s32(int32x4_t a, int32x2_t v) {  in test_vmulq_lane_s32()
    235: int32x2_t test_vmul_laneq_s32(int32x2_t a, int32x4_t v) {  in test_vmul_laneq_s32()
    243: int32x4_t test_vmulq_laneq_s32(int32x4_t a, int32x4_t v) {  in test_vmulq_laneq_s32()
    505: int32x4_t test_vmlal_lane_s16(int32x4_t a, int16x4_t b, int16x4_t v) {  in test_vmlal_lane_s16()
    [all …]

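All of these tests exercise the by-lane multiply forms: _lane_ selects an element from a 64-bit vector, _laneq_ (AArch64) from a 128-bit one, and the lane index must be a compile-time constant. Usage, for example:

    #include <arm_neon.h>

    int32x4_t mla_by_lane(int32x4_t acc, int32x4_t b, int32x2_t v) {
      return vmlaq_lane_s32(acc, b, v, 1);   /* acc + b * v[1] */
    }

    int32x4_t mls_by_laneq(int32x4_t acc, int32x4_t b, int32x4_t v) {
      return vmlsq_laneq_s32(acc, b, v, 3);  /* acc - b * v[3], AArch64 only */
    }
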
  arm-v8.1a-neon-intrinsics.c
    44: int32x4_t test_vqrdmlahq_s32(int32x4_t a, int32x4_t b, int32x4_t c) {  in test_vqrdmlahq_s32()
    90: int32x4_t test_vqrdmlahq_lane_s32(int32x4_t a, int32x4_t b, int32x2_t c) {  in test_vqrdmlahq_lane_s32()
    132: int32x4_t test_vqrdmlshq_s32(int32x4_t a, int32x4_t b, int32x4_t c) {  in test_vqrdmlshq_s32()
    178: int32x4_t test_vqrdmlshq_lane_s32(int32x4_t a, int32x4_t b, int32x2_t c) {  in test_vqrdmlshq_lane_s32()

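vqrdmlah and vqrdmlsh are the ARMv8.1-A additions under test: they fuse the saturating rounding doubling multiply-high of vqrdmulh with a saturating accumulate. A usage sketch (real code guards this with __ARM_FEATURE_QRDMX):

    #include <arm_neon.h>

    int32x4_t rdm_mla(int32x4_t acc, int32x4_t a, int32x4_t b) {
      /* sat(acc + ((2*a*b + (1 << 31)) >> 32)) per lane */
      return vqrdmlahq_s32(acc, a, b);
    }

    int32x4_t rdm_mls(int32x4_t acc, int32x4_t a, int32x4_t b) {
      /* sat(acc - ((2*a*b + (1 << 31)) >> 32)) per lane */
      return vqrdmlshq_s32(acc, a, b);
    }
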
  aarch64-v8.1a-neon-intrinsics.c
    17: int32x2_t test_vqrdmlah_laneq_s32(int32x2_t a, int32x2_t b, int32x4_t v) {  in test_vqrdmlah_laneq_s32()
    33: int32x4_t test_vqrdmlahq_laneq_s32(int32x4_t a, int32x4_t b, int32x4_t v) {  in test_vqrdmlahq_laneq_s32()
    97: int32_t test_vqrdmlahs_laneq_s32(int32_t a, int32_t b, int32x4_t c) {  in test_vqrdmlahs_laneq_s32()
    113: int32x2_t test_vqrdmlsh_laneq_s32(int32x2_t a, int32x2_t b, int32x4_t v) {  in test_vqrdmlsh_laneq_s32()
    129: int32x4_t test_vqrdmlshq_laneq_s32(int32x4_t a, int32x4_t b, int32x4_t v) {  in test_vqrdmlshq_laneq_s32()
    193: int32_t test_vqrdmlshs_laneq_s32(int32_t a, int32_t b, int32x4_t c) {  in test_vqrdmlshs_laneq_s32()

  aarch64-neon-misc.c
    91: uint32x4_t test_vceqzq_s32(int32x4_t a) {  in test_vceqzq_s32()
    319: uint32x4_t test_vcgezq_s32(int32x4_t a) {  in test_vcgezq_s32()
    435: uint32x4_t test_vclezq_s32(int32x4_t a) {  in test_vclezq_s32()
    551: uint32x4_t test_vcgtzq_s32(int32x4_t a) {  in test_vcgtzq_s32()
    667: uint32x4_t test_vcltzq_s32(int32x4_t a) {  in test_vcltzq_s32()
    927: int32x4_t test_vrev64q_s32(int32x4_t a) {  in test_vrev64q_s32()
    1035: int32x4_t test_vpaddlq_s16(int16x8_t a) {  in test_vpaddlq_s16()
    1044: int64x2_t test_vpaddlq_s32(int32x4_t a) {  in test_vpaddlq_s32()
    1159: int32x4_t test_vpadalq_s16(int32x4_t a, int16x8_t b) {  in test_vpadalq_s16()
    1171: int64x2_t test_vpadalq_s32(int64x2_t a, int32x4_t b) {  in test_vpadalq_s32()
    [all …]

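Two families dominate this file: the compare-against-zero intrinsics, which return an all-ones/all-zeros unsigned mask per lane, and the pairwise widening adds. Illustrative usage:

    #include <arm_neon.h>

    uint32x4_t negative_mask(int32x4_t a) {
      return vcltzq_s32(a);        /* AArch64: lane-wise a < 0 */
    }

    int32x4_t pair_sums(int16x8_t a) {
      return vpaddlq_s16(a);       /* eight s16 lanes -> four s32 pair sums */
    }

    int32x4_t pair_sums_acc(int32x4_t acc, int16x8_t b) {
      return vpadalq_s16(acc, b);  /* acc += pairwise sums of b */
    }
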
  arm_neon_intrinsics.c
    113: int32x4_t test_vabaq_s32(int32x4_t a, int32x4_t b, int32x4_t c) {  in test_vabaq_s32()
    176: int32x4_t test_vabal_s16(int32x4_t a, int16x4_t b, int16x4_t c) {  in test_vabal_s16()
    349: int32x4_t test_vabdq_s32(int32x4_t a, int32x4_t b) {  in test_vabdq_s32()
    420: int32x4_t test_vabdl_s16(int16x4_t a, int16x4_t b) {  in test_vabdl_s16()
    536: int32x4_t test_vabsq_s32(int32x4_t a) {  in test_vabsq_s32()
    630: int32x4_t test_vaddq_s32(int32x4_t a, int32x4_t b) {  in test_vaddq_s32()
    699: int16x4_t test_vaddhn_s32(int32x4_t a, int32x4_t b) {  in test_vaddhn_s32()
    774: int32x4_t test_vaddl_s16(int16x4_t a, int16x4_t b) {  in test_vaddl_s16()
    841: int32x4_t test_vaddw_s16(int32x4_t a, int16x4_t b) {  in test_vaddw_s16()
    957: int32x4_t test_vandq_s32(int32x4_t a, int32x4_t b) {  in test_vandq_s32()
    [all …]

  aarch64-neon-intrinsics.c
    90: int32x4_t test_vaddq_s32(int32x4_t v1,int32x4_t v2) {  in test_vaddq_s32()
    221: int32x4_t test_vsubq_s32(int32x4_t v1,int32x4_t v2) {  in test_vsubq_s32()
    341: int32x4_t test_vmulq_s32(int32x4_t v1, int32x4_t v2) {  in test_vmulq_s32()
    476: int32x4_t test_vmlaq_s32(int32x4_t v1, int32x4_t v2, int32x4_t v3) {  in test_vmlaq_s32()
    596: int32x4_t test_vmlsq_s32(int32x4_t v1, int32x4_t v2, int32x4_t v3) {  in test_vmlsq_s32()
    831: int32x4_t test_vabaq_s32(int32x4_t v1, int32x4_t v2, int32x4_t v3) {  in test_vabaq_s32()
    961: int32x4_t test_vabdq_s32(int32x4_t v1, int32x4_t v2) {  in test_vabdq_s32()
    1233: int32x4_t test_vbslq_s32(uint32x4_t v1, int32x4_t v2, int32x4_t v3) {  in test_vbslq_s32()
    1291: int32x4_t test_vbslq_u32(uint32x4_t v1, int32x4_t v2, int32x4_t v3) {  in test_vbslq_u32()
    1732: uint32x4_t test_vtstq_s32(int32x4_t v1, int32x4_t v2) {  in test_vtstq_s32()
    [all …]

  aarch64-neon-3v.c
    45: int32x4_t test_vandq_s32(int32x4_t a, int32x4_t b) {  in test_vandq_s32()
    157: int32x4_t test_vorrq_s32(int32x4_t a, int32x4_t b) {  in test_vorrq_s32()
    269: int32x4_t test_veorq_s32(int32x4_t a, int32x4_t b) {  in test_veorq_s32()
    387: int32x4_t test_vbicq_s32(int32x4_t a, int32x4_t b) {  in test_vbicq_s32()
    515: int32x4_t test_vornq_s32(int32x4_t a, int32x4_t b) {  in test_vornq_s32()

  arm64-vrnd.c
    7: int32x4_t rnd3(float32x4_t a) { return vrndq_f32(a); }  in rnd3()
    15: int32x4_t rnd8(float32x4_t a) { return vrndnq_f32(a); }  in rnd8()
    24: int32x4_t rnd12(float32x4_t a) { return vrndmq_f32(a); }  in rnd12()
    33: int32x4_t rnd16(float32x4_t a) { return vrndpq_f32(a); }  in rnd16()
    40: int32x4_t rnd20(float32x4_t a) { return vrndaq_f32(a); }  in rnd20()
    47: int32x4_t rnd24(float32x4_t a) { return vrndxq_f32(a); }  in rnd24()

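Note that the vrnd* family rounds float lanes to integral values that stay in float32x4_t; the int32x4_t return type of these test functions apparently leans on clang's lax vector conversions. An actual float-to-int conversion with directed rounding looks like:

    #include <arm_neon.h>

    int32x4_t round_to_int(float32x4_t a) {
      return vcvtnq_s32_f32(a);  /* convert, round to nearest even (AArch64) */
    }

    float32x4_t round_in_float(float32x4_t a) {
      return vrndnq_f32(a);      /* round to nearest even, lanes stay float */
    }
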
  aarch64-neon-perm.c
    45: int32x4_t test_vuzp1q_s32(int32x4_t a, int32x4_t b) {  in test_vuzp1q_s32()
    192: int32x4_t test_vuzp2q_s32(int32x4_t a, int32x4_t b) {  in test_vuzp2q_s32()
    339: int32x4_t test_vzip1q_s32(int32x4_t a, int32x4_t b) {  in test_vzip1q_s32()
    486: int32x4_t test_vzip2q_s32(int32x4_t a, int32x4_t b) {  in test_vzip2q_s32()
    633: int32x4_t test_vtrn1q_s32(int32x4_t a, int32x4_t b) {  in test_vtrn1q_s32()
    780: int32x4_t test_vtrn2q_s32(int32x4_t a, int32x4_t b) {  in test_vtrn2q_s32()
    1195: int32x4x2_t test_vuzpq_s32(int32x4_t a, int32x4_t b) {  in test_vuzpq_s32()
    1659: int32x4x2_t test_vzipq_s32(int32x4_t a, int32x4_t b) {  in test_vzipq_s32()
    2123: int32x4x2_t test_vtrnq_s32(int32x4_t a, int32x4_t b) {  in test_vtrnq_s32()

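The three permute families tested here each take two int32x4_t inputs: zip interleaves them, uzp de-interleaves, and trn transposes 2x2 blocks, with the 1/2 suffix selecting the even or odd half of the result. For instance:

    #include <arm_neon.h>

    void zip_demo(int32x4_t a, int32x4_t b, int32x4_t *lo, int32x4_t *hi) {
      *lo = vzip1q_s32(a, b);  /* a0 b0 a1 b1 */
      *hi = vzip2q_s32(a, b);  /* a2 b2 a3 b3 */
    }
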
/external/libopus/silk/arm/

  NSQ_neon.c
    40: int32x4_t coef0 = vld1q_s32(coef32);  in silk_noise_shape_quantizer_short_prediction_neon()
    41: int32x4_t coef1 = vld1q_s32(coef32 + 4);  in silk_noise_shape_quantizer_short_prediction_neon()
    42: int32x4_t coef2 = vld1q_s32(coef32 + 8);  in silk_noise_shape_quantizer_short_prediction_neon()
    43: int32x4_t coef3 = vld1q_s32(coef32 + 12);  in silk_noise_shape_quantizer_short_prediction_neon()
    45: int32x4_t a0 = vld1q_s32(buf32 - 15);  in silk_noise_shape_quantizer_short_prediction_neon()
    46: int32x4_t a1 = vld1q_s32(buf32 - 11);  in silk_noise_shape_quantizer_short_prediction_neon()
    47: int32x4_t a2 = vld1q_s32(buf32 - 7);  in silk_noise_shape_quantizer_short_prediction_neon()
    48: int32x4_t a3 = vld1q_s32(buf32 - 3);  in silk_noise_shape_quantizer_short_prediction_neon()
    50: int32x4_t b0 = vqdmulhq_s32(coef0, a0);  in silk_noise_shape_quantizer_short_prediction_neon()
    51: int32x4_t b1 = vqdmulhq_s32(coef1, a1);  in silk_noise_shape_quantizer_short_prediction_neon()
    [all …]

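The loads at buf32 - 15 … buf32 - 3 walk a 16-sample history window, and vqdmulhq_s32 keeps the coefficient products in the Q-domain. A hedged sketch of the overall shape (helper name ours; the real routine also folds in per-order shifts and corrections):

    #include <arm_neon.h>
    #include <stdint.h>

    /* buf32 points one past the newest sample; order-16 prediction assumed */
    static int32_t short_prediction_sketch(const int32_t *buf32, const int32_t *coef32) {
      int32x4_t b0 = vqdmulhq_s32(vld1q_s32(coef32),      vld1q_s32(buf32 - 15));
      int32x4_t b1 = vqdmulhq_s32(vld1q_s32(coef32 + 4),  vld1q_s32(buf32 - 11));
      int32x4_t b2 = vqdmulhq_s32(vld1q_s32(coef32 + 8),  vld1q_s32(buf32 - 7));
      int32x4_t b3 = vqdmulhq_s32(vld1q_s32(coef32 + 12), vld1q_s32(buf32 - 3));
      int32x4_t sum = vaddq_s32(vaddq_s32(b0, b1), vaddq_s32(b2, b3));
      return vaddvq_s32(sum);  /* AArch64 horizontal add; ARMv7 uses vpadd_s32 twice */
    }
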
/external/webrtc/webrtc/modules/audio_coding/codecs/isac/fix/source/

  transform_neon.c
    35: int32x4_t factq = vdupq_n_s32(fact);  in ComplexMulAndFindMaxNeon()
    50: int32x4_t tmp0 = vmull_s16(vget_low_s16(tmpr), vget_low_s16(inre1));  in ComplexMulAndFindMaxNeon()
    51: int32x4_t tmp1 = vmull_s16(vget_low_s16(tmpr), vget_low_s16(inre2));  in ComplexMulAndFindMaxNeon()
    55: int32x4_t tmp2 = vmull_high_s16(tmpr, inre1);  in ComplexMulAndFindMaxNeon()
    56: int32x4_t tmp3 = vmull_high_s16(tmpr, inre2);  in ComplexMulAndFindMaxNeon()
    60: int32x4_t tmp2 = vmull_s16(vget_high_s16(tmpr), vget_high_s16(inre1));  in ComplexMulAndFindMaxNeon()
    61: int32x4_t tmp3 = vmull_s16(vget_high_s16(tmpr), vget_high_s16(inre2));  in ComplexMulAndFindMaxNeon()
    66: int32x4_t outr_0 = vqdmulhq_s32(tmp0, factq);  in ComplexMulAndFindMaxNeon()
    67: int32x4_t outr_1 = vqdmulhq_s32(tmp2, factq);  in ComplexMulAndFindMaxNeon()
    68: int32x4_t outi_0 = vqdmulhq_s32(tmp1, factq);  in ComplexMulAndFindMaxNeon()
    [all …]

  entropy_coding_neon.c
    44: int32x4_t shift32x4 = vdupq_n_s32(shift);  in WebRtcIsacfix_MatrixProduct1Neon()
    46: int32x4_t sum_32x4 = vdupq_n_s32(0);  in WebRtcIsacfix_MatrixProduct1Neon()
    60: int32x4_t matrix0_32x4 =  in WebRtcIsacfix_MatrixProduct1Neon()
    62: int32x4_t matrix1_32x4 =  in WebRtcIsacfix_MatrixProduct1Neon()
    64: int32x4_t multi_32x4 = vqdmulhq_s32(matrix0_32x4, matrix1_32x4);  in WebRtcIsacfix_MatrixProduct1Neon()
    102: int32x4_t matrix1_32x4 = vdupq_n_s32(matrix1[matrix1_index] << shift);  in WebRtcIsacfix_MatrixProduct1Neon()
    103: int32x4_t matrix0_32x4 =  in WebRtcIsacfix_MatrixProduct1Neon()
    105: int32x4_t multi_32x4 = vqdmulhq_s32(matrix0_32x4, matrix1_32x4);  in WebRtcIsacfix_MatrixProduct1Neon()
    146: int32x4_t matrix1_32x4 =  in WebRtcIsacfix_MatrixProduct1Neon()
    148: int32x4_t matrix0_32x4 =  in WebRtcIsacfix_MatrixProduct1Neon()
    [all …]

/external/skia/src/opts/

  SkBitmapProcState_matrixProcs_neon.cpp
    22: static inline int16x8_t sbpsm_clamp_tile8(int32x4_t low, int32x4_t high, unsigned max) {  in sbpsm_clamp_tile8()
    36: static inline int32x4_t sbpsm_clamp_tile4(int32x4_t f, unsigned max) {  in sbpsm_clamp_tile4()
    37: int32x4_t res;  in sbpsm_clamp_tile4()
    50: static inline int32x4_t sbpsm_clamp_tile4_low_bits(int32x4_t fx) {  in sbpsm_clamp_tile4_low_bits()
    51: int32x4_t ret;  in sbpsm_clamp_tile4_low_bits()
    64: static inline int16x8_t sbpsm_repeat_tile8(int32x4_t low, int32x4_t high, unsigned max) {  in sbpsm_repeat_tile8()
    82: static inline int32x4_t sbpsm_repeat_tile4(int32x4_t f, unsigned max) {  in sbpsm_repeat_tile4()
    99: static inline int32x4_t sbpsm_repeat_tile4_low_bits(int32x4_t fx, unsigned max) {  in sbpsm_repeat_tile4_low_bits()
    102: int32x4_t ret;  in sbpsm_repeat_tile4_low_bits()
    150: int32x4_t vdx8 = vdupq_n_s32(dx8);  in decal_nofilter_scale_neon()
    [all …]

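sbpsm_clamp_tile4 pins four fixed-point coordinates into [0, max] before the fractional bits are peeled off. A plausible reconstruction, with the 16.16 layout an assumption on our part:

    #include <arm_neon.h>
    #include <stdint.h>

    /* illustrative only; the Skia original differs in detail */
    static inline int32x4_t clamp_tile4_sketch(int32x4_t f, unsigned max) {
      int32x4_t res = vshrq_n_s32(f, 16);                /* 16.16 -> integer part */
      res = vmaxq_s32(res, vdupq_n_s32(0));              /* clamp below at 0 */
      return vminq_s32(res, vdupq_n_s32((int32_t)max));  /* clamp above at max */
    }
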
/external/libvpx/libvpx/vpx_dsp/arm/

  highbd_idct8x8_add_neon.c
    71: const int32x4_t cospis0, const int32x4_t cospis1, int32x4_t *const io0,  in idct8x8_12_half1d_bd10()
    72: int32x4_t *const io1, int32x4_t *const io2, int32x4_t *const io3,  in idct8x8_12_half1d_bd10()
    73: int32x4_t *const io4, int32x4_t *const io5, int32x4_t *const io6,  in idct8x8_12_half1d_bd10()
    74: int32x4_t *const io7) {  in idct8x8_12_half1d_bd10()
    75: int32x4_t step1[8], step2[8];  in idct8x8_12_half1d_bd10()
    126: const int32x4_t cospis0, const int32x4_t cospis1, int32x4_t *const io0,  in idct8x8_12_half1d_bd12()
    127: int32x4_t *const io1, int32x4_t *const io2, int32x4_t *const io3,  in idct8x8_12_half1d_bd12()
    128: int32x4_t *const io4, int32x4_t *const io5, int32x4_t *const io6,  in idct8x8_12_half1d_bd12()
    129: int32x4_t *const io7) {  in idct8x8_12_half1d_bd12()
    132: int32x4_t step1[8], step2[8];  in idct8x8_12_half1d_bd12()
    [all …]

  highbd_idct4x4_add_neon.c
    68: static INLINE void idct4x4_16_kernel_bd10(const int32x4_t cospis,  in idct4x4_16_kernel_bd10()
    69: int32x4_t *const a0,  in idct4x4_16_kernel_bd10()
    70: int32x4_t *const a1,  in idct4x4_16_kernel_bd10()
    71: int32x4_t *const a2,  in idct4x4_16_kernel_bd10()
    72: int32x4_t *const a3) {  in idct4x4_16_kernel_bd10()
    73: int32x4_t b0, b1, b2, b3;  in idct4x4_16_kernel_bd10()
    94: static INLINE void idct4x4_16_kernel_bd12(const int32x4_t cospis,  in idct4x4_16_kernel_bd12()
    95: int32x4_t *const a0,  in idct4x4_16_kernel_bd12()
    96: int32x4_t *const a1,  in idct4x4_16_kernel_bd12()
    97: int32x4_t *const a2,  in idct4x4_16_kernel_bd12()
    [all …]

  fdct_neon.c
    51: const int32x4_t s_0_p_s_1 = vaddl_s16(s_0, s_1);  in vpx_fdct4x4_neon()
    52: const int32x4_t s_0_m_s_1 = vsubl_s16(s_0, s_1);  in vpx_fdct4x4_neon()
    53: const int32x4_t temp1 = vmulq_n_s32(s_0_p_s_1, (int16_t)cospi_16_64);  in vpx_fdct4x4_neon()
    54: const int32x4_t temp2 = vmulq_n_s32(s_0_m_s_1, (int16_t)cospi_16_64);  in vpx_fdct4x4_neon()
    62: const int32x4_t s_3_cospi_8_64 = vmull_n_s16(s_3, (int16_t)cospi_8_64);  in vpx_fdct4x4_neon()
    63: const int32x4_t s_3_cospi_24_64 = vmull_n_s16(s_3, (int16_t)cospi_24_64);  in vpx_fdct4x4_neon()
    65: const int32x4_t temp3 =  in vpx_fdct4x4_neon()
    67: const int32x4_t temp4 =  in vpx_fdct4x4_neon()

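The butterfly is visible in the fragments: widen 16-bit sums and differences to 32 bits, multiply by the cosine constants, then round-shift back down. A sketch of the even-output half, using libvpx's cospi_16_64 value:

    #include <arm_neon.h>

    #define DCT_CONST_BITS 14  /* libvpx's fixed-point precision */

    int16x4_t fdct_even_sketch(int16x4_t s_0, int16x4_t s_1) {
      const int32x4_t sum  = vaddl_s16(s_0, s_1);      /* widen to s32 */
      const int32x4_t prod = vmulq_n_s32(sum, 11585);  /* cospi_16_64 */
      return vrshrn_n_s32(prod, DCT_CONST_BITS);       /* round and narrow */
    }
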
/external/libhevc/common/arm/

  ihevc_weighted_pred_neon_intr.c
    118: int32x4_t i4_tmp1_t;  in ihevc_weighted_pred_uni_neonintr()
    119: int32x4_t i4_tmp2_t;  in ihevc_weighted_pred_uni_neonintr()
    120: int32x4_t sto_res_tmp1;  in ihevc_weighted_pred_uni_neonintr()
    124: int32x4_t tmp_lvl_shift_t;  in ihevc_weighted_pred_uni_neonintr()
    126: int32x4_t tmp_shift_t;  in ihevc_weighted_pred_uni_neonintr()
    251: int32x4_t i4_tmp1_t;  in ihevc_weighted_pred_chroma_uni_neonintr()
    252: int32x4_t i4_tmp2_t;  in ihevc_weighted_pred_chroma_uni_neonintr()
    253: int32x4_t sto_res_tmp1;  in ihevc_weighted_pred_chroma_uni_neonintr()
    257: int32x4_t tmp_lvl_shift_t_u, tmp_lvl_shift_t_v;  in ihevc_weighted_pred_chroma_uni_neonintr()
    260: int32x4_t tmp_shift_t;  in ihevc_weighted_pred_chroma_uni_neonintr()
    [all …]

/external/libvpx/libvpx/vpx_dsp/ppc/

  bitdepth_conversion_vsx.h
    23: int32x4_t u = vec_vsx_ld(c, s);  in load_tran_low()
    24: int32x4_t v = vec_vsx_ld(c, s + 4);  in load_tran_low()
    36: const int32x4_t even = vec_mule(v, one);  in store_tran_low()
    37: const int32x4_t odd = vec_mulo(v, one);  in store_tran_low()
    38: const int32x4_t high = vec_mergeh(even, odd);  in store_tran_low()
    39: const int32x4_t low = vec_mergel(even, odd);  in store_tran_low()

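Despite the name, this int32x4_t is not the NEON type: the PowerPC port typedefs it over an AltiVec vector so shared helper code reads the same on both architectures. A minimal sketch of that aliasing (the exact header location is an assumption):

    #include <altivec.h>
    #include <stdint.h>

    typedef vector signed int int32x4_t;  /* as in vpx_dsp's VSX type header */

    int32x4_t load_s32x4(const int32_t *s) {
      return vec_vsx_ld(0, s);            /* VSX load, no 16-byte alignment needed */
    }
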
/external/webrtc/webrtc/common_audio/signal_processing/

  min_max_operations_neon.c
    87: int32x4_t in32x4_0 = vld1q_s32(p_start);  in WebRtcSpl_MaxAbsValueW32Neon()
    89: int32x4_t in32x4_1 = vld1q_s32(p_start);  in WebRtcSpl_MaxAbsValueW32Neon()
    172: int32x4_t max32x4_0 = vdupq_n_s32(WEBRTC_SPL_WORD32_MIN);  in WebRtcSpl_MaxValueW32Neon()
    173: int32x4_t max32x4_1 = vdupq_n_s32(WEBRTC_SPL_WORD32_MIN);  in WebRtcSpl_MaxValueW32Neon()
    177: int32x4_t in32x4_0 = vld1q_s32(p_start);  in WebRtcSpl_MaxValueW32Neon()
    179: int32x4_t in32x4_1 = vld1q_s32(p_start);  in WebRtcSpl_MaxValueW32Neon()
    185: int32x4_t max32x4 = vmaxq_s32(max32x4_0, max32x4_1);  in WebRtcSpl_MaxValueW32Neon()
    252: int32x4_t min32x4_0 = vdupq_n_s32(WEBRTC_SPL_WORD32_MAX);  in WebRtcSpl_MinValueW32Neon()
    253: int32x4_t min32x4_1 = vdupq_n_s32(WEBRTC_SPL_WORD32_MAX);  in WebRtcSpl_MinValueW32Neon()
    257: int32x4_t in32x4_0 = vld1q_s32(p_start);  in WebRtcSpl_MinValueW32Neon()
    [all …]

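The kernels all follow one reduction pattern: keep two running extrema so back-to-back loads don't serialize, merge them once after the loop, then reduce across lanes. A sketch (helper name ours; length assumed to be a positive multiple of 8):

    #include <arm_neon.h>
    #include <stddef.h>
    #include <stdint.h>

    int32_t max_value_w32_sketch(const int32_t *p, size_t length) {
      int32x4_t max0 = vdupq_n_s32(INT32_MIN);
      int32x4_t max1 = vdupq_n_s32(INT32_MIN);
      for (size_t i = 0; i < length; i += 8) {
        max0 = vmaxq_s32(max0, vld1q_s32(p + i));
        max1 = vmaxq_s32(max1, vld1q_s32(p + i + 4));
      }
      return vmaxvq_s32(vmaxq_s32(max0, max1));  /* AArch64 horizontal max */
    }
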
/external/webrtc/webrtc/modules/audio_processing/ns/

  nsx_core_neon.c
    64: int32x4_t twentyOne32x4 = vdupq_n_s32(21);  in UpdateNoiseEstimateNeon()
    65: int32x4_t constA32x4 = vdupq_n_s32(0x1fffff);  in UpdateNoiseEstimateNeon()
    66: int32x4_t constB32x4 = vdupq_n_s32(0x200000);  in UpdateNoiseEstimateNeon()
    76: int32x4_t qNoise32x4 = vdupq_n_s32(inst->qNoise);  in UpdateNoiseEstimateNeon()
    85: int32x4_t v32x4B = vmull_s16(v16x4, kExp2Const16x4);  in UpdateNoiseEstimateNeon()
    88: int32x4_t v32x4A = vandq_s32(v32x4B, constA32x4);  in UpdateNoiseEstimateNeon()
    209: int32x4_t tmp32x4;  in WebRtcNsx_NoiseEstimationNeon()
    380: int32x4_t tmp_r_0 = vmull_s16(vget_low_s16(real), vget_low_s16(ns_filter));  in WebRtcNsx_PrepareSpectrumNeon()
    381: int32x4_t tmp_i_0 = vmull_s16(vget_low_s16(imag), vget_low_s16(ns_filter));  in WebRtcNsx_PrepareSpectrumNeon()
    382: int32x4_t tmp_r_1 = vmull_s16(vget_high_s16(real),  in WebRtcNsx_PrepareSpectrumNeon()
    [all …]

/external/webp/src/dsp/

  enc_neon.c
    306: const int32x4_t kCst937 = vdupq_n_s32(937);  in FTransform()
    307: const int32x4_t kCst1812 = vdupq_n_s32(1812);  in FTransform()
    315: const int32x4_t a3_2217 = vmull_n_s16(vget_low_s16(a3a2), 2217);  in FTransform()
    316: const int32x4_t a2_2217 = vmull_n_s16(vget_high_s16(a3a2), 2217);  in FTransform()
    317: const int32x4_t a2_p_a3 = vmlal_n_s16(a2_2217, vget_low_s16(a3a2), 5352);  in FTransform()
    318: const int32x4_t a3_m_a2 = vmlsl_n_s16(a3_2217, vget_high_s16(a3a2), 5352);  in FTransform()
    325: const int32x4_t kCst12000 = vdupq_n_s32(12000 + (1 << 16));  in FTransform()
    326: const int32x4_t kCst51000 = vdupq_n_s32(51000);  in FTransform()
    332: const int32x4_t a3_2217 = vmull_n_s16(vget_low_s16(a3a2), 2217);  in FTransform()
    333: const int32x4_t a2_2217 = vmull_n_s16(vget_high_s16(a3a2), 2217);  in FTransform()
    [all …]

/external/libopus/celt/arm/

  celt_neon_intr.c
    44: int32x4_t a = vld1q_s32(sum);  in xcorr_kernel_neon_fixed()
    60: int32x4_t a0 = vmlal_lane_s16(a, y0, x0, 0);  in xcorr_kernel_neon_fixed()
    61: int32x4_t a1 = vmlal_lane_s16(a0, y4, x4, 0);  in xcorr_kernel_neon_fixed()
    65: int32x4_t a2 = vmlal_lane_s16(a1, y1, x0, 1);  in xcorr_kernel_neon_fixed()
    66: int32x4_t a3 = vmlal_lane_s16(a2, y5, x4, 1);  in xcorr_kernel_neon_fixed()
    70: int32x4_t a4 = vmlal_lane_s16(a3, y2, x0, 2);  in xcorr_kernel_neon_fixed()
    71: int32x4_t a5 = vmlal_lane_s16(a4, y6, x4, 2);  in xcorr_kernel_neon_fixed()
    75: int32x4_t a6 = vmlal_lane_s16(a5, y3, x0, 3);  in xcorr_kernel_neon_fixed()
    76: int32x4_t a7 = vmlal_lane_s16(a6, y7, x4, 3);  in xcorr_kernel_neon_fixed()
    87: int32x4_t a0 = vmlal_s16(a, y0, x0);  in xcorr_kernel_neon_fixed()

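Each vmlal_lane_s16 step accumulates y advanced by one sample, scaled by one element of x, into the four running correlation sums; the chained a0…a7 in the listing are eight such steps. The inner pattern, isolated (helper name ours):

    #include <arm_neon.h>

    /* sums += x0[lane] * y_lane for each of the four lane offsets */
    int32x4_t xcorr_step(int32x4_t sums, int16x4_t x0,
                         int16x4_t y0, int16x4_t y1, int16x4_t y2, int16x4_t y3) {
      sums = vmlal_lane_s16(sums, y0, x0, 0);
      sums = vmlal_lane_s16(sums, y1, x0, 1);
      sums = vmlal_lane_s16(sums, y2, x0, 2);
      sums = vmlal_lane_s16(sums, y3, x0, 3);
      return sums;
    }
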