
Searched refs:vmovl_u16 (Results 1 – 20 of 20) sorted by relevance

/external/tensorflow/tensorflow/core/kernels/
quantized_instance_norm.cc
169 vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_high))), in MinAndMax()
170 vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_high))), in MinAndMax()
171 vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_low))), in MinAndMax()
172 vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_low)))}; in MinAndMax()
225 vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_high))), in InstanceNorm()
226 vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_high))), in InstanceNorm()
227 vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_low))), in InstanceNorm()
228 vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_low)))}; in InstanceNorm()
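
The TensorFlow hits above all follow the same widening pattern: split a uint16x8_t into its halves, zero-extend each half with vmovl_u16, and convert the 32-bit lanes to float. A minimal sketch of that pattern, assuming a generic uint16x8_t input (the names below are illustrative, not taken from quantized_instance_norm.cc):

#include <arm_neon.h>

/* Widen each uint16x4_t half of a uint16x8_t to uint32x4_t, then convert
   each widened half to float32x4_t. */
static inline void u16x8_to_two_f32x4(uint16x8_t v,
                                      float32x4_t *lo, float32x4_t *hi) {
  *lo = vcvtq_f32_u32(vmovl_u16(vget_low_u16(v)));   /* lanes 0..3 */
  *hi = vcvtq_f32_u32(vmovl_u16(vget_high_u16(v)));  /* lanes 4..7 */
}
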
/external/libaom/libaom/av1/common/arm/
selfguided_neon.c
237 s0 = vmulq_u32(s0, vmovl_u16(vget_low_u16(s16_4))); in calc_ab_internal_common()
238 s1 = vmulq_u32(s1, vmovl_u16(vget_low_u16(s16_5))); in calc_ab_internal_common()
239 s2 = vmulq_u32(s2, vmovl_u16(vget_low_u16(s16_6))); in calc_ab_internal_common()
240 s3 = vmulq_u32(s3, vmovl_u16(vget_low_u16(s16_7))); in calc_ab_internal_common()
241 s4 = vmulq_u32(s4, vmovl_u16(vget_high_u16(s16_4))); in calc_ab_internal_common()
242 s5 = vmulq_u32(s5, vmovl_u16(vget_high_u16(s16_5))); in calc_ab_internal_common()
243 s6 = vmulq_u32(s6, vmovl_u16(vget_high_u16(s16_6))); in calc_ab_internal_common()
244 s7 = vmulq_u32(s7, vmovl_u16(vget_high_u16(s16_7))); in calc_ab_internal_common()
955 vaddq_u32(vmovl_u16(vget_low_u16(r0)), vmovl_u16(vget_low_u16(r1)))); in cross_sum_inp_u16()
957 vaddq_u32(vmovl_u16(vget_high_u16(r0)), vmovl_u16(vget_high_u16(r1)))); in cross_sum_inp_u16()
[all …]
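
The selfguided_neon.c hits above widen each uint16x4_t half before a full-width multiply, so the products are formed in 32-bit lanes rather than overflowing 16-bit ones. A small sketch of that widen-then-multiply step, with illustrative names:

#include <arm_neon.h>

/* Multiply 32-bit accumulators by the zero-extended halves of a uint16x8_t. */
static inline void mul_u32_by_u16_halves(uint32x4_t a_lo, uint32x4_t a_hi,
                                         uint16x8_t b,
                                         uint32x4_t *p_lo, uint32x4_t *p_hi) {
  *p_lo = vmulq_u32(a_lo, vmovl_u16(vget_low_u16(b)));
  *p_hi = vmulq_u32(a_hi, vmovl_u16(vget_high_u16(b)));
}
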
warp_plane_neon.c
641 int32x4_t tmp32_lo = vreinterpretq_s32_u32(vmovl_u16(tmp16_lo)); in av1_warp_affine_neon()
672 int32x4_t tmp32_hi = vreinterpretq_s32_u32(vmovl_u16(tmp16_hi)); in av1_warp_affine_neon()
/external/libhevc/encoder/arm/
ihevce_coarse_layer_sad_neon.c
520 total_cost_0 = vmulq_u32(v_lambda, vmovl_u16(vget_low_u16(mv_wt))); in hme_combine_4x4_sads_and_compute_cost_high_speed_neon()
521 total_cost_1 = vmulq_u32(v_lambda, vmovl_u16(vget_high_u16(mv_wt))); in hme_combine_4x4_sads_and_compute_cost_high_speed_neon()
559 total_cost = vmulq_u32(v_lambda, vmovl_u16(mv_wt)); in hme_combine_4x4_sads_and_compute_cost_high_speed_neon()
681 total_cost_0 = vmulq_u32(v_lambda, vmovl_u16(vget_low_u16(mv_wt))); in hme_combine_4x4_sads_and_compute_cost_high_quality_neon()
682 total_cost_1 = vmulq_u32(v_lambda, vmovl_u16(vget_high_u16(mv_wt))); in hme_combine_4x4_sads_and_compute_cost_high_quality_neon()
720 total_cost = vmulq_u32(v_lambda, vmovl_u16(mv_wt)); in hme_combine_4x4_sads_and_compute_cost_high_quality_neon()
/external/libvpx/libvpx/vp8/encoder/arm/neon/
fastquantizeb_neon.c
77 eob_q32 = vmovl_u16(eob_d16); in vp8_fast_quantize_b_neon()
/external/webp/src/dsp/
cost_neon.c
39 const uint32x4_t eob_32x4 = vmovl_u16(eob_16x4); in SetResidualCoeffs_NEON()
/external/skia/include/private/
SkNx_neon.h
625 return vcvtq_f32_u32(vmovl_u16(src.fVec));
636 return vmovl_u16(vget_low_u16(_16));
696 return vreinterpretq_s32_u32(vmovl_u16(src.fVec));
/external/libaom/libaom/aom_dsp/simd/
v128_intrinsics_arm.h
612 return vreinterpretq_s64_u32(vmovl_u16(vreinterpret_u16_s64(a))); in v128_unpack_u16_s32()
621 vmovl_u16(vreinterpret_u16_s64(vget_low_s64(a)))); in v128_unpacklo_u16_s32()
631 vmovl_u16(vreinterpret_u16_s64(vget_high_s64(a)))); in v128_unpackhi_u16_s32()
v64_intrinsics_arm.h
510 return vreinterpret_s64_u32(vget_low_u32(vmovl_u16(vreinterpret_u16_s64(x)))); in v64_unpacklo_u16_s32()
520 vget_high_u32(vmovl_u16(vreinterpret_u16_s64(x)))); in v64_unpackhi_u16_s32()
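
The v64/v128 wrappers above unpack unsigned 16-bit lanes into signed 32-bit lanes with vmovl_u16 plus reinterprets to and from the wrappers' int64-based vector types. A simplified sketch operating directly on uint16x8_t, with the wrapper reinterprets dropped for clarity (names are illustrative):

#include <arm_neon.h>

/* Zero-extend the low or high four uint16 lanes and view them as int32 lanes. */
static inline int32x4_t unpacklo_u16_s32(uint16x8_t a) {
  return vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(a)));
}
static inline int32x4_t unpackhi_u16_s32(uint16x8_t a) {
  return vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(a)));
}
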
/external/skqp/include/private/
SkNx_neon.h
647 return vcvtq_f32_u32(vmovl_u16(src.fVec));
658 return vmovl_u16(vget_low_u16(_16));
718 return vreinterpretq_s32_u32(vmovl_u16(src.fVec));
/external/gemmlowp/internal/
pack_neon.h
239 int32x4_t s = vreinterpretq_s32_u32(vmovl_u16(sums_of_16[cell])); in Pack()
/external/pffft/
sse2neon.h
4415 vcvtq_f32_u32(vmovl_u16(vreinterpret_u16_m64(a)))); in _mm_cvtpu16_ps()
4432 vmovl_u16(vget_low_u16(vmovl_u8(vreinterpret_u8_m64(a)))))); in _mm_cvtpu8_ps()
4467 uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000D 000C 000B 000A */ in _mm_cvtepu8_epi32()
4477 uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000x 000x 000B 000A */ in _mm_cvtepu8_epi64()
4535 vmovl_u16(vget_low_u16(vreinterpretq_u16_m128i(a)))); in _mm_cvtepu16_epi32()
4543 uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000x 000x 000B 000A */ in _mm_cvtepu16_epi64()
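
Several of the sse2neon hits chain two widenings: vmovl_u8 takes uint8 lanes to uint16, and vmovl_u16 then takes the low four uint16 lanes to uint32, which is the "000D 000C 000B 000A" layout noted in the source comments. A sketch of that chain with an illustrative function name:

#include <arm_neon.h>

/* Zero-extend the low four uint8 lanes of an 8-lane vector to uint32 lanes. */
static inline uint32x4_t widen_low4_u8_to_u32(uint8x8_t a) {
  uint16x8_t u16x8 = vmovl_u8(a);           /* 8 x u8  -> 8 x u16 */
  return vmovl_u16(vget_low_u16(u16x8));    /* low 4 x u16 -> 4 x u32 */
}
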
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
optimized_ops.h
4024 vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(exp_value1))); in SoftmaxInt8LUT()
4026 vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(exp_value1))); in SoftmaxInt8LUT()
4028 vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(exp_value2))); in SoftmaxInt8LUT()
4030 vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(exp_value2))); in SoftmaxInt8LUT()
6218 input.val[0] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(first_half)));
6219 input.val[1] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(first_half)));
6220 input.val[2] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(second_half)));
6221 input.val[3] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(second_half)));
6373 input.val[0] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(first_half)));
6374 input.val[1] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(first_half)));
[all …]
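
The optimized_ops.h hits above expand two uint16x8_t vectors into the four int32x4_t members of an int32x4x4_t by zero-extending each half and reinterpreting the result as signed. A sketch of that expansion, reusing the first_half/second_half names from the hits but otherwise simplified:

#include <arm_neon.h>

/* Expand two 8-lane uint16 vectors into four 4-lane int32 vectors. */
static inline int32x4x4_t expand_two_u16x8_to_s32x4x4(uint16x8_t first_half,
                                                      uint16x8_t second_half) {
  int32x4x4_t out;
  out.val[0] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(first_half)));
  out.val[1] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(first_half)));
  out.val[2] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(second_half)));
  out.val[3] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(second_half)));
  return out;
}
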
/external/libgav1/libgav1/src/dsp/arm/
film_grain_neon.cc
570 const uint32x4_t steps = vmovl_u16(vcreate_u16(0x0003000200010000)); in InitializeScalingLookupTable_NEON()
intrapred_cfl_neon.cc
821 const uint32x4_t final_fill_to_sum = vmovl_u16(final_fill); in CflSubsampler420_4xH_NEON()
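
The film_grain_neon.cc hit builds a small constant step vector by packing four uint16 lane values into a 64-bit literal (lane 0 sits in the low 16 bits), materializing it with vcreate_u16, and widening with vmovl_u16. A sketch of the same trick:

#include <arm_neon.h>

/* Produce the uint32 step vector {0, 1, 2, 3}. */
static inline uint32x4_t make_steps_0123(void) {
  return vmovl_u16(vcreate_u16(0x0003000200010000ULL));
}
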
/external/llvm-project/clang/test/CodeGen/
aarch64-neon-intrinsics.c
6402 return vmovl_u16(a); in test_vmovl_u16()
arm_neon_intrinsics.c
7986 return vmovl_u16(a); in test_vmovl_u16()
/external/clang/test/CodeGen/
aarch64-neon-intrinsics.c
7060 return vmovl_u16(a); in test_vmovl_u16()
arm_neon_intrinsics.c
9858 return vmovl_u16(a); in test_vmovl_u16()
/external/neon_2_sse/
NEON_2_SSE.h
1804 _NEON2SSESTORAGE uint32x4_t vmovl_u16(uint16x4_t a); // VMOVL.U16 q0,d0
12951 _NEON2SSESTORAGE uint32x4_t vmovl_u16(uint16x4_t a); // VMOVL.s16 q0,d0
12952 _NEON2SSE_INLINE uint32x4_t vmovl_u16(uint16x4_t a) in vmovl_u16() function
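
For reference, vmovl_u16 zero-extends each of the four uint16 lanes of its argument into a uint32 lane of a 128-bit result, as the NEON_2_SSE prototype above indicates. A scalar sketch of that semantics next to the intrinsic form (function names here are illustrative):

#include <arm_neon.h>
#include <stdint.h>

/* Scalar reference: each uint16 element is zero-extended to uint32. */
static inline void vmovl_u16_scalar_ref(const uint16_t in[4], uint32_t out[4]) {
  for (int i = 0; i < 4; ++i) out[i] = (uint32_t)in[i];
}

/* Intrinsic form; maps to VMOVL.U16 on AArch32 and UXTL on AArch64. */
static inline uint32x4_t widen_u16x4(uint16x4_t a) {
  return vmovl_u16(a);
}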