Searched refs:_mm256_avg_epu8 (Results 1 – 12 of 12) sorted by relevance
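_mm256_avg_epu8(a, b) is the AVX2 unsigned byte average (VPAVGB): for each of the 32 byte lanes it computes the rounded average (a + b + 1) >> 1, with the addition done at 9 bits of precision so it cannot overflow. Every hit below is a use of that one operation. A minimal sketch of the semantics; the buffers and the scalar reference check are illustrative, not taken from any file in the results:

    #include <immintrin.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      uint8_t a[32], b[32], out[32];
      for (int i = 0; i < 32; i++) {
        a[i] = (uint8_t)i;
        b[i] = (uint8_t)(255 - i);
      }

      __m256i va = _mm256_loadu_si256((const __m256i *)a);
      __m256i vb = _mm256_loadu_si256((const __m256i *)b);
      // Per-byte rounded average: out[i] = (a[i] + b[i] + 1) >> 1.
      __m256i vavg = _mm256_avg_epu8(va, vb);
      _mm256_storeu_si256((__m256i *)out, vavg);

      for (int i = 0; i < 32; i++) {
        uint8_t ref = (uint8_t)((a[i] + b[i] + 1) >> 1);  // scalar model
        if (out[i] != ref) printf("mismatch at lane %d\n", i);
      }
      printf("out[0] = %u (expect 128)\n", (unsigned)out[0]);
      return 0;  // build with: cc -mavx2 avg_demo.c
    }
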
/external/libaom/libaom/aom_dsp/x86/

variance_impl_avx2.c
     64  src_reg = _mm256_avg_epu8(src_reg, src_next_reg);
    122  src_reg = _mm256_avg_epu8(src_reg, src_next_reg);
    244  src_avg = _mm256_avg_epu8(src_avg, src_reg);  in aom_sub_pixel_variance32xh_avx2()
    309  src_pack = _mm256_avg_epu8(src_pack, src_reg);  in aom_sub_pixel_variance32xh_avx2()
    413  src_reg = _mm256_avg_epu8(src_reg, src_next_reg);  in aom_sub_pixel_variance16xh_avx2()
    425  src_avg = _mm256_avg_epu8(src_reg, src_next_reg);  in aom_sub_pixel_variance16xh_avx2()
    429  src_next_reg = _mm256_avg_epu8(src_reg, src_next_reg);  in aom_sub_pixel_variance16xh_avx2()
    431  src_temp = _mm256_avg_epu8(src_avg, src_temp);  in aom_sub_pixel_variance16xh_avx2()
    448  src_avg = _mm256_avg_epu8(src_avg, src_next_reg);  in aom_sub_pixel_variance16xh_avx2()
    460  src_avg = _mm256_avg_epu8(src_reg, src_next_reg);  in aom_sub_pixel_variance16xh_avx2()
    [all …]

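The repeated _mm256_avg_epu8(src_reg, src_next_reg) hits are the half-pel fast path of the sub-pixel variance code: when the fractional offset is exactly half a pixel, the bilinear filter degenerates into a rounded average of each pixel with its neighbor, so the row is loaded once at src and once at src + 1 and the two are averaged. A sketch of that pattern; the helper name is hypothetical, and the real functions interleave this with the variance accumulation rather than storing the row:

    #include <immintrin.h>
    #include <stdint.h>

    // Horizontal half-pel interpolation of one 32-byte row:
    // dst[i] = (src[i] + src[i + 1] + 1) >> 1.
    // Assumes at least 33 readable bytes at src; names are illustrative.
    static void half_pel_row(const uint8_t *src, uint8_t *dst) {
      __m256i src_reg      = _mm256_loadu_si256((const __m256i *)src);
      __m256i src_next_reg = _mm256_loadu_si256((const __m256i *)(src + 1));
      src_reg = _mm256_avg_epu8(src_reg, src_next_reg);  // rounded average
      _mm256_storeu_si256((__m256i *)dst, src_reg);
    }
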
sad_avx2.c
    110  ref1_reg = _mm256_avg_epu8( \
    112  ref2_reg = _mm256_avg_epu8( \
    148  ref1_reg = _mm256_avg_epu8( \
    150  ref2_reg = _mm256_avg_epu8( \

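Here the averages sit inside the macros that generate the *_avg SAD variants: the reference block is first averaged with a second predictor, and only then is the sum of absolute differences against the source accumulated with _mm256_sad_epu8. A sketch of one such kernel under those assumptions; the function shape, the names, and the 32-wide block are illustrative, not the macro body these hits come from:

    #include <immintrin.h>
    #include <stdint.h>

    // SAD of a 32xh block with ref replaced by avg(ref, second_pred),
    // mirroring the *_avg SAD pattern. Names and shape are illustrative.
    static unsigned sad32xh_avg(const uint8_t *src, int src_stride,
                                const uint8_t *ref, int ref_stride,
                                const uint8_t *second_pred, int h) {
      __m256i sum = _mm256_setzero_si256();
      for (int i = 0; i < h; i++) {
        __m256i ref_reg  = _mm256_loadu_si256((const __m256i *)ref);
        __m256i pred_reg = _mm256_loadu_si256((const __m256i *)second_pred);
        // Average the reference with the second predictor first.
        ref_reg = _mm256_avg_epu8(ref_reg, pred_reg);
        __m256i src_reg = _mm256_loadu_si256((const __m256i *)src);
        // _mm256_sad_epu8 yields four 64-bit sums of absolute byte diffs.
        sum = _mm256_add_epi64(sum, _mm256_sad_epu8(src_reg, ref_reg));
        src += src_stride;
        ref += ref_stride;
        second_pred += 32;
      }
      // Reduce the four 64-bit partial sums (the total fits in 32 bits).
      __m128i lo = _mm256_castsi256_si128(sum);
      __m128i hi = _mm256_extracti128_si256(sum, 1);
      lo = _mm_add_epi64(lo, hi);
      lo = _mm_add_epi64(lo, _mm_srli_si128(lo, 8));
      return (unsigned)_mm_cvtsi128_si32(lo);
    }
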
sad_impl_avx2.c
     99  ref1_reg = _mm256_avg_epu8(  in sad_w64_avg_avx2()
    101  ref2_reg = _mm256_avg_epu8(  in sad_w64_avg_avx2()

blend_a64_mask_avx2.c
    269  _mm256_avg_epu8(_mm256_adds_epu8(m_i00, m_i10), zeros);  in lowbd_blend_a64_d16_mask_subw0_subh1_w32_avx2()
    589  _mm256_avg_epu8(v_rl_b, _mm256_srli_si256(v_rl_b, 1));  in blend_a64_mask_sx_w16_avx2()
    620  _mm256_avg_epu8(v_r0_s_b, _mm256_srli_si256(v_r0_s_b, 8));  in blend_a64_mask_sx_w32n_avx2()
    622  _mm256_avg_epu8(v_r1_s_b, _mm256_srli_si256(v_r1_s_b, 8));  in blend_a64_mask_sx_w32n_avx2()
    729  const __m256i v_m0_b = _mm256_avg_epu8(v_ra_b, v_rb_b);  in blend_a64_mask_sy_w32n_avx2()

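In the blend code the intrinsic downsamples the 64-level blend mask when the mask is stored at twice the block resolution: horizontally by averaging each mask byte with its neighbor (the _mm256_srli_si256 shifts above), vertically by averaging two mask rows (line 729). A sketch of the horizontal case; the helper name and the even-byte extraction are illustrative assumptions, and note that _mm256_srli_si256 shifts within each 128-bit lane, so callers handle lane boundaries:

    #include <immintrin.h>

    // Horizontal 2:1 mask downsample: each kept byte is the rounded
    // average of an adjacent pair of mask bytes. Illustrative helper.
    static __m256i collapse_mask_pairs(__m256i v_rl_b) {
      // Byte i becomes avg(byte i, byte i+1) within each 128-bit lane.
      __m256i v_al_b = _mm256_avg_epu8(v_rl_b, _mm256_srli_si256(v_rl_b, 1));
      // Keep the even bytes: one average per input pair.
      return _mm256_and_si256(v_al_b, _mm256_set1_epi16(0x00FF));
    }
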
/external/libvpx/libvpx/vpx_dsp/x86/

sad_avx2.c
    103  ref1_reg = _mm256_avg_epu8( \
    105  ref2_reg = _mm256_avg_epu8( \
    140  ref1_reg = _mm256_avg_epu8( \
    142  ref2_reg = _mm256_avg_epu8( \

variance_avx2.c
    238  const __m256i avg_reg = _mm256_avg_epu8(src_reg, sec_reg);  in spv32_x0_y0()
    266  const __m256i src_avg = _mm256_avg_epu8(src_0, src_1);  in spv32_half_zero()
    269  const __m256i avg_reg = _mm256_avg_epu8(src_avg, sec_reg);  in spv32_half_zero()
    309  __m256i prev_src_avg = _mm256_avg_epu8(src_a, src_b);  in spv32_x4_y4()
    317  const __m256i src_avg = _mm256_avg_epu8(src_0, src_1);  in spv32_x4_y4()
    318  const __m256i current_avg = _mm256_avg_epu8(prev_src_avg, src_avg);  in spv32_x4_y4()
    323  const __m256i avg_reg = _mm256_avg_epu8(current_avg, sec_reg);  in spv32_x4_y4()
    362  const __m256i avg_reg = _mm256_avg_epu8(exp_src, sec_reg);  in spv32_bilin_zero()
    402  __m256i prev_src_avg = _mm256_avg_epu8(src_a, src_b);  in spv32_x4_yb()
    410  const __m256i src_avg = _mm256_avg_epu8(src_0, src_1);  in spv32_x4_yb()
    [all …]

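The spv32_* hits show libvpx's specialization ladder for 32-wide sub-pixel variance: spv32_x0_y0 only folds in the second predictor, spv32_half_zero averages along one axis, and spv32_x4_y4 chains a horizontal half-pel average with a vertical average of consecutive row results for the (1/2, 1/2) offset. A sketch of that two-stage chain; the names are illustrative, and chaining two rounded averages is exactly what the dedicated half-pel path does in place of a general bilinear filter:

    #include <immintrin.h>
    #include <stdint.h>

    // (1/2, 1/2) half-pel value for one 32-wide run of two rows:
    // horizontal average per row, then a vertical average of the two
    // row results, mirroring the spv32_x4_y4 structure.
    static __m256i half_half_pel(const uint8_t *row0, const uint8_t *row1) {
      __m256i a0 = _mm256_loadu_si256((const __m256i *)row0);
      __m256i b0 = _mm256_loadu_si256((const __m256i *)(row0 + 1));
      __m256i a1 = _mm256_loadu_si256((const __m256i *)row1);
      __m256i b1 = _mm256_loadu_si256((const __m256i *)(row1 + 1));
      __m256i prev_src_avg = _mm256_avg_epu8(a0, b0);  // horizontal, row 0
      __m256i src_avg      = _mm256_avg_epu8(a1, b1);  // horizontal, row 1
      return _mm256_avg_epu8(prev_src_avg, src_avg);   // vertical
    }
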
/external/libaom/libaom/aom_dsp/simd/

v256_intrinsics_x86.h
    524  SIMD_INLINE v256 v256_avg_u8(v256 a, v256 b) { return _mm256_avg_epu8(a, b); }  in v256_avg_u8()
    528  _mm256_avg_epu8(a, b),  in v256_rdavg_u8()

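v256_avg_u8 is a direct wrapper, while line 528 sits inside v256_rdavg_u8, the round-down average. Since _mm256_avg_epu8 rounds up, the two results differ by exactly the low bit of the sum, (a ^ b) & 1, so subtracting that bit turns (a + b + 1) >> 1 into (a + b) >> 1. A sketch of the identity; the header may arrange the expression differently:

    #include <immintrin.h>

    // Round-down unsigned byte average: (a + b) >> 1.
    // _mm256_avg_epu8 gives (a + b + 1) >> 1; the difference is the
    // low bit of a + b, which equals (a ^ b) & 1, so subtract it out.
    static __m256i rdavg_u8(__m256i a, __m256i b) {
      __m256i carry = _mm256_and_si256(_mm256_xor_si256(a, b),
                                       _mm256_set1_epi8(1));
      return _mm256_sub_epi8(_mm256_avg_epu8(a, b), carry);
    }
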
/external/clang/test/CodeGen/

avx2-builtins.c
    105  return _mm256_avg_epu8(a, b);  in test_mm256_avg_epu8()

/external/llvm-project/clang/test/CodeGen/X86/

avx2-builtins.c
    105  return _mm256_avg_epu8(a, b);  in test_mm256_avg_epu8()

/external/llvm-project/clang/lib/Headers/

avx2intrin.h
    133  _mm256_avg_epu8(__m256i __a, __m256i __b)  in _mm256_avg_epu8() function

avx512vlbwintrin.h
    814  (__v32qi)_mm256_avg_epu8(__A, __B),  in _mm256_mask_avg_epu8()
    822  (__v32qi)_mm256_avg_epu8(__A, __B),  in _mm256_maskz_avg_epu8()

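The AVX-512VL/BW header layers write-masking over the same operation: _mm256_mask_avg_epu8 keeps bytes from a passthrough operand where the mask bit is clear, and _mm256_maskz_avg_epu8 zeroes them. A usage sketch, assuming a target with AVX512BW and AVX512VL (compile with -mavx512bw -mavx512vl); the function and mask value are illustrative:

    #include <immintrin.h>

    static __m256i masked_avg_demo(__m256i src, __m256i a, __m256i b) {
      const __mmask32 even_bytes = 0x55555555u;  // select bytes 0, 2, 4, ...
      // Merge-masking: unselected bytes keep their value from src.
      __m256i merged = _mm256_mask_avg_epu8(src, even_bytes, a, b);
      // Zero-masking: unselected bytes are cleared instead.
      __m256i zeroed = _mm256_maskz_avg_epu8(even_bytes, a, b);
      (void)zeroed;  // demo only
      return merged;
    }
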
/external/clang/lib/Headers/

avx2intrin.h
    146  _mm256_avg_epu8(__m256i __a, __m256i __b)  in _mm256_avg_epu8() function