/external/libaom/libaom/aom_dsp/x86/ |
D | avg_intrin_avx2.c |
    158  b0 = _mm256_srai_epi16(b0, 1);  in hadamard_16x16_avx2()
    159  b1 = _mm256_srai_epi16(b1, 1);  in hadamard_16x16_avx2()
    160  b2 = _mm256_srai_epi16(b2, 1);  in hadamard_16x16_avx2()
    161  b3 = _mm256_srai_epi16(b3, 1);  in hadamard_16x16_avx2()
    203  b0 = _mm256_srai_epi16(b0, 1);  in aom_hadamard_lp_16x16_avx2()
    204  b1 = _mm256_srai_epi16(b1, 1);  in aom_hadamard_lp_16x16_avx2()
    205  b2 = _mm256_srai_epi16(b2, 1);  in aom_hadamard_lp_16x16_avx2()
    206  b3 = _mm256_srai_epi16(b3, 1);  in aom_hadamard_lp_16x16_avx2()
    244  b0 = _mm256_srai_epi16(b0, 2);  in aom_hadamard_32x32_avx2()
    245  b1 = _mm256_srai_epi16(b1, 2);  in aom_hadamard_32x32_avx2()
    [all …]
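The Hadamard kernels above halve (16x16) or quarter (32x32) the intermediate sums so the transform stays inside 16-bit range. A standalone demo of what the arithmetic shift does per lane (not libaom code): srai by n divides each signed 16-bit value by 2^n, rounding toward negative infinity.

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      /* srai by 1 = signed divide by 2, rounding toward -inf */
      __m256i v = _mm256_setr_epi16(6, -6, 7, -7, 1, -1, 0, -32768,
                                    6, -6, 7, -7, 1, -1, 0, -32768);
      __m256i half = _mm256_srai_epi16(v, 1);
      short out[16];
      _mm256_storeu_si256((__m256i *)out, half);
      for (int i = 0; i < 8; ++i) printf("%d ", out[i]);
      printf("\n"); /* prints: 3 -3 3 -4 0 -1 0 -16384 */
      return 0;
    }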
|
D | aom_subpixel_8t_intrin_avx2.c |
    116  srcRegFilt32b1_1 = _mm256_srai_epi16(srcRegFilt32b1_1, 6);  in aom_filter_block1d4_h4_avx2()
    212  srcRegFilt32b1_1 = _mm256_srai_epi16(srcRegFilt32b1_1, 6);  in aom_filter_block1d4_h8_avx2()
    312  srcRegFilt32b1_1 = _mm256_srai_epi16(srcRegFilt32b1_1, 6);  in aom_filter_block1d8_h4_avx2()
    426  srcRegFilt32b1_1 = _mm256_srai_epi16(srcRegFilt32b1_1, 6);  in aom_filter_block1d8_h8_avx2()
    555  srcRegFilt32b1_1 = _mm256_srai_epi16(srcRegFilt32b1_1, 6);  in aom_filter_block1d16_h4_avx2()
    556  srcRegFilt32b2_1 = _mm256_srai_epi16(srcRegFilt32b2_1, 6);  in aom_filter_block1d16_h4_avx2()
    590  srcRegFilt1_1 = _mm256_srai_epi16(srcRegFilt1_1, 6);  in aom_filter_block1d16_h4_avx2()
    702  srcRegFilt32b1_1 = _mm256_srai_epi16(srcRegFilt32b1_1, 6);  in aom_filter_block1d16_h8_avx2()
    703  srcRegFilt32b2_1 = _mm256_srai_epi16(srcRegFilt32b2_1, 6);  in aom_filter_block1d16_h8_avx2()
    867  resReglo = _mm256_srai_epi16(resReglo, 6);  in aom_filter_block1d8_v4_avx2()
    [all …]
|
D | convolve_avx2.h |
    286  const __m256i coeffs_1 = _mm256_srai_epi16(filter_coeffs, 1);  in prepare_coeffs_lowbd()
    420  res = _mm256_srai_epi16(wt_res, 1);  in comp_avg()
    430  const __m256i res_round = _mm256_srai_epi16(  in convolve_rounding()
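convolve_rounding() above (its hit is cut off by the search snippet) follows the common add-then-shift idiom: add 2^(bits-1) before the arithmetic shift to get round-to-nearest instead of round-toward-negative-infinity. A hedged sketch of that idiom with an illustrative name and a fixed shift (the real helper takes its round constant and bit count as arguments):

    #include <immintrin.h>

    #define ROUND_BITS 6  /* illustrative; the real bit count varies */

    /* (v + 2^(bits-1)) >> bits: round each signed 16-bit lane to the
     * nearest multiple of 2^bits, then scale down. */
    static inline __m256i round_shift_s16(__m256i v) {
      const __m256i half = _mm256_set1_epi16(1 << (ROUND_BITS - 1));
      return _mm256_srai_epi16(_mm256_add_epi16(v, half), ROUND_BITS);
    }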
|
D | adaptive_quantize_avx2.c |
    89  __m256i coeff_sign = _mm256_srai_epi16(coeff_vals, 15);  in store_coefficients_avx2()
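Shifting by 15 smears the sign bit across the whole lane: 0x0000 for non-negative values, 0xFFFF for negative ones. That mask enables branchless sign manipulation; a sketch of the classic absolute-value use (illustrative helper, not the libaom function):

    #include <immintrin.h>

    /* |v| per int16 lane via the sign mask: (v ^ sign) - sign.
     * Matches _mm256_abs_epi16 except for the INT16_MIN lane, which wraps. */
    static inline __m256i abs_s16_via_sign(__m256i v) {
      const __m256i sign = _mm256_srai_epi16(v, 15); /* 0 or -1 per lane */
      return _mm256_sub_epi16(_mm256_xor_si256(v, sign), sign);
    }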
|
D | variance_impl_avx2.c |
    49  exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4); \
    50  exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);
|
D | intrapred_avx2.c |
    324  sum_left = _mm256_srai_epi16(sum_left, 6);  in aom_dc_predictor_32x32_avx2()
    338  sum = _mm256_srai_epi16(sum, 5);  in aom_dc_top_predictor_32x32_avx2()
    352  sum = _mm256_srai_epi16(sum, 5);  in aom_dc_left_predictor_32x32_avx2()
    491  sum = _mm256_srai_epi16(sum, 5);  in aom_dc_top_predictor_32x16_avx2()
    505  sum = _mm256_srai_epi16(sum, 5);  in aom_dc_top_predictor_32x64_avx2()
    519  sum = _mm256_srai_epi16(sum, 6);  in aom_dc_top_predictor_64x64_avx2()
    533  sum = _mm256_srai_epi16(sum, 6);  in aom_dc_top_predictor_64x32_avx2()
    547  sum = _mm256_srai_epi16(sum, 6);  in aom_dc_top_predictor_64x16_avx2()
    576  sum = _mm256_srai_epi16(sum, 6);  in aom_dc_left_predictor_32x64_avx2()
    590  sum = _mm256_srai_epi16(sum, 6);  in aom_dc_left_predictor_64x64_avx2()
    [all …]
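The shift amount tracks the edge length: 5 for a 32-sample edge, 6 for 64, i.e. sum / 2^n. A sketch under the assumption that the rounding bias has already been folded into the sum (names are illustrative):

    #include <immintrin.h>

    /* Assuming each 16-bit lane of `biased_sum` holds (edge sum + 16),
     * one shift yields round(sum / 32) in every lane. */
    static inline __m256i dc_mean_32(__m256i biased_sum) {
      return _mm256_srai_epi16(biased_sum, 5);
    }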
|
D | txfm_common_avx2.h |
    240  in[i] = _mm256_srai_epi16(in[i], bit);  in round_shift_16bit_w16_avx2()
|
/external/libvpx/libvpx/vpx_dsp/x86/ |
D | avg_intrin_avx2.c |
    364  b0 = _mm256_srai_epi16(b0, 1);  in hadamard_16x16_avx2()
    365  b1 = _mm256_srai_epi16(b1, 1);  in hadamard_16x16_avx2()
    366  b2 = _mm256_srai_epi16(b2, 1);  in hadamard_16x16_avx2()
    367  b3 = _mm256_srai_epi16(b3, 1);  in hadamard_16x16_avx2()
    422  b0 = _mm256_srai_epi16(b0, 2);  in vpx_hadamard_32x32_avx2()
    423  b1 = _mm256_srai_epi16(b1, 2);  in vpx_hadamard_32x32_avx2()
    424  b2 = _mm256_srai_epi16(b2, 2);  in vpx_hadamard_32x32_avx2()
    425  b3 = _mm256_srai_epi16(b3, 2);  in vpx_hadamard_32x32_avx2()
|
D | fwd_dct32x32_impl_avx2.h |
    544  step2[0] = _mm256_srai_epi16(step2[0], 2);  in FDCT32x32_2D_AVX2()
    545  step2[1] = _mm256_srai_epi16(step2[1], 2);  in FDCT32x32_2D_AVX2()
    546  step2[2] = _mm256_srai_epi16(step2[2], 2);  in FDCT32x32_2D_AVX2()
    547  step2[3] = _mm256_srai_epi16(step2[3], 2);  in FDCT32x32_2D_AVX2()
    548  step2[4] = _mm256_srai_epi16(step2[4], 2);  in FDCT32x32_2D_AVX2()
    549  step2[5] = _mm256_srai_epi16(step2[5], 2);  in FDCT32x32_2D_AVX2()
    550  step2[6] = _mm256_srai_epi16(step2[6], 2);  in FDCT32x32_2D_AVX2()
    551  step2[7] = _mm256_srai_epi16(step2[7], 2);  in FDCT32x32_2D_AVX2()
    552  step2[8] = _mm256_srai_epi16(step2[8], 2);  in FDCT32x32_2D_AVX2()
    553  step2[9] = _mm256_srai_epi16(step2[9], 2);  in FDCT32x32_2D_AVX2()
    [all …]
|
D | convolve_avx2.h |
    72   sum1 = _mm256_srai_epi16(sum1, 7);  in convolve8_16_avx2()
    148  return _mm256_srai_epi16(nearest_src, depth);  in mm256_round_epi16()
|
D | variance_avx2.c |
    185  exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4); \
    186  exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);
|
/external/libaom/libaom/aom_dsp/simd/ |
D | v256_intrinsics_x86.h |
    211  _mm256_packs_epi16(_mm256_srai_epi16(b, 8), _mm256_srai_epi16(a, 8)),  in v256_unziphi_8()
    278  return _mm256_srai_epi16(  in v256_unpacklo_s8_s16()
    285  return _mm256_srai_epi16(  in v256_unpackhi_s8_s16()
    687  _mm256_packs_epi16(_mm256_srai_epi16(_mm256_unpacklo_epi8(a, a), (c) + 8), \
    688  _mm256_srai_epi16(_mm256_unpackhi_epi8(a, a), (c) + 8))
    691  #define v256_shr_n_s16(a, c) _mm256_srai_epi16(a, c)
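Lines 687-688 show the duplicate-and-shift widening trick: unpacking a byte against itself places it in the high half of a 16-bit lane, so an arithmetic shift right by 8 (plus the caller's extra count c) sign-extends it. Isolated sketch of the plain int8-to-int16 widen, assuming only what the snippet shows:

    #include <immintrin.h>

    /* Sign-extend the low 8 bytes of each 128-bit half from int8 to int16:
     * unpacklo_epi8(a, a) yields lanes of (b << 8) | b, and srai by 8
     * recovers b with its sign spread through the high byte. */
    static inline __m256i widen_lo_s8_s16(__m256i a) {
      return _mm256_srai_epi16(_mm256_unpacklo_epi8(a, a), 8);
    }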
|
/external/libaom/libaom/av1/encoder/x86/ |
D | av1_quantize_avx2.c |
    66   *thr = _mm256_srai_epi16(qp[2], 1 + log_scale);  in init_qp()
    73   *thr = _mm256_srai_epi16(qp[2], 1 + log_scale);  in update_qp()
    78   __m256i sign_bits = _mm256_srai_epi16(q, 15); \
    217  thr256 = _mm256_srai_epi16(dequant256, 1);  in av1_quantize_lp_avx2()
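Halving the dequant step (line 217, and lines 66/73 with an extra log_scale) builds the skip threshold: a coefficient whose magnitude stays below dequant/2 quantizes to zero, so one vector compare can prune a whole run of lanes. A sketch of that test with illustrative names, not the libaom kernel:

    #include <immintrin.h>

    /* Returns 0xFFFF in each lane whose |coeff| falls inside the dead zone
     * and would quantize to zero. */
    static inline __m256i dead_zone_mask(__m256i abs_coeff, __m256i dequant) {
      const __m256i thr = _mm256_srai_epi16(dequant, 1); /* dequant / 2 */
      return _mm256_cmpgt_epi16(thr, abs_coeff);
    }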
|
/external/libvpx/libvpx/vp9/encoder/x86/ |
D | vp9_quantize_avx2.c |
    109  thr256 = _mm256_srai_epi16(dequant256, 1);  in vp9_quantize_fp_avx2()
|
/external/libgav1/libgav1/src/dsp/x86/ |
D | common_avx2.inc |
    114  return _mm256_srai_epi16(v_tmp_d, bits);
|
D | loop_restoration_avx2.cc |
    52   _mm256_srai_epi16(sum, kInterRoundBitsHorizontal);  in WienerHorizontalClip()
    456  const __m256i c0 = _mm256_srai_epi16(b0, 4);  in WienerVerticalTap1Kernel()
    457  const __m256i c1 = _mm256_srai_epi16(b1, 4);  in WienerVerticalTap1Kernel()
|
D | loop_restoration_10bit_avx2.cc |
    445  const __m256i c = _mm256_srai_epi16(b, 4);  in WienerVerticalTap1Kernel()
|
/external/gemmlowp/fixedpoint/ |
D | fixedpoint_avx.h |
    149  return to_int16x16_m256i(_mm256_srai_epi16(a.v, offset));
|
/external/llvm-project/clang/test/CodeGen/X86/ |
D | avx2-builtins.c |
    1049  return _mm256_srai_epi16(a, 3);  in test_mm256_srai_epi16()
    1055  return _mm256_srai_epi16(a, b);  in test_mm256_srai_epi16_2()
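The two tests cover both accepted call shapes: a compile-time count and a runtime one. Both compile with clang (that is what this CodeGen test verifies); a minimal pair mirroring them:

    #include <immintrin.h>

    __m256i shift_by_imm(__m256i a)        { return _mm256_srai_epi16(a, 3); }
    __m256i shift_by_var(__m256i a, int n) { return _mm256_srai_epi16(a, n); }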
|
/external/libaom/libaom/av1/common/x86/ |
D | warp_plane_avx2.c |
    694  res_lo_16 = _mm256_srai_epi16(_mm256_add_epi16(p_16, temp_lo_16), 1);  in store_vertical_filter_output_avx2()
    697  res_lo_16 = _mm256_srai_epi16(  in store_vertical_filter_output_avx2()
    737  res_hi_16 = _mm256_srai_epi16(_mm256_add_epi16(p4_16, temp_hi_16), 1);  in store_vertical_filter_output_avx2()
    740  res_hi_16 = _mm256_srai_epi16(  in store_vertical_filter_output_avx2()
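Lines 694 and 737 blend two predictions with (p + q) >> 1, a signed average that rounds toward negative infinity. A sketch of the idiom in isolation (note it differs from _mm256_avg_epu16, which is unsigned and rounds up, and it can wrap if p + q overflows int16):

    #include <immintrin.h>

    /* Floor-average of two signed 16-bit vectors: (p + q) >> 1 per lane. */
    static inline __m256i avg_s16_floor(__m256i p, __m256i q) {
      return _mm256_srai_epi16(_mm256_add_epi16(p, q), 1);
    }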
|
D | reconinter_avx2.c |
    541  __m256i diff = _mm256_srai_epi16(  in av1_build_compound_diffwtd_mask_highbd_avx2()
    561  __m256i diff = _mm256_srai_epi16(  in av1_build_compound_diffwtd_mask_highbd_avx2()
|
/external/clang/test/CodeGen/ |
D | avx2-builtins.c |
    1033  return _mm256_srai_epi16(a, 3);  in test_mm256_srai_epi16()
|
/external/llvm-project/clang/lib/Headers/ |
D | avx2intrin.h |
    539  _mm256_srai_epi16(__m256i __a, int __count)  in _mm256_srai_epi16() function
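This is the header that defines the intrinsic itself. A scalar reference model of its per-lane behavior, written from the Intel SDM description of VPSRAW (my reading, stated as an assumption; counts above 15 fill the lane with copies of the sign bit):

    #include <stdint.h>

    /* What _mm256_srai_epi16 does to one of its sixteen lanes. */
    static int16_t srai_epi16_ref(int16_t x, unsigned count) {
      if (count > 15) count = 15; /* shift saturates to all-sign-bits */
      return (int16_t)(x >> count);
    }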
|
D | avx512vlbwintrin.h |
    2115  (__v16hi)_mm256_srai_epi16(__A, __B),  in _mm256_mask_srai_epi16()
    2123  (__v16hi)_mm256_srai_epi16(__A, __B),  in _mm256_maskz_srai_epi16()
|
/external/clang/lib/Headers/ |
D | avx2intrin.h |
    632  _mm256_srai_epi16(__m256i __a, int __count)  in _mm256_srai_epi16() function
|