
Searched refs: _mm256_add_epi16 (Results 1 – 5 of 5) sorted by relevance

/external/libvpx/libvpx/vpx_dsp/x86/
loopfilter_avx2.c
600 pixelFilter_p = _mm256_add_epi16(_mm256_add_epi16(p256_6, p256_5), in vpx_lpf_horizontal_16_dual_avx2()
601 _mm256_add_epi16(p256_4, p256_3)); in vpx_lpf_horizontal_16_dual_avx2()
602 pixelFilter_q = _mm256_add_epi16(_mm256_add_epi16(q256_6, q256_5), in vpx_lpf_horizontal_16_dual_avx2()
603 _mm256_add_epi16(q256_4, q256_3)); in vpx_lpf_horizontal_16_dual_avx2()
606 _mm256_add_epi16(p256_0, _mm256_add_epi16(p256_2, p256_1)); in vpx_lpf_horizontal_16_dual_avx2()
607 pixelFilter_p = _mm256_add_epi16(pixelFilter_p, pixetFilter_p2p1p0); in vpx_lpf_horizontal_16_dual_avx2()
610 _mm256_add_epi16(q256_0, _mm256_add_epi16(q256_2, q256_1)); in vpx_lpf_horizontal_16_dual_avx2()
611 pixelFilter_q = _mm256_add_epi16(pixelFilter_q, pixetFilter_q2q1q0); in vpx_lpf_horizontal_16_dual_avx2()
613 pixelFilter_p = _mm256_add_epi16( in vpx_lpf_horizontal_16_dual_avx2()
614 eight, _mm256_add_epi16(pixelFilter_p, pixelFilter_q)); in vpx_lpf_horizontal_16_dual_avx2()
[all …]
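
The loop-filter sums above fold several widened pixel rows together before the rounding add with eight. A minimal sketch of that accumulation pattern (illustrative names, not the libvpx code; compile with -mavx2): bytes are widened to 16-bit lanes so nested _mm256_add_epi16 calls can sum them without overflow.

#include <immintrin.h>
#include <stdint.h>

/* Hypothetical helper: sum three 16-pixel rows lane-wise in 16-bit
 * precision, the same shape as the pixelFilter_p / pixelFilter_q sums. */
static __m256i sum_three_rows_epi16(const uint8_t *r0, const uint8_t *r1,
                                    const uint8_t *r2) {
  const __m256i a = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i *)r0));
  const __m256i b = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i *)r1));
  const __m256i c = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i *)r2));
  /* 16-bit lanes cannot overflow when adding a handful of 8-bit values. */
  return _mm256_add_epi16(_mm256_add_epi16(a, b), c);
}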
fwd_dct32x32_impl_avx2.h
131 step1a[0] = _mm256_add_epi16(ina0, inb0); in FDCT32x32_2D_AVX2()
132 step1a[1] = _mm256_add_epi16(ina1, inb1); in FDCT32x32_2D_AVX2()
133 step1a[2] = _mm256_add_epi16(ina2, inb2); in FDCT32x32_2D_AVX2()
134 step1a[3] = _mm256_add_epi16(ina3, inb3); in FDCT32x32_2D_AVX2()
167 step1a[0] = _mm256_add_epi16(ina0, inb0); in FDCT32x32_2D_AVX2()
168 step1a[1] = _mm256_add_epi16(ina1, inb1); in FDCT32x32_2D_AVX2()
169 step1a[2] = _mm256_add_epi16(ina2, inb2); in FDCT32x32_2D_AVX2()
170 step1a[3] = _mm256_add_epi16(ina3, inb3); in FDCT32x32_2D_AVX2()
203 step1a[0] = _mm256_add_epi16(ina0, inb0); in FDCT32x32_2D_AVX2()
204 step1a[1] = _mm256_add_epi16(ina1, inb1); in FDCT32x32_2D_AVX2()
[all …]
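
The step1a adds are the first butterfly stage of the 32x32 forward DCT. A minimal sketch of that add/subtract pairing on packed 16-bit coefficients (assumed array shapes, not the libvpx implementation):

#include <immintrin.h>

/* Hypothetical butterfly stage: sum[i] = a[i] + b[i], diff[i] = a[i] - b[i],
 * mirroring the step1a adds above. */
static void butterfly_epi16(const __m256i *ina, const __m256i *inb,
                            __m256i *sum, __m256i *diff, int n) {
  for (int i = 0; i < n; ++i) {
    sum[i] = _mm256_add_epi16(ina[i], inb[i]);
    diff[i] = _mm256_sub_epi16(ina[i], inb[i]);
  }
}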
variance_impl_avx2.c
75 src_expand_low = _mm256_add_epi16(src_expand_low, src_expand_high); in vpx_get16x16var_avx2()
80 sum_ref_src = _mm256_add_epi16(sum_ref_src, src_expand_low); in vpx_get16x16var_avx2()
174 src_expand_low = _mm256_add_epi16(src_expand_low, src_expand_high); in vpx_get32x32var_avx2()
179 sum_ref_src = _mm256_add_epi16(sum_ref_src, src_expand_low); in vpx_get32x32var_avx2()
238 exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8); \
239 exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8); \
271 sum_reg = _mm256_add_epi16(sum_reg, exp_src_lo); \
273 sum_reg = _mm256_add_epi16(sum_reg, exp_src_hi); \
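
The variance kernels accumulate 16-bit partial sums with _mm256_add_epi16 and reduce them to scalars later. A minimal sketch of that idea (illustrative only, not vpx_get16x16var_avx2):

#include <immintrin.h>
#include <stdint.h>

/* Hypothetical 16x16 pixel sum: each 16-bit lane holds at most 16 * 255,
 * so plain (wrapping) adds are safe before the 32-bit reduction. */
static int sum_16x16(const uint8_t *src, int stride) {
  __m256i sum = _mm256_setzero_si256();
  for (int i = 0; i < 16; ++i) {
    const __m128i row = _mm_loadu_si128((const __m128i *)(src + i * stride));
    sum = _mm256_add_epi16(sum, _mm256_cvtepu8_epi16(row));
  }
  /* Pairwise widen to 32-bit, then fold the eight lanes to one scalar. */
  const __m256i pairs = _mm256_madd_epi16(sum, _mm256_set1_epi16(1));
  __m128i s = _mm_add_epi32(_mm256_castsi256_si128(pairs),
                            _mm256_extracti128_si256(pairs, 1));
  s = _mm_add_epi32(s, _mm_srli_si128(s, 8));
  s = _mm_add_epi32(s, _mm_srli_si128(s, 4));
  return _mm_cvtsi128_si32(s);
}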
/external/clang/test/CodeGen/
avx2-builtins.c
38 return _mm256_add_epi16(a, b); in test_mm256_add_epi16()
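
The CodeGen test calls the intrinsic once and checks the emitted IR. A sketch of that test shape (the actual CHECK lines live in avx2-builtins.c and are not reproduced here):

#include <immintrin.h>

__m256i test_mm256_add_epi16(__m256i a, __m256i b) {
  /* Expected to lower to a single packed 16 x i16 addition in LLVM IR. */
  return _mm256_add_epi16(a, b);
}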
/external/clang/lib/Headers/
avx2intrin.h
88 _mm256_add_epi16(__m256i __a, __m256i __b) in _mm256_add_epi16() function
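
avx2intrin.h defines _mm256_add_epi16 as a lane-wise, wrapping 16-bit add. A small usage sketch contrasting it with the saturating variant (assumed values, for illustration only):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  const __m256i a = _mm256_set1_epi16(30000);
  const __m256i b = _mm256_set1_epi16(10000);
  const __m256i wrap = _mm256_add_epi16(a, b);  /* 40000 wraps to -25536 */
  const __m256i sat = _mm256_adds_epi16(a, b);  /* saturates to 32767 */
  short w[16], s[16];
  _mm256_storeu_si256((__m256i *)w, wrap);
  _mm256_storeu_si256((__m256i *)s, sat);
  printf("wrap=%d sat=%d\n", w[0], s[0]);
  return 0;
}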