Searched refs:DCT_CONST_BITS (Results 1 – 25 of 37) sorted by relevance

/external/libvpx/libvpx/vp9/encoder/x86/
vp9_dct_intrin_sse2.c
98 u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); in fdct4_sse2()
99 u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); in fdct4_sse2()
100 u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); in fdct4_sse2()
101 u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); in fdct4_sse2()
146 u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); in fadst4_sse2()
147 u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); in fadst4_sse2()
148 u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); in fadst4_sse2()
149 u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); in fadst4_sse2()
286 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); in vp9_fdct8x8_quant_sse2()
287 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); in vp9_fdct8x8_quant_sse2()
[all …]
vp9_dct_ssse3.c
129 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); in vp9_fdct8x8_quant_ssse3()
130 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); in vp9_fdct8x8_quant_ssse3()
131 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); in vp9_fdct8x8_quant_ssse3()
132 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); in vp9_fdct8x8_quant_ssse3()
134 const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS); in vp9_fdct8x8_quant_ssse3()
135 const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS); in vp9_fdct8x8_quant_ssse3()
136 const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS); in vp9_fdct8x8_quant_ssse3()
137 const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS); in vp9_fdct8x8_quant_ssse3()
180 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); in vp9_fdct8x8_quant_ssse3()
181 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); in vp9_fdct8x8_quant_ssse3()
[all …]
/external/libvpx/libvpx/vpx_dsp/x86/
fwd_dct32x32_impl_avx2.h
414 const __m256i s2_20_6 = _mm256_srai_epi32(s2_20_4, DCT_CONST_BITS); in FDCT32x32_2D_AVX2()
415 const __m256i s2_20_7 = _mm256_srai_epi32(s2_20_5, DCT_CONST_BITS); in FDCT32x32_2D_AVX2()
416 const __m256i s2_21_6 = _mm256_srai_epi32(s2_21_4, DCT_CONST_BITS); in FDCT32x32_2D_AVX2()
417 const __m256i s2_21_7 = _mm256_srai_epi32(s2_21_5, DCT_CONST_BITS); in FDCT32x32_2D_AVX2()
418 const __m256i s2_22_6 = _mm256_srai_epi32(s2_22_4, DCT_CONST_BITS); in FDCT32x32_2D_AVX2()
419 const __m256i s2_22_7 = _mm256_srai_epi32(s2_22_5, DCT_CONST_BITS); in FDCT32x32_2D_AVX2()
420 const __m256i s2_23_6 = _mm256_srai_epi32(s2_23_4, DCT_CONST_BITS); in FDCT32x32_2D_AVX2()
421 const __m256i s2_23_7 = _mm256_srai_epi32(s2_23_5, DCT_CONST_BITS); in FDCT32x32_2D_AVX2()
422 const __m256i s2_24_6 = _mm256_srai_epi32(s2_24_4, DCT_CONST_BITS); in FDCT32x32_2D_AVX2()
423 const __m256i s2_24_7 = _mm256_srai_epi32(s2_24_5, DCT_CONST_BITS); in FDCT32x32_2D_AVX2()
[all …]
fwd_dct32x32_impl_sse2.h
438 const __m128i s2_20_6 = _mm_srai_epi32(s2_20_4, DCT_CONST_BITS); in FDCT32x32_2D()
439 const __m128i s2_20_7 = _mm_srai_epi32(s2_20_5, DCT_CONST_BITS); in FDCT32x32_2D()
440 const __m128i s2_21_6 = _mm_srai_epi32(s2_21_4, DCT_CONST_BITS); in FDCT32x32_2D()
441 const __m128i s2_21_7 = _mm_srai_epi32(s2_21_5, DCT_CONST_BITS); in FDCT32x32_2D()
442 const __m128i s2_22_6 = _mm_srai_epi32(s2_22_4, DCT_CONST_BITS); in FDCT32x32_2D()
443 const __m128i s2_22_7 = _mm_srai_epi32(s2_22_5, DCT_CONST_BITS); in FDCT32x32_2D()
444 const __m128i s2_23_6 = _mm_srai_epi32(s2_23_4, DCT_CONST_BITS); in FDCT32x32_2D()
445 const __m128i s2_23_7 = _mm_srai_epi32(s2_23_5, DCT_CONST_BITS); in FDCT32x32_2D()
446 const __m128i s2_24_6 = _mm_srai_epi32(s2_24_4, DCT_CONST_BITS); in FDCT32x32_2D()
447 const __m128i s2_24_7 = _mm_srai_epi32(s2_24_5, DCT_CONST_BITS); in FDCT32x32_2D()
[all …]
fwd_txfm_impl_sse2.h
75 const int DCT_CONST_BITS2 = DCT_CONST_BITS + 2; in FDCT4x4_2D()
159 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); in FDCT4x4_2D()
160 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); in FDCT4x4_2D()
161 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); in FDCT4x4_2D()
162 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); in FDCT4x4_2D()
358 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); in FDCT8x8_2D()
359 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); in FDCT8x8_2D()
360 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); in FDCT8x8_2D()
361 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); in FDCT8x8_2D()
362 const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS); in FDCT8x8_2D()
[all …]
/external/libvpx/libvpx/vpx_dsp/arm/
highbd_idct_neon.h
51 b0 = vrshrq_n_s32(b0, DCT_CONST_BITS); in idct4x4_16_kernel_bd10()
52 b1 = vrshrq_n_s32(b1, DCT_CONST_BITS); in idct4x4_16_kernel_bd10()
53 b2 = vrshrq_n_s32(b2, DCT_CONST_BITS); in idct4x4_16_kernel_bd10()
54 b3 = vrshrq_n_s32(b3, DCT_CONST_BITS); in idct4x4_16_kernel_bd10()
85 b0 = vcombine_s32(vrshrn_n_s64(c[0], DCT_CONST_BITS), in idct4x4_16_kernel_bd12()
86 vrshrn_n_s64(c[1], DCT_CONST_BITS)); in idct4x4_16_kernel_bd12()
87 b1 = vcombine_s32(vrshrn_n_s64(c[2], DCT_CONST_BITS), in idct4x4_16_kernel_bd12()
88 vrshrn_n_s64(c[3], DCT_CONST_BITS)); in idct4x4_16_kernel_bd12()
89 b2 = vcombine_s32(vrshrn_n_s64(c[4], DCT_CONST_BITS), in idct4x4_16_kernel_bd12()
90 vrshrn_n_s64(c[5], DCT_CONST_BITS)); in idct4x4_16_kernel_bd12()
[all …]
fwd_txfm_neon.c
65 const int16x4_t a = vrshrn_n_s32(v_t0_lo, DCT_CONST_BITS); in vpx_fdct8x8_neon()
66 const int16x4_t b = vrshrn_n_s32(v_t0_hi, DCT_CONST_BITS); in vpx_fdct8x8_neon()
67 const int16x4_t c = vrshrn_n_s32(v_t1_lo, DCT_CONST_BITS); in vpx_fdct8x8_neon()
68 const int16x4_t d = vrshrn_n_s32(v_t1_hi, DCT_CONST_BITS); in vpx_fdct8x8_neon()
69 const int16x4_t e = vrshrn_n_s32(v_t2_lo, DCT_CONST_BITS); in vpx_fdct8x8_neon()
70 const int16x4_t f = vrshrn_n_s32(v_t2_hi, DCT_CONST_BITS); in vpx_fdct8x8_neon()
71 const int16x4_t g = vrshrn_n_s32(v_t3_lo, DCT_CONST_BITS); in vpx_fdct8x8_neon()
72 const int16x4_t h = vrshrn_n_s32(v_t3_hi, DCT_CONST_BITS); in vpx_fdct8x8_neon()
86 const int16x4_t a = vrshrn_n_s32(v_t0_lo, DCT_CONST_BITS); in vpx_fdct8x8_neon()
87 const int16x4_t b = vrshrn_n_s32(v_t0_hi, DCT_CONST_BITS); in vpx_fdct8x8_neon()
[all …]
highbd_idct8x8_add_neon.c
85 step1[4] = vrshrq_n_s32(step1[4], DCT_CONST_BITS); in idct8x8_12_half1d_bd10()
86 step1[5] = vrshrq_n_s32(step1[5], DCT_CONST_BITS); in idct8x8_12_half1d_bd10()
87 step1[6] = vrshrq_n_s32(step1[6], DCT_CONST_BITS); in idct8x8_12_half1d_bd10()
88 step1[7] = vrshrq_n_s32(step1[7], DCT_CONST_BITS); in idct8x8_12_half1d_bd10()
94 step2[1] = vrshrq_n_s32(step2[1], DCT_CONST_BITS); in idct8x8_12_half1d_bd10()
95 step2[2] = vrshrq_n_s32(step2[2], DCT_CONST_BITS); in idct8x8_12_half1d_bd10()
96 step2[3] = vrshrq_n_s32(step2[3], DCT_CONST_BITS); in idct8x8_12_half1d_bd10()
112 step1[5] = vrshrq_n_s32(step1[5], DCT_CONST_BITS); in idct8x8_12_half1d_bd10()
113 step1[6] = vrshrq_n_s32(step1[6], DCT_CONST_BITS); in idct8x8_12_half1d_bd10()
157 t32[0] = vrshrn_n_s64(t64[0], DCT_CONST_BITS); in idct8x8_12_half1d_bd12()
[all …]
fdct_neon.c
58 int16x4_t out_0 = vrshrn_n_s32(temp1, DCT_CONST_BITS); in vpx_fdct4x4_neon()
59 int16x4_t out_2 = vrshrn_n_s32(temp2, DCT_CONST_BITS); in vpx_fdct4x4_neon()
70 int16x4_t out_1 = vrshrn_n_s32(temp3, DCT_CONST_BITS); in vpx_fdct4x4_neon()
71 int16x4_t out_3 = vrshrn_n_s32(temp4, DCT_CONST_BITS); in vpx_fdct4x4_neon()
idct4x4_1_add_neon.asm
32 add r0, r0, #0x2000 ; +(1 << ((DCT_CONST_BITS) - 1))
33 asr r0, r0, #14 ; >> DCT_CONST_BITS
38 add r0, r0, #0x2000 ; +(1 << ((DCT_CONST_BITS) - 1))
39 asr r0, r0, #14 ; >> DCT_CONST_BITS
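Those two comment pairs spell out, in scalar form, the rounding that the SIMD matches elsewhere in this list perform in bulk: add half of 1 << DCT_CONST_BITS, then arithmetic-shift right by DCT_CONST_BITS. A minimal C sketch of that add/asr pair (the helper name is illustrative, not taken from the tree):

    #include <stdint.h>

    #define DCT_CONST_BITS 14

    /* Scalar equivalent of the "add r0, r0, #0x2000" / "asr r0, r0, #14" pair
     * above: 0x2000 == 1 << (DCT_CONST_BITS - 1), so this rounds half up
     * before dropping the 14 fractional bits. */
    static int32_t round_shift_dct_const(int32_t v) {
      return (v + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS;
    }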
fdct32x32_neon.c
207 const int16x4_t rounded0 = vqrshrn_n_s32(sum0, DCT_CONST_BITS); in butterfly_one_coeff()
208 const int16x4_t rounded1 = vqrshrn_n_s32(sum1, DCT_CONST_BITS); in butterfly_one_coeff()
209 const int16x4_t rounded2 = vqrshrn_n_s32(diff0, DCT_CONST_BITS); in butterfly_one_coeff()
210 const int16x4_t rounded3 = vqrshrn_n_s32(diff1, DCT_CONST_BITS); in butterfly_one_coeff()
228 const int16x4_t rounded0 = vqrshrn_n_s32(sum0, DCT_CONST_BITS); in butterfly_two_coeff()
229 const int16x4_t rounded1 = vqrshrn_n_s32(sum1, DCT_CONST_BITS); in butterfly_two_coeff()
230 const int16x4_t rounded2 = vqrshrn_n_s32(diff0, DCT_CONST_BITS); in butterfly_two_coeff()
231 const int16x4_t rounded3 = vqrshrn_n_s32(diff1, DCT_CONST_BITS); in butterfly_two_coeff()
576 *add_lo = vrshrq_n_s32(sum0, DCT_CONST_BITS); in butterfly_one_coeff_s16_s32()
577 *add_hi = vrshrq_n_s32(sum1, DCT_CONST_BITS); in butterfly_one_coeff_s16_s32()
[all …]
/external/libaom/libaom/aom_dsp/arm/
fwd_txfm_neon.c
62 const int16x4_t a = vrshrn_n_s32(v_t0_lo, DCT_CONST_BITS); in aom_fdct8x8_neon()
63 const int16x4_t b = vrshrn_n_s32(v_t0_hi, DCT_CONST_BITS); in aom_fdct8x8_neon()
64 const int16x4_t c = vrshrn_n_s32(v_t1_lo, DCT_CONST_BITS); in aom_fdct8x8_neon()
65 const int16x4_t d = vrshrn_n_s32(v_t1_hi, DCT_CONST_BITS); in aom_fdct8x8_neon()
66 const int16x4_t e = vrshrn_n_s32(v_t2_lo, DCT_CONST_BITS); in aom_fdct8x8_neon()
67 const int16x4_t f = vrshrn_n_s32(v_t2_hi, DCT_CONST_BITS); in aom_fdct8x8_neon()
68 const int16x4_t g = vrshrn_n_s32(v_t3_lo, DCT_CONST_BITS); in aom_fdct8x8_neon()
69 const int16x4_t h = vrshrn_n_s32(v_t3_hi, DCT_CONST_BITS); in aom_fdct8x8_neon()
83 const int16x4_t a = vrshrn_n_s32(v_t0_lo, DCT_CONST_BITS); in aom_fdct8x8_neon()
84 const int16x4_t b = vrshrn_n_s32(v_t0_hi, DCT_CONST_BITS); in aom_fdct8x8_neon()
[all …]
/external/libvpx/libvpx/vpx_dsp/mips/
itrans8_dspr2.c
642 x0 = ROUND_POWER_OF_TWO((s0 + s4), DCT_CONST_BITS); in iadst8_dspr2()
643 x1 = ROUND_POWER_OF_TWO((s1 + s5), DCT_CONST_BITS); in iadst8_dspr2()
644 x2 = ROUND_POWER_OF_TWO((s2 + s6), DCT_CONST_BITS); in iadst8_dspr2()
645 x3 = ROUND_POWER_OF_TWO((s3 + s7), DCT_CONST_BITS); in iadst8_dspr2()
646 x4 = ROUND_POWER_OF_TWO((s0 - s4), DCT_CONST_BITS); in iadst8_dspr2()
647 x5 = ROUND_POWER_OF_TWO((s1 - s5), DCT_CONST_BITS); in iadst8_dspr2()
648 x6 = ROUND_POWER_OF_TWO((s2 - s6), DCT_CONST_BITS); in iadst8_dspr2()
649 x7 = ROUND_POWER_OF_TWO((s3 - s7), DCT_CONST_BITS); in iadst8_dspr2()
665 x4 = ROUND_POWER_OF_TWO((s4 + s6), DCT_CONST_BITS); in iadst8_dspr2()
666 x5 = ROUND_POWER_OF_TWO((s5 + s7), DCT_CONST_BITS); in iadst8_dspr2()
[all …]
txfm_macros_msa.h
32 SRARI_W2_SW(s1_m, s0_m, DCT_CONST_BITS); \
36 SRARI_W2_SW(s1_m, s0_m, DCT_CONST_BITS); \
52 SRARI_W4_SW(tp1_m, tp9_m, tp7_m, tp5_m, DCT_CONST_BITS); \
53 SRARI_W4_SW(tp3_m, tp0_m, tp4_m, tp2_m, DCT_CONST_BITS); \
64 SRARI_W2_SW(tp1_m, tp0_m, DCT_CONST_BITS); \
78 SRARI_W4_SW(madd0_m, madd1_m, madd2_m, madd3_m, DCT_CONST_BITS); \
93 SRARI_W4_SW(m4_m, m5_m, tmp2_m, tmp3_m, DCT_CONST_BITS); \
98 SRARI_W4_SW(m4_m, m5_m, tmp2_m, tmp3_m, DCT_CONST_BITS); \
idct8x8_msa.c
59 SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, DCT_CONST_BITS); in vpx_idct8x8_12_add_msa()
71 SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, DCT_CONST_BITS); in vpx_idct8x8_12_add_msa()
81 SRARI_W2_SW(tmp0, tmp1, DCT_CONST_BITS); in vpx_idct8x8_12_add_msa()
108 out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), DCT_CONST_BITS); in vpx_idct8x8_1_add_msa()
109 out = ROUND_POWER_OF_TWO((out * cospi_16_64), DCT_CONST_BITS); in vpx_idct8x8_1_add_msa()
inv_txfm_msa.h
126 SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, DCT_CONST_BITS); \
179 SRARI_W4_SW(int0_m, int1_m, int2_m, int3_m, DCT_CONST_BITS); \
205 SRARI_W4_SW(tmp0_madd, tmp1_madd, tmp2_madd, tmp3_madd, DCT_CONST_BITS); \
209 SRARI_W4_SW(tmp0_madd, tmp1_madd, tmp2_madd, tmp3_madd, DCT_CONST_BITS); \
235 SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, DCT_CONST_BITS); \
273 SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS); \
277 SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS); \
291 SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS); \
295 SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS); \
309 SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS); \
[all …]
fwd_txfm_msa.h
41 SRARI_W4_SW(vec4_m, vec5_m, vec6_m, vec7_m, DCT_CONST_BITS); \
344 tp0_m = __msa_srari_d(tp0_m, DCT_CONST_BITS); \
345 tp1_m = __msa_srari_d(tp1_m, DCT_CONST_BITS); \
346 tp2_m = __msa_srari_d(tp2_m, DCT_CONST_BITS); \
347 tp3_m = __msa_srari_d(tp3_m, DCT_CONST_BITS); \
353 tp0_m = __msa_srari_d(tp0_m, DCT_CONST_BITS); \
354 tp1_m = __msa_srari_d(tp1_m, DCT_CONST_BITS); \
355 tp2_m = __msa_srari_d(tp2_m, DCT_CONST_BITS); \
356 tp3_m = __msa_srari_d(tp3_m, DCT_CONST_BITS); \
idct4x4_msa.c
92 out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), DCT_CONST_BITS); in vpx_idct4x4_1_add_msa()
93 out = ROUND_POWER_OF_TWO((out * cospi_16_64), DCT_CONST_BITS); in vpx_idct4x4_1_add_msa()
/external/libaom/libaom/aom_dsp/x86/
fwd_txfm_impl_sse2.h
133 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); in FDCT8x8_2D()
134 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); in FDCT8x8_2D()
135 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); in FDCT8x8_2D()
136 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); in FDCT8x8_2D()
137 const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS); in FDCT8x8_2D()
138 const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS); in FDCT8x8_2D()
139 const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS); in FDCT8x8_2D()
140 const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS); in FDCT8x8_2D()
169 const __m128i s0 = _mm_srai_epi32(f0, DCT_CONST_BITS); in FDCT8x8_2D()
170 const __m128i s1 = _mm_srai_epi32(f1, DCT_CONST_BITS); in FDCT8x8_2D()
[all …]
/external/libvpx/libvpx/vp9/common/arm/neon/
vp9_highbd_iht4x4_add_neon.c
67 io[0] = vcombine_s32(vrshrn_n_s64(t[0].val[0], DCT_CONST_BITS), in highbd_iadst4()
68 vrshrn_n_s64(t[0].val[1], DCT_CONST_BITS)); in highbd_iadst4()
69 io[1] = vcombine_s32(vrshrn_n_s64(t[1].val[0], DCT_CONST_BITS), in highbd_iadst4()
70 vrshrn_n_s64(t[1].val[1], DCT_CONST_BITS)); in highbd_iadst4()
71 io[2] = vcombine_s32(vrshrn_n_s64(t[2].val[0], DCT_CONST_BITS), in highbd_iadst4()
72 vrshrn_n_s64(t[2].val[1], DCT_CONST_BITS)); in highbd_iadst4()
73 io[3] = vcombine_s32(vrshrn_n_s64(t[3].val[0], DCT_CONST_BITS), in highbd_iadst4()
74 vrshrn_n_s64(t[3].val[1], DCT_CONST_BITS)); in highbd_iadst4()
vp9_highbd_iht16x16_add_neon.c
58 out.val[0] = vcombine_s32(vrshrn_n_s64(in[0].val[0], DCT_CONST_BITS), in highbd_dct_const_round_shift_low_8()
59 vrshrn_n_s64(in[1].val[0], DCT_CONST_BITS)); in highbd_dct_const_round_shift_low_8()
60 out.val[1] = vcombine_s32(vrshrn_n_s64(in[0].val[1], DCT_CONST_BITS), in highbd_dct_const_round_shift_low_8()
61 vrshrn_n_s64(in[1].val[1], DCT_CONST_BITS)); in highbd_dct_const_round_shift_low_8()
126 out_lo.val[0] = vrshrn_n_s64(sum_lo.val[0], DCT_CONST_BITS); in highbd_add_dct_const_round_shift_low_8()
127 out_lo.val[1] = vrshrn_n_s64(sum_lo.val[1], DCT_CONST_BITS); in highbd_add_dct_const_round_shift_low_8()
128 out_hi.val[0] = vrshrn_n_s64(sum_hi.val[0], DCT_CONST_BITS); in highbd_add_dct_const_round_shift_low_8()
129 out_hi.val[1] = vrshrn_n_s64(sum_hi.val[1], DCT_CONST_BITS); in highbd_add_dct_const_round_shift_low_8()
139 out_lo.val[0] = vrshrn_n_s64(sub_lo.val[0], DCT_CONST_BITS); in highbd_sub_dct_const_round_shift_low_8()
140 out_lo.val[1] = vrshrn_n_s64(sub_lo.val[1], DCT_CONST_BITS); in highbd_sub_dct_const_round_shift_low_8()
[all …]
vp9_highbd_iht8x8_add_neon.c
29 const int32x2_t out0_lo = vrshrn_n_s64(t0_lo, DCT_CONST_BITS); in highbd_iadst_half_butterfly_neon()
30 const int32x2_t out1_lo = vrshrn_n_s64(t1_lo, DCT_CONST_BITS); in highbd_iadst_half_butterfly_neon()
31 const int32x2_t out0_hi = vrshrn_n_s64(t0_hi, DCT_CONST_BITS); in highbd_iadst_half_butterfly_neon()
32 const int32x2_t out1_hi = vrshrn_n_s64(t1_hi, DCT_CONST_BITS); in highbd_iadst_half_butterfly_neon()
74 const int32x2_t out_lo = vrshrn_n_s64(sum_lo, DCT_CONST_BITS); in highbd_add_dct_const_round_shift_low_8()
75 const int32x2_t out_hi = vrshrn_n_s64(sum_hi, DCT_CONST_BITS); in highbd_add_dct_const_round_shift_low_8()
83 const int32x2_t out_lo = vrshrn_n_s64(sub_lo, DCT_CONST_BITS); in highbd_sub_dct_const_round_shift_low_8()
84 const int32x2_t out_hi = vrshrn_n_s64(sub_hi, DCT_CONST_BITS); in highbd_sub_dct_const_round_shift_low_8()
/external/libvpx/config/arm-neon/vpx_dsp/arm/
idct4x4_1_add_neon.asm.S
39 add r0, r0, #0x2000 @ +(1 << ((DCT_CONST_BITS) - 1))
40 asr r0, r0, #14 @ >> DCT_CONST_BITS
45 add r0, r0, #0x2000 @ +(1 << ((DCT_CONST_BITS) - 1))
46 asr r0, r0, #14 @ >> DCT_CONST_BITS
/external/libaom/libaom/aom_dsp/
txfm_common.h
19 #define DCT_CONST_BITS 14 macro
20 #define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1))
87 tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS); in fdct_round_shift()
/external/libvpx/libvpx/vpx_dsp/
txfm_common.h
17 #define DCT_CONST_BITS 14 macro
18 #define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1))
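Both txfm_common.h entries show where the constant comes from: the DCT cosine constants are stored as 14-bit fixed-point values, so every intermediate product has to be rounded and shifted back down by DCT_CONST_BITS. The NEON vrshr*/vqrshrn and MSA SRARI matches do that in a single rounding-shift instruction, while the SSE2/AVX2 paths add DCT_CONST_ROUNDING before the srai shifts shown above. A self-contained C sketch of the step, assuming the ROUND_POWER_OF_TWO shape used by the dspr2/msa matches and the cospi_16_64 value visible in the idct*_msa.c lines; the helper name and test values are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define DCT_CONST_BITS 14
    #define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1))

    /* Assumed shape of the ROUND_POWER_OF_TWO macro seen in the dspr2/msa
     * matches: add half of 2^n, then shift right by n (round half up). */
    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

    /* Multiply by a 14-bit fixed-point cosine, then round the product back
     * down to coefficient precision. */
    static int32_t fdct_round_shift_sketch(int64_t product) {
      return (int32_t)ROUND_POWER_OF_TWO(product, DCT_CONST_BITS);
    }

    int main(void) {
      const int64_t cospi_16_64 = 11585; /* ~ 16384 * cos(pi/4), as in the listing */
      const int64_t sample = 100;
      /* (100 * 11585 + 8192) >> 14 == 71 */
      printf("%d\n", fdct_round_shift_sketch(sample * cospi_16_64));
      return 0;
    }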