Searched refs:cospi_16_64 (Results 1 – 25 of 26) sorted by relevance

/external/libvpx/libvpx/vp9/common/arm/neon/
vp9_idct8x8_add_neon.asm
47 ; dct_const_round_shift(input_dc * cospi_16_64)
51 ; dct_const_round_shift(input_dc * cospi_16_64)
71 ; dct_const_round_shift(input_dc * cospi_16_64)
76 vdup.16 d0, r7 ; duplicate cospi_16_64
78 ; dct_const_round_shift(input_dc * cospi_16_64)
82 ; input[0] * cospi_16_64
86 ; input[0] * cospi_16_64
90 ; (input[0] + input[2]) * cospi_16_64
94 ; (input[0] - input[2]) * cospi_16_64
101 ; dct_const_round_shift(input_dc * cospi_16_64)
[all …]
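The comments in this file describe the even-part butterfly of the 1-D inverse DCT: the sum and the difference of input[0] and input[2] are each scaled by cospi_16_64 and then brought back to 16-bit range with dct_const_round_shift. A minimal scalar sketch in C of that step (names are illustrative; the assembly works on NEON d-registers, but the arithmetic should match idct4/idct8 in vp9_idct.c):

    #include <stdint.h>

    /* Even-part butterfly of the 1-D inverse DCT.  cospi_16_64 is
     * cos(pi/4) scaled by 2^14, so (x + (1 << 13)) >> 14 is the matching
     * round-to-nearest (dct_const_round_shift, DCT_CONST_BITS == 14). */
    static void idct_even_butterfly(const int16_t *input, int16_t *step) {
      const int cospi_16_64 = 11585;
      int temp1 = (input[0] + input[2]) * cospi_16_64;
      int temp2 = (input[0] - input[2]) * cospi_16_64;
      step[0] = (int16_t)((temp1 + (1 << 13)) >> 14);  /* dct_const_round_shift */
      step[1] = (int16_t)((temp2 + (1 << 13)) >> 14);
    }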
vp9_iht8x8_add_neon.asm
64 ; generate cospi_16_64 = 11585
88 ; generate cospi_16_64 = 11585
146 ; dct_const_round_shift(input_dc * cospi_16_64)
150 ; dct_const_round_shift(input_dc * cospi_16_64)
170 ; dct_const_round_shift(input_dc * cospi_16_64)
175 vdup.16 d0, r7 ; duplicate cospi_16_64
177 ; dct_const_round_shift(input_dc * cospi_16_64)
181 ; input[0] * cospi_16_64
185 ; input[0] * cospi_16_64
189 ; (input[0] + input[2]) * cospi_16_64
[all …]
vp9_dc_only_idct_add_neon.asm
29 ; generate cospi_16_64 = 11585
33 ; dct_const_round_shift(input_dc * cospi_16_64)
34 mul r0, r0, r12 ; input_dc * cospi_16_64
38 ; dct_const_round_shift(out * cospi_16_64)
39 mul r0, r0, r12 ; out * cospi_16_64
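This file handles the DC-only case: when only the DC coefficient is non-zero, every output pixel of the inverse transform shares one value, so the whole 2-D transform collapses to two multiplications by cospi_16_64, one per pass. A rough C equivalent of what the assembly computes (a sketch: clip_pixel() is inlined, and the final (out + 8) >> 4 output rounding is assumed to be the 4x4 case; larger transforms use a larger shift):

    #include <stdint.h>

    /* DC-only inverse transform + add, roughly what vp9_idct4x4_1_add_c does. */
    static void dc_only_idct_add(int16_t input_dc, uint8_t *dest, int stride) {
      const int cospi_16_64 = 11585;
      int out = ((input_dc * cospi_16_64) + (1 << 13)) >> 14;  /* row pass    */
      int a1;
      out = ((out * cospi_16_64) + (1 << 13)) >> 14;           /* column pass */
      a1 = (out + 8) >> 4;                                     /* 4x4 output rounding (assumed) */
      for (int r = 0; r < 4; ++r, dest += stride) {
        for (int c = 0; c < 4; ++c) {
          int v = dest[c] + a1;
          dest[c] = (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));  /* clip_pixel */
        }
      }
    }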
vp9_idct4x4_1_add_neon.asm
28 ; generate cospi_16_64 = 11585
32 ; out = dct_const_round_shift(input[0] * cospi_16_64)
33 mul r0, r0, r12 ; input[0] * cospi_16_64
37 ; out = dct_const_round_shift(out * cospi_16_64)
38 mul r0, r0, r12 ; out * cospi_16_64
vp9_idct16x16_add_neon.asm
115 ; generate cospi_16_64 = 11585
148 vdup.16 d30, r3 ; cospi_16_64
150 ; step1[0] * cospi_16_64
154 ; step1[1] * cospi_16_64
165 ; temp1 = (step1[0] + step1[1]) * cospi_16_64
169 ; temp2 = (step1[0] - step1[1]) * cospi_16_64
211 ; generate cospi_16_64 = 11585
221 vdup.16 d16, r3; ; duplicate cospi_16_64
223 ; step2[5] * cospi_16_64
227 ; step2[6] * cospi_16_64
[all …]
vp9_idct8x8_1_add_neon.asm
28 ; generate cospi_16_64 = 11585
32 ; out = dct_const_round_shift(input[0] * cospi_16_64)
33 mul r0, r0, r12 ; input[0] * cospi_16_64
37 ; out = dct_const_round_shift(out * cospi_16_64)
38 mul r0, r0, r12 ; out * cospi_16_64
vp9_idct32x32_1_add_neon.asm
80 ; generate cospi_16_64 = 11585
84 ; out = dct_const_round_shift(input[0] * cospi_16_64)
85 mul r0, r0, r12 ; input[0] * cospi_16_64
89 ; out = dct_const_round_shift(out * cospi_16_64)
90 mul r0, r0, r12 ; out * cospi_16_64
vp9_idct4x4_add_neon.asm
42 ; cospi_16_64 = 11585 = 0x2d41
59 vdup.16 d21, r3 ; replicate cospi_16_64
82 ; (input[0] + input[2]) * cospi_16_64;
83 ; (input[0] - input[2]) * cospi_16_64;
133 ; (input[0] + input[2]) * cospi_16_64;
134 ; (input[0] - input[2]) * cospi_16_64;
vp9_idct32x32_add_neon.asm
28 cospi_16_64 EQU 11585   (define)
684 ;temp1 = (step1b[25][i] - step1b[22][i]) * cospi_16_64;
685 ;temp2 = (step1b[25][i] + step1b[22][i]) * cospi_16_64;
688 DO_BUTTERFLY_STD cospi_16_64, cospi_16_64, d26, d27, d28, d29
692 ;temp1 = (step1b[24][i] - step1b[23][i]) * cospi_16_64;
693 ;temp2 = (step1b[24][i] + step1b[23][i]) * cospi_16_64;
699 DO_BUTTERFLY_STD cospi_16_64, cospi_16_64, d26, d27, d28, d29
753 ;temp1 = (step1b[27][i] - step1b[20][i]) * cospi_16_64;
754 ;temp2 = (step1b[27][i] + step1b[20][i]) * cospi_16_64;
757 DO_BUTTERFLY_STD cospi_16_64, cospi_16_64, d26, d27, d28, d29
[all …]
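When DO_BUTTERFLY_STD is invoked with cospi_16_64 for both coefficients, as in the hits above, the butterfly degenerates to scaling the difference and the sum of two partial results by the same constant. In scalar C this inner step of the 32-point inverse DCT is roughly (a sketch; names are illustrative, the real code operates on pairs of NEON d-registers):

    #include <stdint.h>

    /* Equal-coefficient butterfly: both outputs are 45-degree rotations,
     * i.e. (a -/+ b) * cos(pi/4), followed by the usual 2^14 rounding shift. */
    static void butterfly_cospi16(int16_t a, int16_t b,
                                  int16_t *out_diff, int16_t *out_sum) {
      const int cospi_16_64 = 11585;
      int temp1 = (a - b) * cospi_16_64;
      int temp2 = (a + b) * cospi_16_64;
      *out_diff = (int16_t)((temp1 + (1 << 13)) >> 14);  /* dct_const_round_shift */
      *out_sum  = (int16_t)((temp2 + (1 << 13)) >> 14);
    }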
vp9_iht4x4_add_neon.asm
20 ; cospi_16_64. d2 must contain cospi_24_64. The output will be stored back
31 vmull.s16 q13, d23, d1 ; (input[0] + input[2]) * cospi_16_64
32 vmull.s16 q14, d24, d1 ; (input[0] - input[2]) * cospi_16_64
95 ; cospi_16_64 = 11585 = 0x2d41
104 vdup.16 d1, r3 ; duplicate cospi_16_64
vp9_idct16x16_1_add_neon.asm
28 ; generate cospi_16_64 = 11585
32 ; out = dct_const_round_shift(input[0] * cospi_16_64)
33 mul r0, r0, r12 ; input[0] * cospi_16_64
37 ; out = dct_const_round_shift(out * cospi_16_64)
38 mul r0, r0, r12 ; out * cospi_16_64
/external/libvpx/libvpx/vp9/encoder/
vp9_dct.c
36 temp1 = (step[0] + step[1]) * cospi_16_64; in fdct4()
37 temp2 = (step[0] - step[1]) * cospi_16_64; in fdct4()
96 temp1 = (step[0] + step[1]) * cospi_16_64; in vp9_fdct4x4_c()
97 temp2 = (step[0] - step[1]) * cospi_16_64; in vp9_fdct4x4_c()
222 t0 = (x0 + x1) * cospi_16_64; in fdct8()
223 t1 = (x0 - x1) * cospi_16_64; in fdct8()
232 t0 = (s6 - s5) * cospi_16_64; in fdct8()
233 t1 = (s6 + s5) * cospi_16_64; in fdct8()
293 t0 = (x0 + x1) * cospi_16_64; in vp9_fdct8x8_c()
294 t1 = (x0 - x1) * cospi_16_64; in vp9_fdct8x8_c()
[all …]
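On the encoder side the same constant appears in the forward transform: the first even-part output pair of fdct4/fdct8 is produced by scaling the sum and difference of the two even inputs by cospi_16_64 and rounding, mirroring the inverse. A scalar sketch of that stage (cf. fdct8() in vp9_dct.c; names here are illustrative):

    #include <stdint.h>

    /* Forward-DCT even stage: the DC and the middle coefficient come from
     * (x0 +/- x1) * cos(pi/4) * 2^14, rounded back to 16 bits. */
    static void fdct_even_stage(int16_t x0, int16_t x1,
                                int16_t *out_dc, int16_t *out_mid) {
      const int cospi_16_64 = 11585;
      int t0 = (x0 + x1) * cospi_16_64;
      int t1 = (x0 - x1) * cospi_16_64;
      *out_dc  = (int16_t)((t0 + (1 << 13)) >> 14);  /* -> output[0] */
      *out_mid = (int16_t)((t1 + (1 << 13)) >> 14);  /* -> output[4] */
    }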
/external/libvpx/libvpx/vp9/common/mips/dspr2/
vp9_itrans32_dspr2.c
523 [cospi_16_64] "r" (cospi_16_64) in idct32_rows_dspr2()
672 [cospi_16_64] "r" (cospi_16_64), in idct32_rows_dspr2()
739 [cospi_16_64] "r" (cospi_16_64) in idct32_rows_dspr2()
778 [cospi_16_64] "r" (cospi_16_64) in idct32_rows_dspr2()
781 temp21 = (step2_20 + step2_27) * cospi_16_64; in idct32_rows_dspr2()
794 [cospi_16_64] "r" (cospi_16_64) in idct32_rows_dspr2()
797 temp21 = (step2_21 + step2_26) * cospi_16_64; in idct32_rows_dspr2()
810 [cospi_16_64] "r" (cospi_16_64) in idct32_rows_dspr2()
813 temp21 = (step2_22 + step2_25) * cospi_16_64; in idct32_rows_dspr2()
826 [cospi_16_64] "r" (cospi_16_64) in idct32_rows_dspr2()
[all …]
vp9_itrans32_cols_dspr2.c
457 [step2_15] "r" (step2_15), [cospi_16_64] "r" (cospi_16_64) in vp9_idct32_cols_add_blk_dspr2()
605 [cospi_16_64] "r" (cospi_16_64) in vp9_idct32_cols_add_blk_dspr2()
666 [cospi_16_64] "r" (cospi_16_64) in vp9_idct32_cols_add_blk_dspr2()
705 [step2_27] "r" (step2_27), [cospi_16_64] "r" (cospi_16_64) in vp9_idct32_cols_add_blk_dspr2()
708 temp21 = (step2_20 + step2_27) * cospi_16_64; in vp9_idct32_cols_add_blk_dspr2()
720 [step2_21] "r" (step2_21), [cospi_16_64] "r" (cospi_16_64) in vp9_idct32_cols_add_blk_dspr2()
723 temp21 = (step2_21 + step2_26) * cospi_16_64; in vp9_idct32_cols_add_blk_dspr2()
735 [step2_22] "r" (step2_22), [cospi_16_64] "r" (cospi_16_64) in vp9_idct32_cols_add_blk_dspr2()
738 temp21 = (step2_22 + step2_25) * cospi_16_64; in vp9_idct32_cols_add_blk_dspr2()
750 [step2_23] "r" (step2_23), [cospi_16_64] "r" (cospi_16_64) in vp9_idct32_cols_add_blk_dspr2()
[all …]
vp9_itrans16_dspr2.c
81 [cospi_16_64] "r" (cospi_16_64) in idct16_rows_dspr2()
270 [cospi_16_64] "r" (cospi_16_64) in idct16_rows_dspr2()
321 [cospi_16_64] "r" (cospi_16_64) in idct16_rows_dspr2()
477 [cospi_16_64] "r" (cospi_16_64) in idct16_cols_add_blk_dspr2()
667 [cospi_16_64] "r" (cospi_16_64) in idct16_cols_add_blk_dspr2()
718 [cospi_16_64] "r" (cospi_16_64) in idct16_cols_add_blk_dspr2()
1049 s2 = (- cospi_16_64) * (x2 + x3); in iadst16()
1050 s3 = cospi_16_64 * (x2 - x3); in iadst16()
1051 s6 = cospi_16_64 * (x6 + x7); in iadst16()
1052 s7 = cospi_16_64 * (- x6 + x7); in iadst16()
[all …]
vp9_itrans8_dspr2.c
191 [cospi_16_64] "r" (cospi_16_64), [cospi_28_64] "r" (cospi_28_64), in idct8_rows_dspr2()
440 [cospi_16_64] "r" (cospi_16_64), [cospi_28_64] "r" (cospi_28_64), in idct8_columns_add_blk_dspr2()
529 s2 = cospi_16_64 * (x2 + x3); in iadst8_dspr2()
530 s3 = cospi_16_64 * (x2 - x3); in iadst8_dspr2()
531 s6 = cospi_16_64 * (x6 + x7); in iadst8_dspr2()
532 s7 = cospi_16_64 * (x6 - x7); in iadst8_dspr2()
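The ADST variants also finish with cospi_16_64: the s2..s7 lines above (and the matching code in vp9_idct.c) rotate the pairs (x2, x3) and (x6, x7) by 45 degrees before the rounding shift. A scalar sketch of that final stage of iadst8:

    #include <stdint.h>

    /* Final rotation of iadst8 (sketch): scale sum/difference of each
     * pair by cos(pi/4) * 2^14, then round back to 16 bits. */
    static void iadst8_final_rotation(int16_t *x2, int16_t *x3,
                                      int16_t *x6, int16_t *x7) {
      const int cospi_16_64 = 11585;
      int s2 = cospi_16_64 * (*x2 + *x3);
      int s3 = cospi_16_64 * (*x2 - *x3);
      int s6 = cospi_16_64 * (*x6 + *x7);
      int s7 = cospi_16_64 * (*x6 - *x7);
      *x2 = (int16_t)((s2 + (1 << 13)) >> 14);  /* dct_const_round_shift */
      *x3 = (int16_t)((s3 + (1 << 13)) >> 14);
      *x6 = (int16_t)((s6 + (1 << 13)) >> 14);
      *x7 = (int16_t)((s7 + (1 << 13)) >> 14);
    }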
vp9_itrans4_dspr2.c
97 [cospi_8_64] "r" (cospi_8_64), [cospi_16_64] "r" (cospi_16_64), in vp9_idct4_rows_dspr2()
220 [cospi_8_64] "r" (cospi_8_64), [cospi_16_64] "r" (cospi_16_64), in vp9_idct4_columns_add_blk_dspr2()
vp9_common_dspr2.h
50 [cospi_16_64] "r" (cospi_16_64) \
/external/libvpx/libvpx/vp9/common/
vp9_idct.c
103 temp1 = (input[0] + input[2]) * cospi_16_64; in idct4()
104 temp2 = (input[0] - input[2]) * cospi_16_64; in idct4()
146 int16_t out = dct_const_round_shift(input[0] * cospi_16_64); in vp9_idct4x4_1_add_c()
147 out = dct_const_round_shift(out * cospi_16_64); in vp9_idct4x4_1_add_c()
187 temp1 = (step2[6] - step2[5]) * cospi_16_64; in idct8()
188 temp2 = (step2[5] + step2[6]) * cospi_16_64; in idct8()
231 int16_t out = dct_const_round_shift(input[0] * cospi_16_64); in vp9_idct8x8_1_add_c()
232 out = dct_const_round_shift(out * cospi_16_64); in vp9_idct8x8_1_add_c()
371 s2 = cospi_16_64 * (x2 + x3); in iadst8()
372 s3 = cospi_16_64 * (x2 - x3); in iadst8()
[all …]
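The dct_const_round_shift() calls that appear in almost every hit above undo the 2^14 scaling baked into cospi_16_64 and the other cospi_*_64 constants. As far as the vp9_idct.h helpers go, it is a round-to-nearest right shift by DCT_CONST_BITS; a self-contained sketch:

    #define DCT_CONST_BITS 14
    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))

    /* x * cospi_16_64 is x * cos(pi/4) * 2^14; this removes the 2^14. */
    static int dct_const_round_shift(int input) {
      return ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
    }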
vp9_idct.h
59 static const int cospi_16_64 = 11585;   (variable)
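This is the definition the whole query resolves to. The value comes from the DCT basis: cospi_16_64 stands for cos(16 * pi / 64) = cos(pi/4) ~= 0.7071068, scaled by 2^14 = 16384 and rounded to the nearest integer, which gives 11585. A quick check:

    #include <math.h>
    #include <stdio.h>

    int main(void) {
      const double pi = 3.14159265358979323846;
      /* cos(16 * pi / 64) * 2^14, rounded to nearest -> prints 11585 */
      printf("%d\n", (int)floor(cos(16.0 * pi / 64.0) * 16384.0 + 0.5));
      return 0;
    }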
/external/libvpx/libvpx/vp9/encoder/arm/neon/
vp9_dct_neon.c
74 v_t0_lo = vmulq_n_s32(v_t0_lo, cospi_16_64); in vp9_fdct8x8_neon()
75 v_t0_hi = vmulq_n_s32(v_t0_hi, cospi_16_64); in vp9_fdct8x8_neon()
76 v_t1_lo = vmulq_n_s32(v_t1_lo, cospi_16_64); in vp9_fdct8x8_neon()
77 v_t1_hi = vmulq_n_s32(v_t1_hi, cospi_16_64); in vp9_fdct8x8_neon()
95 v_t0_lo = vmull_n_s16(vget_low_s16(v_x0), (int16_t)cospi_16_64); in vp9_fdct8x8_neon()
96 v_t0_hi = vmull_n_s16(vget_high_s16(v_x0), (int16_t)cospi_16_64); in vp9_fdct8x8_neon()
97 v_t1_lo = vmull_n_s16(vget_low_s16(v_x1), (int16_t)cospi_16_64); in vp9_fdct8x8_neon()
98 v_t1_hi = vmull_n_s16(vget_high_s16(v_x1), (int16_t)cospi_16_64); in vp9_fdct8x8_neon()
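The NEON forward transform does the same multiply in vector form: vmull_n_s16 widens four int16 lanes to 32 bits while multiplying by the scalar cospi_16_64, and a narrowing rounding shift by DCT_CONST_BITS brings the result back to int16. A minimal intrinsics sketch of that pattern (assuming the usual 2^14 rounding; the real kernel processes full 8x8 tiles and widens with vmulq_n_s32 in its second pass):

    #include <arm_neon.h>
    #include <stdint.h>

    /* Multiply four lanes by cospi_16_64 with a widening multiply, then
     * round-narrow back to int16: (p + (1 << 13)) >> 14 per lane. */
    static int16x4_t mul_round_cospi16(int16x4_t v) {
      const int16_t cospi_16_64 = 11585;
      int32x4_t prod = vmull_n_s16(v, cospi_16_64);  /* 16x16 -> 32-bit products */
      return vrshrn_n_s32(prod, 14);                 /* rounding narrow           */
    }

    static void fdct_even_stage_neon(int16x4_t x0, int16x4_t x1,
                                     int16x4_t *out_dc, int16x4_t *out_mid) {
      *out_dc  = mul_round_cospi16(vadd_s16(x0, x1));
      *out_mid = mul_round_cospi16(vsub_s16(x0, x1));
    }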
/external/libvpx/libvpx/vp9/encoder/x86/
vp9_dct_sse2.c
59 const __m128i k__cospi_A = _mm_setr_epi16(cospi_16_64, cospi_16_64, in vp9_fdct4x4_sse2()
60 cospi_16_64, cospi_16_64, in vp9_fdct4x4_sse2()
61 cospi_16_64, -cospi_16_64, in vp9_fdct4x4_sse2()
62 cospi_16_64, -cospi_16_64); in vp9_fdct4x4_sse2()
63 const __m128i k__cospi_B = _mm_setr_epi16(cospi_16_64, -cospi_16_64, in vp9_fdct4x4_sse2()
64 cospi_16_64, -cospi_16_64, in vp9_fdct4x4_sse2()
65 cospi_16_64, cospi_16_64, in vp9_fdct4x4_sse2()
66 cospi_16_64, cospi_16_64); in vp9_fdct4x4_sse2()
75 const __m128i k__cospi_E = _mm_setr_epi16(cospi_16_64, cospi_16_64, in vp9_fdct4x4_sse2()
76 cospi_16_64, cospi_16_64, in vp9_fdct4x4_sse2()
[all …]
vp9_dct32x32_sse2.c
47 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(+cospi_16_64); in FDCT32x32_2D()
48 const __m128i k__cospi_p16_m16 = pair_set_epi16(+cospi_16_64, -cospi_16_64); in FDCT32x32_2D()
1383 const __m128i k32_p16_p16 = pair_set_epi32(cospi_16_64, cospi_16_64); in FDCT32x32_2D()
1384 const __m128i k32_p16_m16 = pair_set_epi32(cospi_16_64, -cospi_16_64); in FDCT32x32_2D()
1535 const __m128i k32_p16_p16 = pair_set_epi32(cospi_16_64, cospi_16_64); in FDCT32x32_2D()
1536 const __m128i k32_p16_m16 = pair_set_epi32(cospi_16_64, -cospi_16_64); in FDCT32x32_2D()
/external/libvpx/libvpx/vp9/common/x86/
vp9_idct_intrin_sse2.c
26 const __m128i cst = _mm_setr_epi16((int16_t)cospi_16_64, (int16_t)cospi_16_64, in vp9_idct4x4_16_add_sse2()
27 (int16_t)cospi_16_64, (int16_t)-cospi_16_64, in vp9_idct4x4_16_add_sse2()
158 a = dct_const_round_shift(input[0] * cospi_16_64); in vp9_idct4x4_1_add_sse2()
159 a = dct_const_round_shift(a * cospi_16_64); in vp9_idct4x4_1_add_sse2()
179 const __m128i k__cospi_p16_p16 = pair_set_epi16(cospi_16_64, cospi_16_64); in idct4_sse2()
180 const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); in idct4_sse2()
524 const __m128i stg2_0 = pair_set_epi16(cospi_16_64, cospi_16_64); in vp9_idct8x8_64_add_sse2()
525 const __m128i stg2_1 = pair_set_epi16(cospi_16_64, -cospi_16_64); in vp9_idct8x8_64_add_sse2()
590 a = dct_const_round_shift(input[0] * cospi_16_64); in vp9_idct8x8_1_add_sse2()
591 a = dct_const_round_shift(a * cospi_16_64); in vp9_idct8x8_1_add_sse2()
[all …]
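The SSE2 versions fold the multiply and the add/subtract into one pmaddwd: interleaving the two inputs 16 bits at a time and multiplying against a constant vector whose lanes alternate (cospi_16_64, cospi_16_64) or (cospi_16_64, -cospi_16_64) yields (x + y) * cospi_16_64 and (x - y) * cospi_16_64 per 32-bit lane, which is exactly the butterfly above. A hedged sketch of the idiom (the pair_set_epi16 helper and the register layout of the real code may differ; only the low four lanes are shown):

    #include <emmintrin.h>
    #include <stdint.h>

    /* Butterfly via pmaddwd:
     *   out_sum[i]  = round((x[i] + y[i]) * cospi_16_64)
     *   out_diff[i] = round((x[i] - y[i]) * cospi_16_64)
     * The real code also handles the unpackhi half and repacks to int16. */
    static void butterfly_cospi16_sse2(__m128i x, __m128i y,
                                       __m128i *out_sum, __m128i *out_diff) {
      const short c = 11585;                        /* cospi_16_64 */
      const __m128i k_p16_p16 = _mm_set1_epi16(c);  /* (+c, +c) pairs */
      const __m128i k_p16_m16 = _mm_setr_epi16(c, -c, c, -c, c, -c, c, -c);
      const __m128i rounding  = _mm_set1_epi32(1 << 13);  /* 1 << (DCT_CONST_BITS - 1) */
      const __m128i lo = _mm_unpacklo_epi16(x, y);  /* x0,y0,x1,y1,...            */
      __m128i s = _mm_madd_epi16(lo, k_p16_p16);    /* x*c + y*c per 32-bit lane  */
      __m128i d = _mm_madd_epi16(lo, k_p16_m16);    /* x*c - y*c per 32-bit lane  */
      s = _mm_srai_epi32(_mm_add_epi32(s, rounding), 14);
      d = _mm_srai_epi32(_mm_add_epi32(d, rounding), 14);
      *out_sum  = s;
      *out_diff = d;
    }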
vp9_idct_intrin_ssse3.c
39 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); in idct16_8col()
40 const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64); in idct16_8col()
