
Searched refs:cospi_20_64 (Results 1 – 16 of 16) sorted by relevance

/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/mips/dspr2/
vp9_itrans16_dspr2.c:268  [cospi_20_64] "r" (cospi_20_64), [cospi_12_64] "r" (cospi_12_64),  in idct16_rows_dspr2()
665 [cospi_20_64] "r" (cospi_20_64), [cospi_12_64] "r" (cospi_12_64), in idct16_cols_add_blk_dspr2()
989 s10 = x10 * cospi_20_64 + x11 * cospi_12_64; in iadst16()
990 s11 = x10 * cospi_12_64 - x11 * cospi_20_64; in iadst16()
993 s14 = - x14 * cospi_12_64 + x15 * cospi_20_64; in iadst16()
994 s15 = x14 * cospi_20_64 + x15 * cospi_12_64; in iadst16()
vp9_itrans32_cols_dspr2.c:239  [cospi_12_64] "r" (cospi_12_64), [cospi_20_64] "r" (cospi_20_64)  in vp9_idct32_cols_add_blk_dspr2()
296 [cospi_12_64] "r" (cospi_12_64), [cospi_20_64] "r" (cospi_20_64) in vp9_idct32_cols_add_blk_dspr2()
664 [cospi_20_64] "r" (cospi_20_64), [cospi_12_64] "r" (cospi_12_64), in vp9_idct32_cols_add_blk_dspr2()
vp9_itrans32_dspr2.c:285  [cospi_12_64] "r" (cospi_12_64), [cospi_20_64] "r" (cospi_20_64)  in idct32_rows_dspr2()
347 [cospi_12_64] "r" (cospi_12_64), [cospi_20_64] "r" (cospi_20_64) in idct32_rows_dspr2()
737 [cospi_20_64] "r" (cospi_20_64), [cospi_12_64] "r" (cospi_12_64), in idct32_rows_dspr2()
vp9_itrans8_dspr2.c:193  [cospi_20_64] "r" (cospi_20_64), [cospi_8_64] "r" (cospi_8_64),  in idct8_rows_dspr2()
442 [cospi_20_64] "r" (cospi_20_64), [cospi_8_64] "r" (cospi_8_64), in idct8_columns_add_blk_dspr2()
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/
vp9_dct.c:234  t1 = x1 * cospi_12_64 + x2 * cospi_20_64;  in fdct8()
235 t2 = x2 * cospi_12_64 + x1 * -cospi_20_64; in fdct8()
294 t1 = x1 * cospi_12_64 + x2 * cospi_20_64; in vp9_fdct8x8_c()
295 t2 = x2 * cospi_12_64 + x1 * -cospi_20_64; in vp9_fdct8x8_c()
418 t1 = x1 * cospi_12_64 + x2 * cospi_20_64; in vp9_fdct16x16_c()
419 t2 = x2 * cospi_12_64 + x1 * -cospi_20_64; in vp9_fdct16x16_c()
727 t1 = x1 * cospi_12_64 + x2 * cospi_20_64; in fdct16()
728 t2 = x2 * cospi_12_64 + x1 * -cospi_20_64; in fdct16()
864 s10 = x10 * cospi_20_64 + x11 * cospi_12_64; in fadst16()
865 s11 = x10 * cospi_12_64 - x11 * cospi_20_64; in fadst16()
[all …]
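
Every encoder match above is the same forward rotation: multiply a pair of intermediates by cospi_12_64 and cospi_20_64, then round the Q14 products back down by 14 bits. A minimal standalone sketch of that step (not the libvpx source; the constant values and the rounding-by-14 convention come from vp9_idct.h further down, the function name is invented, and output[5]/output[3] are what fdct8() appears to use as destinations):

    /* Sketch of the rotation matched at vp9_dct.c:234-235 (fdct8) and repeated
     * in vp9_fdct8x8_c, vp9_fdct16x16_c, fdct16 and fadst16 above.
     * Constants are Q14 cosines:
     *   cospi_12_64 = round(16384 * cos(12*pi/64)) = 13623
     *   cospi_20_64 = round(16384 * cos(20*pi/64)) =  9102               */
    static const int cospi_12_64 = 13623;
    static const int cospi_20_64 = 9102;

    /* Rounding right-shift by 14 bits, as libvpx's fdct_round_shift() does. */
    static int fdct_round_shift(int input) {
      return (input + (1 << 13)) >> 14;
    }

    /* x1, x2 -> the two odd-frequency outputs (output[5], output[3] in fdct8). */
    static void fdct_rotate_12_20(int x1, int x2, int *out5, int *out3) {
      int t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
      int t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
      *out5 = fdct_round_shift(t1);
      *out3 = fdct_round_shift(t2);
    }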
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/arm/neon/
vp9_idct8x8_add_neon.asm:29  vdup.16 d3, r6  ; duplicate cospi_20_64
43 ; input[5] * cospi_12_64 - input[3] * cospi_20_64
59 ; input[5] * cospi_20_64
67 ; input[5] * cospi_20_64 + input[3] * cospi_12_64
230 ; generate cospi_20_64 = 9102
342 ; generate cospi_20_64 = 9102
374 vdup.16 q0, r12 ; duplicate -cospi_20_64*2
382 ; dct_const_round_shift(- input[3] * cospi_20_64)
vp9_iht8x8_add_neon.asm:84  ; generate cospi_20_64 = 9102
128 vdup.16 d3, r6 ; duplicate cospi_20_64
142 ; input[5] * cospi_12_64 - input[3] * cospi_20_64
158 ; input[5] * cospi_20_64
166 ; input[5] * cospi_20_64 + input[3] * cospi_12_64
vp9_idct32x32_add_neon.asm:32  cospi_20_64 EQU 9102  define
602 ;temp1 = step1b[26][i] * cospi_12_64 - step1b[21][i] * cospi_20_64;
603 ;temp2 = step1b[26][i] * cospi_20_64 + step1b[21][i] * cospi_12_64;
606 DO_BUTTERFLY_STD cospi_12_64, cospi_20_64, d2, d3, d6, d7
637 ;temp1 = step1b[22][i] * (-cospi_20_64) - step1b[25][i] * (-cospi_12_64);
638 ;temp2 = step1b[22][i] * (-cospi_12_64) + step1b[25][i] * (-cospi_20_64);
641 DO_BUTTERFLY_STD (-cospi_20_64), (-cospi_12_64), d8, d9, d14, d15
899 ;temp1 = input[20 * 32] * cospi_12_64 - input[12 * 32] * cospi_20_64;
900 ;temp2 = input[20 * 32] * cospi_20_64 + input[12 * 32] * cospi_12_64;
904 DO_BUTTERFLY_STD cospi_12_64, cospi_20_64, d2, d3, d6, d7
vp9_idct16x16_add_neon.asm:83  ; generate cospi_20_64 = 9102
104 vdup.16 d3, r12 ; duplicate cospi_20_64
127 ; step2[5] * cospi_20_64
131 ; temp1 = input[5] * cospi_12_64 - input[3] * cospi_20_64
135 ; temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64
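
The NEON assembly above follows the same recipe as the C code: broadcast cospi_12_64/cospi_20_64 into vector registers (the vdup.16 lines), do widening 16x16->32 multiplies, and fold dct_const_round_shift into a rounding narrow by 14 bits. Roughly what those lines compute, expressed as NEON intrinsics rather than hand-written asm (a sketch, not the library's code; the function name is invented, and vqrshrn adds saturation the scalar C path does not have):

    #include <arm_neon.h>

    /* Sketch of the idct8 stage-1 butterfly seen in the .asm matches above:
     *   temp1 = in5 * cospi_12_64 - in3 * cospi_20_64
     *   temp2 = in5 * cospi_20_64 + in3 * cospi_12_64
     * four lanes at a time, with the Q14 rounding done by vqrshrn. */
    static inline void idct_butterfly_12_20_neon(int16x4_t in5, int16x4_t in3,
                                                 int16x4_t *step1_5,
                                                 int16x4_t *step1_6) {
      const int16x4_t c12 = vdup_n_s16(13623); /* cospi_12_64 */
      const int16x4_t c20 = vdup_n_s16(9102);  /* cospi_20_64 */

      int32x4_t t1 = vmull_s16(in5, c12);      /* in5 * cospi_12_64       */
      t1 = vmlsl_s16(t1, in3, c20);            /*     - in3 * cospi_20_64 */
      int32x4_t t2 = vmull_s16(in5, c20);      /* in5 * cospi_20_64       */
      t2 = vmlal_s16(t2, in3, c12);            /*     + in3 * cospi_12_64 */

      *step1_5 = vqrshrn_n_s32(t1, 14);        /* dct_const_round_shift   */
      *step1_6 = vqrshrn_n_s32(t2, 14);
    }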
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/
vp9_idct.c:171  temp1 = input[5] * cospi_12_64 - input[3] * cospi_20_64;  in idct8()
172 temp2 = input[5] * cospi_20_64 + input[3] * cospi_12_64; in idct8()
511 temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64; in idct16()
512 temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64; in idct16()
713 s10 = x10 * cospi_20_64 + x11 * cospi_12_64; in iadst16()
714 s11 = x10 * cospi_12_64 - x11 * cospi_20_64; in iadst16()
717 s14 = - x14 * cospi_12_64 + x15 * cospi_20_64; in iadst16()
718 s15 = x14 * cospi_20_64 + x15 * cospi_12_64; in iadst16()
999 temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64; in idct32()
1000 temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64; in idct32()
[all …]
vp9_idct.h:63  static const int cospi_20_64 = 9102;  variable
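
vp9_idct.h:63 is the single definition behind every match in this list: cospi_20_64 = round(2^14 * cos(20*pi/64)) = 9102, and its partner cospi_12_64 = 13623 is the complementary cosine (12 + 20 = 32, a quarter turn), which is why the two constants always travel together in the rotations above. A quick standalone check of those values (illustrative only; the loop and names are not from libvpx):

    #include <math.h>
    #include <stdio.h>

    #ifndef M_PI
    #define M_PI 3.14159265358979323846
    #endif

    /* Re-derive the vp9_idct.h constants: cospi_N_64 = round(2^14 * cos(N*pi/64)).
     * Prints 13623 for N = 12 and 9102 for N = 20 (compile with -lm). */
    int main(void) {
      for (int n = 12; n <= 20; n += 8) {
        printf("cospi_%d_64 = %d\n", n, (int)lround(16384.0 * cos(n * M_PI / 64)));
      }
      return 0;
    }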
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/x86/
vp9_idct_intrin_sse2.c:546  const __m128i stg1_2 = pair_set_epi16(-cospi_20_64, cospi_12_64);  in vp9_idct8x8_64_add_sse2()
547 const __m128i stg1_3 = pair_set_epi16(cospi_12_64, cospi_20_64); in vp9_idct8x8_64_add_sse2()
681 const __m128i stg1_2 = pair_set_epi16(-cospi_20_64, cospi_12_64); in idct8_sse2()
682 const __m128i stg1_3 = pair_set_epi16(cospi_12_64, cospi_20_64); in idct8_sse2()
1004 const __m128i stg1_2 = pair_set_epi16(-cospi_20_64, cospi_12_64); in vp9_idct8x8_10_add_sse2()
1005 const __m128i stg1_3 = pair_set_epi16(cospi_12_64, cospi_20_64); in vp9_idct8x8_10_add_sse2()
1392 const __m128i stg3_2 = pair_set_epi16(cospi_12_64, -cospi_20_64); in vp9_idct16x16_256_add_sse2()
1393 const __m128i stg3_3 = pair_set_epi16(cospi_20_64, cospi_12_64); in vp9_idct16x16_256_add_sse2()
1614 const __m128i k__cospi_p20_p12 = pair_set_epi16(cospi_20_64, cospi_12_64); in iadst16_8col()
1615 const __m128i k__cospi_p12_m20 = pair_set_epi16(cospi_12_64, -cospi_20_64); in iadst16_8col()
[all …]
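
On x86 the two constants are not kept in separate registers: pair_set_epi16() packs them as eight alternating 16-bit values, so a single _mm_madd_epi16 against interleaved coefficients computes one half of the rotation per 32-bit lane. A small runnable illustration of what stg1_2 = pair_set_epi16(-cospi_20_64, cospi_12_64) amounts to (the toy input and printout are mine, not libvpx's; only the constant pair is taken from the matches above):

    #include <emmintrin.h> /* SSE2 */
    #include <stdio.h>

    static const int cospi_12_64 = 13623;
    static const int cospi_20_64 = 9102;

    /* pair_set_epi16(-cospi_20_64, cospi_12_64) is eight alternating copies of
     * the two constants; pmaddwd of that against interleaved coefficients gives
     *   even_coeff * -cospi_20_64 + odd_coeff * cospi_12_64
     * per 32-bit lane, i.e. one half of the idct8 rotation in one instruction. */
    int main(void) {
      const __m128i stg1_2 = _mm_set_epi16(
          (short)cospi_12_64, (short)-cospi_20_64, (short)cospi_12_64, (short)-cospi_20_64,
          (short)cospi_12_64, (short)-cospi_20_64, (short)cospi_12_64, (short)-cospi_20_64);

      /* Toy input: every (even, odd) pair is (3, 5). */
      const __m128i coeffs = _mm_set_epi16(5, 3, 5, 3, 5, 3, 5, 3);
      const __m128i prod = _mm_madd_epi16(coeffs, stg1_2);

      int out[4];
      _mm_storeu_si128((__m128i *)out, prod);
      printf("%d\n", out[0]); /* 40809 = 3 * -9102 + 5 * 13623 */
      return 0;
    }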
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/x86/
vp9_dct_avx2.c:291  const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);  in vp9_fdct8x8_avx2()
292 const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); in vp9_fdct8x8_avx2()
673 const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64); in fdct8_avx2()
674 const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); in fdct8_avx2()
1093 const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64); in vp9_fdct16x16_avx2()
1094 const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); in vp9_fdct16x16_avx2()
1754 const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64); in fdct16_8col_avx2()
1755 const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); in fdct16_8col_avx2()
2086 const __m128i k__cospi_p20_p12 = pair_set_epi16(cospi_20_64, cospi_12_64); in fadst16_8col_avx2()
2087 const __m128i k__cospi_p12_m20 = pair_set_epi16(cospi_12_64, -cospi_20_64); in fadst16_8col_avx2()
[all …]
vp9_dct_sse2.c:392  const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);  in vp9_fdct8x8_sse2()
393 const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); in vp9_fdct8x8_sse2()
774 const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64); in fdct8_sse2()
775 const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); in fdct8_sse2()
1194 const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64); in vp9_fdct16x16_sse2()
1195 const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); in vp9_fdct16x16_sse2()
1855 const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64); in fdct16_8col()
1856 const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); in fdct16_8col()
2187 const __m128i k__cospi_p20_p12 = pair_set_epi16(cospi_20_64, cospi_12_64); in fadst16_8col()
2188 const __m128i k__cospi_p12_m20 = pair_set_epi16(cospi_12_64, -cospi_20_64); in fadst16_8col()
[all …]
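
The encoder-side SSE2/AVX2 matches use the same packing. The usual sequence behind the k__cospi_p12_p20 / k__cospi_m20_p12 pairs is: interleave the two input vectors, pmaddwd against each constant pair, add the Q14 rounding bias, arithmetic-shift right by 14, and pack back to 16 bits. A hedged sketch of that sequence for one butterfly (function and variable names are invented; pair_set_epi16 here is only a plausible reconstruction of the libvpx helper):

    #include <emmintrin.h>

    #define pair_set_epi16(a, b)                                      \
      _mm_set_epi16((short)(b), (short)(a), (short)(b), (short)(a),   \
                    (short)(b), (short)(a), (short)(b), (short)(a))

    static const int cospi_12_64 = 13623;
    static const int cospi_20_64 = 9102;

    /* Sketch of the vectorized rotation behind vp9_fdct8x8_sse2():
     *   t1 = x1*cospi_12_64 + x2*cospi_20_64
     *   t2 = x2*cospi_12_64 - x1*cospi_20_64
     * eight coefficients at a time, rounded back from Q14. */
    static void fdct_butterfly_sse2(__m128i x1, __m128i x2,
                                    __m128i *t1, __m128i *t2) {
      const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
      const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
      const __m128i k__rounding = _mm_set1_epi32(1 << 13); /* Q14 rounding bias */

      /* Interleave so each 32-bit madd lane sees one (x1, x2) pair. */
      const __m128i lo = _mm_unpacklo_epi16(x1, x2);
      const __m128i hi = _mm_unpackhi_epi16(x1, x2);

      __m128i a_lo = _mm_madd_epi16(lo, k__cospi_p12_p20); /*  x1*c12 + x2*c20 */
      __m128i a_hi = _mm_madd_epi16(hi, k__cospi_p12_p20);
      __m128i b_lo = _mm_madd_epi16(lo, k__cospi_m20_p12); /* -x1*c20 + x2*c12 */
      __m128i b_hi = _mm_madd_epi16(hi, k__cospi_m20_p12);

      a_lo = _mm_srai_epi32(_mm_add_epi32(a_lo, k__rounding), 14);
      a_hi = _mm_srai_epi32(_mm_add_epi32(a_hi, k__rounding), 14);
      b_lo = _mm_srai_epi32(_mm_add_epi32(b_lo, k__rounding), 14);
      b_hi = _mm_srai_epi32(_mm_add_epi32(b_hi, k__rounding), 14);

      *t1 = _mm_packs_epi32(a_lo, a_hi); /* back to 16-bit, saturating */
      *t2 = _mm_packs_epi32(b_lo, b_hi);
    }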
vp9_dct32x32_avx2.c:58  const __m256i k__cospi_p12_p20 = pair256_set_epi16(+cospi_12_64, cospi_20_64);  in FDCT32x32_2D_AVX2()
59 const __m256i k__cospi_m20_p12 = pair256_set_epi16(-cospi_20_64, cospi_12_64); in FDCT32x32_2D_AVX2()
63 const __m256i k__cospi_m12_m20 = pair256_set_epi16(-cospi_12_64, -cospi_20_64); in FDCT32x32_2D_AVX2()
1738 const __m256i k32_p12_p20 = pair256_set_epi32(cospi_12_64, cospi_20_64); in FDCT32x32_2D_AVX2()
1739 const __m256i k32_m20_p12 = pair256_set_epi32(-cospi_20_64, cospi_12_64); in FDCT32x32_2D_AVX2()
1865 const __m256i k32_m20_p12 = pair256_set_epi32(-cospi_20_64, cospi_12_64); in FDCT32x32_2D_AVX2()
1867 -cospi_20_64); in FDCT32x32_2D_AVX2()
1868 const __m256i k32_p12_p20 = pair256_set_epi32(cospi_12_64, cospi_20_64); in FDCT32x32_2D_AVX2()
vp9_dct32x32_sse2.c:49  const __m128i k__cospi_p12_p20 = pair_set_epi16(+cospi_12_64, cospi_20_64);  in FDCT32x32_2D()
50 const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); in FDCT32x32_2D()
54 const __m128i k__cospi_m12_m20 = pair_set_epi16(-cospi_12_64, -cospi_20_64); in FDCT32x32_2D()
1729 const __m128i k32_p12_p20 = pair_set_epi32(cospi_12_64, cospi_20_64); in FDCT32x32_2D()
1730 const __m128i k32_m20_p12 = pair_set_epi32(-cospi_20_64, cospi_12_64); in FDCT32x32_2D()
1856 const __m128i k32_m20_p12 = pair_set_epi32(-cospi_20_64, cospi_12_64); in FDCT32x32_2D()
1858 -cospi_20_64); in FDCT32x32_2D()
1859 const __m128i k32_p12_p20 = pair_set_epi32(cospi_12_64, cospi_20_64); in FDCT32x32_2D()