/external/libvpx/libvpx/vpx_dsp/x86/ |
D | inv_txfm_ssse3.c |
    60   partial_butterfly_ssse3(in[4], cospi_28_64, cospi_4_64, &step1[4], &step1[7]);  in idct32_34_8x32_quarter_1()
    142  butterfly(step1[31], step1[16], cospi_28_64, cospi_4_64, &step1[17],  in idct32_34_8x32_quarter_3_4()
    144  butterfly(step1[28], step1[19], -cospi_4_64, cospi_28_64, &step1[18],  in idct32_34_8x32_quarter_3_4()
    195  partial_butterfly_ssse3(in[4], cospi_28_64, cospi_4_64, &step1[4], &step1[7]);  in idct32_135_8x32_quarter_1()
    316  butterfly(step2[30], step2[17], cospi_28_64, cospi_4_64, &step1[17],  in idct32_135_8x32_quarter_3_4()
    318  butterfly(step2[29], step2[18], -cospi_4_64, cospi_28_64, &step1[18],  in idct32_135_8x32_quarter_3_4()
|
D | highbd_idct32x32_add_sse4.c |
    149  highbd_butterfly_sse4_1(in[4], in[28], cospi_28_64, cospi_4_64, &step1[4],  in highbd_idct32_1024_4x32_quarter_1()
    274  highbd_butterfly_sse4_1(step2[30], step2[17], cospi_28_64, cospi_4_64,  in highbd_idct32_1024_4x32_quarter_3_4()
    276  highbd_butterfly_sse4_1(step2[29], step2[18], -cospi_4_64, cospi_28_64,  in highbd_idct32_1024_4x32_quarter_3_4()
    373  highbd_partial_butterfly_sse4_1(in[4], cospi_28_64, cospi_4_64, &step1[4],  in highbd_idct32_135_4x32_quarter_1()
    498  highbd_butterfly_sse4_1(step2[30], step2[17], cospi_28_64, cospi_4_64,  in highbd_idct32_135_4x32_quarter_3_4()
    500  highbd_butterfly_sse4_1(step2[29], step2[18], -cospi_4_64, cospi_28_64,  in highbd_idct32_135_4x32_quarter_3_4()
    586  highbd_partial_butterfly_sse4_1(in[4], cospi_28_64, cospi_4_64, &step1[4],  in highbd_idct32_34_4x32_quarter_1()
    694  highbd_butterfly_sse4_1(step2[30], step2[17], cospi_28_64, cospi_4_64,  in highbd_idct32_34_4x32_quarter_3_4()
    696  highbd_butterfly_sse4_1(step2[29], step2[18], -cospi_4_64, cospi_28_64,  in highbd_idct32_34_4x32_quarter_3_4()
|
D | highbd_idct32x32_add_sse2.c |
    145  highbd_butterfly_sse2(in[4], in[28], cospi_28_64, cospi_4_64, &step1[4],  in highbd_idct32_1024_4x32_quarter_1()
    270  highbd_butterfly_sse2(step2[30], step2[17], cospi_28_64, cospi_4_64,  in highbd_idct32_1024_4x32_quarter_3_4()
    272  highbd_butterfly_sse2(step2[18], step2[29], cospi_4_64, cospi_28_64,  in highbd_idct32_1024_4x32_quarter_3_4()
    369  highbd_partial_butterfly_sse2(in[4], cospi_28_64, cospi_4_64, &step1[4],  in highbd_idct32_135_4x32_quarter_1()
    494  highbd_butterfly_sse2(step2[30], step2[17], cospi_28_64, cospi_4_64,  in highbd_idct32_135_4x32_quarter_3_4()
    496  highbd_butterfly_sse2(step2[18], step2[29], cospi_4_64, cospi_28_64,  in highbd_idct32_135_4x32_quarter_3_4()
    586  highbd_partial_butterfly_sse2(in[4], cospi_28_64, cospi_4_64, &step1[4],  in highbd_idct32_34_4x32_quarter_1()
    706  highbd_butterfly_sse2(step2[30], step2[17], cospi_28_64, cospi_4_64,  in highbd_idct32_34_4x32_quarter_3_4()
    708  highbd_butterfly_sse2(step2[18], step2[29], cospi_4_64, cospi_28_64,  in highbd_idct32_34_4x32_quarter_3_4()
|
D | inv_txfm_ssse3.h |
    22  const __m128i cp_28d_4d = dual_set_epi16(2 * cospi_28_64, 2 * cospi_4_64);  in idct8x8_12_add_kernel_ssse3()
    29  const __m128i cospi_4_64d = _mm_set1_epi16((int16_t)(2 * cospi_4_64));  in idct8x8_12_add_kernel_ssse3()
|
D | inv_txfm_sse2.h |
    255  butterfly(in[1], in[7], cospi_28_64, cospi_4_64, &step1[4], &step1[7]);  in idct8()
    297  const __m128i cp_28_n4 = pair_set_epi16(cospi_28_64, -cospi_4_64);  in idct8x8_12_add_kernel_sse2()
    298  const __m128i cp_4_28 = pair_set_epi16(cospi_4_64, cospi_28_64);  in idct8x8_12_add_kernel_sse2()
    354  butterfly(in[2], in[14], cospi_28_64, cospi_4_64, &step1[4], &step1[7]);  in idct16_8col()
    454  const __m128i k__cospi_p28_m04 = pair_set_epi16(cospi_28_64, -cospi_4_64);  in idct16x16_10_pass1()
    455  const __m128i k__cospi_p04_p28 = pair_set_epi16(cospi_4_64, cospi_28_64);  in idct16x16_10_pass1()
    546  butterfly(io[2], zero, cospi_28_64, cospi_4_64, &step1[4], &step1[7]);  in idct16x16_10_pass2()
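Read against the scalar idct8_c() rows under inv_txfm.c further down, the pair_set_epi16()/butterfly() rows above suggest the standard SSE2 idiom: interleave the two 16-bit input vectors, multiply-accumulate with _mm_madd_epi16 against {c0, -c1} and {c1, c0} coefficient pairs, round-shift each 32-bit lane by 14 bits, and repack. A minimal sketch of that idiom, assuming those butterfly semantics; helper names are illustrative, not the library's:

    #include <emmintrin.h>
    #include <stdint.h>

    #define DCT_CONST_BITS 14
    #define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1))

    /* (a*c_even + b*c_odd + rounding) >> 14 for each 32-bit lane. */
    static __m128i madd_round_shift(__m128i interleaved, __m128i coeff_pair) {
      const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
      const __m128i prod = _mm_madd_epi16(interleaved, coeff_pair);
      return _mm_srai_epi32(_mm_add_epi32(prod, rounding), DCT_CONST_BITS);
    }

    /* out0 = in0*c0 - in1*c1, out1 = in0*c1 + in1*c0, eight lanes at once,
     * mirroring the scalar temp1/temp2 pair shown for idct8_c() below. */
    static void butterfly_sketch(__m128i in0, __m128i in1, int16_t c0, int16_t c1,
                                 __m128i *out0, __m128i *out1) {
      const __m128i pair0 = _mm_setr_epi16(c0, -c1, c0, -c1, c0, -c1, c0, -c1);
      const __m128i pair1 = _mm_setr_epi16(c1, c0, c1, c0, c1, c0, c1, c0);
      const __m128i lo = _mm_unpacklo_epi16(in0, in1);
      const __m128i hi = _mm_unpackhi_epi16(in0, in1);
      *out0 = _mm_packs_epi32(madd_round_shift(lo, pair0),
                              madd_round_shift(hi, pair0));
      *out1 = _mm_packs_epi32(madd_round_shift(lo, pair1),
                              madd_round_shift(hi, pair1));
    }

The VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64) / VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64) rows in idct8x8_msa.c below appear to be the same coefficient-pair trick expressed with MIPS MSA macros.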
|
D | highbd_idct16x16_add_sse4.c |
    70   highbd_butterfly_sse4_1(io[2], io[14], cospi_28_64, cospi_4_64, &step1[4],  in vpx_highbd_idct16_4col_sse4_1()
    120  highbd_partial_butterfly_sse4_1(io[2], cospi_28_64, cospi_4_64, &step1[4],  in highbd_idct16x16_38_4col()
    168  highbd_partial_butterfly_sse4_1(io[2], cospi_28_64, cospi_4_64, &step1[4],  in highbd_idct16x16_10_4col()
|
D | highbd_idct16x16_add_sse2.c |
    69   highbd_butterfly_sse2(io[2], io[14], cospi_28_64, cospi_4_64, &step1[4],  in highbd_idct16_4col()
    119  highbd_partial_butterfly_sse2(io[2], cospi_28_64, cospi_4_64, &step1[4],  in highbd_idct16x16_38_4col()
    167  highbd_partial_butterfly_sse2(io[2], cospi_28_64, cospi_4_64, &step1[4],  in highbd_idct16x16_10_4col()
|
D | highbd_idct8x8_add_sse4.c |
    30  highbd_butterfly_sse4_1(io[1], io[7], cospi_28_64, cospi_4_64, &step1[4],  in vpx_highbd_idct8x8_half1d_sse4_1()
    67  step1[7] = multiplication_round_shift_sse4_1(temp1, cospi_4_64);  in highbd_idct8x8_12_half1d()
|
D | highbd_idct8x8_add_sse2.c |
    28  highbd_butterfly_sse2(io[1], io[7], cospi_28_64, cospi_4_64, &step1[4],  in highbd_idct8x8_half1d()
    65  step1[7] = multiplication_round_shift_sse2(temp1, sign, cospi_4_64);  in highbd_idct8x8_12_half1d()
|
/external/libvpx/libvpx/vpx_dsp/mips/ |
D | itrans16_dspr2.c |
    261   [cospi_4_64] "r"(cospi_4_64), [cospi_28_64] "r"(cospi_28_64),  in idct16_rows_dspr2()
    647   [cospi_4_64] "r"(cospi_4_64), [cospi_28_64] "r"(cospi_28_64),  in idct16_cols_add_blk_dspr2()
    1132  s8 = x8 * cospi_4_64 + x9 * cospi_28_64;  in iadst16_dspr2()
    1133  s9 = x8 * cospi_28_64 - x9 * cospi_4_64;  in iadst16_dspr2()
    1136  s12 = -x12 * cospi_28_64 + x13 * cospi_4_64;  in iadst16_dspr2()
    1137  s13 = x12 * cospi_4_64 + x13 * cospi_28_64;  in iadst16_dspr2()
|
D | itrans8_dspr2.c |
    185  [cospi_4_64] "r"(cospi_4_64), [cospi_12_64] "r"(cospi_12_64),  in idct8_rows_dspr2()
    445  [cospi_4_64] "r"(cospi_4_64), [cospi_12_64] "r"(cospi_12_64),  in idct8_columns_add_blk_dspr2()
|
D | itrans32_cols_dspr2.c |
    110  [cospi_4_64] "r"(cospi_4_64), [cospi_17_64] "r"(cospi_17_64),  in vpx_idct32_cols_add_blk_dspr2()
    170  [cospi_4_64] "r"(cospi_4_64), [cospi_7_64] "r"(cospi_7_64),  in vpx_idct32_cols_add_blk_dspr2()
    683  [cospi_4_64] "r"(cospi_4_64), [cospi_28_64] "r"(cospi_28_64),  in vpx_idct32_cols_add_blk_dspr2()
|
D | itrans32_dspr2.c |
    154  [cospi_4_64] "r"(cospi_4_64), [cospi_17_64] "r"(cospi_17_64),  in idct32_rows_dspr2()
    214  [cospi_4_64] "r"(cospi_4_64), [cospi_7_64] "r"(cospi_7_64),  in idct32_rows_dspr2()
    727  [cospi_4_64] "r"(cospi_4_64), [cospi_28_64] "r"(cospi_28_64),  in idct32_rows_dspr2()
|
D | idct8x8_msa.c |
    54  k0 = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);  in vpx_idct8x8_12_add_msa()
    55  k1 = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);  in vpx_idct8x8_12_add_msa()
|
/external/libvpx/libvpx/vpx_dsp/arm/ |
D | idct32x32_34_add_neon.c |
    78   s1[7] = multiply_shift_and_narrow_s16(in[4], cospi_4_64);  in vpx_idct32_6_neon()
    80   s1[17] = multiply_accumulate_shift_and_narrow_s16(s1[16], -cospi_4_64, s1[31],  in vpx_idct32_6_neon()
    83   cospi_4_64);  in vpx_idct32_6_neon()
    299  s1[7] = multiply_shift_and_narrow_s16(in[4], cospi_4_64);  in vpx_idct32_8_neon()
    301  s1[17] = multiply_accumulate_shift_and_narrow_s16(s1[16], -cospi_4_64, s1[31],  in vpx_idct32_8_neon()
    304  cospi_4_64);  in vpx_idct32_8_neon()
    308  s1[28], -cospi_4_64);  in vpx_idct32_8_neon()
    309  s1[29] = multiply_accumulate_shift_and_narrow_s16(s1[19], -cospi_4_64, s1[28],  in vpx_idct32_8_neon()
|
D | idct32x32_135_add_neon.c |
    145  s3[7] = multiply_shift_and_narrow_s16(in[4], cospi_4_64);  in vpx_idct32_12_neon()
    152  s3[17] = multiply_accumulate_shift_and_narrow_s16(s1[16], -cospi_4_64, s1[31],  in vpx_idct32_12_neon()
    155  cospi_4_64);  in vpx_idct32_12_neon()
    158  s2[29], -cospi_4_64);  in vpx_idct32_12_neon()
    159  s3[29] = multiply_accumulate_shift_and_narrow_s16(s2[18], -cospi_4_64, s2[29],  in vpx_idct32_12_neon()
    443  s3[7] = multiply_shift_and_narrow_s16(in[4], cospi_4_64);  in vpx_idct32_16_neon()
    457  s3[17] = multiply_accumulate_shift_and_narrow_s16(s2[17], -cospi_4_64, s2[30],  in vpx_idct32_16_neon()
    460  cospi_4_64);  in vpx_idct32_16_neon()
    463  s2[29], -cospi_4_64);  in vpx_idct32_16_neon()
    464  s3[29] = multiply_accumulate_shift_and_narrow_s16(s2[18], -cospi_4_64, s2[29],  in vpx_idct32_16_neon()
|
D | highbd_idct32x32_34_add_neon.c |
    86   s1[7] = multiply_shift_and_narrow_s32_dual(in[4], cospi_4_64);  in vpx_highbd_idct32_6_neon()
    88   s1[17] = multiply_accumulate_shift_and_narrow_s32_dual(s1[16], -cospi_4_64,  in vpx_highbd_idct32_6_neon()
    91   s1[31], cospi_4_64);  in vpx_highbd_idct32_6_neon()
    404  s1[7] = multiply_shift_and_narrow_s32_dual(in[4], cospi_4_64);  in vpx_highbd_idct32_8_neon()
    406  s1[17] = multiply_accumulate_shift_and_narrow_s32_dual(s1[16], -cospi_4_64,  in vpx_highbd_idct32_8_neon()
    409  s1[31], cospi_4_64);  in vpx_highbd_idct32_8_neon()
    413  s1[28], -cospi_4_64);  in vpx_highbd_idct32_8_neon()
    414  s1[29] = multiply_accumulate_shift_and_narrow_s32_dual(s1[19], -cospi_4_64,  in vpx_highbd_idct32_8_neon()
|
D | highbd_idct32x32_135_add_neon.c |
    156  s3[7] = multiply_shift_and_narrow_s32_dual(in[4], cospi_4_64);  in vpx_highbd_idct32_12_neon()
    163  s3[17] = multiply_accumulate_shift_and_narrow_s32_dual(s1[16], -cospi_4_64,  in vpx_highbd_idct32_12_neon()
    166  s1[31], cospi_4_64);  in vpx_highbd_idct32_12_neon()
    169  s2[29], -cospi_4_64);  in vpx_highbd_idct32_12_neon()
    170  s3[29] = multiply_accumulate_shift_and_narrow_s32_dual(s2[18], -cospi_4_64,  in vpx_highbd_idct32_12_neon()
    520  s3[7] = multiply_shift_and_narrow_s32_dual(in[4], cospi_4_64);  in vpx_highbd_idct32_16_neon()
    534  s3[17] = multiply_accumulate_shift_and_narrow_s32_dual(s2[17], -cospi_4_64,  in vpx_highbd_idct32_16_neon()
    537  s2[30], cospi_4_64);  in vpx_highbd_idct32_16_neon()
    540  s2[29], -cospi_4_64);  in vpx_highbd_idct32_16_neon()
    541  s3[29] = multiply_accumulate_shift_and_narrow_s32_dual(s2[18], -cospi_4_64,  in vpx_highbd_idct32_16_neon()
|
D | fwd_txfm_neon.c |
    99   v_t0_lo = vmull_n_s16(vget_low_s16(v_x3), cospi_4_64);  in vpx_fdct8x8_neon()
    100  v_t0_hi = vmull_n_s16(vget_high_s16(v_x3), cospi_4_64);  in vpx_fdct8x8_neon()
    113  v_t3_lo = vmlsl_n_s16(v_t3_lo, vget_low_s16(v_x0), cospi_4_64);  in vpx_fdct8x8_neon()
    114  v_t3_hi = vmlsl_n_s16(v_t3_hi, vget_high_s16(v_x0), cospi_4_64);  in vpx_fdct8x8_neon()
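These fwd_txfm_neon.c rows (and their libaom copies further down) are the vector form of the scalar t0/t3 rotation listed under fwd_txfm.c below: each 16-bit half-vector is widened with vmull_n_s16 / vmlal_n_s16 / vmlsl_n_s16 and the 14-bit rounding is folded into the narrowing step. A minimal sketch, assuming the usual DCT_CONST_BITS == 14 rounding narrow; the helper name is illustrative, not the library's:

    #include <arm_neon.h>
    #include <stdint.h>

    /* t0 = x0*c28 + x3*c4 and t3 = x3*c28 - x0*c4, eight lanes at a time,
     * mirroring the vmull_n/vmlsl_n rows listed above. */
    static void fdct_rotate_neon_sketch(int16x8_t x0, int16x8_t x3,
                                        int16_t c28, int16_t c4,
                                        int16x8_t *t0, int16x8_t *t3) {
      int32x4_t t0_lo = vmull_n_s16(vget_low_s16(x3), c4);
      int32x4_t t0_hi = vmull_n_s16(vget_high_s16(x3), c4);
      t0_lo = vmlal_n_s16(t0_lo, vget_low_s16(x0), c28);
      t0_hi = vmlal_n_s16(t0_hi, vget_high_s16(x0), c28);

      int32x4_t t3_lo = vmull_n_s16(vget_low_s16(x3), c28);
      int32x4_t t3_hi = vmull_n_s16(vget_high_s16(x3), c28);
      t3_lo = vmlsl_n_s16(t3_lo, vget_low_s16(x0), c4);
      t3_hi = vmlsl_n_s16(t3_hi, vget_high_s16(x0), c4);

      /* round-shift right by 14 and narrow back to 16 bits */
      *t0 = vcombine_s16(vrshrn_n_s32(t0_lo, 14), vrshrn_n_s32(t0_hi, 14));
      *t3 = vcombine_s16(vrshrn_n_s32(t3_lo, 14), vrshrn_n_s32(t3_hi, 14));
    }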
|
/external/libvpx/libvpx/vpx_dsp/ |
D | fwd_txfm.c |
    154  t0 = x0 * cospi_28_64 + x3 * cospi_4_64;  in vpx_fdct8x8_c()
    157  t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;  in vpx_fdct8x8_c()
    288  t0 = x0 * cospi_28_64 + x3 * cospi_4_64;  in vpx_fdct16x16_c()
    291  t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;  in vpx_fdct16x16_c()
    607  output[4] = dct_32_round(step[4] * cospi_28_64 + step[7] * cospi_4_64);  in vpx_fdct32()
    610  output[7] = dct_32_round(step[7] * cospi_28_64 + step[4] * -cospi_4_64);  in vpx_fdct32()
    621  output[17] = dct_32_round(step[17] * -cospi_4_64 + step[30] * cospi_28_64);  in vpx_fdct32()
    622  output[18] = dct_32_round(step[18] * -cospi_28_64 + step[29] * -cospi_4_64);  in vpx_fdct32()
    633  output[29] = dct_32_round(step[29] * cospi_28_64 + step[18] * -cospi_4_64);  in vpx_fdct32()
    634  output[30] = dct_32_round(step[30] * cospi_4_64 + step[17] * cospi_28_64);  in vpx_fdct32()
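Every scalar row above is the same Q14 fixed-point rotation: cospi_28_64 and cospi_4_64 are cos(28*pi/64) and cos(4*pi/64) scaled by 2^14, the two products are summed, and the result is rounded back down by 14 bits (dct_32_round() in the 32-point rows, the library's equivalent rounding helper elsewhere). A minimal stand-alone sketch of that step; the helper and constant names here are illustrative, not the library's:

    #include <stdint.h>

    #define DCT_CONST_BITS 14

    /* 2^14 * cos(k*pi/64): 16069 is the txfm_common.h value for k = 4,
     * 3196 the matching value for k = 28. */
    static const int32_t kCospi4_64 = 16069;
    static const int32_t kCospi28_64 = 3196;

    static int32_t round_shift14(int64_t x) {
      return (int32_t)((x + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
    }

    /* Forward butterfly on the pair (x0, x3), as in the t0/t3 rows above:
     * t0 = x0*cos(28*pi/64) + x3*cos(4*pi/64)
     * t3 = x3*cos(28*pi/64) - x0*cos(4*pi/64)                              */
    static void fwd_rotate_4_64(int32_t x0, int32_t x3,
                                int32_t *out_a, int32_t *out_b) {
      *out_a = round_shift14((int64_t)x0 * kCospi28_64 + (int64_t)x3 * kCospi4_64);
      *out_b = round_shift14((int64_t)x3 * kCospi28_64 - (int64_t)x0 * kCospi4_64);
    }

The vp9_dct.c and aom_dsp/fwd_txfm.c rows below contain the same expression, so one sketch covers all three.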
|
D | inv_txfm.c |
    280  temp1 = (int16_t)input[1] * cospi_28_64 - (int16_t)input[7] * cospi_4_64;  in idct8_c()
    281  temp2 = (int16_t)input[1] * cospi_4_64 + (int16_t)input[7] * cospi_28_64;  in idct8_c()
    459  s8 = x8 * cospi_4_64 + x9 * cospi_28_64;  in iadst16_c()
    460  s9 = x8 * cospi_28_64 - x9 * cospi_4_64;  in iadst16_c()
    463  s12 = -x12 * cospi_28_64 + x13 * cospi_4_64;  in iadst16_c()
    464  s13 = x12 * cospi_4_64 + x13 * cospi_28_64;  in iadst16_c()
    615  temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64;  in idct16_c()
    616  temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64;  in idct16_c()
    928  temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64;  in idct32_c()
    929  temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64;  in idct32_c()
    [all …]
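The inverse rows (idct8_c(), idct16_c(), idct32_c()) use the same two constants with the signs rearranged. Written out in matrix form (a restatement of the temp1/temp2 lines above, not text from the library):

    \begin{pmatrix} \mathrm{temp1} \\ \mathrm{temp2} \end{pmatrix} =
    \begin{pmatrix} \cos\frac{28\pi}{64} & -\cos\frac{4\pi}{64} \\
                    \cos\frac{4\pi}{64}  &  \cos\frac{28\pi}{64} \end{pmatrix}
    \begin{pmatrix} \mathrm{input}[1] \\ \mathrm{input}[7] \end{pmatrix},
    \qquad \cos^2\tfrac{28\pi}{64} + \cos^2\tfrac{4\pi}{64} = 1 .

Since cos(28*pi/64) = sin(4*pi/64), this 2x2 matrix is orthonormal, and it is the transpose of the forward matrix in the fwd_txfm.c rows above; up to the shared 14-bit rounding the two stages invert each other exactly, which is why the encoder and decoder listings share the same constant pair.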
|
D | txfm_common.h |
    31  static const tran_coef_t cospi_4_64 = 16069;  variable
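txfm_common.h is where the constant itself is declared: 16069 is round(2^14 * cos(4*pi/64)), which is what makes the 14-bit round-shift in every butterfly above the correct rescaling. A tiny stand-alone program (not part of the library) that regenerates the table under that assumption:

    #include <math.h>
    #include <stdio.h>

    /* Reproduce the cospi_k_64 table: round(2^14 * cos(k*pi/64)).
     * For k = 4 this prints 16069, matching the declaration above. */
    int main(void) {
      const double pi = 3.14159265358979323846;
      int k;
      for (k = 1; k < 32; ++k) {
        printf("static const tran_coef_t cospi_%d_64 = %.0f;\n", k,
               round(16384 * cos(k * pi / 64)));
      }
      return 0;
    }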
|
/external/libvpx/libvpx/vp9/encoder/ |
D | vp9_dct.c |
    84   t0 = x0 * cospi_28_64 + x3 * cospi_4_64;  in fdct8()
    87   t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;  in fdct8()
    163  t0 = x0 * cospi_28_64 + x3 * cospi_4_64;  in fdct16()
    166  t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;  in fdct16()
    411  s8 = x8 * cospi_4_64 + x9 * cospi_28_64;  in fadst16()
    412  s9 = x8 * cospi_28_64 - x9 * cospi_4_64;  in fadst16()
    415  s12 = -x12 * cospi_28_64 + x13 * cospi_4_64;  in fadst16()
    416  s13 = x12 * cospi_4_64 + x13 * cospi_28_64;  in fadst16()
    617  t0 = x0 * cospi_28_64 + x3 * cospi_4_64;  in vp9_fdct8x8_quant_c()
    620  t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;  in vp9_fdct8x8_quant_c()
|
/external/libaom/libaom/aom_dsp/arm/ |
D | fwd_txfm_neon.c |
    96   v_t0_lo = vmull_n_s16(vget_low_s16(v_x3), (int16_t)cospi_4_64);  in aom_fdct8x8_neon()
    97   v_t0_hi = vmull_n_s16(vget_high_s16(v_x3), (int16_t)cospi_4_64);  in aom_fdct8x8_neon()
    110  v_t3_lo = vmlsl_n_s16(v_t3_lo, vget_low_s16(v_x0), (int16_t)cospi_4_64);  in aom_fdct8x8_neon()
    111  v_t3_hi = vmlsl_n_s16(v_t3_hi, vget_high_s16(v_x0), (int16_t)cospi_4_64);  in aom_fdct8x8_neon()
|
/external/libaom/libaom/aom_dsp/ |
D | fwd_txfm.c |
    80  t0 = x0 * cospi_28_64 + x3 * cospi_4_64;  in aom_fdct8x8_c()
    83  t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;  in aom_fdct8x8_c()
|