/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/ppc/ |
D | encodemb_altivec.asm |
    46  vmrghb  v3, v0, v1    ;# unpack high src to short
    54  vsubshs v3, v3, v4
    56  stvx    v3, 0, r3     ;# store out diff
    60  vmrghb  v3, v0, v1    ;# unpack high src to short
    63  vsubshs v3, v3, v4
    65  stvx    v3, r10, r3   ;# store out diff
    83  vmrghb  v3, v0, v1    ;# unpack high src to short
    91  vsubshs v3, v3, v4
    93  stvx    v3, 0, r3     ;# store out diff
    97  vmrghb  v3, v0, v1    ;# unpack high src to short
    [all …]
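These matches are VP8's subtract step on PowerPC: vmrghb widens source and prediction bytes to 16-bit halfwords, vsubshs takes their saturating difference, and stvx stores the residual. A minimal scalar C sketch of the same operation (function name is illustrative, not the libvpx API):

    #include <stdint.h>

    /* Widen 8-bit source and prediction to 16 bits and store the
       difference -- the scalar shape of vmrghb + vsubshs + stvx.
       No saturation is needed here: the difference of two uint8_t
       values always fits in int16_t. */
    static void subtract_block(int16_t *diff, const uint8_t *src,
                               const uint8_t *pred, int n) {
        for (int i = 0; i < n; ++i)
            diff[i] = (int16_t)(src[i] - pred[i]);
    }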
|
D | rdopt_altivec.asm |
    31  vspltisw v3, 0
    35  vmsumshm v2, v0, v0, v3   ;# multiply differences
    43  vsumsws  v1, v1, v3       ;# sum up
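Here vmsumshm multiply-accumulates halfword differences into word sums (after vspltisw zeroes the accumulator) and vsumsws reduces them to a single word. The scalar shape, as a sketch (the vector code additionally relies on saturating sums):

    #include <stdint.h>

    /* Sum of squared 16-bit differences -- the quantity the
       vmsumshm/vsumsws pair computes for the RD cost. */
    static int32_t sum_squared_diff(const int16_t *d, int n) {
        int32_t acc = 0;            /* vspltisw v3, 0 */
        for (int i = 0; i < n; ++i)
            acc += d[i] * d[i];     /* multiply differences, sum up */
        return acc;
    }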
|
D | fdct_altivec.asm |
    40  lvx v3, r6, r10
    69  vmsumshm v11, v3, v9, v11
    130 two_rows_vert v2, v3
    159 two_rows_vert v2, v3
    181 two_rows_vert v2, v3
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/ppc/ |
D | idctllm_altivec.asm |
    41  vupkhsh v3, v1
    42  vaddsws v6, v2, v3    ;# a1 = ip[0]+ip[8]
    43  vsubsws v7, v2, v3    ;# b1 = ip[0]-ip[8]
    57  vmulosh v3, v1, v8
    58  vsraw   v3, v3, v12
    59  vaddsws v3, v3, v1    ;# ip[12] * sin(pi/8) * sqrt(2)
    65  vaddsws v3, v3, v5    ;# d1
    67  vaddsws v0, v6, v3    ;# a1 + d1
    68  vsubsws v3, v6, v3    ;# a1 - d1
    75  vmrghw  v5, v2, v3    ;# c0 d0 c1 d1
    [all …]
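The comments spell out VP8's 4x4 inverse-DCT butterfly; the vmulosh/vsraw/vaddsws triple at lines 57-59 is the fixed-point x + ((x*k) >> 16) idiom. A scalar sketch of the column pass, with the Q16 constants as used in vp8/common/idctllm.c (hedged; the stride is flattened to indices 0/4/8/12):

    #include <stdint.h>

    static const int sinpi8sqrt2 = 35468;       /* sin(pi/8)*sqrt(2), Q16 */
    static const int cospi8sqrt2minus1 = 20091; /* cos(pi/8)*sqrt(2)-1, Q16 */

    static void idct4_col(const int16_t *ip, int32_t *op) {
        int32_t a1 = ip[0] + ip[8];                   /* a1 = ip[0]+ip[8] */
        int32_t b1 = ip[0] - ip[8];                   /* b1 = ip[0]-ip[8] */
        int32_t c1 = ((ip[4] * sinpi8sqrt2) >> 16)
                   - (ip[12] + ((ip[12] * cospi8sqrt2minus1) >> 16));
        int32_t d1 = (ip[4] + ((ip[4] * cospi8sqrt2minus1) >> 16))
                   + ((ip[12] * sinpi8sqrt2) >> 16);  /* d1 */
        op[0]  = a1 + d1;                             /* a1 + d1 */
        op[4]  = b1 + c1;
        op[8]  = b1 - c1;
        op[12] = a1 - d1;                             /* a1 - d1 */
    }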
|
D | recon_altivec.asm |
    20  lvx     v3, 0, \Diff    ;# v3 = d0..d7
    21  vaddshs v2, v2, v3      ;# v2 = r0..r7
    23  lvx     v3, r8, \Diff   ;# v3 = d8..d15
    25  vaddshs v3, v3, v1      ;# v3 = r8..r15
    26  vpkshus v2, v2, v3      ;# v2 = 8-bit r0..r15
    59  lvx     v3, 0, \Diff    ;# v3 = d0..d7
    60  vaddshs v2, v2, v3      ;# v2 = r0..r7
    62  lvx     v3, r8, \Diff   ;# v2 = d8..d15
    63  vaddshs v3, v3, v1      ;# v3 = r8..r15
    64  vpkshus v2, v2, v3      ;# v3 = 8-bit r0..r15
    [all …]
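Reconstruction adds the 16-bit residual back onto the prediction and repacks to bytes; vpkshus performs the 0..255 clamp for free while packing. A scalar sketch (names illustrative):

    #include <stdint.h>

    static uint8_t clamp255(int v) {
        return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
    }

    /* pred + diff, saturated to 0..255 -- what vaddshs + vpkshus
       accomplish across 16 pixels at a time. */
    static void recon_block(uint8_t *dst, const uint8_t *pred,
                            const int16_t *diff, int n) {
        for (int i = 0; i < n; ++i)
            dst[i] = clamp255(pred[i] + diff[i]);
    }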
|
D | loopfilter_filters_altivec.asm |
    119 Tpair v22,v23, v3,v11
    128 Tpair v2,v3, v17,v25
    153 ;# v3 = 49 50 ... 62 63
    169 ;# v3 = 3 19 ... 47 63
    178 ;# It acts in place on registers v0...v3, uses v4...v7 as temporaries,
    189 vmrghb v6, v2, v3
    190 vmrglb v7, v2, v3
    197 vmrglh v3, v5, v7
    203 vmrghw v6, v2, v3
    204 vmrglw v7, v2, v3
    [all …]
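The vmrghb/vmrglb, then vmrg*h, then vmrg*w ladder is a merge-network transpose: log2(N) rounds of interleaving turn rows into columns in place, so the loop filter can run the same code on vertical and horizontal edges. The same idea on a 4x4 integer matrix, as a sketch (the AltiVec version does this on 16-byte registers):

    /* One interleave step: lo gets the interleaved low halves of a and b
       (vmrgh*), hi the interleaved high halves (vmrgl*). */
    static void interleave4(const int *a, const int *b, int *lo, int *hi) {
        lo[0] = a[0]; lo[1] = b[0]; lo[2] = a[1]; lo[3] = b[1];
        hi[0] = a[2]; hi[1] = b[2]; hi[2] = a[3]; hi[3] = b[3];
    }

    static void transpose4x4(int m[4][4]) {
        int t[4][4], u[4][4];
        interleave4(m[0], m[2], t[0], t[1]);  /* round 1: rows 0/2, 1/3 */
        interleave4(m[1], m[3], t[2], t[3]);
        interleave4(t[0], t[2], u[0], u[1]);  /* round 2 finishes it */
        interleave4(t[1], t[3], u[2], u[3]);
        for (int i = 0; i < 4; ++i)
            for (int j = 0; j < 4; ++j) m[i][j] = u[i][j];
    }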
|
D | variance_altivec.asm |
    22  lvsl    v3, 0, \R     ;# permutate value for alignment
    27  vperm   \V, v1, v2, v3
    55  vmrghb  v3, v7, v5
    56  vsubshs v2, v2, v3
    60  vmrglb  v3, v7, v5
    61  vsubshs v2, v2, v3
    66  vsububs v3, v5, v4
    67  vor     v2, v2, v3
    216 vsububs v3, v5, v4
    217 vor     v2, v2, v3
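Two idioms appear here: the lvsl/vperm unaligned load (see the sketch under the sad_altivec.asm entry below) and, at lines 66-67 and 216-217, the saturating absolute difference. For unsigned bytes, |a-b| == sat_sub(a,b) | sat_sub(b,a), because one of the two saturating subtractions is always zero. Scalar sketch:

    #include <stdint.h>

    static uint8_t sat_sub_u8(uint8_t a, uint8_t b) {
        return (uint8_t)(a > b ? a - b : 0);   /* vsububs */
    }

    /* vsububs both ways, then vor: one operand saturates to zero,
       so the OR is the absolute difference. */
    static uint8_t abs_diff_u8(uint8_t a, uint8_t b) {
        return sat_sub_u8(a, b) | sat_sub_u8(b, a);
    }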
|
D | filter_altivec.asm |
    40  vspltb v3, v0, 3
    72  Msum v16, v17, \P3, v3, v8
    422 Read8x8 v3, r3, r4, 1
    430 interp_8x8 v3
    469 Read8x8 v3, r3, r4, 1
    492 vinterp_no_store_8x8 v0, v1, v2, v3, v4, v5
    493 vinterp_no_store_8x8 v1, v2, v3, v4, v5, v6
    494 vinterp_no_store_8x8 v2, v3, v4, v5, v6, v7
    495 vinterp_no_store_8x8 v3, v4, v5, v6, v7, v8
    503 w_8x8 v3, r7, r0, r8
    [all …]
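The Msum and vinterp macros implement VP8's six-tap sub-pixel filter: each output pixel is a weighted sum of six neighbours, rounded and shifted back down. VP8's taps sum to 128, so the shift is 7 and the rounding constant 64 (hedged); a scalar sketch of the horizontal pass:

    #include <stdint.h>

    static uint8_t clamp255(int v) {
        return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
    }

    /* Six-tap horizontal filter: src must have two valid pixels of
       context on each side of the n output positions. */
    static void sixtap_h(uint8_t *dst, const uint8_t *src, int n,
                         const int16_t taps[6]) {
        for (int i = 0; i < n; ++i) {
            int sum = 64;                      /* round to nearest */
            for (int k = 0; k < 6; ++k)
                sum += src[i + k - 2] * taps[k];
            dst[i] = clamp255(sum >> 7);
        }
    }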
|
D | sad_altivec.asm |
    19  lvsl  v3, 0, \R     ;# permutate value for alignment
    24  vperm \V, v1, v2, v3
    56  lvsl  v3, 0, r5     ;# only needs to be done once per block
    66  vperm v5, v1, v2, v3
    87  vperm v5, v1, v2, v3
    99  vperm v5, v1, v2, v3
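lvsl + two lvx + vperm is AltiVec's standard unaligned load: lvx ignores the low four address bits, so two aligned loads straddling the target address are permuted into one correctly aligned vector, and the permute control only needs computing once per block. The same idiom with C intrinsics (PowerPC only; a sketch):

    #include <altivec.h>

    static vector unsigned char load_unaligned(const unsigned char *p) {
        vector unsigned char perm = vec_lvsl(0, p);  /* lvsl */
        vector unsigned char lo = vec_ld(0, p);      /* aligned load at p */
        vector unsigned char hi = vec_ld(15, p);     /* next aligned 16B */
        return vec_perm(lo, hi, perm);               /* vperm */
    }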
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/x86/ |
D | vp9_dct_sse2.c |
    137 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);   in vp9_fdct4x4_sse2() local
    141 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);         in vp9_fdct4x4_sse2()
    182 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING2);  in vp9_fdct4x4_sse2() local
    186 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS2);        in vp9_fdct4x4_sse2()
    454 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);   in vp9_fdct8x8_sse2() local
    462 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);         in vp9_fdct8x8_sse2()
    516 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);   in vp9_fdct8x8_sse2() local
    524 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);         in vp9_fdct8x8_sse2()
    778 __m128i v0, v1, v2, v3, v4, v5, v6, v7;                        in fdct8_sse2() local
    799 v3 = _mm_unpackhi_epi16(u2, u3);                               in fdct8_sse2()
    [all …]
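Every v3/w3 pair here is the same fixed-point step: vp9 keeps its DCT constants in Q14 (DCT_CONST_BITS is 14 in vp9_idct.h), so each 32-bit product is rounded by adding 2^13 before the arithmetic shift. A minimal sketch of that step:

    #include <emmintrin.h>

    #define DCT_CONST_BITS 14
    #define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1))

    /* Round-to-nearest right shift of four packed int32 values --
       the _mm_add_epi32/_mm_srai_epi32 pattern in the listing. */
    static __m128i round_shift_epi32(__m128i u) {
        const __m128i k_rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
        return _mm_srai_epi32(_mm_add_epi32(u, k_rounding), DCT_CONST_BITS);
    }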
|
D | vp9_dct_avx2.c |
    353 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);  in vp9_fdct8x8_avx2() local
    361 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);        in vp9_fdct8x8_avx2()
    415 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);  in vp9_fdct8x8_avx2() local
    423 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);        in vp9_fdct8x8_avx2()
    677 __m128i v0, v1, v2, v3, v4, v5, v6, v7;                       in fdct8_avx2() local
    698 v3 = _mm_unpackhi_epi16(u2, u3);                              in fdct8_avx2()
    705 u5 = _mm_madd_epi16(v3, k__cospi_p24_p08);                    in fdct8_avx2()
    707 u7 = _mm_madd_epi16(v3, k__cospi_m08_p24);                    in fdct8_avx2()
    713 v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);                in fdct8_avx2()
    722 u3 = _mm_srai_epi32(v3, DCT_CONST_BITS);                      in fdct8_avx2()
    [all …]
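The unpack/madd pairs at lines 698-707 are the rotation butterfly: _mm_unpackhi_epi16 interleaves two int16 vectors so each 32-bit lane holds one (a, b) pair, and _mm_madd_epi16 against a packed (c0, c1) constant yields a*c0 + b*c1 per lane in one instruction. A sketch with vp9's Q14 cospi constants (values hedged from vp9_idct.h):

    #include <emmintrin.h>

    /* cospi_24_64 = 6270, cospi_8_64 = 15137 (Q14, hedged). */
    static void butterfly_hi(__m128i u2, __m128i u3,
                             __m128i *out_a, __m128i *out_b) {
        const __m128i k_p24_p08 =
            _mm_set1_epi32((15137 << 16) | (6270 & 0xffff));
        const __m128i k_m08_p24 =
            _mm_set1_epi32((6270 << 16) | ((-15137) & 0xffff));
        __m128i v3 = _mm_unpackhi_epi16(u2, u3);  /* (u2_i, u3_i) pairs */
        *out_a = _mm_madd_epi16(v3, k_p24_p08);   /* u2*c24 + u3*c8  */
        *out_b = _mm_madd_epi16(v3, k_m08_p24);   /* -u2*c8 + u3*c24 */
    }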
|
/hardware/intel/common/libva/test/decode/ |
D | tinyjpeg.c |
    631 int h1, h2, h3, v1, v2, v3;                       in tinyjpeg_decode() local
    637 v3 = pic_param.components[2].v_sampling_factor;   in tinyjpeg_decode()
    640 v1 == 2 && v2 == 1 && v3 == 1) {                  in tinyjpeg_decode()
    648 v1 == 1 && v2 == 1 && v3 == 1) {                  in tinyjpeg_decode()
    656 v1 == 1 && v2 == 1 && v3 == 1) {                  in tinyjpeg_decode()
    664 v1 == 1 && v2 == 1 && v3 == 1) {                  in tinyjpeg_decode()
    671 v1 == 2 && v2 == 1 && v3 == 1) {                  in tinyjpeg_decode()
    679 v1 == 2 && v2 == 2 && v3 == 2) {                  in tinyjpeg_decode()
    687 v1 == 2 && v2 == 1 && v3 == 1) {                  in tinyjpeg_decode()
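These comparisons classify the stream's chroma subsampling from the per-component JPEG sampling factors: 2x2 luma over 1x1 chroma is 4:2:0, 2x1 is 4:2:2, 1x1 throughout is 4:4:4, and the decoder picks a matching surface format. A sketch of the mapping (function and strings are illustrative, not the libva API):

    /* Classify chroma subsampling from JPEG sampling factors
       (component 1 = Y, 2 = Cb, 3 = Cr). */
    static const char *subsampling_name(int h1, int v1, int h2, int v2,
                                        int h3, int v3) {
        if (h2 != h3 || v2 != v3) return "unsupported";
        int ch = h1 / h2, cv = v1 / v2;   /* luma-to-chroma ratios */
        if (ch == 2 && cv == 2) return "4:2:0";
        if (ch == 2 && cv == 1) return "4:2:2";
        if (ch == 1 && cv == 1) return "4:4:4";
        if (ch == 1 && cv == 2) return "4:4:0";
        return "unsupported";
    }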
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/third_party/googletest/src/include/gtest/ |
D | gtest.h |
    10729 ValueArray3(T1 v1, T2 v2, T3 v3) : v1_(v1), v2_(v2), v3_(v3) {}
    10750 ValueArray4(T1 v1, T2 v2, T3 v3, T4 v4) : v1_(v1), v2_(v2), v3_(v3),
    10773 ValueArray5(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5) : v1_(v1), v2_(v2), v3_(v3),
    10798 ValueArray6(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6) : v1_(v1), v2_(v2),
    10799     v3_(v3), v4_(v4), v5_(v5), v6_(v6) {}
    10825 ValueArray7(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7) : v1_(v1),
    10826     v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7) {}
    10853 ValueArray8(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
    10854     T8 v8) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
    10883 ValueArray9(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
    [all …]
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/x86/ |
D | vp9_idct_intrin_sse2.c |
    720 __m128i v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15;  in iadst8_sse2() local
    789 v3 = _mm_add_epi32(w3, k__DCT_CONST_ROUNDING);   in iadst8_sse2()
    806 u3 = _mm_srai_epi32(v3, DCT_CONST_BITS);         in iadst8_sse2()
    843 v3 = _mm_madd_epi16(u1, k__cospi_p24_m08);       in iadst8_sse2()
    852 w3 = _mm_add_epi32(v3, v7);                      in iadst8_sse2()
    856 w7 = _mm_sub_epi32(v3, v7);                      in iadst8_sse2()
    861 v3 = _mm_add_epi32(w3, k__DCT_CONST_ROUNDING);   in iadst8_sse2()
    870 u3 = _mm_srai_epi32(v3, DCT_CONST_BITS);         in iadst8_sse2()
    891 v3 = _mm_madd_epi16(u1, k__cospi_p16_m16);       in iadst8_sse2()
    900 u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING);   in iadst8_sse2()
    [all …]
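iadst8 chains the same building blocks: madd rotations (lines 843, 891), a cross add/sub that combines two half-butterflies (852, 856), and the Q14 round-and-shift (861-870). A sketch of the combine-then-round step, with names mirroring the listing (constants hedged as in the vp9_dct_sse2.c note above):

    #include <emmintrin.h>

    /* w3/w7 = sum and difference of two rotation outputs, then a
       round-to-nearest Q14 shift, as in iadst8_sse2 above. */
    static void combine_round(__m128i v3, __m128i v7,
                              __m128i *u3, __m128i *u7) {
        const __m128i k_rounding = _mm_set1_epi32(1 << 13);
        __m128i w3 = _mm_add_epi32(v3, v7);
        __m128i w7 = _mm_sub_epi32(v3, v7);
        *u3 = _mm_srai_epi32(_mm_add_epi32(w3, k_rounding), 14);
        *u7 = _mm_srai_epi32(_mm_add_epi32(w7, k_rounding), 14);
    }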
|