Home
last modified time | relevance | path

Searched refs:v5 (Results 1 – 14 of 14) sorted by relevance

/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/ppc/
idctllm_altivec.asm:51  vmulosh v5, v1, v9
52 vsraw v5, v5, v12 ;# ip[12] * cos(pi/8) * sqrt(2)
53 vaddsws v5, v5, v1
55 vsubsws v4, v4, v5 ;# c1
61 vmulosh v5, v0, v9
62 vsraw v5, v5, v12 ;# ip[ 4] * cos(pi/8) * sqrt(2)
63 vaddsws v5, v5, v0
65 vaddsws v3, v3, v5 ;# d1
75 vmrghw v5, v2, v3 ;# c0 d0 c1 d1
80 vperm v0, v4, v5, v10 ;# a0 b0 c0 d0
[all …]
sad_altivec.asm:46  ;# v6 = abs (v4 - v5)
47 vsububs v6, v4, v5
48 vsububs v7, v5, v4
51 ;# v8 += abs (v4 - v5)
66 vperm v5, v1, v2, v3
71 vsububs v6, v4, v5
72 vsububs v7, v5, v4
87 vperm v5, v1, v2, v3
92 vsububs v6, v9, v5
94 vsububs v7, v5, v9
[all …]
variance_altivec.asm:55  vmrghb v3, v7, v5
60 vmrglb v3, v7, v5
65 vsububs v2, v4, v5
66 vsububs v3, v5, v4
76 load_aligned_16 v5, r5, r10
109 load_aligned_16 v5, r5, r10
124 vmrghb v5, v5, v0
208 load_aligned_16 v5, r5, r10
215 vsububs v2, v4, v5
216 vsububs v3, v5, v4
[all …]
loopfilter_filters_altivec.asm:121  Tpair v26,v27, v5,v13
129 Tpair v4,v5, v18,v26
188 vmrglb v5, v0, v1
196 vmrghh v2, v5, v7
197 vmrglh v3, v5, v7
202 vmrglw v5, v0, v1
210 vperm v2, v5, v7, \Vlo
211 vperm v3, v5, v7, \Vhi
252 Tpair v12, v13, v1, v5
260 Tpair v4, v5, v12, v16
[all …]
filter_altivec.asm:34  vspltish v5, 8
36 vslh v6, v5, v6 ;# 0x0040 0040 0040 0040 0040 0040 0040 0040
42 vspltb v5, v0, 5
73 Msum v16, v17, \P5, v5, v8
424 Read8x8 v5, r3, r4, 1
432 interp_8x8 v5
471 Read8x8 v5, r3, r4, 1
492 vinterp_no_store_8x8 v0, v1, v2, v3, v4, v5
493 vinterp_no_store_8x8 v1, v2, v3, v4, v5, v6
494 vinterp_no_store_8x8 v2, v3, v4, v5, v6, v7
[all …]
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/ppc/
encodemb_altivec.asm:37  lvsl v5, 0, r4 ;# permutate value for alignment
44 vperm v1, v1, v0, v5
49 lvsl v5, 0, r4 ;# permutate value for alignment
58 vperm v1, v1, v0, v5
74 lvsl v5, 0, r5 ;# permutate value for alignment
81 vperm v1, v1, v0, v5
86 lvsl v5, 0, r5 ;# permutate value for alignment
95 vperm v1, v1, v0, v5
fdct_altivec.asm:43  load_c v5, ppc_dctperm_tab, r6, r9, r10
73 vperm \Dst, v10, v10, v5 ;# Dest = A0 B0 A1 B1 A2 B2 A3 B3
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/third_party/googletest/src/include/gtest/
gtest.h:10773  ValueArray5(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5) : v1_(v1), v2_(v2), v3_(v3),
10774 v4_(v4), v5_(v5) {}
10798 ValueArray6(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6) : v1_(v1), v2_(v2),
10799 v3_(v3), v4_(v4), v5_(v5), v6_(v6) {}
10825 ValueArray7(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7) : v1_(v1),
10826 v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7) {}
10853 ValueArray8(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
10854 T8 v8) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
10883 ValueArray9(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
10884 T9 v9) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
[all …]
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/x86/
vp9_dct_avx2.c:355  const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING); in vp9_fdct8x8_avx2() local
363 const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS); in vp9_fdct8x8_avx2()
417 const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING); in vp9_fdct8x8_avx2() local
425 const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS); in vp9_fdct8x8_avx2()
677 __m128i v0, v1, v2, v3, v4, v5, v6, v7; in fdct8_avx2() local
715 v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING); in fdct8_avx2()
724 u5 = _mm_srai_epi32(v5, DCT_CONST_BITS); in fdct8_avx2()
773 v5 = _mm_madd_epi16(u3, k__cospi_m20_p12); in fdct8_avx2()
783 u5 = _mm_add_epi32(v5, k__DCT_CONST_ROUNDING); in fdct8_avx2()
792 v5 = _mm_srai_epi32(u5, DCT_CONST_BITS); in fdct8_avx2()
[all …]
vp9_dct_sse2.c:456  const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING); in vp9_fdct8x8_sse2() local
464 const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS); in vp9_fdct8x8_sse2()
518 const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING); in vp9_fdct8x8_sse2() local
526 const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS); in vp9_fdct8x8_sse2()
778 __m128i v0, v1, v2, v3, v4, v5, v6, v7; in fdct8_sse2() local
816 v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING); in fdct8_sse2()
825 u5 = _mm_srai_epi32(v5, DCT_CONST_BITS); in fdct8_sse2()
874 v5 = _mm_madd_epi16(u3, k__cospi_m20_p12); in fdct8_sse2()
884 u5 = _mm_add_epi32(v5, k__DCT_CONST_ROUNDING); in fdct8_sse2()
893 v5 = _mm_srai_epi32(u5, DCT_CONST_BITS); in fdct8_sse2()
[all …]
/hardware/bsp/intel/peripheral/libupm/cmake/modules/
TargetArch.cmake:3  # Currently handles arm (v5, v6, v7), x86 (32/64), ia64, and ppc (32/64)
/hardware/bsp/intel/peripheral/libmraa/cmake/modules/
TargetArch.cmake:3  # Currently handles arm (v5, v6, v7), x86 (32/64), ia64, and ppc (32/64)
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/x86/
vp9_idct_intrin_sse2.c:720  __m128i v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15; in iadst8_sse2() local
791 v5 = _mm_add_epi32(w5, k__DCT_CONST_ROUNDING); in iadst8_sse2()
808 u5 = _mm_srai_epi32(v5, DCT_CONST_BITS); in iadst8_sse2()
845 v5 = _mm_madd_epi16(u3, k__cospi_m24_p08); in iadst8_sse2()
850 w1 = _mm_add_epi32(v1, v5); in iadst8_sse2()
854 w5 = _mm_sub_epi32(v1, v5); in iadst8_sse2()
863 v5 = _mm_add_epi32(w5, k__DCT_CONST_ROUNDING); in iadst8_sse2()
872 u5 = _mm_srai_epi32(v5, DCT_CONST_BITS); in iadst8_sse2()
893 v5 = _mm_madd_epi16(u3, k__cospi_p16_p16); in iadst8_sse2()
902 u5 = _mm_add_epi32(v5, k__DCT_CONST_ROUNDING); in iadst8_sse2()
[all …]
/hardware/ril/libril/
ril.cpp:910  RIL_SIM_IO_v5 v5; in dispatchSIM_IO() member
957 size = (s_callbacks.version < 6) ? sizeof(simIO.v5) : sizeof(simIO.v6); in dispatchSIM_IO()