
Searched refs:sse0 (Results 1 – 5 of 5) sorted by relevance

/external/libvpx/libvpx/vp9/encoder/x86/
vp9_variance_mmx.c
42 unsigned int sse0, sse1, sse2, sse3; in vp9_mse16x16_mmx() local
45 vp9_get8x8var_mmx(src, src_stride, ref, ref_stride, &sse0, &sum0); in vp9_mse16x16_mmx()
52 *sse = sse0 + sse1 + sse2 + sse3; in vp9_mse16x16_mmx()
60 unsigned int sse0, sse1, sse2, sse3; in vp9_variance16x16_mmx() local
63 vp9_get8x8var_mmx(src, src_stride, ref, ref_stride, &sse0, &sum0); in vp9_variance16x16_mmx()
70 *sse = sse0 + sse1 + sse2 + sse3; in vp9_variance16x16_mmx()
78 unsigned int sse0, sse1; in vp9_variance16x8_mmx() local
81 vp9_get8x8var_mmx(src, src_stride, ref, ref_stride, &sse0, &sum0); in vp9_variance16x8_mmx()
84 *sse = sse0 + sse1; in vp9_variance16x8_mmx()
93 unsigned int sse0, sse1; in vp9_variance8x16_mmx() local
[all …]
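
Note: every hit in this file follows the same tiling pattern: vp9_get8x8var_mmx() returns a partial SSE and signed sum for one 8x8 quadrant, and the 16x16 functions combine four such results. Below is a minimal portable C sketch of that pattern; get8x8_var_c and variance16x16_sketch are hypothetical scalar stand-ins for the MMX helper and its caller, not the actual kernels.

  #include <stdint.h>

  /* Hypothetical scalar stand-in for vp9_get8x8var_mmx: sum of squared
   * differences (SSE) and signed sum of differences over one 8x8 block. */
  static void get8x8_var_c(const uint8_t *src, int src_stride,
                           const uint8_t *ref, int ref_stride,
                           unsigned int *sse, int *sum) {
    unsigned int s = 0;
    int d = 0;
    for (int i = 0; i < 8; ++i) {
      for (int j = 0; j < 8; ++j) {
        const int diff = src[i * src_stride + j] - ref[i * ref_stride + j];
        s += diff * diff;
        d += diff;
      }
    }
    *sse = s;
    *sum = d;
  }

  /* Sketch of the 16x16 pattern in the hits above: one 8x8 call per
   * quadrant, then variance = SSE - sum^2 / 256 (256 pixels, so >> 8). */
  unsigned int variance16x16_sketch(const uint8_t *src, int src_stride,
                                    const uint8_t *ref, int ref_stride,
                                    unsigned int *sse) {
    unsigned int sse0, sse1, sse2, sse3;
    int sum0, sum1, sum2, sum3;
    get8x8_var_c(src, src_stride, ref, ref_stride, &sse0, &sum0);
    get8x8_var_c(src + 8, src_stride, ref + 8, ref_stride, &sse1, &sum1);
    get8x8_var_c(src + 8 * src_stride, src_stride,
                 ref + 8 * ref_stride, ref_stride, &sse2, &sum2);
    get8x8_var_c(src + 8 * src_stride + 8, src_stride,
                 ref + 8 * ref_stride + 8, ref_stride, &sse3, &sum3);
    *sse = sse0 + sse1 + sse2 + sse3;
    const int sum = sum0 + sum1 + sum2 + sum3;
    return *sse - (unsigned int)(((int64_t)sum * sum) >> 8);
  }

The 64-bit cast is a defensive choice in this sketch; the original code relies on (unsigned int)sum * sum just fitting in 32 bits for these block sizes.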
vp9_variance_avx2.c
74 unsigned int sse0; in variance_avx2() local
85 ref_ptr + recon_stride * i + j, recon_stride, &sse0, &sum0); in variance_avx2()
86 *sse += sse0; in variance_avx2()
114 unsigned int sse0; in vp9_mse16x16_avx2() local
116 vp9_get16x16var_avx2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, in vp9_mse16x16_avx2()
118 *sse = sse0; in vp9_mse16x16_avx2()
119 return sse0; in vp9_mse16x16_avx2()
vp9_variance_sse2.c
44 unsigned int sse0; in variance_sse2() local
47 ref + ref_stride * i + j, ref_stride, &sse0, &sum0); in variance_sse2()
48 *sse += sse0; in variance_sse2()
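
Note: variance_sse2() and variance_avx2() share the loop shape visible in these hits: walk the block in fixed-size tiles and accumulate each tile's partial SSE into *sse (with its sum alongside). A hedged sketch of that loop follows; get_var_fn and variance_loop_sketch are illustrative names standing in for vp9_get8x8var_sse2 / vp9_get16x16var_avx2 and their callers.

  #include <stdint.h>

  /* Per-tile kernel signature, mirroring the vp9_get*var_* helpers. */
  typedef void (*get_var_fn)(const uint8_t *src, int src_stride,
                             const uint8_t *ref, int ref_stride,
                             unsigned int *sse, int *sum);

  /* Sketch of the generic accumulation loop: w x h block, square tiles. */
  static void variance_loop_sketch(const uint8_t *src, int src_stride,
                                   const uint8_t *ref, int ref_stride,
                                   int w, int h, int block, get_var_fn fn,
                                   unsigned int *sse, int *sum) {
    *sse = 0;
    *sum = 0;
    for (int i = 0; i < h; i += block) {
      for (int j = 0; j < w; j += block) {
        unsigned int sse0;
        int sum0;
        fn(src + src_stride * i + j, src_stride,
           ref + ref_stride * i + j, ref_stride, &sse0, &sum0);
        *sse += sse0;  /* matches "*sse += sse0;" in the hits above */
        *sum += sum0;
      }
    }
  }

The vp9_mse16x16_* variants (lines 114-119 of the AVX2 file) skip the sum correction entirely: mean squared error is the raw SSE, so they store sse0 through *sse and return it unchanged.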
/external/libvpx/libvpx/vp8/common/x86/
variance_mmx.c
121 unsigned int sse0, sse1, sse2, sse3, var; in vp8_mse16x16_mmx() local
125 vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ; in vp8_mse16x16_mmx()
130 var = sse0 + sse1 + sse2 + sse3; in vp8_mse16x16_mmx()
143 unsigned int sse0, sse1, sse2, sse3, var; in vp8_variance16x16_mmx() local
147 vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ; in vp8_variance16x16_mmx()
152 var = sse0 + sse1 + sse2 + sse3; in vp8_variance16x16_mmx()
165 unsigned int sse0, sse1, var; in vp8_variance16x8_mmx() local
168 vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ; in vp8_variance16x8_mmx()
171 var = sse0 + sse1; in vp8_variance16x8_mmx()
186 unsigned int sse0, sse1, var; in vp8_variance8x16_mmx() local
[all …]
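
Note: these hits truncate before each function's return, but the full sources finish with variance = SSE - sum*sum / N, where N is the pixel count of the block: N = 256 for 16x16 (the >> 8 visible at variance_sse2.c line 186 below), and N = 128 for the 16x8 and 8x16 shapes above, i.e. >> 7. A sketch of that tail, with the shift inferred from the formula rather than quoted from the file:

  #include <stdint.h>

  /* Hedged sketch of the tail of vp8_variance16x8_mmx() /
   * vp8_variance8x16_mmx(): combine the two 8x8 partials, then
   * variance = SSE - sum^2 / N with N = 128 pixels, hence >> 7
   * (assumed from the formula; the snippets cut off before the return). */
  static unsigned int variance_from_partials(unsigned int sse0,
                                             unsigned int sse1,
                                             int sum0, int sum1) {
    const unsigned int var = sse0 + sse1;
    const int sum = sum0 + sum1;
    return var - (unsigned int)(((int64_t)sum * sum) >> 7);
  }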
variance_sse2.c
180 unsigned int sse0; in vp8_variance16x16_wmt() local
184 vp8_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ; in vp8_variance16x16_wmt()
185 *sse = sse0; in vp8_variance16x16_wmt()
186 return (sse0 - (((unsigned int)sum0 * sum0) >> 8)); in vp8_variance16x16_wmt()
196 unsigned int sse0; in vp8_mse16x16_wmt() local
198 vp8_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ; in vp8_mse16x16_wmt()
199 *sse = sse0; in vp8_mse16x16_wmt()
200 return sse0; in vp8_mse16x16_wmt()
213 unsigned int sse0, sse1, var; in vp8_variance16x8_wmt() local
216 vp8_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ; in vp8_variance16x8_wmt()
[all …]
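
Note: the return at line 186, sse0 - (((unsigned int)sum0 * sum0) >> 8), is the integer form of the identity Var(x) = E[x^2] - E[x]^2: SSE minus sum^2 / N over the N = 256 pixels of a 16x16 block. A tiny self-contained check of that identity on toy data, scaled by N to stay exact in integers:

  #include <assert.h>
  #include <stdint.h>

  /* Verifies: N * sum((d - mean)^2) == N * sum(d^2) - sum(d)^2,
   * which is why variance can be computed as SSE - sum^2 / N without
   * ever materializing the mean. Toy array, not a real pixel block. */
  int main(void) {
    const int d[4] = {3, -1, 4, -2};  /* toy "pixel differences" */
    const int n = 4;
    int64_t sse = 0, sum = 0;
    for (int i = 0; i < n; ++i) {
      sse += (int64_t)d[i] * d[i];
      sum += d[i];
    }
    /* Sum of squared deviations, computed directly but scaled by n^2
     * so everything stays integral: (n*d - sum) == n * (d - mean). */
    int64_t dev2 = 0;
    for (int i = 0; i < n; ++i) {
      const int64_t e = n * (int64_t)d[i] - sum;
      dev2 += e * e;
    }
    assert(dev2 == n * (n * sse - sum * sum));  /* identity holds */
    return 0;
  }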