/external/libvpx/libvpx/vp9/encoder/x86/
D | vp9_variance_mmx.c |
    42  unsigned int sse0, sse1, sse2, sse3;  in vp9_mse16x16_mmx() local
    45  vp9_get8x8var_mmx(src, src_stride, ref, ref_stride, &sse0, &sum0);  in vp9_mse16x16_mmx()
    52  *sse = sse0 + sse1 + sse2 + sse3;  in vp9_mse16x16_mmx()
    60  unsigned int sse0, sse1, sse2, sse3;  in vp9_variance16x16_mmx() local
    63  vp9_get8x8var_mmx(src, src_stride, ref, ref_stride, &sse0, &sum0);  in vp9_variance16x16_mmx()
    70  *sse = sse0 + sse1 + sse2 + sse3;  in vp9_variance16x16_mmx()
    78  unsigned int sse0, sse1;  in vp9_variance16x8_mmx() local
    81  vp9_get8x8var_mmx(src, src_stride, ref, ref_stride, &sse0, &sum0);  in vp9_variance16x8_mmx()
    84  *sse = sse0 + sse1;  in vp9_variance16x8_mmx()
    93  unsigned int sse0, sse1;  in vp9_variance8x16_mmx() local
    [all …]
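Every hit in vp9_variance_mmx.c follows the same composition pattern: the block is tiled into 8x8 pieces, vp9_get8x8var_mmx() computes one tile's sum of squared differences (sse) and sum of differences (sum), and the caller adds the partials together (hit lines 52, 70, 84). A minimal scalar sketch of the 16x16 case, with hypothetical names get8x8var() and variance16x16() standing in for the MMX kernels:

    #include <stdint.h>

    /* Scalar stand-in for vp9_get8x8var_mmx(): sum of squared differences
       (sse) and sum of differences (sum) over one 8x8 tile. */
    void get8x8var(const uint8_t *src, int src_stride,
                   const uint8_t *ref, int ref_stride,
                   unsigned int *sse, int *sum) {
      *sse = 0;
      *sum = 0;
      for (int i = 0; i < 8; ++i) {
        for (int j = 0; j < 8; ++j) {
          const int diff = src[i * src_stride + j] - ref[i * ref_stride + j];
          *sum += diff;
          *sse += diff * diff;
        }
      }
    }

    /* 16x16 variance from four 8x8 tiles, in the shape of
       vp9_variance16x16_mmx(): top-left, top-right, bottom-left,
       bottom-right, then variance = SSE - sum^2 / 256. */
    unsigned int variance16x16(const uint8_t *src, int src_stride,
                               const uint8_t *ref, int ref_stride,
                               unsigned int *sse) {
      unsigned int sse0, sse1, sse2, sse3;
      int sum0, sum1, sum2, sum3;
      get8x8var(src, src_stride, ref, ref_stride, &sse0, &sum0);
      get8x8var(src + 8, src_stride, ref + 8, ref_stride, &sse1, &sum1);
      get8x8var(src + 8 * src_stride, src_stride,
                ref + 8 * ref_stride, ref_stride, &sse2, &sum2);
      get8x8var(src + 8 * src_stride + 8, src_stride,
                ref + 8 * ref_stride + 8, ref_stride, &sse3, &sum3);
      *sse = sse0 + sse1 + sse2 + sse3;
      const int sum = sum0 + sum1 + sum2 + sum3;
      return *sse - (((unsigned int)sum * sum) >> 8);
    }

The mse variants stop at the summed SSE (hit line 52) and skip the mean correction, which is why vp9_mse16x16_mmx() has no final sum term.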
D | vp9_variance_avx2.c |
    74  unsigned int sse0;  in variance_avx2() local
    85  ref_ptr + recon_stride * i + j, recon_stride, &sse0, &sum0);  in variance_avx2()
    86  *sse += sse0;  in variance_avx2()
   114  unsigned int sse0;  in vp9_mse16x16_avx2() local
   116  vp9_get16x16var_avx2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0,  in vp9_mse16x16_avx2()
   118  *sse = sse0;  in vp9_mse16x16_avx2()
   119  return sse0;  in vp9_mse16x16_avx2()
D | vp9_variance_sse2.c |
    44  unsigned int sse0;  in variance_sse2() local
    47  ref + ref_stride * i + j, ref_stride, &sse0, &sum0);  in variance_sse2()
    48  *sse += sse0;  in variance_sse2()
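The SSE2 and AVX2 files trade the unrolled four-call pattern for a generic driver: variance_sse2() and variance_avx2() walk the block tile by tile and fold each tile's sse0/sum0 into running totals (hit lines 47-48 and 85-86). A scalar sketch of that loop, reusing the hypothetical get8x8var() from the sketch above:

    #include <stdint.h>

    /* Scalar get8x8var() as sketched earlier. */
    void get8x8var(const uint8_t *src, int src_stride,
                   const uint8_t *ref, int ref_stride,
                   unsigned int *sse, int *sum);

    /* Generic tiled driver in the shape of variance_sse2(): walk a
       w x h block in 8x8 steps and accumulate the per-tile partials. */
    void variance_tiled(const uint8_t *src, int src_stride,
                        const uint8_t *ref, int ref_stride,
                        int w, int h, unsigned int *sse, int *sum) {
      *sse = 0;
      *sum = 0;
      for (int i = 0; i < h; i += 8) {
        for (int j = 0; j < w; j += 8) {
          unsigned int sse0;
          int sum0;
          get8x8var(src + src_stride * i + j, src_stride,
                    ref + ref_stride * i + j, ref_stride, &sse0, &sum0);
          *sse += sse0;  /* hit line 48 */
          *sum += sum0;
        }
      }
    }

Judging from the hits, the real drivers take the tile size and per-tile kernel as parameters (a vectorized 8x8 or 16x16 getter), so one loop serves several block shapes.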
/external/libvpx/libvpx/vp8/common/x86/
D | variance_mmx.c |
   121  unsigned int sse0, sse1, sse2, sse3, var;  in vp8_mse16x16_mmx() local
   125  vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;  in vp8_mse16x16_mmx()
   130  var = sse0 + sse1 + sse2 + sse3;  in vp8_mse16x16_mmx()
   143  unsigned int sse0, sse1, sse2, sse3, var;  in vp8_variance16x16_mmx() local
   147  vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;  in vp8_variance16x16_mmx()
   152  var = sse0 + sse1 + sse2 + sse3;  in vp8_variance16x16_mmx()
   165  unsigned int sse0, sse1, var;  in vp8_variance16x8_mmx() local
   168  vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;  in vp8_variance16x8_mmx()
   171  var = sse0 + sse1;  in vp8_variance16x8_mmx()
   186  unsigned int sse0, sse1, var;  in vp8_variance8x16_mmx() local
   [all …]
D | variance_sse2.c |
   180  unsigned int sse0;  in vp8_variance16x16_wmt() local
   184  vp8_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;  in vp8_variance16x16_wmt()
   185  *sse = sse0;  in vp8_variance16x16_wmt()
   186  return (sse0 - (((unsigned int)sum0 * sum0) >> 8));  in vp8_variance16x16_wmt()
   196  unsigned int sse0;  in vp8_mse16x16_wmt() local
   198  vp8_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;  in vp8_mse16x16_wmt()
   199  *sse = sse0;  in vp8_mse16x16_wmt()
   200  return sse0;  in vp8_mse16x16_wmt()
   213  unsigned int sse0, sse1, var;  in vp8_variance16x8_wmt() local
   216  vp8_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;  in vp8_variance16x8_wmt()
   [all …]
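The return on hit line 186 of variance_sse2.c is the usual variance shortcut Var = SSE - sum^2 / N: a 16x16 block has N = 256 pixels, so the division by N becomes a right shift by 8, and since |sum| <= 255 * 256 = 65280, sum * sum still fits in 32 bits. vp8_mse16x16_wmt() (hit line 200) returns the raw SSE and skips the correction. The closing arithmetic as a sketch, with a hypothetical name:

    /* Variance = SSE - sum^2 / N for a 16x16 block: N = 16 * 16 = 256,
       so the divide is >> 8; sum * sum fits in 32 bits for 8-bit input. */
    unsigned int finish_variance_16x16(unsigned int sse, int sum) {
      return sse - (((unsigned int)sum * sum) >> 8);
    }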