/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/x86/
D | vp9_variance_mmx.c |
     70  unsigned int sse0, sse1, sse2, sse3, var;                                    in vp9_mse16x16_mmx() local
     74  vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0,     in vp9_mse16x16_mmx()
     83  var = sse0 + sse1 + sse2 + sse3;                                             in vp9_mse16x16_mmx()
     95  unsigned int sse0, sse1, sse2, sse3, var;                                    in vp9_variance16x16_mmx() local
     98  vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0,     in vp9_variance16x16_mmx()
    107  var = sse0 + sse1 + sse2 + sse3;                                             in vp9_variance16x16_mmx()
    119  unsigned int sse0, sse1, var;                                                in vp9_variance16x8_mmx() local
    122  vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0,     in vp9_variance16x8_mmx()
    127  var = sse0 + sse1;                                                           in vp9_variance16x8_mmx()
    140  unsigned int sse0, sse1, var;                                                in vp9_variance8x16_mmx() local
    [all …]
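The matches above all trace one composition pattern: each larger variance is assembled from 8x8 sub-block results, summing the partial SSEs (var = sse0 + sse1 + ...) and partial sums before the mean correction. A minimal sketch of the 16x16 case, assuming the vp9_get8x8var_mmx signature and quadrant offsets implied by the call sites (an illustration, not the exact upstream body):

    /* Prototype assumed from the call sites above. */
    extern void vp9_get8x8var_mmx(const unsigned char *src_ptr, int source_stride,
                                  const unsigned char *ref_ptr, int recon_stride,
                                  unsigned int *sse, int *sum);

    /* Sketch: 16x16 variance from four 8x8 sub-blocks. The four calls cover
     * the top-left, top-right, bottom-left and bottom-right quadrants; the
     * final >> 8 divides sum*sum by the 16*16 = 256 pixels in the block. */
    unsigned int variance16x16_sketch(const unsigned char *src_ptr, int source_stride,
                                      const unsigned char *ref_ptr, int recon_stride,
                                      unsigned int *sse) {
      unsigned int sse0, sse1, sse2, sse3, var;
      int sum0, sum1, sum2, sum3, sum;

      vp9_get8x8var_mmx(src_ptr, source_stride,
                        ref_ptr, recon_stride, &sse0, &sum0);
      vp9_get8x8var_mmx(src_ptr + 8, source_stride,
                        ref_ptr + 8, recon_stride, &sse1, &sum1);
      vp9_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride,
                        ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2);
      vp9_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride,
                        ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);

      var = sse0 + sse1 + sse2 + sse3;
      sum = sum0 + sum1 + sum2 + sum3;
      *sse = var;
      return var - (((unsigned int)sum * sum) >> 8);
    }

The 16x8 and 8x16 variants in the listing follow the same shape with two sub-block calls instead of four.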
D | vp9_variance_avx2.c |
     75  unsigned int sse0;                                                           in variance_avx2() local
     86  ref_ptr + recon_stride * i + j, recon_stride, &sse0, &sum0);                 in variance_avx2()
     87  *sse += sse0;                                                                in variance_avx2()
    115  unsigned int sse0;                                                           in vp9_mse16x16_avx2() local
    117  vp9_get16x16var_avx2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0,  in vp9_mse16x16_avx2()
    119  *sse = sse0;                                                                 in vp9_mse16x16_avx2()
    120  return sse0;                                                                 in vp9_mse16x16_avx2()
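variance_avx2() (and variance_sse2() below) wrap the same idea in a generic tiling loop: walk the block in kernel-sized steps and accumulate each sub-block's SSE and sum, as the `ref_ptr + recon_stride * i + j` match suggests. A hedged sketch of that loop shape; the function-pointer parameter and the w/h/block_size arguments are assumptions for illustration:

    /* Assumed helper type matching the get*var call sites above. */
    typedef void (*get_var_fn)(const unsigned char *src, int src_stride,
                               const unsigned char *ref, int ref_stride,
                               unsigned int *sse, int *sum);

    /* Generic tiling loop in the style of variance_avx2()/variance_sse2():
     * cover a w x h block with block_size x block_size kernels, accumulating
     * the partial SSEs into *sse and the partial sums into *sum. */
    static void variance_sketch(const unsigned char *src_ptr, int source_stride,
                                const unsigned char *ref_ptr, int recon_stride,
                                int w, int h, int block_size,
                                unsigned int *sse, int *sum, get_var_fn get_var) {
      int i, j;
      *sse = 0;
      *sum = 0;
      for (i = 0; i < h; i += block_size) {
        for (j = 0; j < w; j += block_size) {
          unsigned int sse0;
          int sum0;
          get_var(src_ptr + source_stride * i + j, source_stride,
                  ref_ptr + recon_stride * i + j, recon_stride, &sse0, &sum0);
          *sse += sse0;
          *sum += sum0;
        }
      }
    }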
D | vp9_variance_sse2.c |
    119  unsigned int sse0;                                                           in variance_sse2() local
    129  ref_ptr + recon_stride * i + j, recon_stride, &sse0, &sum0);                 in variance_sse2()
    130  *sse += sse0;                                                                in variance_sse2()
    249  unsigned int sse0;                                                           in vp9_mse16x16_sse2() local
    251  vp9_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0,  in vp9_mse16x16_sse2()
    253  *sse = sse0;                                                                 in vp9_mse16x16_sse2()
    254  return sse0;                                                                 in vp9_mse16x16_sse2()
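Note the contrast in the mse16x16 variants: unlike the variance paths, they store and return the helper's SSE unchanged, with no mean correction (here "MSE" names the raw sum of squared differences over the block). A sketch of what the vp9_mse16x16_sse2() matches suggest, with the helper prototype assumed:

    /* Prototype assumed from the call site above. */
    extern void vp9_get16x16var_sse2(const unsigned char *src_ptr, int source_stride,
                                     const unsigned char *ref_ptr, int recon_stride,
                                     unsigned int *sse, int *sum);

    /* MSE path: the 16x16 SSE is returned as-is; the sum is computed by the
     * helper but unused, since no mean term is subtracted for MSE. */
    unsigned int mse16x16_sketch(const unsigned char *src_ptr, int source_stride,
                                 const unsigned char *ref_ptr, int recon_stride,
                                 unsigned int *sse) {
      unsigned int sse0;
      int sum0;
      vp9_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride,
                           &sse0, &sum0);
      *sse = sse0;
      return sse0;
    }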
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/
D | variance_mmx.c |
    122  unsigned int sse0, sse1, sse2, sse3, var;                                          in vp8_mse16x16_mmx() local
    126  vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;  in vp8_mse16x16_mmx()
    131  var = sse0 + sse1 + sse2 + sse3;                                                   in vp8_mse16x16_mmx()
    144  unsigned int sse0, sse1, sse2, sse3, var;                                          in vp8_variance16x16_mmx() local
    148  vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;  in vp8_variance16x16_mmx()
    153  var = sse0 + sse1 + sse2 + sse3;                                                   in vp8_variance16x16_mmx()
    166  unsigned int sse0, sse1, var;                                                      in vp8_variance16x8_mmx() local
    169  vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;  in vp8_variance16x8_mmx()
    172  var = sse0 + sse1;                                                                 in vp8_variance16x8_mmx()
    187  unsigned int sse0, sse1, var;                                                      in vp8_variance8x16_mmx() local
    [all …]
D | variance_sse2.c |
    181  unsigned int sse0;                                                                    in vp8_variance16x16_wmt() local
    185  vp8_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;  in vp8_variance16x16_wmt()
    186  *sse = sse0;                                                                          in vp8_variance16x16_wmt()
    187  return (sse0 - (((unsigned int)sum0 * sum0) >> 8));                                   in vp8_variance16x16_wmt()
    197  unsigned int sse0;                                                                    in vp8_mse16x16_wmt() local
    199  vp8_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;  in vp8_mse16x16_wmt()
    200  *sse = sse0;                                                                          in vp8_mse16x16_wmt()
    201  return sse0;                                                                          in vp8_mse16x16_wmt()
    214  unsigned int sse0, sse1, var;                                                         in vp8_variance16x8_wmt() local
    217  vp8_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;    in vp8_variance16x8_wmt()
    [all …]
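vp8_variance16x16_wmt() spells out the variance formula the other entries imply: var = SSE - sum * sum / N, where N = 16 * 16 = 256 pixels, so the division is the >> 8 shift in the return statement. A small self-checking example of that arithmetic; the numeric values are invented for illustration:

    #include <assert.h>

    int main(void) {
      /* Hypothetical 16x16 results: sum of squared differences and sum of
       * differences, as vp8_get16x16var_sse2() would report them. */
      unsigned int sse0 = 5000;
      int sum0 = 320;
      /* 320 * 320 = 102400; 102400 >> 8 = 102400 / 256 = 400. */
      unsigned int var = sse0 - (((unsigned int)sum0 * sum0) >> 8);
      assert(var == 4600); /* 5000 - 400 */
      return 0;
    }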