/external/libvpx/libvpx/vp9/encoder/x86/
D | vp9_variance_mmx.c
      42  unsigned int sse0, sse1, sse2, sse3;   in vp9_mse16x16_mmx() local
      46  vp9_get8x8var_mmx(src + 8, src_stride, ref + 8, ref_stride, &sse1, &sum1);   in vp9_mse16x16_mmx()
      52  *sse = sse0 + sse1 + sse2 + sse3;   in vp9_mse16x16_mmx()
      60  unsigned int sse0, sse1, sse2, sse3;   in vp9_variance16x16_mmx() local
      64  vp9_get8x8var_mmx(src + 8, src_stride, ref + 8, ref_stride, &sse1, &sum1);   in vp9_variance16x16_mmx()
      70  *sse = sse0 + sse1 + sse2 + sse3;   in vp9_variance16x16_mmx()
      78  unsigned int sse0, sse1;   in vp9_variance16x8_mmx() local
      82  vp9_get8x8var_mmx(src + 8, src_stride, ref + 8, ref_stride, &sse1, &sum1);   in vp9_variance16x8_mmx()
      84  *sse = sse0 + sse1;   in vp9_variance16x8_mmx()
      93  unsigned int sse0, sse1;   in vp9_variance8x16_mmx() local
      [all …]
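The pattern in these hits: each 16-wide or 16-tall block is measured by calling the 8x8 MMX kernel once per sub-block and summing the partial SSEs. A minimal scalar sketch of that tiling, with get8x8var_c as a hypothetical stand-in for vp9_get8x8var_mmx (same argument order as the calls above):

    #include <stdint.h>

    /* Hypothetical scalar stand-in for vp9_get8x8var_mmx(): accumulate the
       sum of squared differences (sse) and the signed difference sum (sum)
       over one 8x8 block. */
    static void get8x8var_c(const uint8_t *src, int src_stride,
                            const uint8_t *ref, int ref_stride,
                            unsigned int *sse, int *sum) {
      int i, j;
      *sse = 0;
      *sum = 0;
      for (i = 0; i < 8; ++i) {
        for (j = 0; j < 8; ++j) {
          const int d = src[i * src_stride + j] - ref[i * ref_stride + j];
          *sse += (unsigned int)(d * d);
          *sum += d;
        }
      }
    }

    /* 16x16 SSE tiled from four 8x8 quadrants, mirroring the offsets in the
       hits above: +8 steps right, +8*stride steps down. */
    static unsigned int mse16x16_c(const uint8_t *src, int src_stride,
                                   const uint8_t *ref, int ref_stride,
                                   unsigned int *sse) {
      unsigned int sse0, sse1, sse2, sse3;
      int sum0, sum1, sum2, sum3;
      get8x8var_c(src, src_stride, ref, ref_stride, &sse0, &sum0);
      get8x8var_c(src + 8, src_stride, ref + 8, ref_stride, &sse1, &sum1);
      get8x8var_c(src + 8 * src_stride, src_stride,
                  ref + 8 * ref_stride, ref_stride, &sse2, &sum2);
      get8x8var_c(src + 8 * src_stride + 8, src_stride,
                  ref + 8 * ref_stride + 8, ref_stride, &sse3, &sum3);
      *sse = sse0 + sse1 + sse2 + sse3;   /* line 52's combination step */
      return *sse;                        /* MSE variants return raw SSE */
    }
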
/external/libvpx/libvpx/vp8/common/x86/
D | variance_mmx.c
      121  unsigned int sse0, sse1, sse2, sse3, var;   in vp8_mse16x16_mmx() local
      126  vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);   in vp8_mse16x16_mmx()
      130  var = sse0 + sse1 + sse2 + sse3;   in vp8_mse16x16_mmx()
      143  unsigned int sse0, sse1, sse2, sse3, var;   in vp8_variance16x16_mmx() local
      148  vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);   in vp8_variance16x16_mmx()
      152  var = sse0 + sse1 + sse2 + sse3;   in vp8_variance16x16_mmx()
      165  unsigned int sse0, sse1, var;   in vp8_variance16x8_mmx() local
      169  vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);   in vp8_variance16x8_mmx()
      171  var = sse0 + sse1;   in vp8_variance16x8_mmx()
      186  unsigned int sse0, sse1, var;   in vp8_variance8x16_mmx() local
      [all …]
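These vp8 hits stop just short of the return statements, which is where the mse and variance functions diverge: mse returns the combined SSE directly, while variance subtracts the squared-mean term sum^2/N (N = 256 for 16x16, hence a shift by 8). A sketch of that final step, assuming var and sum have already been combined as at lines 130/152 (the exact return expression in the MMX code is not visible in these truncated hits):

    #include <stdint.h>

    /* Final step of a 16x16 variance: variance = SSE - sum^2 / N, with
       N = 16*16 = 256, i.e. >> 8. The int64_t cast guards the sum*sum
       product against overflow. */
    static unsigned int variance16x16_from_partials(unsigned int var, int sum,
                                                    unsigned int *sse) {
      *sse = var;
      return var - (unsigned int)(((int64_t)sum * sum) >> 8);
    }
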
D | variance_sse2.c
      213  unsigned int sse0, sse1, var;   in vp8_variance16x8_wmt() local
      217  vp8_get8x8var_sse2(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);   in vp8_variance16x8_wmt()
      219  var = sse0 + sse1;   in vp8_variance16x8_wmt()
      234  unsigned int sse0, sse1, var;   in vp8_variance8x16_wmt() local
      238  …c_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse1, &sum1);   in vp8_variance8x16_wmt()
      240  var = sse0 + sse1;   in vp8_variance8x16_wmt()
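Lines 217 and 238 differ only in how the second 8x8 half is addressed, which is the whole difference between the 16x8 and 8x16 functions. A sketch of that offset arithmetic (the pointer helpers here are illustrative, not libvpx API):

    #include <stdint.h>

    /* A 16x8 block is two 8x8 blocks side by side: the second starts 8
       pixels to the right. An 8x16 block is two stacked vertically: the
       second starts 8 rows down. */
    static const uint8_t *second_half_16x8(const uint8_t *p, int stride) {
      (void)stride;          /* unused: same rows, shifted columns */
      return p + 8;
    }
    static const uint8_t *second_half_8x16(const uint8_t *p, int stride) {
      return p + 8 * stride; /* 8 full rows further into the plane */
    }
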
/external/libvpx/libvpx/test/
D | variance_test.cc
      141  unsigned int sse1, sse2;   in RefTest() local
      144  var1 = variance_(src_, width_, ref_, width_, &sse1));   in RefTest()
      147  EXPECT_EQ(sse1, sse2);   in RefTest()
      249  unsigned int sse1, sse2;   in RefTest() local
      252  src_, width_, &sse1));   in RefTest()
      255  EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;   in RefTest()
      272  unsigned int sse1, sse2;   in RefTest() local
      276  src_, width_, &sse1, sec_));   in RefTest()
      280  EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;   in RefTest()
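RefTest's structure, visible through these fragments, is to run the optimized variance function and the C reference over identical pseudo-random buffers and require bit-exact agreement on both the returned variance and the reported SSE. A simplified plain-C sketch of that harness (the gtest plumbing and exact buffer sizes are omitted; names are illustrative):

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    typedef unsigned int (*variance_fn)(const uint8_t *src, int src_stride,
                                        const uint8_t *ref, int ref_stride,
                                        unsigned int *sse);

    /* Run both implementations on the same random data; any mismatch in
       either the variance or the SSE output is a bug in the SIMD path. */
    static void ref_test(variance_fn opt, variance_fn c_ref,
                         int width, int height) {
      uint8_t src[64 * 64], ref[64 * 64];
      unsigned int sse1, sse2, var1, var2;
      int i;
      assert(width * height <= 64 * 64);
      for (i = 0; i < width * height; ++i) {
        src[i] = (uint8_t)(rand() & 0xff);
        ref[i] = (uint8_t)(rand() & 0xff);
      }
      var1 = opt(src, width, ref, width, &sse1);
      var2 = c_ref(src, width, ref, width, &sse2);
      assert(var1 == var2);   /* EXPECT_EQ(var1, var2) in the real test */
      assert(sse1 == sse2);   /* EXPECT_EQ(sse1, sse2) at line 147 */
    }
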
/external/libvpx/libvpx/vp8/encoder/
D | mcomp.c
      202  …)+thismse)) < besterr) { besterr = v; br=r; bc=c; *distortion = thismse; *sse1 = sse; }}, v=UINT_M…
      209  unsigned int *sse1)   in vp8_find_best_sub_pixel_step_iteratively() argument
      266  besterr = vfp->vf(y, y_stride, z, b->src_stride, sse1);   in vp8_find_best_sub_pixel_step_iteratively()
      367  unsigned int *sse1)   in vp8_find_best_sub_pixel_step() argument
      401  bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);   in vp8_find_best_sub_pixel_step()
      416  *sse1 = sse;   in vp8_find_best_sub_pixel_step()
      428  *sse1 = sse;   in vp8_find_best_sub_pixel_step()
      442  *sse1 = sse;   in vp8_find_best_sub_pixel_step()
      454  *sse1 = sse;   in vp8_find_best_sub_pixel_step()
      494  *sse1 = sse;   in vp8_find_best_sub_pixel_step()
      [all …]
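The truncated macro at line 202 is the core update of the sub-pixel search: each candidate position's cost is its motion-vector rate plus its MSE, and when that beats the running best, the position, the distortion, and the SSE output (*sse1) are all recorded together. An illustrative expansion of one such step (names follow the fragment; this is a sketch, not the actual macro, whose out-of-range candidates get v = UINT_MAX per the v=UINT_M… tail):

    /* One CHECK_BETTER step: v is the rate-distortion cost of candidate
       position (r, c). On improvement, remember the position and expose
       the candidate's distortion and SSE through the output pointers. */
    static void check_better(unsigned int rate_cost, unsigned int thismse,
                             unsigned int sse, int r, int c,
                             unsigned int *besterr, int *br, int *bc,
                             int *distortion, unsigned int *sse1) {
      const unsigned int v = rate_cost + thismse;
      if (v < *besterr) {
        *besterr = v;
        *br = r;
        *bc = c;
        *distortion = (int)thismse;
        *sse1 = sse;
      }
    }
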
D | rdopt.c
      476  unsigned int sse1 = 0;   in VP8_UVSSE() local
      505  mv_col & 7, mv_row & 7, vpred_ptr, uv_stride, &sse1);   in VP8_UVSSE()
      506  sse2 += sse1;   in VP8_UVSSE()
      513  vpred_ptr, uv_stride, &sse1);   in VP8_UVSSE()
      514  sse2 += sse1;   in VP8_UVSSE()
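VP8_UVSSE accumulates chroma distortion plane by plane: each plane's variance call writes its SSE into sse1, and sse2 keeps the U+V running total (lines 506 and 514). A condensed sketch of that accumulation (the function-pointer type and names are simplified stand-ins, and the sub-pixel vs. full-pel choice keyed on mv_col & 7 / mv_row & 7 is elided):

    #include <stdint.h>

    typedef unsigned int (*plane_var_fn)(const uint8_t *pred, int pred_stride,
                                         const uint8_t *src, int src_stride,
                                         unsigned int *sse);

    /* Sum the U-plane and V-plane SSEs, mirroring sse2 += sse1 above. */
    static unsigned int uv_sse(plane_var_fn vf,
                               const uint8_t *upred, const uint8_t *usrc,
                               const uint8_t *vpred, const uint8_t *vsrc,
                               int uv_stride) {
      unsigned int sse1 = 0;
      unsigned int sse2 = 0;
      vf(upred, uv_stride, usrc, uv_stride, &sse1);
      sse2 += sse1;   /* U-plane contribution */
      vf(vpred, uv_stride, vsrc, uv_stride, &sse1);
      sse2 += sse1;   /* V-plane contribution */
      return sse2;
    }
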
/external/valgrind/memcheck/tests/x86/
D | sse1_memory.vgtest | 3 args: sse1
D | insn_mmxext.vgtest | 2 # mmxext is an old AMD subset of sse1, so either will do.
D | sse_memory.c
      391  Int sse1 = 0, sse2 = 0;   in main() local
      394  sse1 = 1;   in main()
      402  sse1 = sse2 = 1;   in main()
      410  if (sse1) {   in main()
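The main() hits show the test selecting instruction groups from its command line; the paired .vgtest file supplies "args: sse1". A sketch of flag parsing consistent with the four hits (accepted strings other than "sse1", such as "sse2" and "all", are assumptions):

    #include <string.h>

    /* Decide which instruction groups to exercise, as implied by the hits:
       line 394 sets sse1 alone, line 402 sets both, line 410 gates the
       SSE1 test block. */
    static void select_tests(const char *arg, int *sse1, int *sse2) {
      *sse1 = 0;
      *sse2 = 0;
      if (strcmp(arg, "sse1") == 0)
        *sse1 = 1;
      else if (strcmp(arg, "sse2") == 0)
        *sse2 = 1;
      else if (strcmp(arg, "all") == 0)
        *sse1 = *sse2 = 1;
    }
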
/external/valgrind/none/tests/x86/
D | insn_mmxext.vgtest | 2 # mmxext is an old AMD subset of sse1, so either will do.
/external/valgrind/memcheck/tests/amd64/
D | sse_memory.c
      391  Int sse1 = 0, sse2 = 0;   in main() local
      394  sse1 = 1;   in main()
      402  sse1 = sse2 = 1;   in main()
      410  if (sse1) {   in main()
/external/libvpx/libvpx/vp9/encoder/
D | vp9_mcomp.h | 102 int *distortion, unsigned int *sse1,
D | vp9_mcomp.c
      188  *sse1 = sse; \
      268  unsigned int *sse1,   in vp9_find_best_sub_pixel_tree() argument
      309  besterr = vfp->vf(comp_pred, w, z, src_stride, sse1);   in vp9_find_best_sub_pixel_tree()
      311  besterr = vfp->vf(y + offset, y_stride, z, src_stride, sse1);   in vp9_find_best_sub_pixel_tree()
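Lines 309 and 311 are the two ways vp9_find_best_sub_pixel_tree seeds besterr: with a second (compound) predictor, the candidate reference block is first averaged into comp_pred and the variance is taken against that average; otherwise the reference block is compared directly. A sketch of that branch (avg_pred here is a hypothetical stand-in for libvpx's averaging helper, with rounding-average semantics assumed):

    #include <stddef.h>
    #include <stdint.h>

    typedef unsigned int (*vf_t)(const uint8_t *a, int a_stride,
                                 const uint8_t *b, int b_stride,
                                 unsigned int *sse);

    /* Hypothetical helper: comp = round((pred + ref) / 2), row by row. */
    static void avg_pred(uint8_t *comp, const uint8_t *pred, int w, int h,
                         const uint8_t *ref, int ref_stride) {
      int i, j;
      for (i = 0; i < h; ++i) {
        for (j = 0; j < w; ++j)
          comp[j] = (uint8_t)((pred[j] + ref[j] + 1) >> 1);
        comp += w;
        pred += w;
        ref += ref_stride;
      }
    }

    static unsigned int init_besterr(vf_t vf, const uint8_t *y, int y_stride,
                                     int offset, const uint8_t *z,
                                     int src_stride,
                                     const uint8_t *second_pred,
                                     uint8_t *comp_pred, int w, int h,
                                     unsigned int *sse1) {
      if (second_pred != NULL) {
        /* Compound prediction: score the average of the two predictors. */
        avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);
        return vf(comp_pred, w, z, src_stride, sse1);
      }
      return vf(y + offset, y_stride, z, src_stride, sse1);
    }
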
/external/valgrind/docs/internals/
D | release-HOWTO.txt | 75 x86, sse1 (PIII)