/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <stdlib.h>

#include "./vp9_rtcd.h"
#include "./vpx_config.h"

#include "vpx/vpx_integer.h"
#include "vp9/encoder/vp9_variance.h"

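// Sum of absolute differences (SAD) between a width x height block starting
// at 'a' and the co-located block starting at 'b', advancing each pointer by
// its stride after every row.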
static INLINE unsigned int sad(const uint8_t *a, int a_stride,
                               const uint8_t *b, int b_stride,
                               int width, int height) {
  int y, x;
  unsigned int sad = 0;

  for (y = 0; y < height; y++) {
    for (x = 0; x < width; x++)
      sad += abs(a[x] - b[x]);

    a += a_stride;
    b += b_stride;
  }

  return sad;
}

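// sadMxN(m, n) defines two functions for an m x n block:
//   vp9_sadMxN_c     - SAD between the source block and a reference block.
//   vp9_sadMxN_avg_c - SAD between the source block and the average of the
//                      reference block and second_pred; the averaged block is
//                      built into comp_pred, which is laid out with stride m.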
#define sadMxN(m, n) \
unsigned int vp9_sad##m##x##n##_c(const uint8_t *src, int src_stride, \
                                  const uint8_t *ref, int ref_stride) { \
  return sad(src, src_stride, ref, ref_stride, m, n); \
} \
unsigned int vp9_sad##m##x##n##_avg_c(const uint8_t *src, int src_stride, \
                                      const uint8_t *ref, int ref_stride, \
                                      const uint8_t *second_pred) { \
  uint8_t comp_pred[m * n]; \
  vp9_comp_avg_pred(comp_pred, second_pred, m, n, ref, ref_stride); \
  return sad(src, src_stride, comp_pred, m, m, n); \
}

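// sadMxNxK(m, n, k) defines vp9_sadMxNxK_c, which computes the SAD of the
// source block against k reference blocks at consecutive one-pixel
// horizontal offsets starting at 'ref'.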
#define sadMxNxK(m, n, k) \
void vp9_sad##m##x##n##x##k##_c(const uint8_t *src, int src_stride, \
                                const uint8_t *ref, int ref_stride, \
                                unsigned int *sads) { \
  int i; \
  for (i = 0; i < k; ++i) \
    sads[i] = vp9_sad##m##x##n##_c(src, src_stride, &ref[i], ref_stride); \
}

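// sadMxNx4D(m, n) defines vp9_sadMxNx4d_c, which computes the SAD of the
// source block against four independent reference blocks.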
#define sadMxNx4D(m, n) \
void vp9_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride, \
                             const uint8_t *const refs[], int ref_stride, \
                             unsigned int *sads) { \
  int i; \
  for (i = 0; i < 4; ++i) \
    sads[i] = vp9_sad##m##x##n##_c(src, src_stride, refs[i], ref_stride); \
}

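// Instantiate the SAD functions for each block size used by the encoder.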
// 64x64
sadMxN(64, 64)
sadMxNxK(64, 64, 3)
sadMxNxK(64, 64, 8)
sadMxNx4D(64, 64)

// 64x32
sadMxN(64, 32)
sadMxNx4D(64, 32)

// 32x64
sadMxN(32, 64)
sadMxNx4D(32, 64)

// 32x32
sadMxN(32, 32)
sadMxNxK(32, 32, 3)
sadMxNxK(32, 32, 8)
sadMxNx4D(32, 32)

// 32x16
sadMxN(32, 16)
sadMxNx4D(32, 16)

// 16x32
sadMxN(16, 32)
sadMxNx4D(16, 32)

// 16x16
sadMxN(16, 16)
sadMxNxK(16, 16, 3)
sadMxNxK(16, 16, 8)
sadMxNx4D(16, 16)

// 16x8
sadMxN(16, 8)
sadMxNxK(16, 8, 3)
sadMxNxK(16, 8, 8)
sadMxNx4D(16, 8)

// 8x16
sadMxN(8, 16)
sadMxNxK(8, 16, 3)
sadMxNxK(8, 16, 8)
sadMxNx4D(8, 16)

// 8x8
sadMxN(8, 8)
sadMxNxK(8, 8, 3)
sadMxNxK(8, 8, 8)
sadMxNx4D(8, 8)

// 8x4
sadMxN(8, 4)
sadMxNxK(8, 4, 8)
sadMxNx4D(8, 4)

// 4x8
sadMxN(4, 8)
sadMxNxK(4, 8, 8)
sadMxNx4D(4, 8)

// 4x4
sadMxN(4, 4)
sadMxNxK(4, 4, 3)
sadMxNxK(4, 4, 8)
sadMxNx4D(4, 4)