/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <assert.h>
#include <emmintrin.h>  // SSE2

#include "config/aom_config.h"
#include "config/aom_dsp_rtcd.h"
#include "config/av1_rtcd.h"

#include "aom_dsp/blend.h"
#include "aom_dsp/x86/synonyms.h"

#include "aom_ports/mem.h"

#include "av1/common/filter.h"
#include "av1/common/onyxc_int.h"
#include "av1/common/reconinter.h"

unsigned int aom_get_mb_ss_sse2(const int16_t *src) {
  __m128i vsum = _mm_setzero_si128();
  int i;

  for (i = 0; i < 32; ++i) {
    const __m128i v = xx_loadu_128(src);
    vsum = _mm_add_epi32(vsum, _mm_madd_epi16(v, v));
    src += 8;
  }

  vsum = _mm_add_epi32(vsum, _mm_srli_si128(vsum, 8));
  vsum = _mm_add_epi32(vsum, _mm_srli_si128(vsum, 4));
  return _mm_cvtsi128_si32(vsum);
}
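
// aom_get_mb_ss_sse2() returns the sum of squares of the 256 int16_t values
// that make up a 16x16 block of residuals. A scalar sketch of the same
// computation, for illustration only:
//
//   unsigned int get_mb_ss_ref(const int16_t *src) {
//     unsigned int ss = 0;
//     for (int k = 0; k < 256; ++k) ss += src[k] * src[k];
//     return ss;
//   }
//
// The SIMD loop above consumes 8 values per iteration; _mm_madd_epi16 squares
// each value and adds adjacent pairs into four 32-bit lanes, and the two
// shift/add steps at the end fold those lanes into a single result.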

static INLINE __m128i load4x2_sse2(const uint8_t *const p, const int stride) {
  const __m128i p0 = _mm_cvtsi32_si128(*(const uint32_t *)(p + 0 * stride));
  const __m128i p1 = _mm_cvtsi32_si128(*(const uint32_t *)(p + 1 * stride));
  return _mm_unpacklo_epi8(_mm_unpacklo_epi32(p0, p1), _mm_setzero_si128());
}

static INLINE __m128i load8_8to16_sse2(const uint8_t *const p) {
  const __m128i p0 = _mm_loadl_epi64((const __m128i *)p);
  return _mm_unpacklo_epi8(p0, _mm_setzero_si128());
}

// Accumulate the four 32-bit lanes in val into a single 32-bit number.
static INLINE unsigned int add32x4_sse2(__m128i val) {
  val = _mm_add_epi32(val, _mm_srli_si128(val, 8));
  val = _mm_add_epi32(val, _mm_srli_si128(val, 4));
  return _mm_cvtsi128_si32(val);
}

// Accumulate the eight 16-bit lanes in sum into four 32-bit lanes.
static INLINE __m128i sum_to_32bit_sse2(const __m128i sum) {
  const __m128i sum_lo = _mm_srai_epi32(_mm_unpacklo_epi16(sum, sum), 16);
  const __m128i sum_hi = _mm_srai_epi32(_mm_unpackhi_epi16(sum, sum), 16);
  return _mm_add_epi32(sum_lo, sum_hi);
}
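
// Note on the widening above: unpacking a 16-bit lane with itself places the
// value in the upper half of a 32-bit lane, and the arithmetic shift right by
// 16 then sign-extends it, so signed 16-bit sums are widened correctly before
// the 32-bit addition.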

static INLINE void variance_kernel_sse2(const __m128i src, const __m128i ref,
                                        __m128i *const sse,
                                        __m128i *const sum) {
  const __m128i diff = _mm_sub_epi16(src, ref);
  *sse = _mm_add_epi32(*sse, _mm_madd_epi16(diff, diff));
  *sum = _mm_add_epi16(*sum, diff);
}
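
// In the kernel above, _mm_madd_epi16 multiplies each 16-bit difference by
// itself and adds adjacent pairs, so *sse accumulates squared differences in
// four 32-bit lanes, which cannot overflow for the block sizes handled in
// this file. *sum, by contrast, accumulates the raw differences in eight
// 16-bit lanes, which is why the per-block-size helpers below limit how many
// pixels may be accumulated before the sum is widened.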

// Can handle 128 pixels' diff sum (such as 8x16 or 16x8)
// Slightly faster than variance_final_256_pel_sse2()
// diff sum of 128 pixels can still fit in 16bit integer
static INLINE void variance_final_128_pel_sse2(__m128i vsse, __m128i vsum,
                                               unsigned int *const sse,
                                               int *const sum) {
  *sse = add32x4_sse2(vsse);

  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 8));
  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 4));
  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 2));
  *sum = (int16_t)_mm_extract_epi16(vsum, 0);
}

// Can handle 256 pixels' diff sum (such as 16x16)
static INLINE void variance_final_256_pel_sse2(__m128i vsse, __m128i vsum,
                                               unsigned int *const sse,
                                               int *const sum) {
  *sse = add32x4_sse2(vsse);

  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 8));
  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 4));
  *sum = (int16_t)_mm_extract_epi16(vsum, 0);
  *sum += (int16_t)_mm_extract_epi16(vsum, 1);
}
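
// Why the 256-pixel case reads two 16-bit lanes: each pixel difference lies
// in [-255, 255], so the total for 256 pixels can reach +/-65280, which no
// longer fits in a signed 16-bit value. After the two shift/add steps each of
// the low two lanes holds the sum of only 128 differences (at most +/-32640),
// so extracting them separately and adding in 32-bit arithmetic stays exact.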

// Can handle 512 pixels' diff sum (such as 16x32 or 32x16)
static INLINE void variance_final_512_pel_sse2(__m128i vsse, __m128i vsum,
                                               unsigned int *const sse,
                                               int *const sum) {
  *sse = add32x4_sse2(vsse);

  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 8));
  vsum = _mm_unpacklo_epi16(vsum, vsum);
  vsum = _mm_srai_epi32(vsum, 16);
  *sum = add32x4_sse2(vsum);
}

// Can handle 1024 pixels' diff sum (such as 32x32)
static INLINE void variance_final_1024_pel_sse2(__m128i vsse, __m128i vsum,
                                                unsigned int *const sse,
                                                int *const sum) {
  *sse = add32x4_sse2(vsse);

  vsum = sum_to_32bit_sse2(vsum);
  *sum = add32x4_sse2(vsum);
}

static INLINE void variance4_sse2(const uint8_t *src, const int src_stride,
                                  const uint8_t *ref, const int ref_stride,
                                  const int h, __m128i *const sse,
                                  __m128i *const sum) {
  assert(h <= 256);  // May overflow for larger height.
  *sum = _mm_setzero_si128();

  for (int i = 0; i < h; i += 2) {
    const __m128i s = load4x2_sse2(src, src_stride);
    const __m128i r = load4x2_sse2(ref, ref_stride);

    variance_kernel_sse2(s, r, sse, sum);
    src += 2 * src_stride;
    ref += 2 * ref_stride;
  }
}

static INLINE void variance8_sse2(const uint8_t *src, const int src_stride,
                                  const uint8_t *ref, const int ref_stride,
                                  const int h, __m128i *const sse,
                                  __m128i *const sum) {
  assert(h <= 128);  // May overflow for larger height.
  *sum = _mm_setzero_si128();
  for (int i = 0; i < h; i++) {
    const __m128i s = load8_8to16_sse2(src);
    const __m128i r = load8_8to16_sse2(ref);

    variance_kernel_sse2(s, r, sse, sum);
    src += src_stride;
    ref += ref_stride;
  }
}

static INLINE void variance16_kernel_sse2(const uint8_t *const src,
                                          const uint8_t *const ref,
                                          __m128i *const sse,
                                          __m128i *const sum) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i s = _mm_loadu_si128((const __m128i *)src);
  const __m128i r = _mm_loadu_si128((const __m128i *)ref);
  const __m128i src0 = _mm_unpacklo_epi8(s, zero);
  const __m128i ref0 = _mm_unpacklo_epi8(r, zero);
  const __m128i src1 = _mm_unpackhi_epi8(s, zero);
  const __m128i ref1 = _mm_unpackhi_epi8(r, zero);

  variance_kernel_sse2(src0, ref0, sse, sum);
  variance_kernel_sse2(src1, ref1, sse, sum);
}

static INLINE void variance16_sse2(const uint8_t *src, const int src_stride,
                                   const uint8_t *ref, const int ref_stride,
                                   const int h, __m128i *const sse,
                                   __m128i *const sum) {
  assert(h <= 64);  // May overflow for larger height.
  *sum = _mm_setzero_si128();

  for (int i = 0; i < h; ++i) {
    variance16_kernel_sse2(src, ref, sse, sum);
    src += src_stride;
    ref += ref_stride;
  }
}

static INLINE void variance32_sse2(const uint8_t *src, const int src_stride,
                                   const uint8_t *ref, const int ref_stride,
                                   const int h, __m128i *const sse,
                                   __m128i *const sum) {
  assert(h <= 32);  // May overflow for larger height.
  // Don't initialize sse here since it's an accumulation.
  *sum = _mm_setzero_si128();

  for (int i = 0; i < h; ++i) {
    variance16_kernel_sse2(src + 0, ref + 0, sse, sum);
    variance16_kernel_sse2(src + 16, ref + 16, sse, sum);
    src += src_stride;
    ref += ref_stride;
  }
}

static INLINE void variance64_sse2(const uint8_t *src, const int src_stride,
                                   const uint8_t *ref, const int ref_stride,
                                   const int h, __m128i *const sse,
                                   __m128i *const sum) {
  assert(h <= 16);  // May overflow for larger height.
  *sum = _mm_setzero_si128();

  for (int i = 0; i < h; ++i) {
    variance16_kernel_sse2(src + 0, ref + 0, sse, sum);
    variance16_kernel_sse2(src + 16, ref + 16, sse, sum);
    variance16_kernel_sse2(src + 32, ref + 32, sse, sum);
    variance16_kernel_sse2(src + 48, ref + 48, sse, sum);
    src += src_stride;
    ref += ref_stride;
  }
}

static INLINE void variance128_sse2(const uint8_t *src, const int src_stride,
                                    const uint8_t *ref, const int ref_stride,
                                    const int h, __m128i *const sse,
                                    __m128i *const sum) {
  assert(h <= 8);  // May overflow for larger height.
  *sum = _mm_setzero_si128();

  for (int i = 0; i < h; ++i) {
    for (int j = 0; j < 4; ++j) {
      const int offset0 = j << 5;
      const int offset1 = offset0 + 16;
      variance16_kernel_sse2(src + offset0, ref + offset0, sse, sum);
      variance16_kernel_sse2(src + offset1, ref + offset1, sse, sum);
    }
    src += src_stride;
    ref += ref_stride;
  }
}

#define AOM_VAR_NO_LOOP_SSE2(bw, bh, bits, max_pixels)                        \
  unsigned int aom_variance##bw##x##bh##_sse2(                                \
      const uint8_t *src, int src_stride, const uint8_t *ref,                 \
      int ref_stride, unsigned int *sse) {                                    \
    __m128i vsse = _mm_setzero_si128();                                       \
    __m128i vsum;                                                             \
    int sum = 0;                                                              \
    variance##bw##_sse2(src, src_stride, ref, ref_stride, bh, &vsse, &vsum);  \
    variance_final_##max_pixels##_pel_sse2(vsse, vsum, sse, &sum);            \
    assert(sum <= 255 * bw * bh);                                             \
    assert(sum >= -255 * bw * bh);                                            \
    return *sse - (uint32_t)(((int64_t)sum * sum) >> bits);                   \
  }
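
// The macro above expands to functions of the form aom_variance<W>x<H>_sse2()
// that return the block variance using the identity
//   variance = sse - (sum * sum) / (bw * bh),
// where bw * bh == 1 << bits, so the division reduces to a right shift. The
// max_pixels argument selects the final-reduction helper whose 16-bit sum
// capacity matches the block size.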

AOM_VAR_NO_LOOP_SSE2(4, 4, 4, 128);
AOM_VAR_NO_LOOP_SSE2(4, 8, 5, 128);
AOM_VAR_NO_LOOP_SSE2(4, 16, 6, 128);

AOM_VAR_NO_LOOP_SSE2(8, 4, 5, 128);
AOM_VAR_NO_LOOP_SSE2(8, 8, 6, 128);
AOM_VAR_NO_LOOP_SSE2(8, 16, 7, 128);
AOM_VAR_NO_LOOP_SSE2(8, 32, 8, 256);

AOM_VAR_NO_LOOP_SSE2(16, 4, 6, 128);
AOM_VAR_NO_LOOP_SSE2(16, 8, 7, 128);
AOM_VAR_NO_LOOP_SSE2(16, 16, 8, 256);
AOM_VAR_NO_LOOP_SSE2(16, 32, 9, 512);
AOM_VAR_NO_LOOP_SSE2(16, 64, 10, 1024);

AOM_VAR_NO_LOOP_SSE2(32, 8, 8, 256);
AOM_VAR_NO_LOOP_SSE2(32, 16, 9, 512);
AOM_VAR_NO_LOOP_SSE2(32, 32, 10, 1024);

#define AOM_VAR_LOOP_SSE2(bw, bh, bits, uh)                                   \
  unsigned int aom_variance##bw##x##bh##_sse2(                                \
      const uint8_t *src, int src_stride, const uint8_t *ref,                 \
      int ref_stride, unsigned int *sse) {                                    \
    __m128i vsse = _mm_setzero_si128();                                       \
    __m128i vsum = _mm_setzero_si128();                                       \
    for (int i = 0; i < (bh / uh); ++i) {                                     \
      __m128i vsum16;                                                         \
      variance##bw##_sse2(src, src_stride, ref, ref_stride, uh, &vsse,        \
                          &vsum16);                                           \
      vsum = _mm_add_epi32(vsum, sum_to_32bit_sse2(vsum16));                  \
      src += (src_stride * uh);                                               \
      ref += (ref_stride * uh);                                               \
    }                                                                         \
    *sse = add32x4_sse2(vsse);                                                \
    int sum = add32x4_sse2(vsum);                                             \
    assert(sum <= 255 * bw * bh);                                             \
    assert(sum >= -255 * bw * bh);                                            \
    return *sse - (uint32_t)(((int64_t)sum * sum) >> bits);                   \
  }
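
// For blocks too tall for a single 16-bit accumulation pass, the loop variant
// above processes the block in strips of uh rows, widens each strip's 16-bit
// difference sum to 32 bits with sum_to_32bit_sse2(), and accumulates the
// strips in 32-bit lanes before the final reduction.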

AOM_VAR_LOOP_SSE2(32, 64, 11, 32);  // 32x32 * ( 64/32 )

AOM_VAR_NO_LOOP_SSE2(64, 16, 10, 1024);
AOM_VAR_LOOP_SSE2(64, 32, 11, 16);   // 64x16 * ( 32/16 )
AOM_VAR_LOOP_SSE2(64, 64, 12, 16);   // 64x16 * ( 64/16 )
AOM_VAR_LOOP_SSE2(64, 128, 13, 16);  // 64x16 * ( 128/16 )

AOM_VAR_LOOP_SSE2(128, 64, 13, 8);   // 128x8 * ( 64/8 )
AOM_VAR_LOOP_SSE2(128, 128, 14, 8);  // 128x8 * ( 128/8 )

unsigned int aom_mse8x8_sse2(const uint8_t *src, int src_stride,
                             const uint8_t *ref, int ref_stride,
                             unsigned int *sse) {
  aom_variance8x8_sse2(src, src_stride, ref, ref_stride, sse);
  return *sse;
}

unsigned int aom_mse8x16_sse2(const uint8_t *src, int src_stride,
                              const uint8_t *ref, int ref_stride,
                              unsigned int *sse) {
  aom_variance8x16_sse2(src, src_stride, ref, ref_stride, sse);
  return *sse;
}

unsigned int aom_mse16x8_sse2(const uint8_t *src, int src_stride,
                              const uint8_t *ref, int ref_stride,
                              unsigned int *sse) {
  aom_variance16x8_sse2(src, src_stride, ref, ref_stride, sse);
  return *sse;
}

unsigned int aom_mse16x16_sse2(const uint8_t *src, int src_stride,
                               const uint8_t *ref, int ref_stride,
                               unsigned int *sse) {
  aom_variance16x16_sse2(src, src_stride, ref, ref_stride, sse);
  return *sse;
}

// The 2 unused parameters are placeholders for the PIC-enabled build.
// These definitions are for functions defined in subpel_variance.asm.
#define DECL(w, opt)                                                          \
  int aom_sub_pixel_variance##w##xh_##opt(                                    \
      const uint8_t *src, ptrdiff_t src_stride, int x_offset, int y_offset,   \
      const uint8_t *dst, ptrdiff_t dst_stride, int height,                   \
      unsigned int *sse, void *unused0, void *unused)
#define DECLS(opt) \
  DECL(4, opt);    \
  DECL(8, opt);    \
  DECL(16, opt)

DECLS(sse2);
DECLS(ssse3);
#undef DECLS
#undef DECL

#define FN(w, h, wf, wlog2, hlog2, opt, cast_prod, cast)                      \
  unsigned int aom_sub_pixel_variance##w##x##h##_##opt(                       \
      const uint8_t *src, int src_stride, int x_offset, int y_offset,         \
      const uint8_t *dst, int dst_stride, unsigned int *sse_ptr) {            \
    /*Avoid overflow in helper by capping height.*/                           \
    const int hf = AOMMIN(h, 64);                                             \
    unsigned int sse = 0;                                                     \
    int se = 0;                                                               \
    for (int i = 0; i < (w / wf); ++i) {                                      \
      const uint8_t *src_ptr = src;                                           \
      const uint8_t *dst_ptr = dst;                                           \
      for (int j = 0; j < (h / hf); ++j) {                                    \
        unsigned int sse2;                                                    \
        const int se2 = aom_sub_pixel_variance##wf##xh_##opt(                 \
            src_ptr, src_stride, x_offset, y_offset, dst_ptr, dst_stride,     \
            hf, &sse2, NULL, NULL);                                           \
        dst_ptr += hf * dst_stride;                                           \
        src_ptr += hf * src_stride;                                           \
        se += se2;                                                            \
        sse += sse2;                                                          \
      }                                                                       \
      src += wf;                                                              \
      dst += wf;                                                              \
    }                                                                         \
    *sse_ptr = sse;                                                           \
    return sse - (unsigned int)(cast_prod(cast se * se) >> (wlog2 + hlog2));  \
  }
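
// The FN wrapper tiles a w x h block into wf-wide columns and hf-tall row
// strips, calls the assembly helper on each tile, sums the per-tile SSE and
// signed error, and then applies the same variance identity as above:
// sse - (se * se) / (w * h), with the division expressed as a shift by
// wlog2 + hlog2. The cast_prod/cast arguments pick an intermediate type wide
// enough for se * se at that block size.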

#define FNS(opt)                                     \
  FN(128, 128, 16, 7, 7, opt, (int64_t), (int64_t)); \
  FN(128, 64, 16, 7, 6, opt, (int64_t), (int64_t));  \
  FN(64, 128, 16, 6, 7, opt, (int64_t), (int64_t));  \
  FN(64, 64, 16, 6, 6, opt, (int64_t), (int64_t));   \
  FN(64, 32, 16, 6, 5, opt, (int64_t), (int64_t));   \
  FN(32, 64, 16, 5, 6, opt, (int64_t), (int64_t));   \
  FN(32, 32, 16, 5, 5, opt, (int64_t), (int64_t));   \
  FN(32, 16, 16, 5, 4, opt, (int64_t), (int64_t));   \
  FN(16, 32, 16, 4, 5, opt, (int64_t), (int64_t));   \
  FN(16, 16, 16, 4, 4, opt, (uint32_t), (int64_t));  \
  FN(16, 8, 16, 4, 3, opt, (int32_t), (int32_t));    \
  FN(8, 16, 8, 3, 4, opt, (int32_t), (int32_t));     \
  FN(8, 8, 8, 3, 3, opt, (int32_t), (int32_t));      \
  FN(8, 4, 8, 3, 2, opt, (int32_t), (int32_t));      \
  FN(4, 8, 4, 2, 3, opt, (int32_t), (int32_t));      \
  FN(4, 4, 4, 2, 2, opt, (int32_t), (int32_t));      \
  FN(4, 16, 4, 2, 4, opt, (int32_t), (int32_t));     \
  FN(16, 4, 16, 4, 2, opt, (int32_t), (int32_t));    \
  FN(8, 32, 8, 3, 5, opt, (uint32_t), (int64_t));    \
  FN(32, 8, 16, 5, 3, opt, (uint32_t), (int64_t));   \
  FN(16, 64, 16, 4, 6, opt, (int64_t), (int64_t));   \
  FN(64, 16, 16, 6, 4, opt, (int64_t), (int64_t))

FNS(sse2);
FNS(ssse3);

#undef FNS
#undef FN

// The 2 unused parameters are placeholders for the PIC-enabled build.
#define DECL(w, opt)                                                          \
  int aom_sub_pixel_avg_variance##w##xh_##opt(                                \
      const uint8_t *src, ptrdiff_t src_stride, int x_offset, int y_offset,   \
      const uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *sec,           \
      ptrdiff_t sec_stride, int height, unsigned int *sse, void *unused0,     \
      void *unused)
#define DECLS(opt) \
  DECL(4, opt);    \
  DECL(8, opt);    \
  DECL(16, opt)

DECLS(sse2);
DECLS(ssse3);
#undef DECL
#undef DECLS

#define FN(w, h, wf, wlog2, hlog2, opt, cast_prod, cast)                      \
  unsigned int aom_sub_pixel_avg_variance##w##x##h##_##opt(                   \
      const uint8_t *src, int src_stride, int x_offset, int y_offset,         \
      const uint8_t *dst, int dst_stride, unsigned int *sse_ptr,              \
      const uint8_t *sec) {                                                   \
    /*Avoid overflow in helper by capping height.*/                           \
    const int hf = AOMMIN(h, 64);                                             \
    unsigned int sse = 0;                                                     \
    int se = 0;                                                               \
    for (int i = 0; i < (w / wf); ++i) {                                      \
      const uint8_t *src_ptr = src;                                           \
      const uint8_t *dst_ptr = dst;                                           \
      const uint8_t *sec_ptr = sec;                                           \
      for (int j = 0; j < (h / hf); ++j) {                                    \
        unsigned int sse2;                                                    \
        const int se2 = aom_sub_pixel_avg_variance##wf##xh_##opt(             \
            src_ptr, src_stride, x_offset, y_offset, dst_ptr, dst_stride,     \
            sec_ptr, w, hf, &sse2, NULL, NULL);                               \
        dst_ptr += hf * dst_stride;                                           \
        src_ptr += hf * src_stride;                                           \
        sec_ptr += hf * w;                                                    \
        se += se2;                                                            \
        sse += sse2;                                                          \
      }                                                                       \
      src += wf;                                                              \
      dst += wf;                                                              \
      sec += wf;                                                              \
    }                                                                         \
    *sse_ptr = sse;                                                           \
    return sse - (unsigned int)(cast_prod(cast se * se) >> (wlog2 + hlog2));  \
  }

#define FNS(opt)                                     \
  FN(128, 128, 16, 7, 7, opt, (int64_t), (int64_t)); \
  FN(128, 64, 16, 7, 6, opt, (int64_t), (int64_t));  \
  FN(64, 128, 16, 6, 7, opt, (int64_t), (int64_t));  \
  FN(64, 64, 16, 6, 6, opt, (int64_t), (int64_t));   \
  FN(64, 32, 16, 6, 5, opt, (int64_t), (int64_t));   \
  FN(32, 64, 16, 5, 6, opt, (int64_t), (int64_t));   \
  FN(32, 32, 16, 5, 5, opt, (int64_t), (int64_t));   \
  FN(32, 16, 16, 5, 4, opt, (int64_t), (int64_t));   \
  FN(16, 32, 16, 4, 5, opt, (int64_t), (int64_t));   \
  FN(16, 16, 16, 4, 4, opt, (uint32_t), (int64_t));  \
  FN(16, 8, 16, 4, 3, opt, (uint32_t), (int32_t));   \
  FN(8, 16, 8, 3, 4, opt, (uint32_t), (int32_t));    \
  FN(8, 8, 8, 3, 3, opt, (uint32_t), (int32_t));     \
  FN(8, 4, 8, 3, 2, opt, (uint32_t), (int32_t));     \
  FN(4, 8, 4, 2, 3, opt, (uint32_t), (int32_t));     \
  FN(4, 4, 4, 2, 2, opt, (uint32_t), (int32_t));     \
  FN(4, 16, 4, 2, 4, opt, (int32_t), (int32_t));     \
  FN(16, 4, 16, 4, 2, opt, (int32_t), (int32_t));    \
  FN(8, 32, 8, 3, 5, opt, (uint32_t), (int64_t));    \
  FN(32, 8, 16, 5, 3, opt, (uint32_t), (int64_t));   \
  FN(16, 64, 16, 4, 6, opt, (int64_t), (int64_t));   \
  FN(64, 16, 16, 6, 4, opt, (int64_t), (int64_t))

FNS(sse2);
FNS(ssse3);

#undef FNS
#undef FN

void aom_upsampled_pred_sse2(MACROBLOCKD *xd, const struct AV1Common *const cm,
                             int mi_row, int mi_col, const MV *const mv,
                             uint8_t *comp_pred, int width, int height,
                             int subpel_x_q3, int subpel_y_q3,
                             const uint8_t *ref, int ref_stride,
                             int subpel_search) {
  // expect xd == NULL only in tests
  if (xd != NULL) {
    const MB_MODE_INFO *mi = xd->mi[0];
    const int ref_num = 0;
    const int is_intrabc = is_intrabc_block(mi);
    const struct scale_factors *const sf =
        is_intrabc ? &cm->sf_identity : xd->block_ref_scale_factors[ref_num];
    const int is_scaled = av1_is_scaled(sf);

    if (is_scaled) {
      // Note: This is mostly a copy from the >=8X8 case in
      // build_inter_predictors() function, with some small tweaks.

      // Some assumptions.
      const int plane = 0;

      // Get pre-requisites.
      const struct macroblockd_plane *const pd = &xd->plane[plane];
      const int ssx = pd->subsampling_x;
      const int ssy = pd->subsampling_y;
      assert(ssx == 0 && ssy == 0);
      const struct buf_2d *const dst_buf = &pd->dst;
      const struct buf_2d *const pre_buf =
          is_intrabc ? dst_buf : &pd->pre[ref_num];
      const int mi_x = mi_col * MI_SIZE;
      const int mi_y = mi_row * MI_SIZE;

      // Calculate subpel_x/y and x/y_step.
      const int row_start = 0;  // Because ss_y is 0.
      const int col_start = 0;  // Because ss_x is 0.
      const int pre_x = (mi_x + MI_SIZE * col_start) >> ssx;
      const int pre_y = (mi_y + MI_SIZE * row_start) >> ssy;
      int orig_pos_y = pre_y << SUBPEL_BITS;
      orig_pos_y += mv->row * (1 << (1 - ssy));
      int orig_pos_x = pre_x << SUBPEL_BITS;
      orig_pos_x += mv->col * (1 << (1 - ssx));
      int pos_y = sf->scale_value_y(orig_pos_y, sf);
      int pos_x = sf->scale_value_x(orig_pos_x, sf);
      pos_x += SCALE_EXTRA_OFF;
      pos_y += SCALE_EXTRA_OFF;

      const int top = -AOM_LEFT_TOP_MARGIN_SCALED(ssy);
      const int left = -AOM_LEFT_TOP_MARGIN_SCALED(ssx);
      const int bottom = (pre_buf->height + AOM_INTERP_EXTEND)
                         << SCALE_SUBPEL_BITS;
      const int right = (pre_buf->width + AOM_INTERP_EXTEND)
                        << SCALE_SUBPEL_BITS;
      pos_y = clamp(pos_y, top, bottom);
      pos_x = clamp(pos_x, left, right);

      const uint8_t *const pre =
          pre_buf->buf0 + (pos_y >> SCALE_SUBPEL_BITS) * pre_buf->stride +
          (pos_x >> SCALE_SUBPEL_BITS);

      const SubpelParams subpel_params = { sf->x_step_q4, sf->y_step_q4,
                                           pos_x & SCALE_SUBPEL_MASK,
                                           pos_y & SCALE_SUBPEL_MASK };

      // Get warp types.
      const WarpedMotionParams *const wm =
          &xd->global_motion[mi->ref_frame[ref_num]];
      const int is_global = is_global_mv_block(mi, wm->wmtype);
      WarpTypesAllowed warp_types;
      warp_types.global_warp_allowed = is_global;
      warp_types.local_warp_allowed = mi->motion_mode == WARPED_CAUSAL;

      // Get convolve parameters.
      ConvolveParams conv_params = get_conv_params(0, plane, xd->bd);
      const InterpFilters filters =
          av1_broadcast_interp_filter(EIGHTTAP_REGULAR);

      // Get the inter predictor.
      const int build_for_obmc = 0;
      av1_make_inter_predictor(pre, pre_buf->stride, comp_pred, width,
                               &subpel_params, sf, width, height, &conv_params,
                               filters, &warp_types, mi_x >> pd->subsampling_x,
                               mi_y >> pd->subsampling_y, plane, ref_num, mi,
                               build_for_obmc, xd, cm->allow_warped_motion);

      return;
    }
  }

  const InterpFilterParams *filter = av1_get_filter(subpel_search);
  // TODO(yunqing): The 2-tap case uses 4-tap functions since there is no SIMD
  // for 2-tap yet.
  int filter_taps = (subpel_search <= USE_4_TAPS) ? 4 : SUBPEL_TAPS;

  if (!subpel_x_q3 && !subpel_y_q3) {
    if (width >= 16) {
      int i;
      assert(!(width & 15));
      /*Read 16 pixels one row at a time.*/
      for (i = 0; i < height; i++) {
        int j;
        for (j = 0; j < width; j += 16) {
          xx_storeu_128(comp_pred, xx_loadu_128(ref));
          comp_pred += 16;
          ref += 16;
        }
        ref += ref_stride - width;
      }
    } else if (width >= 8) {
      int i;
      assert(!(width & 7));
      assert(!(height & 1));
      /*Read 8 pixels two rows at a time.*/
      for (i = 0; i < height; i += 2) {
        __m128i s0 = xx_loadl_64(ref + 0 * ref_stride);
        __m128i s1 = xx_loadl_64(ref + 1 * ref_stride);
        xx_storeu_128(comp_pred, _mm_unpacklo_epi64(s0, s1));
        comp_pred += 16;
        ref += 2 * ref_stride;
      }
    } else {
      int i;
      assert(!(width & 3));
      assert(!(height & 3));
      /*Read 4 pixels four rows at a time.*/
      for (i = 0; i < height; i += 4) {
        const __m128i row0 = xx_loadl_64(ref + 0 * ref_stride);
        const __m128i row1 = xx_loadl_64(ref + 1 * ref_stride);
        const __m128i row2 = xx_loadl_64(ref + 2 * ref_stride);
        const __m128i row3 = xx_loadl_64(ref + 3 * ref_stride);
        const __m128i reg = _mm_unpacklo_epi64(_mm_unpacklo_epi32(row0, row1),
                                               _mm_unpacklo_epi32(row2, row3));
        xx_storeu_128(comp_pred, reg);
        comp_pred += 16;
        ref += 4 * ref_stride;
      }
    }
  } else if (!subpel_y_q3) {
    const int16_t *const kernel =
        av1_get_interp_filter_subpel_kernel(filter, subpel_x_q3 << 1);
    aom_convolve8_horiz(ref, ref_stride, comp_pred, width, kernel, 16, NULL,
                        -1, width, height);
  } else if (!subpel_x_q3) {
    const int16_t *const kernel =
        av1_get_interp_filter_subpel_kernel(filter, subpel_y_q3 << 1);
    aom_convolve8_vert(ref, ref_stride, comp_pred, width, NULL, -1, kernel, 16,
                       width, height);
  } else {
    DECLARE_ALIGNED(16, uint8_t,
                    temp[((MAX_SB_SIZE * 2 + 16) + 16) * MAX_SB_SIZE]);
    const int16_t *const kernel_x =
        av1_get_interp_filter_subpel_kernel(filter, subpel_x_q3 << 1);
    const int16_t *const kernel_y =
        av1_get_interp_filter_subpel_kernel(filter, subpel_y_q3 << 1);
    const uint8_t *ref_start = ref - ref_stride * ((filter_taps >> 1) - 1);
    uint8_t *temp_start_horiz = (subpel_search <= USE_4_TAPS)
                                    ? temp + (filter_taps >> 1) * MAX_SB_SIZE
                                    : temp;
    uint8_t *temp_start_vert = temp + MAX_SB_SIZE * ((filter->taps >> 1) - 1);
    int intermediate_height =
        (((height - 1) * 8 + subpel_y_q3) >> 3) + filter_taps;
    assert(intermediate_height <= (MAX_SB_SIZE * 2 + 16) + 16);
    aom_convolve8_horiz(ref_start, ref_stride, temp_start_horiz, MAX_SB_SIZE,
                        kernel_x, 16, NULL, -1, width, intermediate_height);
    aom_convolve8_vert(temp_start_vert, MAX_SB_SIZE, comp_pred, width, NULL,
                       -1, kernel_y, 16, width, height);
  }
}

void aom_comp_avg_upsampled_pred_sse2(
    MACROBLOCKD *xd, const struct AV1Common *const cm, int mi_row, int mi_col,
    const MV *const mv, uint8_t *comp_pred, const uint8_t *pred, int width,
    int height, int subpel_x_q3, int subpel_y_q3, const uint8_t *ref,
    int ref_stride, int subpel_search) {
  int n;
  int i;
  aom_upsampled_pred(xd, cm, mi_row, mi_col, mv, comp_pred, width, height,
                     subpel_x_q3, subpel_y_q3, ref, ref_stride, subpel_search);
  /*The total number of pixels must be a multiple of 16 (e.g., 4x4).*/
  assert(!(width * height & 15));
  n = width * height >> 4;
  for (i = 0; i < n; i++) {
    __m128i s0 = xx_loadu_128(comp_pred);
    __m128i p0 = xx_loadu_128(pred);
    xx_storeu_128(comp_pred, _mm_avg_epu8(s0, p0));
    comp_pred += 16;
    pred += 16;
  }
}
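
// Note that _mm_avg_epu8 computes the rounded average (a + b + 1) >> 1 for
// each byte, so the loop above averages the upsampled prediction with pred,
// 16 pixels per iteration.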

void aom_comp_mask_upsampled_pred_sse2(
    MACROBLOCKD *xd, const AV1_COMMON *const cm, int mi_row, int mi_col,
    const MV *const mv, uint8_t *comp_pred, const uint8_t *pred, int width,
    int height, int subpel_x_q3, int subpel_y_q3, const uint8_t *ref,
    int ref_stride, const uint8_t *mask, int mask_stride, int invert_mask,
    int subpel_search) {
  if (subpel_x_q3 | subpel_y_q3) {
    aom_upsampled_pred(xd, cm, mi_row, mi_col, mv, comp_pred, width, height,
                       subpel_x_q3, subpel_y_q3, ref, ref_stride,
                       subpel_search);
    ref = comp_pred;
    ref_stride = width;
  }
  aom_comp_mask_pred(comp_pred, pred, width, height, ref, ref_stride, mask,
                     mask_stride, invert_mask);
}

static INLINE __m128i highbd_comp_mask_pred_line_sse2(const __m128i s0,
                                                      const __m128i s1,
                                                      const __m128i a) {
  const __m128i alpha_max = _mm_set1_epi16((1 << AOM_BLEND_A64_ROUND_BITS));
  const __m128i round_const =
      _mm_set1_epi32((1 << AOM_BLEND_A64_ROUND_BITS) >> 1);
  const __m128i a_inv = _mm_sub_epi16(alpha_max, a);

  const __m128i s_lo = _mm_unpacklo_epi16(s0, s1);
  const __m128i a_lo = _mm_unpacklo_epi16(a, a_inv);
  const __m128i pred_lo = _mm_madd_epi16(s_lo, a_lo);
  const __m128i pred_l = _mm_srai_epi32(_mm_add_epi32(pred_lo, round_const),
                                        AOM_BLEND_A64_ROUND_BITS);

  const __m128i s_hi = _mm_unpackhi_epi16(s0, s1);
  const __m128i a_hi = _mm_unpackhi_epi16(a, a_inv);
  const __m128i pred_hi = _mm_madd_epi16(s_hi, a_hi);
  const __m128i pred_h = _mm_srai_epi32(_mm_add_epi32(pred_hi, round_const),
                                        AOM_BLEND_A64_ROUND_BITS);

  const __m128i comp = _mm_packs_epi32(pred_l, pred_h);

  return comp;
}
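
// The helper above blends eight 16-bit pixels per call using the usual
// 64-weight alpha blend:
//   comp = (s0 * a + s1 * (alpha_max - a) + round) >> AOM_BLEND_A64_ROUND_BITS
// with alpha_max == 1 << AOM_BLEND_A64_ROUND_BITS and round equal to half of
// alpha_max. Interleaving the pixels with _mm_unpack*_epi16 lets a single
// _mm_madd_epi16 compute s0 * a + s1 * (alpha_max - a) in one step.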

void aom_highbd_comp_mask_pred_sse2(uint8_t *comp_pred8, const uint8_t *pred8,
                                    int width, int height, const uint8_t *ref8,
                                    int ref_stride, const uint8_t *mask,
                                    int mask_stride, int invert_mask) {
  int i = 0;
  uint16_t *comp_pred = CONVERT_TO_SHORTPTR(comp_pred8);
  uint16_t *pred = CONVERT_TO_SHORTPTR(pred8);
  uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
  const uint16_t *src0 = invert_mask ? pred : ref;
  const uint16_t *src1 = invert_mask ? ref : pred;
  const int stride0 = invert_mask ? width : ref_stride;
  const int stride1 = invert_mask ? ref_stride : width;
  const __m128i zero = _mm_setzero_si128();

  if (width == 8) {
    do {
      const __m128i s0 = _mm_loadu_si128((const __m128i *)(src0));
      const __m128i s1 = _mm_loadu_si128((const __m128i *)(src1));
      const __m128i m_8 = _mm_loadl_epi64((const __m128i *)mask);
      const __m128i m_16 = _mm_unpacklo_epi8(m_8, zero);

      const __m128i comp = highbd_comp_mask_pred_line_sse2(s0, s1, m_16);

      _mm_storeu_si128((__m128i *)comp_pred, comp);

      src0 += stride0;
      src1 += stride1;
      mask += mask_stride;
      comp_pred += width;
      i += 1;
    } while (i < height);
  } else if (width == 16) {
    do {
      const __m128i s0 = _mm_loadu_si128((const __m128i *)(src0));
      const __m128i s2 = _mm_loadu_si128((const __m128i *)(src0 + 8));
      const __m128i s1 = _mm_loadu_si128((const __m128i *)(src1));
      const __m128i s3 = _mm_loadu_si128((const __m128i *)(src1 + 8));

      const __m128i m_8 = _mm_loadu_si128((const __m128i *)mask);
      const __m128i m01_16 = _mm_unpacklo_epi8(m_8, zero);
      const __m128i m23_16 = _mm_unpackhi_epi8(m_8, zero);

      const __m128i comp = highbd_comp_mask_pred_line_sse2(s0, s1, m01_16);
      const __m128i comp1 = highbd_comp_mask_pred_line_sse2(s2, s3, m23_16);

      _mm_storeu_si128((__m128i *)comp_pred, comp);
      _mm_storeu_si128((__m128i *)(comp_pred + 8), comp1);

      src0 += stride0;
      src1 += stride1;
      mask += mask_stride;
      comp_pred += width;
      i += 1;
    } while (i < height);
  } else if (width == 32) {
    do {
      for (int j = 0; j < 2; j++) {
        const __m128i s0 = _mm_loadu_si128((const __m128i *)(src0 + j * 16));
        const __m128i s2 =
            _mm_loadu_si128((const __m128i *)(src0 + 8 + j * 16));
        const __m128i s1 = _mm_loadu_si128((const __m128i *)(src1 + j * 16));
        const __m128i s3 =
            _mm_loadu_si128((const __m128i *)(src1 + 8 + j * 16));

        const __m128i m_8 = _mm_loadu_si128((const __m128i *)(mask + j * 16));
        const __m128i m01_16 = _mm_unpacklo_epi8(m_8, zero);
        const __m128i m23_16 = _mm_unpackhi_epi8(m_8, zero);

        const __m128i comp = highbd_comp_mask_pred_line_sse2(s0, s1, m01_16);
        const __m128i comp1 = highbd_comp_mask_pred_line_sse2(s2, s3, m23_16);

        _mm_storeu_si128((__m128i *)(comp_pred + j * 16), comp);
        _mm_storeu_si128((__m128i *)(comp_pred + 8 + j * 16), comp1);
      }
      src0 += stride0;
      src1 += stride1;
      mask += mask_stride;
      comp_pred += width;
      i += 1;
    } while (i < height);
  }
}