/*
 * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <immintrin.h>  // AVX2

#include "./vp9_rtcd.h"
#include "vpx/vpx_integer.h"

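// Contract, as inferred from the code below: returns the sum of squared
// differences between coeff and dqcoeff over block_size elements, and
// writes the sum of squared coeff values to *ssz. block_size is assumed
// to be a multiple of 16, since each iteration consumes 16 int16_t values
// from each buffer.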
int64_t vp9_block_error_avx2(const int16_t *coeff,
                             const int16_t *dqcoeff,
                             intptr_t block_size,
                             int64_t *ssz) {
  __m256i sse_reg, ssz_reg, coeff_reg, dqcoeff_reg;
  __m256i exp_dqcoeff_lo, exp_dqcoeff_hi, exp_coeff_lo, exp_coeff_hi;
  __m256i sse_reg_64hi, ssz_reg_64hi;
  __m128i sse_reg128, ssz_reg128;
  int64_t sse;
  int i;
  const __m256i zero_reg = _mm256_set1_epi16(0);

  // init sse and ssz registers to zero
  sse_reg = _mm256_set1_epi16(0);
  ssz_reg = _mm256_set1_epi16(0);
  for (i = 0; i < block_size; i += 16) {
    // load 32 bytes from coeff and dqcoeff
    coeff_reg = _mm256_loadu_si256((const __m256i *)(coeff + i));
    dqcoeff_reg = _mm256_loadu_si256((const __m256i *)(dqcoeff + i));
    // dqcoeff - coeff
    dqcoeff_reg = _mm256_sub_epi16(dqcoeff_reg, coeff_reg);
    // madd (dqcoeff - coeff)
    dqcoeff_reg = _mm256_madd_epi16(dqcoeff_reg, dqcoeff_reg);
    // madd coeff
    coeff_reg = _mm256_madd_epi16(coeff_reg, coeff_reg);
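    // each madd result is the sum of two squares (at most 2 * 32767^2,
    // which still fits in a signed 32-bit value), so it is non-negative
    // and zero-extending it to 64 bits via the unpacks below is exact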
    // expand each double word of madd (dqcoeff - coeff) to quad word
    exp_dqcoeff_lo = _mm256_unpacklo_epi32(dqcoeff_reg, zero_reg);
    exp_dqcoeff_hi = _mm256_unpackhi_epi32(dqcoeff_reg, zero_reg);
    // expand each double word of madd (coeff) to quad word
    exp_coeff_lo = _mm256_unpacklo_epi32(coeff_reg, zero_reg);
    exp_coeff_hi = _mm256_unpackhi_epi32(coeff_reg, zero_reg);
    // add each quad word of madd (dqcoeff - coeff) and madd (coeff)
    sse_reg = _mm256_add_epi64(sse_reg, exp_dqcoeff_lo);
    ssz_reg = _mm256_add_epi64(ssz_reg, exp_coeff_lo);
    sse_reg = _mm256_add_epi64(sse_reg, exp_dqcoeff_hi);
    ssz_reg = _mm256_add_epi64(ssz_reg, exp_coeff_hi);
  }
  // save the higher 64 bits of each 128-bit lane
  sse_reg_64hi = _mm256_srli_si256(sse_reg, 8);
  ssz_reg_64hi = _mm256_srli_si256(ssz_reg, 8);
  // add the higher 64 bits to the lower 64 bits
  sse_reg = _mm256_add_epi64(sse_reg, sse_reg_64hi);
  ssz_reg = _mm256_add_epi64(ssz_reg, ssz_reg_64hi);

  // add the 64-bit sums from the two 128-bit lanes of the 256-bit register
  sse_reg128 = _mm_add_epi64(_mm256_castsi256_si128(sse_reg),
                             _mm256_extractf128_si256(sse_reg, 1));

  ssz_reg128 = _mm_add_epi64(_mm256_castsi256_si128(ssz_reg),
                             _mm256_extractf128_si256(ssz_reg, 1));

  // store the results
  _mm_storel_epi64((__m128i *)(&sse), sse_reg128);

  _mm_storel_epi64((__m128i *)(ssz), ssz_reg128);
  return sse;
}
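// Minimal usage sketch (an illustration, not part of libvpx): fills two
// buffers, then checks the AVX2 result against a straightforward scalar
// reference. The block_error_ref helper and all buffer contents here are
// assumptions made for the example; vp9_block_error_avx2 uses unaligned
// loads, so no special buffer alignment is required.
#if 0
#include <inttypes.h>
#include <stdio.h>

// scalar reference written for this sketch
static int64_t block_error_ref(const int16_t *coeff, const int16_t *dqcoeff,
                               intptr_t block_size, int64_t *ssz) {
  int64_t error = 0, sqcoeff = 0;
  intptr_t i;
  for (i = 0; i < block_size; i++) {
    const int diff = dqcoeff[i] - coeff[i];
    error += (int64_t)diff * diff;
    sqcoeff += (int64_t)coeff[i] * coeff[i];
  }
  *ssz = sqcoeff;
  return error;
}

int main(void) {
  int16_t coeff[256], dqcoeff[256];
  int64_t sse_avx2, sse_ref, ssz_avx2, ssz_ref;
  int i;
  // arbitrary test data covering negative and positive coefficients
  for (i = 0; i < 256; i++) {
    coeff[i] = (int16_t)(i - 128);
    dqcoeff[i] = (int16_t)(i - 131);
  }
  sse_avx2 = vp9_block_error_avx2(coeff, dqcoeff, 256, &ssz_avx2);
  sse_ref = block_error_ref(coeff, dqcoeff, 256, &ssz_ref);
  printf("sse %" PRId64 " (ref %" PRId64 "), ssz %" PRId64 " (ref %" PRId64 ")\n",
         sse_avx2, sse_ref, ssz_avx2, ssz_ref);
  return 0;
}
#endif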