/*
 * Copyright (c) 2018, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */
#include <smmintrin.h>
#include <immintrin.h>

#include "config/aom_dsp_rtcd.h"

#include "aom_ports/mem.h"
#include "aom_dsp/x86/synonyms.h"
#include "aom_dsp/x86/synonyms_avx2.h"

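// Accumulates into *sum the squared differences of one 32-pixel row of
// 8-bit pixels: bytes are zero-extended to 16 bits, differenced, then
// squared and pair-summed into eight 32-bit lanes by _mm256_madd_epi16.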
static INLINE void sse_w32_avx2(__m256i *sum, const uint8_t *a,
                                const uint8_t *b) {
  const __m256i v_a0 = yy_loadu_256(a);
  const __m256i v_b0 = yy_loadu_256(b);
  const __m256i zero = _mm256_setzero_si256();
  const __m256i v_a00_w = _mm256_unpacklo_epi8(v_a0, zero);
  const __m256i v_a01_w = _mm256_unpackhi_epi8(v_a0, zero);
  const __m256i v_b00_w = _mm256_unpacklo_epi8(v_b0, zero);
  const __m256i v_b01_w = _mm256_unpackhi_epi8(v_b0, zero);
  const __m256i v_d00_w = _mm256_sub_epi16(v_a00_w, v_b00_w);
  const __m256i v_d01_w = _mm256_sub_epi16(v_a01_w, v_b01_w);
  *sum = _mm256_add_epi32(*sum, _mm256_madd_epi16(v_d00_w, v_d00_w));
  *sum = _mm256_add_epi32(*sum, _mm256_madd_epi16(v_d01_w, v_d01_w));
}

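// Zero-extends the eight 32-bit partial sums in *sum_all to 64 bits and
// reduces them horizontally to a single int64_t total.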
static INLINE int64_t summary_all_avx2(const __m256i *sum_all) {
  int64_t sum;
  __m256i zero = _mm256_setzero_si256();
  const __m256i sum0_4x64 = _mm256_unpacklo_epi32(*sum_all, zero);
  const __m256i sum1_4x64 = _mm256_unpackhi_epi32(*sum_all, zero);
  const __m256i sum_4x64 = _mm256_add_epi64(sum0_4x64, sum1_4x64);
  const __m128i sum_2x64 = _mm_add_epi64(_mm256_castsi256_si128(sum_4x64),
                                         _mm256_extracti128_si256(sum_4x64, 1));
  const __m128i sum_1x64 = _mm_add_epi64(sum_2x64, _mm_srli_si128(sum_2x64, 8));
  xx_storel_64(&sum, sum_1x64);
  return sum;
}

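// Folds the eight 32-bit partial sums in *sum32 into the four 64-bit lanes
// of *sum, zero-extending so the 32-bit lanes are treated as unsigned.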
static INLINE void summary_32_avx2(const __m256i *sum32, __m256i *sum) {
  const __m256i sum0_4x64 =
      _mm256_cvtepu32_epi64(_mm256_castsi256_si128(*sum32));
  const __m256i sum1_4x64 =
      _mm256_cvtepu32_epi64(_mm256_extracti128_si256(*sum32, 1));
  const __m256i sum_4x64 = _mm256_add_epi64(sum0_4x64, sum1_4x64);
  *sum = _mm256_add_epi64(*sum, sum_4x64);
}

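// Reduces the four 64-bit lanes of sum_4x64 to a single int64_t total.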
static INLINE int64_t summary_4x64_avx2(const __m256i sum_4x64) {
  int64_t sum;
  const __m128i sum_2x64 = _mm_add_epi64(_mm256_castsi256_si128(sum_4x64),
                                         _mm256_extracti128_si256(sum_4x64, 1));
  const __m128i sum_1x64 = _mm_add_epi64(sum_2x64, _mm_srli_si128(sum_2x64, 8));

  xx_storel_64(&sum, sum_1x64);
  return sum;
}

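// Accumulates the squared differences of a 4x4 block of 8-bit pixels into
// the 32-bit lanes of *sum.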
static INLINE void sse_w4x4_avx2(const uint8_t *a, int a_stride,
                                 const uint8_t *b, int b_stride, __m256i *sum) {
  const __m128i v_a0 = xx_loadl_32(a);
  const __m128i v_a1 = xx_loadl_32(a + a_stride);
  const __m128i v_a2 = xx_loadl_32(a + a_stride * 2);
  const __m128i v_a3 = xx_loadl_32(a + a_stride * 3);
  const __m128i v_b0 = xx_loadl_32(b);
  const __m128i v_b1 = xx_loadl_32(b + b_stride);
  const __m128i v_b2 = xx_loadl_32(b + b_stride * 2);
  const __m128i v_b3 = xx_loadl_32(b + b_stride * 3);
  const __m128i v_a0123 = _mm_unpacklo_epi64(_mm_unpacklo_epi32(v_a0, v_a1),
                                             _mm_unpacklo_epi32(v_a2, v_a3));
  const __m128i v_b0123 = _mm_unpacklo_epi64(_mm_unpacklo_epi32(v_b0, v_b1),
                                             _mm_unpacklo_epi32(v_b2, v_b3));
  const __m256i v_a_w = _mm256_cvtepu8_epi16(v_a0123);
  const __m256i v_b_w = _mm256_cvtepu8_epi16(v_b0123);
  const __m256i v_d_w = _mm256_sub_epi16(v_a_w, v_b_w);
  *sum = _mm256_add_epi32(*sum, _mm256_madd_epi16(v_d_w, v_d_w));
}
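// Accumulates the squared differences of an 8x2 block of 8-bit pixels into
// the 32-bit lanes of *sum.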
static INLINE void sse_w8x2_avx2(const uint8_t *a, int a_stride,
                                 const uint8_t *b, int b_stride, __m256i *sum) {
  const __m128i v_a0 = xx_loadl_64(a);
  const __m128i v_a1 = xx_loadl_64(a + a_stride);
  const __m128i v_b0 = xx_loadl_64(b);
  const __m128i v_b1 = xx_loadl_64(b + b_stride);
  const __m256i v_a_w = _mm256_cvtepu8_epi16(_mm_unpacklo_epi64(v_a0, v_a1));
  const __m256i v_b_w = _mm256_cvtepu8_epi16(_mm_unpacklo_epi64(v_b0, v_b1));
  const __m256i v_d_w = _mm256_sub_epi16(v_a_w, v_b_w);
  *sum = _mm256_add_epi32(*sum, _mm256_madd_epi16(v_d_w, v_d_w));
}
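// Returns the sum of squared differences between two 8-bit buffers of
// dimensions width x height. Partial sums are kept in eight 32-bit lanes
// and reduced to 64 bits once at the end; with 8-bit input the per-lane
// totals stay within 32 bits for the block sizes used here (unlike the
// high-bitdepth version below, which must fold to 64 bits periodically).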
int64_t aom_sse_avx2(const uint8_t *a, int a_stride, const uint8_t *b,
                     int b_stride, int width, int height) {
  int32_t y = 0;
  int64_t sse = 0;
  __m256i sum = _mm256_setzero_si256();
  __m256i zero = _mm256_setzero_si256();
  switch (width) {
    case 4:
      do {
        sse_w4x4_avx2(a, a_stride, b, b_stride, &sum);
        a += a_stride << 2;
        b += b_stride << 2;
        y += 4;
      } while (y < height);
      sse = summary_all_avx2(&sum);
      break;
    case 8:
      do {
        sse_w8x2_avx2(a, a_stride, b, b_stride, &sum);
        a += a_stride << 1;
        b += b_stride << 1;
        y += 2;
      } while (y < height);
      sse = summary_all_avx2(&sum);
      break;
    case 16:
      do {
        const __m128i v_a0 = xx_loadu_128(a);
        const __m128i v_a1 = xx_loadu_128(a + a_stride);
        const __m128i v_b0 = xx_loadu_128(b);
        const __m128i v_b1 = xx_loadu_128(b + b_stride);
        const __m256i v_a =
            _mm256_insertf128_si256(_mm256_castsi128_si256(v_a0), v_a1, 0x01);
        const __m256i v_b =
            _mm256_insertf128_si256(_mm256_castsi128_si256(v_b0), v_b1, 0x01);
        const __m256i v_al = _mm256_unpacklo_epi8(v_a, zero);
        const __m256i v_au = _mm256_unpackhi_epi8(v_a, zero);
        const __m256i v_bl = _mm256_unpacklo_epi8(v_b, zero);
        const __m256i v_bu = _mm256_unpackhi_epi8(v_b, zero);
        const __m256i v_asub = _mm256_sub_epi16(v_al, v_bl);
        const __m256i v_bsub = _mm256_sub_epi16(v_au, v_bu);
        const __m256i temp =
            _mm256_add_epi32(_mm256_madd_epi16(v_asub, v_asub),
                             _mm256_madd_epi16(v_bsub, v_bsub));
        sum = _mm256_add_epi32(sum, temp);
        a += a_stride << 1;
        b += b_stride << 1;
        y += 2;
      } while (y < height);
      sse = summary_all_avx2(&sum);
      break;
    case 32:
      do {
        sse_w32_avx2(&sum, a, b);
        a += a_stride;
        b += b_stride;
        y += 1;
      } while (y < height);
      sse = summary_all_avx2(&sum);
      break;
    case 64:
      do {
        sse_w32_avx2(&sum, a, b);
        sse_w32_avx2(&sum, a + 32, b + 32);
        a += a_stride;
        b += b_stride;
        y += 1;
      } while (y < height);
      sse = summary_all_avx2(&sum);
      break;
    case 128:
      do {
        sse_w32_avx2(&sum, a, b);
        sse_w32_avx2(&sum, a + 32, b + 32);
        sse_w32_avx2(&sum, a + 64, b + 64);
        sse_w32_avx2(&sum, a + 96, b + 96);
        a += a_stride;
        b += b_stride;
        y += 1;
      } while (y < height);
      sse = summary_all_avx2(&sum);
      break;
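    // Remaining widths. Multiples of 8 are covered two rows at a time in
    // 8-wide chunks; other multiples of 4 use 8-wide chunks plus a 4-wide
    // tail, four rows at a time.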
    default:
      if ((width & 0x07) == 0) {
        do {
          int i = 0;
          do {
            sse_w8x2_avx2(a + i, a_stride, b + i, b_stride, &sum);
            i += 8;
          } while (i < width);
          a += a_stride << 1;
          b += b_stride << 1;
          y += 2;
        } while (y < height);
      } else {
        do {
          int i = 0;
          do {
            sse_w8x2_avx2(a + i, a_stride, b + i, b_stride, &sum);
            const uint8_t *a2 = a + i + (a_stride << 1);
            const uint8_t *b2 = b + i + (b_stride << 1);
            sse_w8x2_avx2(a2, a_stride, b2, b_stride, &sum);
            i += 8;
          } while (i + 4 < width);
          sse_w4x4_avx2(a + i, a_stride, b + i, b_stride, &sum);
          a += a_stride << 2;
          b += b_stride << 2;
          y += 4;
        } while (y < height);
      }
      sse = summary_all_avx2(&sum);
      break;
  }

  return sse;
}

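// Accumulates the squared differences of one 16-pixel row of 16-bit pixels
// into the eight 32-bit lanes of *sum.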
static INLINE void highbd_sse_w16_avx2(__m256i *sum, const uint16_t *a,
                                       const uint16_t *b) {
  const __m256i v_a_w = yy_loadu_256(a);
  const __m256i v_b_w = yy_loadu_256(b);
  const __m256i v_d_w = _mm256_sub_epi16(v_a_w, v_b_w);
  *sum = _mm256_add_epi32(*sum, _mm256_madd_epi16(v_d_w, v_d_w));
}

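// Accumulates the squared differences of a 4x4 block of 16-bit pixels into
// the 32-bit lanes of *sum.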
static INLINE void highbd_sse_w4x4_avx2(__m256i *sum, const uint16_t *a,
                                        int a_stride, const uint16_t *b,
                                        int b_stride) {
  const __m128i v_a0 = xx_loadl_64(a);
  const __m128i v_a1 = xx_loadl_64(a + a_stride);
  const __m128i v_a2 = xx_loadl_64(a + a_stride * 2);
  const __m128i v_a3 = xx_loadl_64(a + a_stride * 3);
  const __m128i v_b0 = xx_loadl_64(b);
  const __m128i v_b1 = xx_loadl_64(b + b_stride);
  const __m128i v_b2 = xx_loadl_64(b + b_stride * 2);
  const __m128i v_b3 = xx_loadl_64(b + b_stride * 3);
  const __m256i v_a_w = yy_set_m128i(_mm_unpacklo_epi64(v_a0, v_a1),
                                     _mm_unpacklo_epi64(v_a2, v_a3));
  const __m256i v_b_w = yy_set_m128i(_mm_unpacklo_epi64(v_b0, v_b1),
                                     _mm_unpacklo_epi64(v_b2, v_b3));
  const __m256i v_d_w = _mm256_sub_epi16(v_a_w, v_b_w);
  *sum = _mm256_add_epi32(*sum, _mm256_madd_epi16(v_d_w, v_d_w));
}

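// Accumulates the squared differences of an 8x2 block of 16-bit pixels into
// the 32-bit lanes of *sum.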
static INLINE void highbd_sse_w8x2_avx2(__m256i *sum, const uint16_t *a,
                                        int a_stride, const uint16_t *b,
                                        int b_stride) {
  const __m256i v_a_w = yy_loadu2_128(a + a_stride, a);
  const __m256i v_b_w = yy_loadu2_128(b + b_stride, b);
  const __m256i v_d_w = _mm256_sub_epi16(v_a_w, v_b_w);
  *sum = _mm256_add_epi32(*sum, _mm256_madd_epi16(v_d_w, v_d_w));
}
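// High-bitdepth variant: a8 and b8 alias uint16_t buffers, recovered with
// CONVERT_TO_SHORTPTR. A single squared difference can reach 4095^2 at 12
// bits, so for the wider sizes the 32-bit lane sums are folded into 64-bit
// accumulators at fixed row intervals to avoid overflow.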
int64_t aom_highbd_sse_avx2(const uint8_t *a8, int a_stride, const uint8_t *b8,
                            int b_stride, int width, int height) {
  int32_t y = 0;
  int64_t sse = 0;
  uint16_t *a = CONVERT_TO_SHORTPTR(a8);
  uint16_t *b = CONVERT_TO_SHORTPTR(b8);
  __m256i sum = _mm256_setzero_si256();
  switch (width) {
    case 4:
      do {
        highbd_sse_w4x4_avx2(&sum, a, a_stride, b, b_stride);
        a += a_stride << 2;
        b += b_stride << 2;
        y += 4;
      } while (y < height);
      sse = summary_all_avx2(&sum);
      break;
    case 8:
      do {
        highbd_sse_w8x2_avx2(&sum, a, a_stride, b, b_stride);
        a += a_stride << 1;
        b += b_stride << 1;
        y += 2;
      } while (y < height);
      sse = summary_all_avx2(&sum);
      break;
    case 16:
      do {
        highbd_sse_w16_avx2(&sum, a, b);
        a += a_stride;
        b += b_stride;
        y += 1;
      } while (y < height);
      sse = summary_all_avx2(&sum);
      break;
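    // For widths >= 32, each highbd_sse_w16_avx2 call can add up to
    // 2 * 4095^2 to a 32-bit lane, so sum32 is drained into the 64-bit
    // accumulator after a bounded number of rows (64, 32, or 16 below).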
    case 32:
      do {
        int l = 0;
        __m256i sum32 = _mm256_setzero_si256();
        do {
          highbd_sse_w16_avx2(&sum32, a, b);
          highbd_sse_w16_avx2(&sum32, a + 16, b + 16);
          a += a_stride;
          b += b_stride;
          l += 1;
        } while (l < 64 && l < (height - y));
        summary_32_avx2(&sum32, &sum);
        y += 64;
      } while (y < height);
      sse = summary_4x64_avx2(sum);
      break;
    case 64:
      do {
        int l = 0;
        __m256i sum32 = _mm256_setzero_si256();
        do {
          highbd_sse_w16_avx2(&sum32, a, b);
          highbd_sse_w16_avx2(&sum32, a + 16 * 1, b + 16 * 1);
          highbd_sse_w16_avx2(&sum32, a + 16 * 2, b + 16 * 2);
          highbd_sse_w16_avx2(&sum32, a + 16 * 3, b + 16 * 3);
          a += a_stride;
          b += b_stride;
          l += 1;
        } while (l < 32 && l < (height - y));
        summary_32_avx2(&sum32, &sum);
        y += 32;
      } while (y < height);
      sse = summary_4x64_avx2(sum);
      break;
    case 128:
      do {
        int l = 0;
        __m256i sum32 = _mm256_setzero_si256();
        do {
          highbd_sse_w16_avx2(&sum32, a, b);
          highbd_sse_w16_avx2(&sum32, a + 16 * 1, b + 16 * 1);
          highbd_sse_w16_avx2(&sum32, a + 16 * 2, b + 16 * 2);
          highbd_sse_w16_avx2(&sum32, a + 16 * 3, b + 16 * 3);
          highbd_sse_w16_avx2(&sum32, a + 16 * 4, b + 16 * 4);
          highbd_sse_w16_avx2(&sum32, a + 16 * 5, b + 16 * 5);
          highbd_sse_w16_avx2(&sum32, a + 16 * 6, b + 16 * 6);
          highbd_sse_w16_avx2(&sum32, a + 16 * 7, b + 16 * 7);
          a += a_stride;
          b += b_stride;
          l += 1;
        } while (l < 16 && l < (height - y));
        summary_32_avx2(&sum32, &sum);
        y += 16;
      } while (y < height);
      sse = summary_4x64_avx2(sum);
      break;
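    // Remaining widths. Multiples of 4 that are not multiples of 8 use
    // 8-wide chunks plus a 4-wide tail, four rows at a time; multiples of 8
    // go two rows at a time, draining sum32 every 8 rows.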
    default:
      if (width & 0x7) {
        do {
          int i = 0;
          __m256i sum32 = _mm256_setzero_si256();
          do {
            highbd_sse_w8x2_avx2(&sum32, a + i, a_stride, b + i, b_stride);
            const uint16_t *a2 = a + i + (a_stride << 1);
            const uint16_t *b2 = b + i + (b_stride << 1);
            highbd_sse_w8x2_avx2(&sum32, a2, a_stride, b2, b_stride);
            i += 8;
          } while (i + 4 < width);
          highbd_sse_w4x4_avx2(&sum32, a + i, a_stride, b + i, b_stride);
          summary_32_avx2(&sum32, &sum);
          a += a_stride << 2;
          b += b_stride << 2;
          y += 4;
        } while (y < height);
      } else {
        do {
          int l = 0;
          __m256i sum32 = _mm256_setzero_si256();
          do {
            int i = 0;
            do {
              highbd_sse_w8x2_avx2(&sum32, a + i, a_stride, b + i, b_stride);
              i += 8;
            } while (i < width);
            a += a_stride << 1;
            b += b_stride << 1;
            l += 2;
          } while (l < 8 && l < (height - y));
          summary_32_avx2(&sum32, &sum);
          y += 8;
        } while (y < height);
      }
      sse = summary_4x64_avx2(sum);
      break;
  }
  return sse;
}