/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <emmintrin.h>  // SSE2

#include "config/aom_dsp_rtcd.h"

#include "aom_dsp/x86/lpf_common_sse2.h"

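// Clamp each signed 16-bit lane of *pixel to the range [*min, *max].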
static AOM_FORCE_INLINE void pixel_clamp(const __m128i *min, const __m128i *max,
                                         __m128i *pixel) {
  *pixel = _mm_min_epi16(*pixel, *max);
  *pixel = _mm_max_epi16(*pixel, *min);
}

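// Per-lane absolute difference of unsigned 16-bit values, computed as
// (a - b) | (b - a) with unsigned saturating subtraction.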
static AOM_FORCE_INLINE __m128i abs_diff16(__m128i a, __m128i b) {
  return _mm_or_si128(_mm_subs_epu16(a, b), _mm_subs_epu16(b, a));
}

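// Expand the 8-bit blimit/limit/thresh values to 16 bits and scale them up to
// the working bit depth. t80_out is 1 << (bd - 1), the offset used to move
// pixels into signed range for filtering.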
static INLINE void get_limit(const uint8_t *bl, const uint8_t *l,
                             const uint8_t *t, int bd, __m128i *blt,
                             __m128i *lt, __m128i *thr, __m128i *t80_out) {
  const int shift = bd - 8;
  const __m128i zero = _mm_setzero_si128();

  __m128i x = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)bl), zero);
  *blt = _mm_slli_epi16(x, shift);

  x = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)l), zero);
  *lt = _mm_slli_epi16(x, shift);

  x = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)t), zero);
  *thr = _mm_slli_epi16(x, shift);

  *t80_out = _mm_set1_epi16(1 << (bd - 1));
}

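// Same as get_limit(), but packs the thresholds of two independent edges into
// the low and high 64-bit halves of each output vector.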
static INLINE void get_limit_dual(
    const uint8_t *_blimit0, const uint8_t *_limit0, const uint8_t *_thresh0,
    const uint8_t *_blimit1, const uint8_t *_limit1, const uint8_t *_thresh1,
    int bd, __m128i *blt_out, __m128i *lt_out, __m128i *thr_out,
    __m128i *t80_out) {
  const int shift = bd - 8;
  const __m128i zero = _mm_setzero_si128();

  __m128i x0 =
      _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit0), zero);
  __m128i x1 =
      _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit1), zero);
  x0 = _mm_unpacklo_epi64(x0, x1);
  *blt_out = _mm_slli_epi16(x0, shift);

  x0 = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit0), zero);
  x1 = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit1), zero);
  x0 = _mm_unpacklo_epi64(x0, x1);
  *lt_out = _mm_slli_epi16(x0, shift);

  x0 = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh0), zero);
  x1 = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh1), zero);
  x0 = _mm_unpacklo_epi64(x0, x1);
  *thr_out = _mm_slli_epi16(x0, shift);

  *t80_out = _mm_set1_epi16(1 << (bd - 1));
}

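// Load 'size' rows above and below the horizontal edge at s: p[i] is the row
// (i + 1) * pitch above s, q[i] the row i * pitch below.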
static INLINE void load_highbd_pixel(const uint16_t *s, int size, int pitch,
                                     __m128i *p, __m128i *q) {
  int i;
  for (i = 0; i < size; i++) {
    p[i] = _mm_loadu_si128((__m128i *)(s - (i + 1) * pitch));
    q[i] = _mm_loadu_si128((__m128i *)(s + i * pitch));
  }
}

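// Filter mask over 8 lanes (two adjacent 4-pixel edges): a lane is all ones
// when the blimit and limit checks both pass, all zeros otherwise.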
static INLINE void highbd_filter_mask_dual(const __m128i *p, const __m128i *q,
                                           const __m128i *l, const __m128i *bl,
                                           __m128i *mask) {
  __m128i abs_p0q0 = abs_diff16(p[0], q[0]);
  __m128i abs_p1q1 = abs_diff16(p[1], q[1]);
  abs_p0q0 = _mm_adds_epu16(abs_p0q0, abs_p0q0);
  abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1);

  const __m128i zero = _mm_setzero_si128();
  const __m128i one = _mm_set1_epi16(1);
  const __m128i ffff = _mm_set1_epi16(0xFFFF);

  __m128i max = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), *bl);
  max = _mm_xor_si128(_mm_cmpeq_epi16(max, zero), ffff);
  max = _mm_and_si128(max, _mm_adds_epu16(*l, one));

  int i;
  for (i = 1; i < 4; ++i) {
    max = _mm_max_epi16(max, abs_diff16(p[i], p[i - 1]));
    max = _mm_max_epi16(max, abs_diff16(q[i], q[i - 1]));
  }
  max = _mm_subs_epu16(max, *l);
  *mask = _mm_cmpeq_epi16(max, zero);  // return ~mask
}

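// Compute the hev (high edge variance) mask and the filter mask from x merged
// pq pairs, where pq[i] holds p[i] in its low half and q[i] in its high half.
// Also returns the merged p1p0/q1q0 vectors and |p1 - p0| for reuse.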
static INLINE void highbd_hev_filter_mask_x_sse2(__m128i *pq, int x,
                                                 __m128i *p1p0, __m128i *q1q0,
                                                 __m128i *abs_p1p0, __m128i *l,
                                                 __m128i *bl, __m128i *t,
                                                 __m128i *hev, __m128i *mask) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i one = _mm_set1_epi16(1);
  const __m128i ffff = _mm_set1_epi16(0xFFFF);
  __m128i abs_p0q0_p1q1, abs_p0q0, abs_p1q1, abs_q1q0;
  __m128i max, max01, h;

  *p1p0 = _mm_unpacklo_epi64(pq[0], pq[1]);
  *q1q0 = _mm_unpackhi_epi64(pq[0], pq[1]);

  abs_p0q0_p1q1 = abs_diff16(*p1p0, *q1q0);
  abs_p0q0 = _mm_adds_epu16(abs_p0q0_p1q1, abs_p0q0_p1q1);
  abs_p0q0 = _mm_unpacklo_epi64(abs_p0q0, zero);

  abs_p1q1 = _mm_srli_si128(abs_p0q0_p1q1, 8);
  abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1);  // divide by 2

  max = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), *bl);
  max = _mm_xor_si128(_mm_cmpeq_epi16(max, zero), ffff);
  // mask |= (abs(*p0 - *q0) * 2 + abs(*p1 - *q1) / 2 > blimit) * -1;
  // So taking maximums continues to work:
  max = _mm_and_si128(max, _mm_adds_epu16(*l, one));

  *abs_p1p0 = abs_diff16(pq[0], pq[1]);
  abs_q1q0 = _mm_srli_si128(*abs_p1p0, 8);
  max01 = _mm_max_epi16(*abs_p1p0, abs_q1q0);
  // mask |= (abs(*p1 - *p0) > limit) * -1;
  // mask |= (abs(*q1 - *q0) > limit) * -1;
  h = _mm_subs_epu16(max01, *t);

  *hev = _mm_xor_si128(_mm_cmpeq_epi16(h, zero), ffff);
  // replicate for the further "merged variables" usage
  *hev = _mm_unpacklo_epi64(*hev, *hev);

  max = _mm_max_epi16(max, max01);
  int i;
  for (i = 2; i < x; ++i) {
    max = _mm_max_epi16(max, abs_diff16(pq[i], pq[i - 1]));
  }
  max = _mm_max_epi16(max, _mm_srli_si128(max, 8));

  max = _mm_subs_epu16(max, *l);
  *mask = _mm_cmpeq_epi16(max, zero);  // ~mask
}

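// Set a lane of *flat to all ones when each of pq[start] .. pq[end - 1]
// differs from pq[0] by no more than *th.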
static INLINE void flat_mask_internal(const __m128i *th, const __m128i *pq,
                                      int start, int end, __m128i *flat) {
  int i;
  __m128i max = _mm_max_epi16(abs_diff16(pq[start], pq[0]),
                              abs_diff16(pq[start + 1], pq[0]));

  for (i = start + 2; i < end; ++i) {
    max = _mm_max_epi16(max, abs_diff16(pq[i], pq[0]));
  }
  max = _mm_max_epi16(max, _mm_srli_si128(max, 8));

  __m128i ft;
  ft = _mm_subs_epu16(max, *th);

  const __m128i zero = _mm_setzero_si128();
  *flat = _mm_cmpeq_epi16(ft, zero);
}

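// Dual-edge variant of flat_mask_internal(), taking separate p and q arrays.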
static INLINE void flat_mask_internal_dual(const __m128i *th, const __m128i *p,
                                           const __m128i *q, int start, int end,
                                           __m128i *flat) {
  int i;
  __m128i max =
      _mm_max_epi16(abs_diff16(q[start], q[0]), abs_diff16(p[start], p[0]));

  for (i = start + 1; i < end; ++i) {
    max = _mm_max_epi16(max, abs_diff16(p[i], p[0]));
    max = _mm_max_epi16(max, abs_diff16(q[i], q[0]));
  }

  __m128i ft;
  ft = _mm_subs_epu16(max, *th);

  const __m128i zero = _mm_setzero_si128();
  *flat = _mm_cmpeq_epi16(ft, zero);
}

static INLINE void highbd_flat_mask4_sse2(__m128i *pq, __m128i *flat,
                                          __m128i *flat2, int bd) {
  // check the distance 1, 2, 3 against 0
  __m128i th = _mm_set1_epi16(1);
  th = _mm_slli_epi16(th, bd - 8);
  flat_mask_internal(&th, pq, 1, 4, flat);
  flat_mask_internal(&th, pq, 4, 7, flat2);
}

static INLINE void highbd_flat_mask4_dual_sse2(const __m128i *p,
                                               const __m128i *q, __m128i *flat,
                                               __m128i *flat2, int bd) {
  // check the distance 1, 2, 3 against 0
  __m128i th = _mm_set1_epi16(1);
  th = _mm_slli_epi16(th, bd - 8);
  flat_mask_internal_dual(&th, p, q, 1, 4, flat);
  flat_mask_internal_dual(&th, p, q, 4, 7, flat2);
}

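// Narrow 4-tap filter on the merged p1p0/q1q0 vectors. Pixels are shifted by
// -t80 into signed range, filtered, clamped to [pmin, pmax], and shifted back.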
static AOM_FORCE_INLINE void highbd_filter4_sse2(__m128i *p1p0, __m128i *q1q0,
                                                 __m128i *hev, __m128i *mask,
                                                 __m128i *qs1qs0,
                                                 __m128i *ps1ps0, __m128i *t80,
                                                 int bd) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i one = _mm_set1_epi16(1);
  const __m128i pmax =
      _mm_subs_epi16(_mm_subs_epi16(_mm_slli_epi16(one, bd), one), *t80);
  const __m128i pmin = _mm_subs_epi16(zero, *t80);

  const __m128i t3t4 = _mm_set_epi16(3, 3, 3, 3, 4, 4, 4, 4);
  __m128i ps1ps0_work, qs1qs0_work, work;
  __m128i filt, filter2filter1, filter2filt, filter1filt;

  ps1ps0_work = _mm_subs_epi16(*p1p0, *t80);
  qs1qs0_work = _mm_subs_epi16(*q1q0, *t80);

  work = _mm_subs_epi16(ps1ps0_work, qs1qs0_work);
  pixel_clamp(&pmin, &pmax, &work);
  filt = _mm_and_si128(_mm_srli_si128(work, 8), *hev);

  filt = _mm_subs_epi16(filt, work);
  filt = _mm_subs_epi16(filt, work);
  filt = _mm_subs_epi16(filt, work);
  // (aom_filter + 3 * (qs0 - ps0)) & mask
  pixel_clamp(&pmin, &pmax, &filt);
  filt = _mm_and_si128(filt, *mask);
  filt = _mm_unpacklo_epi64(filt, filt);

  filter2filter1 = _mm_adds_epi16(filt, t3t4); /* signed_short_clamp */
  pixel_clamp(&pmin, &pmax, &filter2filter1);
  filter2filter1 = _mm_srai_epi16(filter2filter1, 3); /* >> 3 */

  filt = _mm_unpacklo_epi64(filter2filter1, filter2filter1);

  // filt >> 1
  filt = _mm_adds_epi16(filt, one);
  filt = _mm_srai_epi16(filt, 1);
  filt = _mm_andnot_si128(*hev, filt);

  filter2filt = _mm_unpackhi_epi64(filter2filter1, filt);
  filter1filt = _mm_unpacklo_epi64(filter2filter1, filt);

  qs1qs0_work = _mm_subs_epi16(qs1qs0_work, filter1filt);
  ps1ps0_work = _mm_adds_epi16(ps1ps0_work, filter2filt);

  pixel_clamp(&pmin, &pmax, &qs1qs0_work);
  pixel_clamp(&pmin, &pmax, &ps1ps0_work);

  *qs1qs0 = _mm_adds_epi16(qs1qs0_work, *t80);
  *ps1ps0 = _mm_adds_epi16(ps1ps0_work, *t80);
}

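// Dual-edge variant of highbd_filter4_sse2(): filters p0/p1 and q0/q1 across
// all 8 lanes, computing the hev mask internally from *th.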
static INLINE void highbd_filter4_dual_sse2(__m128i *p, __m128i *q, __m128i *ps,
                                            __m128i *qs, const __m128i *mask,
                                            const __m128i *th, int bd,
                                            __m128i *t80) {
  __m128i ps0 = _mm_subs_epi16(p[0], *t80);
  __m128i ps1 = _mm_subs_epi16(p[1], *t80);
  __m128i qs0 = _mm_subs_epi16(q[0], *t80);
  __m128i qs1 = _mm_subs_epi16(q[1], *t80);
  const __m128i one = _mm_set1_epi16(1);
  const __m128i pmax =
      _mm_subs_epi16(_mm_subs_epi16(_mm_slli_epi16(one, bd), one), *t80);

  const __m128i zero = _mm_setzero_si128();
  const __m128i pmin = _mm_subs_epi16(zero, *t80);
  __m128i filter = _mm_subs_epi16(ps1, qs1);
  pixel_clamp(&pmin, &pmax, &filter);

  // hev_filter
  __m128i hev;
  const __m128i abs_p1p0 = abs_diff16(p[1], p[0]);
  const __m128i abs_q1q0 = abs_diff16(q[1], q[0]);
  __m128i h = _mm_max_epi16(abs_p1p0, abs_q1q0);
  h = _mm_subs_epu16(h, *th);
  const __m128i ffff = _mm_cmpeq_epi16(h, h);
  hev = _mm_xor_si128(_mm_cmpeq_epi16(h, zero), ffff);

  filter = _mm_and_si128(filter, hev);

  const __m128i x = _mm_subs_epi16(qs0, ps0);
  filter = _mm_adds_epi16(filter, x);
  filter = _mm_adds_epi16(filter, x);
  filter = _mm_adds_epi16(filter, x);
  pixel_clamp(&pmin, &pmax, &filter);
  filter = _mm_and_si128(filter, *mask);
  const __m128i t3 = _mm_set1_epi16(3);
  const __m128i t4 = _mm_set1_epi16(4);
  __m128i filter1 = _mm_adds_epi16(filter, t4);
  __m128i filter2 = _mm_adds_epi16(filter, t3);
  pixel_clamp(&pmin, &pmax, &filter1);
  pixel_clamp(&pmin, &pmax, &filter2);
  filter1 = _mm_srai_epi16(filter1, 3);
  filter2 = _mm_srai_epi16(filter2, 3);
  qs0 = _mm_subs_epi16(qs0, filter1);
  pixel_clamp(&pmin, &pmax, &qs0);
  ps0 = _mm_adds_epi16(ps0, filter2);
  pixel_clamp(&pmin, &pmax, &ps0);
  qs[0] = _mm_adds_epi16(qs0, *t80);
  ps[0] = _mm_adds_epi16(ps0, *t80);
  filter = _mm_adds_epi16(filter1, one);
  filter = _mm_srai_epi16(filter, 1);
  filter = _mm_andnot_si128(hev, filter);
  qs1 = _mm_subs_epi16(qs1, filter);
  pixel_clamp(&pmin, &pmax, &qs1);
  ps1 = _mm_adds_epi16(ps1, filter);
  pixel_clamp(&pmin, &pmax, &ps1);
  qs[1] = _mm_adds_epi16(qs1, *t80);
  ps[1] = _mm_adds_epi16(ps1, *t80);
}

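// Wide (14-tap) loop filter for one 4-pixel horizontal edge. The filter4
// output is used where only 'mask' is set, the filter8 output where 'flat' is
// also set, and the filter16 output where 'flat2' is set on top of that.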
static AOM_FORCE_INLINE void highbd_lpf_internal_14_sse2(
    __m128i *p, __m128i *q, __m128i *pq, const unsigned char *blt,
    const unsigned char *lt, const unsigned char *thr, int bd) {
  int i;
  const __m128i zero = _mm_setzero_si128();
  __m128i blimit, limit, thresh;
  __m128i t80;
  get_limit(blt, lt, thr, bd, &blimit, &limit, &thresh, &t80);

  for (i = 0; i < 7; i++) {
    pq[i] = _mm_unpacklo_epi64(p[i], q[i]);
  }
  __m128i mask, hevhev;
  __m128i p1p0, q1q0, abs_p1p0;

  highbd_hev_filter_mask_x_sse2(pq, 4, &p1p0, &q1q0, &abs_p1p0, &limit, &blimit,
                                &thresh, &hevhev, &mask);

  __m128i ps0ps1, qs0qs1;
  // filter4
  highbd_filter4_sse2(&p1p0, &q1q0, &hevhev, &mask, &qs0qs1, &ps0ps1, &t80, bd);

  __m128i flat, flat2;
  highbd_flat_mask4_sse2(pq, &flat, &flat2, bd);

  flat = _mm_and_si128(flat, mask);
  flat2 = _mm_and_si128(flat2, flat);

  // replicate for the further "merged variables" usage
  flat = _mm_unpacklo_epi64(flat, flat);
  flat2 = _mm_unpacklo_epi64(flat2, flat2);

  // flat and wide flat calculations

  // if flat == 0 then flat2 is zero as well and we don't need any calc below
  // with SSE4.1: if (0 == _mm_test_all_zeros(flat, ff))
  if (0xffff != _mm_movemask_epi8(_mm_cmpeq_epi16(flat, zero))) {
    __m128i flat_p[3], flat_q[3], flat_pq[3];
    __m128i flat2_p[6], flat2_q[6];
    __m128i flat2_pq[6];
    __m128i sum_p6, sum_p3;
    const __m128i eight = _mm_set1_epi16(8);
    const __m128i four = _mm_set1_epi16(4);

    __m128i work0, work0_0, work0_1, sum_p_0;
    __m128i sum_p = _mm_add_epi16(pq[5], _mm_add_epi16(pq[4], pq[3]));
    __m128i sum_lp = _mm_add_epi16(pq[0], _mm_add_epi16(pq[2], pq[1]));
    sum_p = _mm_add_epi16(sum_p, sum_lp);

    __m128i sum_lq = _mm_srli_si128(sum_lp, 8);
    __m128i sum_q = _mm_srli_si128(sum_p, 8);

    sum_p_0 = _mm_add_epi16(eight, _mm_add_epi16(sum_p, sum_q));
    sum_lp = _mm_add_epi16(four, _mm_add_epi16(sum_lp, sum_lq));

    flat_p[0] = _mm_add_epi16(sum_lp, _mm_add_epi16(pq[3], pq[0]));
    flat_q[0] = _mm_add_epi16(sum_lp, _mm_add_epi16(q[3], q[0]));

    sum_p6 = _mm_add_epi16(pq[6], pq[6]);
    sum_p3 = _mm_add_epi16(pq[3], pq[3]);

    sum_q = _mm_sub_epi16(sum_p_0, pq[5]);
    sum_p = _mm_sub_epi16(sum_p_0, q[5]);

    work0_0 = _mm_add_epi16(_mm_add_epi16(pq[6], pq[0]), pq[1]);
    work0_1 = _mm_add_epi16(sum_p6,
                            _mm_add_epi16(pq[1], _mm_add_epi16(pq[2], pq[0])));

    sum_lq = _mm_sub_epi16(sum_lp, pq[2]);
    sum_lp = _mm_sub_epi16(sum_lp, q[2]);

    work0 = _mm_add_epi16(sum_p3, pq[1]);
    flat_p[1] = _mm_add_epi16(sum_lp, work0);
    flat_q[1] = _mm_add_epi16(sum_lq, _mm_srli_si128(work0, 8));

    flat_pq[0] = _mm_srli_epi16(_mm_unpacklo_epi64(flat_p[0], flat_q[0]), 3);
    flat_pq[1] = _mm_srli_epi16(_mm_unpacklo_epi64(flat_p[1], flat_q[1]), 3);

    sum_lp = _mm_sub_epi16(sum_lp, q[1]);
    sum_lq = _mm_sub_epi16(sum_lq, pq[1]);

    sum_p3 = _mm_add_epi16(sum_p3, pq[3]);
    work0 = _mm_add_epi16(sum_p3, pq[2]);

    flat_p[2] = _mm_add_epi16(sum_lp, work0);
    flat_q[2] = _mm_add_epi16(sum_lq, _mm_srli_si128(work0, 8));
    flat_pq[2] = _mm_srli_epi16(_mm_unpacklo_epi64(flat_p[2], flat_q[2]), 3);

    int flat2_mask =
        (0xffff != _mm_movemask_epi8(_mm_cmpeq_epi16(flat2, zero)));
    if (flat2_mask) {
      flat2_p[0] = _mm_add_epi16(sum_p_0, _mm_add_epi16(work0_0, q[0]));
      flat2_q[0] = _mm_add_epi16(
          sum_p_0, _mm_add_epi16(_mm_srli_si128(work0_0, 8), pq[0]));

      flat2_p[1] = _mm_add_epi16(sum_p, work0_1);
      flat2_q[1] = _mm_add_epi16(sum_q, _mm_srli_si128(work0_1, 8));

      flat2_pq[0] =
          _mm_srli_epi16(_mm_unpacklo_epi64(flat2_p[0], flat2_q[0]), 4);
      flat2_pq[1] =
          _mm_srli_epi16(_mm_unpacklo_epi64(flat2_p[1], flat2_q[1]), 4);

      sum_p = _mm_sub_epi16(sum_p, q[4]);
      sum_q = _mm_sub_epi16(sum_q, pq[4]);

      sum_p6 = _mm_add_epi16(sum_p6, pq[6]);
      work0 = _mm_add_epi16(sum_p6,
                            _mm_add_epi16(pq[2], _mm_add_epi16(pq[3], pq[1])));
      flat2_p[2] = _mm_add_epi16(sum_p, work0);
      flat2_q[2] = _mm_add_epi16(sum_q, _mm_srli_si128(work0, 8));
      flat2_pq[2] =
          _mm_srli_epi16(_mm_unpacklo_epi64(flat2_p[2], flat2_q[2]), 4);

      sum_p6 = _mm_add_epi16(sum_p6, pq[6]);
      sum_p = _mm_sub_epi16(sum_p, q[3]);
      sum_q = _mm_sub_epi16(sum_q, pq[3]);

      work0 = _mm_add_epi16(sum_p6,
                            _mm_add_epi16(pq[3], _mm_add_epi16(pq[4], pq[2])));
      flat2_p[3] = _mm_add_epi16(sum_p, work0);
      flat2_q[3] = _mm_add_epi16(sum_q, _mm_srli_si128(work0, 8));
      flat2_pq[3] =
          _mm_srli_epi16(_mm_unpacklo_epi64(flat2_p[3], flat2_q[3]), 4);

      sum_p6 = _mm_add_epi16(sum_p6, pq[6]);
      sum_p = _mm_sub_epi16(sum_p, q[2]);
      sum_q = _mm_sub_epi16(sum_q, pq[2]);

      work0 = _mm_add_epi16(sum_p6,
                            _mm_add_epi16(pq[4], _mm_add_epi16(pq[5], pq[3])));
      flat2_p[4] = _mm_add_epi16(sum_p, work0);
      flat2_q[4] = _mm_add_epi16(sum_q, _mm_srli_si128(work0, 8));
      flat2_pq[4] =
          _mm_srli_epi16(_mm_unpacklo_epi64(flat2_p[4], flat2_q[4]), 4);

      sum_p6 = _mm_add_epi16(sum_p6, pq[6]);
      sum_p = _mm_sub_epi16(sum_p, q[1]);
      sum_q = _mm_sub_epi16(sum_q, pq[1]);

      work0 = _mm_add_epi16(sum_p6,
                            _mm_add_epi16(pq[5], _mm_add_epi16(pq[6], pq[4])));
      flat2_p[5] = _mm_add_epi16(sum_p, work0);
      flat2_q[5] = _mm_add_epi16(sum_q, _mm_srli_si128(work0, 8));
      flat2_pq[5] =
          _mm_srli_epi16(_mm_unpacklo_epi64(flat2_p[5], flat2_q[5]), 4);
    }  // flat2
       // ~~~~~~~~~~ apply flat ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    // highbd_filter8
    pq[0] = _mm_unpacklo_epi64(ps0ps1, qs0qs1);
    pq[1] = _mm_unpackhi_epi64(ps0ps1, qs0qs1);

    for (i = 0; i < 3; i++) {
      pq[i] = _mm_andnot_si128(flat, pq[i]);
      flat_pq[i] = _mm_and_si128(flat, flat_pq[i]);
      pq[i] = _mm_or_si128(pq[i], flat_pq[i]);
    }

    // wide flat
    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    if (flat2_mask) {
      for (i = 0; i < 6; i++) {
        pq[i] = _mm_andnot_si128(flat2, pq[i]);
        flat2_pq[i] = _mm_and_si128(flat2, flat2_pq[i]);
        pq[i] = _mm_or_si128(pq[i], flat2_pq[i]);  // full list of pq values
      }
    }
  } else {
    pq[0] = _mm_unpacklo_epi64(ps0ps1, qs0qs1);
    pq[1] = _mm_unpackhi_epi64(ps0ps1, qs0qs1);
  }
}

void aom_highbd_lpf_horizontal_14_sse2(uint16_t *s, int pitch,
                                       const uint8_t *blt, const uint8_t *lt,
                                       const uint8_t *thr, int bd) {
  __m128i p[7], q[7], pq[7];
  int i;

  for (i = 0; i < 7; i++) {
    p[i] = _mm_loadl_epi64((__m128i *)(s - (i + 1) * pitch));
    q[i] = _mm_loadl_epi64((__m128i *)(s + i * pitch));
  }

  highbd_lpf_internal_14_sse2(p, q, pq, blt, lt, thr, bd);

  for (i = 0; i < 6; i++) {
    _mm_storel_epi64((__m128i *)(s - (i + 1) * pitch), pq[i]);
    _mm_storel_epi64((__m128i *)(s + i * pitch), _mm_srli_si128(pq[i], 8));
  }
}

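// As highbd_lpf_internal_14_sse2(), but for two adjacent 4-pixel edges with
// independent thresholds, kept in full-width p[]/q[] vectors.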
static AOM_FORCE_INLINE void highbd_lpf_internal_14_dual_sse2(
    __m128i *p, __m128i *q, const uint8_t *blt0, const uint8_t *lt0,
    const uint8_t *thr0, const uint8_t *blt1, const uint8_t *lt1,
    const uint8_t *thr1, int bd) {
  __m128i blimit, limit, thresh, t80;
  const __m128i zero = _mm_setzero_si128();

  get_limit_dual(blt0, lt0, thr0, blt1, lt1, thr1, bd, &blimit, &limit, &thresh,
                 &t80);
  __m128i mask;
  highbd_filter_mask_dual(p, q, &limit, &blimit, &mask);
  __m128i flat, flat2;
  highbd_flat_mask4_dual_sse2(p, q, &flat, &flat2, bd);

  flat = _mm_and_si128(flat, mask);
  flat2 = _mm_and_si128(flat2, flat);
  __m128i ps[2], qs[2];
  highbd_filter4_dual_sse2(p, q, ps, qs, &mask, &thresh, bd, &t80);
  // flat and wide flat calculations

  // if flat == 0 then flat2 is zero as well and we don't need any calc below
  // with SSE4.1: if (0 == _mm_test_all_zeros(flat, ff))
  if (0xffff != _mm_movemask_epi8(_mm_cmpeq_epi16(flat, zero))) {
    __m128i flat_p[3], flat_q[3];
    __m128i flat2_p[6], flat2_q[6];
    const __m128i eight = _mm_set1_epi16(8);
    const __m128i four = _mm_set1_epi16(4);
    __m128i sum_p_0 = _mm_add_epi16(p[5], _mm_add_epi16(p[4], p[3]));
    __m128i sum_q = _mm_add_epi16(q[5], _mm_add_epi16(q[4], q[3]));
    __m128i sum_lp = _mm_add_epi16(p[0], _mm_add_epi16(p[2], p[1]));
    sum_p_0 = _mm_add_epi16(sum_p_0, sum_lp);
    __m128i sum_lq = _mm_add_epi16(q[0], _mm_add_epi16(q[2], q[1]));
    sum_q = _mm_add_epi16(sum_q, sum_lq);
    sum_p_0 = _mm_add_epi16(eight, _mm_add_epi16(sum_p_0, sum_q));
    sum_lp = _mm_add_epi16(four, _mm_add_epi16(sum_lp, sum_lq));
    flat_p[0] =
        _mm_srli_epi16(_mm_add_epi16(sum_lp, _mm_add_epi16(p[3], p[0])), 3);
    flat_q[0] =
        _mm_srli_epi16(_mm_add_epi16(sum_lp, _mm_add_epi16(q[3], q[0])), 3);
    __m128i sum_p6 = _mm_add_epi16(p[6], p[6]);
    __m128i sum_q6 = _mm_add_epi16(q[6], q[6]);
    __m128i sum_p3 = _mm_add_epi16(p[3], p[3]);
    __m128i sum_q3 = _mm_add_epi16(q[3], q[3]);

    sum_q = _mm_sub_epi16(sum_p_0, p[5]);
    __m128i sum_p = _mm_sub_epi16(sum_p_0, q[5]);

    sum_lq = _mm_sub_epi16(sum_lp, p[2]);
    sum_lp = _mm_sub_epi16(sum_lp, q[2]);
    flat_p[1] =
        _mm_srli_epi16(_mm_add_epi16(sum_lp, _mm_add_epi16(sum_p3, p[1])), 3);
    flat_q[1] =
        _mm_srli_epi16(_mm_add_epi16(sum_lq, _mm_add_epi16(sum_q3, q[1])), 3);

    sum_lp = _mm_sub_epi16(sum_lp, q[1]);
    sum_lq = _mm_sub_epi16(sum_lq, p[1]);
    sum_p3 = _mm_add_epi16(sum_p3, p[3]);
    sum_q3 = _mm_add_epi16(sum_q3, q[3]);
    flat_p[2] =
        _mm_srli_epi16(_mm_add_epi16(sum_lp, _mm_add_epi16(sum_p3, p[2])), 3);
    flat_q[2] =
        _mm_srli_epi16(_mm_add_epi16(sum_lq, _mm_add_epi16(sum_q3, q[2])), 3);

    int flat2_mask =
        (0xffff != _mm_movemask_epi8(_mm_cmpeq_epi16(flat2, zero)));
    if (flat2_mask) {
      flat2_p[0] = _mm_srli_epi16(
          _mm_add_epi16(sum_p_0, _mm_add_epi16(_mm_add_epi16(p[6], p[0]),
                                               _mm_add_epi16(p[1], q[0]))),
          4);
      flat2_q[0] = _mm_srli_epi16(
          _mm_add_epi16(sum_p_0, _mm_add_epi16(_mm_add_epi16(q[6], q[0]),
                                               _mm_add_epi16(p[0], q[1]))),
          4);

      flat2_p[1] = _mm_srli_epi16(
          _mm_add_epi16(
              sum_p,
              _mm_add_epi16(sum_p6,
                            _mm_add_epi16(p[1], _mm_add_epi16(p[2], p[0])))),
          4);
      flat2_q[1] = _mm_srli_epi16(
          _mm_add_epi16(
              sum_q,
              _mm_add_epi16(sum_q6,
                            _mm_add_epi16(q[1], _mm_add_epi16(q[0], q[2])))),
          4);
      sum_p6 = _mm_add_epi16(sum_p6, p[6]);
      sum_q6 = _mm_add_epi16(sum_q6, q[6]);
      sum_p = _mm_sub_epi16(sum_p, q[4]);
      sum_q = _mm_sub_epi16(sum_q, p[4]);
      flat2_p[2] = _mm_srli_epi16(
          _mm_add_epi16(
              sum_p,
              _mm_add_epi16(sum_p6,
                            _mm_add_epi16(p[2], _mm_add_epi16(p[3], p[1])))),
          4);
      flat2_q[2] = _mm_srli_epi16(
          _mm_add_epi16(
              sum_q,
              _mm_add_epi16(sum_q6,
                            _mm_add_epi16(q[2], _mm_add_epi16(q[1], q[3])))),
          4);
      sum_p6 = _mm_add_epi16(sum_p6, p[6]);
      sum_q6 = _mm_add_epi16(sum_q6, q[6]);
      sum_p = _mm_sub_epi16(sum_p, q[3]);
      sum_q = _mm_sub_epi16(sum_q, p[3]);
      flat2_p[3] = _mm_srli_epi16(
          _mm_add_epi16(
              sum_p,
              _mm_add_epi16(sum_p6,
                            _mm_add_epi16(p[3], _mm_add_epi16(p[4], p[2])))),
          4);
      flat2_q[3] = _mm_srli_epi16(
          _mm_add_epi16(
              sum_q,
              _mm_add_epi16(sum_q6,
                            _mm_add_epi16(q[3], _mm_add_epi16(q[2], q[4])))),
          4);
      sum_p6 = _mm_add_epi16(sum_p6, p[6]);
      sum_q6 = _mm_add_epi16(sum_q6, q[6]);
      sum_p = _mm_sub_epi16(sum_p, q[2]);
      sum_q = _mm_sub_epi16(sum_q, p[2]);
      flat2_p[4] = _mm_srli_epi16(
          _mm_add_epi16(
              sum_p,
              _mm_add_epi16(sum_p6,
                            _mm_add_epi16(p[4], _mm_add_epi16(p[5], p[3])))),
          4);
      flat2_q[4] = _mm_srli_epi16(
          _mm_add_epi16(
              sum_q,
              _mm_add_epi16(sum_q6,
                            _mm_add_epi16(q[4], _mm_add_epi16(q[3], q[5])))),
          4);
      sum_p6 = _mm_add_epi16(sum_p6, p[6]);
      sum_q6 = _mm_add_epi16(sum_q6, q[6]);
      sum_p = _mm_sub_epi16(sum_p, q[1]);
      sum_q = _mm_sub_epi16(sum_q, p[1]);
      flat2_p[5] = _mm_srli_epi16(
          _mm_add_epi16(
              sum_p,
              _mm_add_epi16(sum_p6,
                            _mm_add_epi16(p[5], _mm_add_epi16(p[6], p[4])))),
          4);
      flat2_q[5] = _mm_srli_epi16(
          _mm_add_epi16(
              sum_q,
              _mm_add_epi16(sum_q6,
                            _mm_add_epi16(q[5], _mm_add_epi16(q[4], q[6])))),
          4);
    }
    // highbd_filter8
    int i;
    for (i = 0; i < 2; i++) {
      ps[i] = _mm_andnot_si128(flat, ps[i]);
      flat_p[i] = _mm_and_si128(flat, flat_p[i]);
      p[i] = _mm_or_si128(ps[i], flat_p[i]);
      qs[i] = _mm_andnot_si128(flat, qs[i]);
      flat_q[i] = _mm_and_si128(flat, flat_q[i]);
      q[i] = _mm_or_si128(qs[i], flat_q[i]);
    }
    p[2] = _mm_andnot_si128(flat, p[2]);
    //  p2 remains unchanged if !(flat && mask)
    flat_p[2] = _mm_and_si128(flat, flat_p[2]);
    //  when (flat && mask)
    p[2] = _mm_or_si128(p[2], flat_p[2]);  // full list of p2 values
    q[2] = _mm_andnot_si128(flat, q[2]);
    flat_q[2] = _mm_and_si128(flat, flat_q[2]);
    q[2] = _mm_or_si128(q[2], flat_q[2]);  // full list of q2 values

    // highbd_filter16
    if (flat2_mask) {
      for (i = 0; i < 6; i++) {
        //  p[i] remains unchanged if !(flat2 && flat && mask)
        p[i] = _mm_andnot_si128(flat2, p[i]);
        flat2_p[i] = _mm_and_si128(flat2, flat2_p[i]);
        //  get values for when (flat2 && flat && mask)
        p[i] = _mm_or_si128(p[i], flat2_p[i]);  // full list of p values
        q[i] = _mm_andnot_si128(flat2, q[i]);
        flat2_q[i] = _mm_and_si128(flat2, flat2_q[i]);
        q[i] = _mm_or_si128(q[i], flat2_q[i]);
      }
    }
  } else {
    p[0] = ps[0];
    q[0] = qs[0];
    p[1] = ps[1];
    q[1] = qs[1];
  }
}

void aom_highbd_lpf_horizontal_14_dual_sse2(
    uint16_t *s, int pitch, const uint8_t *_blimit0, const uint8_t *_limit0,
    const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1,
    const uint8_t *_thresh1, int bd) {
  __m128i p[7], q[7];
  int i;
  load_highbd_pixel(s, 7, pitch, p, q);

  highbd_lpf_internal_14_dual_sse2(p, q, _blimit0, _limit0, _thresh0, _blimit1,
                                   _limit1, _thresh1, bd);

  for (i = 0; i < 6; i++) {
    _mm_storeu_si128((__m128i *)(s - (i + 1) * pitch), p[i]);
    _mm_storeu_si128((__m128i *)(s + i * pitch), q[i]);
  }
}

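// 6-tap loop filter for one 4-pixel edge: filter4 everywhere the mask is set,
// overwritten by the 5-tap 'flat' smoothing of p1..q1 where flat is set.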
static AOM_FORCE_INLINE void highbd_lpf_internal_6_sse2(
    __m128i *p2, __m128i *p1, __m128i *p0, __m128i *q0, __m128i *q1,
    __m128i *q2, __m128i *p1p0_out, __m128i *q1q0_out, const uint8_t *_blimit,
    const uint8_t *_limit, const uint8_t *_thresh, int bd) {
  __m128i blimit, limit, thresh;
  __m128i mask, hev, flat;
  __m128i pq[3];
  __m128i p1p0, q1q0, abs_p1p0, ps1ps0, qs1qs0;
  __m128i flat_p1p0, flat_q0q1;

  pq[0] = _mm_unpacklo_epi64(*p0, *q0);
  pq[1] = _mm_unpacklo_epi64(*p1, *q1);
  pq[2] = _mm_unpacklo_epi64(*p2, *q2);

  const __m128i zero = _mm_setzero_si128();
  const __m128i four = _mm_set1_epi16(4);
  __m128i t80;
  const __m128i one = _mm_set1_epi16(0x1);

  get_limit(_blimit, _limit, _thresh, bd, &blimit, &limit, &thresh, &t80);

  highbd_hev_filter_mask_x_sse2(pq, 3, &p1p0, &q1q0, &abs_p1p0, &limit, &blimit,
                                &thresh, &hev, &mask);

  // lp filter
  highbd_filter4_sse2(&p1p0, &q1q0, &hev, &mask, q1q0_out, p1p0_out, &t80, bd);

  // flat_mask
  flat = _mm_max_epi16(abs_diff16(pq[2], pq[0]), abs_p1p0);
  flat = _mm_max_epi16(flat, _mm_srli_si128(flat, 8));

  flat = _mm_subs_epu16(flat, _mm_slli_epi16(one, bd - 8));

  flat = _mm_cmpeq_epi16(flat, zero);
  flat = _mm_and_si128(flat, mask);
  // replicate for the further "merged variables" usage
  flat = _mm_unpacklo_epi64(flat, flat);

  // 5 tap filter
  // need it only if flat != 0
  if (0xffff != _mm_movemask_epi8(_mm_cmpeq_epi16(flat, zero))) {
    __m128i workp_a, workp_b, workp_c;
    __m128i pq0x2_pq1, pq1_pq2;

    // op1
    pq0x2_pq1 =
        _mm_add_epi16(_mm_add_epi16(pq[0], pq[0]), pq[1]);  // p0 * 2 + p1
    pq1_pq2 = _mm_add_epi16(pq[1], pq[2]);                  // p1 + p2
    workp_a = _mm_add_epi16(_mm_add_epi16(pq0x2_pq1, four),
                            pq1_pq2);  // p2 + p0 * 2 + p1 * 2 + 4

    workp_b = _mm_add_epi16(_mm_add_epi16(pq[2], pq[2]), *q0);
    workp_b =
        _mm_add_epi16(workp_a, workp_b);  // p2 * 3 + p1 * 2 + p0 * 2 + q0 + 4

    // op0
    workp_c = _mm_srli_si128(pq0x2_pq1, 8);  // q0 * 2 + q1
    workp_a = _mm_add_epi16(workp_a,
                            workp_c);  // p2 + p0 * 2 + p1 * 2 + q0 * 2 + q1 + 4
    workp_b = _mm_unpacklo_epi64(workp_a, workp_b);
    flat_p1p0 = _mm_srli_epi16(workp_b, 3);

    // oq0
    workp_a = _mm_sub_epi16(_mm_sub_epi16(workp_a, pq[2]),
                            pq[1]);  // p0 * 2 + p1 + q0 * 2 + q1 + 4
    workp_b = _mm_srli_si128(pq1_pq2, 8);
    workp_a = _mm_add_epi16(
        workp_a, workp_b);  // p0 * 2 + p1 + q0 * 2 + q1 * 2 + q2 + 4
    // workp_shft0 = _mm_srli_epi16(workp_a, 3);

    // oq1
    workp_c = _mm_sub_epi16(_mm_sub_epi16(workp_a, pq[1]),
                            pq[0]);  // p0 + q0 * 2 + q1 * 2 + q2 + 4
    workp_b = _mm_add_epi16(*q2, *q2);
    workp_b =
        _mm_add_epi16(workp_c, workp_b);  // p0 + q0 * 2 + q1 * 2 + q2 * 3 + 4

    workp_a = _mm_unpacklo_epi64(workp_a, workp_b);
    flat_q0q1 = _mm_srli_epi16(workp_a, 3);

    qs1qs0 = _mm_andnot_si128(flat, *q1q0_out);
    q1q0 = _mm_and_si128(flat, flat_q0q1);
    *q1q0_out = _mm_or_si128(qs1qs0, q1q0);

    ps1ps0 = _mm_andnot_si128(flat, *p1p0_out);
    p1p0 = _mm_and_si128(flat, flat_p1p0);
    *p1p0_out = _mm_or_si128(ps1ps0, p1p0);
  }
}

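// Dual-edge variant of highbd_lpf_internal_6_sse2(), processing 8 lanes.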
static AOM_FORCE_INLINE void highbd_lpf_internal_6_dual_sse2(
    __m128i *p2, __m128i *p1, __m128i *p0, __m128i *q0, __m128i *q1,
    __m128i *q2, const unsigned char *_blimit0, const unsigned char *_limit0,
    const unsigned char *_thresh0, const unsigned char *_blimit1,
    const unsigned char *_limit1, const unsigned char *_thresh1, int bd) {
  const __m128i zero = _mm_setzero_si128();
  __m128i blimit0, limit0, thresh0;
  __m128i t80;
  __m128i mask, flat, work;
  __m128i abs_p1q1, abs_p0q0, abs_p1p0, abs_p2p1, abs_q1q0, abs_q2q1;
  __m128i op1, op0, oq0, oq1;
  const __m128i four = _mm_set1_epi16(4);
  const __m128i one = _mm_set1_epi16(0x1);
  const __m128i ffff = _mm_cmpeq_epi16(one, one);

  get_limit_dual(_blimit0, _limit0, _thresh0, _blimit1, _limit1, _thresh1, bd,
                 &blimit0, &limit0, &thresh0, &t80);

  abs_p2p1 = abs_diff16(*p2, *p1);
  abs_p1p0 = abs_diff16(*p1, *p0);
  abs_q1q0 = abs_diff16(*q1, *q0);
  abs_q2q1 = abs_diff16(*q2, *q1);

  abs_p0q0 = abs_diff16(*p0, *q0);
  abs_p1q1 = abs_diff16(*p1, *q1);

  abs_p0q0 = _mm_adds_epu16(abs_p0q0, abs_p0q0);
  abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1);
  mask = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), blimit0);
  mask = _mm_xor_si128(_mm_cmpeq_epi16(mask, zero), ffff);
  // mask |= (abs(*p0 - *q0) * 2 + abs(*p1 - *q1) / 2 > blimit) * -1;
  // So taking maximums continues to work:
  mask = _mm_and_si128(mask, _mm_adds_epu16(limit0, one));

  mask = _mm_max_epi16(abs_q2q1, mask);
  work = _mm_max_epi16(abs_p1p0, abs_q1q0);
  mask = _mm_max_epi16(work, mask);
  mask = _mm_max_epi16(mask, abs_p2p1);
  mask = _mm_subs_epu16(mask, limit0);
  mask = _mm_cmpeq_epi16(mask, zero);

  // lp filter
  __m128i ps[2], qs[2], p[2], q[2];
  {
    p[0] = *p0;
    p[1] = *p1;
    q[0] = *q0;
    q[1] = *q1;
    // filter_mask and hev_mask
    highbd_filter4_dual_sse2(p, q, ps, qs, &mask, &thresh0, bd, &t80);
  }

  // flat_mask
  flat = _mm_max_epi16(abs_diff16(*q2, *q0), abs_diff16(*p2, *p0));
  flat = _mm_max_epi16(flat, work);

  flat = _mm_subs_epu16(flat, _mm_slli_epi16(one, bd - 8));

  flat = _mm_cmpeq_epi16(flat, zero);
  flat = _mm_and_si128(flat, mask);  // flat & mask

  // 5 tap filter
  // need it only if flat != 0
  if (0xffff != _mm_movemask_epi8(_mm_cmpeq_epi16(flat, zero))) {
    __m128i workp_a, workp_b, workp_shft0, workp_shft1;

    // op1
    workp_a = _mm_add_epi16(_mm_add_epi16(*p0, *p0),
                            _mm_add_epi16(*p1, *p1));  // *p0 * 2 + *p1 * 2
    workp_a = _mm_add_epi16(_mm_add_epi16(workp_a, four),
                            *p2);  // *p2 + *p0 * 2 + *p1 * 2 + 4

    workp_b = _mm_add_epi16(_mm_add_epi16(*p2, *p2), *q0);
    workp_shft0 = _mm_add_epi16(
        workp_a, workp_b);  // *p2 * 3 + *p1 * 2 + *p0 * 2 + *q0 + 4
    op1 = _mm_srli_epi16(workp_shft0, 3);

    // op0
    workp_b = _mm_add_epi16(_mm_add_epi16(*q0, *q0), *q1);  // *q0 * 2 + *q1
    workp_a =
        _mm_add_epi16(workp_a,
                      workp_b);  // *p2 + *p0 * 2 + *p1 * 2 + *q0 * 2 + *q1 + 4
    op0 = _mm_srli_epi16(workp_a, 3);

    // oq0
    workp_a = _mm_sub_epi16(_mm_sub_epi16(workp_a, *p2),
                            *p1);  // *p0 * 2 + *p1 + *q0 * 2 + *q1 + 4
    workp_b = _mm_add_epi16(*q1, *q2);
    workp_shft0 = _mm_add_epi16(
        workp_a, workp_b);  // *p0 * 2 + *p1 + *q0 * 2 + *q1 * 2 + *q2 + 4
    oq0 = _mm_srli_epi16(workp_shft0, 3);

    // oq1
    workp_a = _mm_sub_epi16(_mm_sub_epi16(workp_shft0, *p1),
                            *p0);  // *p0 + *q0 * 2 + *q1 * 2 + *q2 + 4
    workp_b = _mm_add_epi16(*q2, *q2);
    workp_shft1 = _mm_add_epi16(
        workp_a, workp_b);  // *p0 + *q0 * 2 + *q1 * 2 + *q2 * 3 + 4
    oq1 = _mm_srli_epi16(workp_shft1, 3);

    qs[0] = _mm_andnot_si128(flat, qs[0]);
    oq0 = _mm_and_si128(flat, oq0);
    *q0 = _mm_or_si128(qs[0], oq0);

    qs[1] = _mm_andnot_si128(flat, qs[1]);
    oq1 = _mm_and_si128(flat, oq1);
    *q1 = _mm_or_si128(qs[1], oq1);

    ps[0] = _mm_andnot_si128(flat, ps[0]);
    op0 = _mm_and_si128(flat, op0);
    *p0 = _mm_or_si128(ps[0], op0);

    ps[1] = _mm_andnot_si128(flat, ps[1]);
    op1 = _mm_and_si128(flat, op1);
    *p1 = _mm_or_si128(ps[1], op1);
  } else {
    *q0 = qs[0];
    *q1 = qs[1];
    *p0 = ps[0];
    *p1 = ps[1];
  }
}

void aom_highbd_lpf_horizontal_6_sse2(uint16_t *s, int p,
                                      const uint8_t *_blimit,
                                      const uint8_t *_limit,
                                      const uint8_t *_thresh, int bd) {
  __m128i p2, p1, p0, q0, q1, q2, p1p0_out, q1q0_out;

  p2 = _mm_loadl_epi64((__m128i *)(s - 3 * p));
  p1 = _mm_loadl_epi64((__m128i *)(s - 2 * p));
  p0 = _mm_loadl_epi64((__m128i *)(s - 1 * p));
  q0 = _mm_loadl_epi64((__m128i *)(s + 0 * p));
  q1 = _mm_loadl_epi64((__m128i *)(s + 1 * p));
  q2 = _mm_loadl_epi64((__m128i *)(s + 2 * p));

  highbd_lpf_internal_6_sse2(&p2, &p1, &p0, &q0, &q1, &q2, &p1p0_out, &q1q0_out,
                             _blimit, _limit, _thresh, bd);

  _mm_storel_epi64((__m128i *)(s - 2 * p), _mm_srli_si128(p1p0_out, 8));
  _mm_storel_epi64((__m128i *)(s - 1 * p), p1p0_out);
  _mm_storel_epi64((__m128i *)(s + 0 * p), q1q0_out);
  _mm_storel_epi64((__m128i *)(s + 1 * p), _mm_srli_si128(q1q0_out, 8));
}

void aom_highbd_lpf_horizontal_6_dual_sse2(
    uint16_t *s, int p, const uint8_t *_blimit0, const uint8_t *_limit0,
    const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1,
    const uint8_t *_thresh1, int bd) {
  __m128i p2, p1, p0, q0, q1, q2;

  p2 = _mm_loadu_si128((__m128i *)(s - 3 * p));
  p1 = _mm_loadu_si128((__m128i *)(s - 2 * p));
  p0 = _mm_loadu_si128((__m128i *)(s - 1 * p));
  q0 = _mm_loadu_si128((__m128i *)(s + 0 * p));
  q1 = _mm_loadu_si128((__m128i *)(s + 1 * p));
  q2 = _mm_loadu_si128((__m128i *)(s + 2 * p));

  highbd_lpf_internal_6_dual_sse2(&p2, &p1, &p0, &q0, &q1, &q2, _blimit0,
                                  _limit0, _thresh0, _blimit1, _limit1,
                                  _thresh1, bd);

  _mm_storeu_si128((__m128i *)(s - 2 * p), p1);
  _mm_storeu_si128((__m128i *)(s - 1 * p), p0);
  _mm_storeu_si128((__m128i *)(s + 0 * p), q0);
  _mm_storeu_si128((__m128i *)(s + 1 * p), q1);
}

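// 8-tap loop filter for one 4-pixel edge: filter4 plus, where 'flat' is set,
// smoothing of p2..q2 with 8-sample weighted averages over p3..q3.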
static AOM_FORCE_INLINE void highbd_lpf_internal_8_sse2(
    __m128i *p3, __m128i *q3, __m128i *p2, __m128i *q2, __m128i *p1,
    __m128i *q1, __m128i *p0, __m128i *q0, __m128i *q1q0_out, __m128i *p1p0_out,
    const unsigned char *_blimit, const unsigned char *_limit,
    const unsigned char *_thresh, int bd) {
  const __m128i zero = _mm_setzero_si128();
  __m128i blimit, limit, thresh;
  __m128i mask, hev, flat;
  __m128i pq[4];
  __m128i p1p0, q1q0, ps1ps0, qs1qs0;
  __m128i work_a, opq2, flat_p1p0, flat_q0q1;

  pq[0] = _mm_unpacklo_epi64(*p0, *q0);
  pq[1] = _mm_unpacklo_epi64(*p1, *q1);
  pq[2] = _mm_unpacklo_epi64(*p2, *q2);
  pq[3] = _mm_unpacklo_epi64(*p3, *q3);

  __m128i abs_p1p0;

  const __m128i four = _mm_set1_epi16(4);
  __m128i t80;
  const __m128i one = _mm_set1_epi16(0x1);

  get_limit(_blimit, _limit, _thresh, bd, &blimit, &limit, &thresh, &t80);

  highbd_hev_filter_mask_x_sse2(pq, 4, &p1p0, &q1q0, &abs_p1p0, &limit, &blimit,
                                &thresh, &hev, &mask);

  // lp filter
  highbd_filter4_sse2(&p1p0, &q1q0, &hev, &mask, q1q0_out, p1p0_out, &t80, bd);

  // flat_mask4
  flat = _mm_max_epi16(abs_diff16(pq[2], pq[0]), abs_diff16(pq[3], pq[0]));
  flat = _mm_max_epi16(abs_p1p0, flat);
  flat = _mm_max_epi16(flat, _mm_srli_si128(flat, 8));

  flat = _mm_subs_epu16(flat, _mm_slli_epi16(one, bd - 8));

  flat = _mm_cmpeq_epi16(flat, zero);
  flat = _mm_and_si128(flat, mask);
  // replicate for the further "merged variables" usage
  flat = _mm_unpacklo_epi64(flat, flat);

  if (0xffff != _mm_movemask_epi8(_mm_cmpeq_epi16(flat, zero))) {
    __m128i workp_a, workp_b, workp_c, workp_shft0, workp_shft1;
    // Added before shift for rounding part of ROUND_POWER_OF_TWO

    // o*p2
    workp_a = _mm_add_epi16(_mm_add_epi16(*p3, *p3), _mm_add_epi16(*p2, *p1));
    workp_a = _mm_add_epi16(_mm_add_epi16(workp_a, four), *p0);
    workp_c = _mm_add_epi16(_mm_add_epi16(*q0, *p2), *p3);
    workp_c = _mm_add_epi16(workp_a, workp_c);

    // o*p1
    workp_b = _mm_add_epi16(_mm_add_epi16(*q0, *q1), *p1);
    workp_shft0 = _mm_add_epi16(workp_a, workp_b);

    // o*p0
    workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, *p3), *q2);
    workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, *p1), *p0);
    workp_shft1 = _mm_add_epi16(workp_a, workp_b);

    flat_p1p0 = _mm_srli_epi16(_mm_unpacklo_epi64(workp_shft1, workp_shft0), 3);

    // oq0
    workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, *p3), *q3);
    workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, *p0), *q0);
    workp_shft0 = _mm_add_epi16(workp_a, workp_b);

    // oq1
    workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, *p2), *q3);
    workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, *q0), *q1);
    workp_shft1 = _mm_add_epi16(workp_a, workp_b);

    flat_q0q1 = _mm_srli_epi16(_mm_unpacklo_epi64(workp_shft0, workp_shft1), 3);

    // oq2
    workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, *p1), *q3);
    workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, *q1), *q2);
    workp_a = _mm_add_epi16(workp_a, workp_b);
    opq2 = _mm_srli_epi16(_mm_unpacklo_epi64(workp_c, workp_a), 3);

    qs1qs0 = _mm_andnot_si128(flat, *q1q0_out);
    q1q0 = _mm_and_si128(flat, flat_q0q1);
    *q1q0_out = _mm_or_si128(qs1qs0, q1q0);

    ps1ps0 = _mm_andnot_si128(flat, *p1p0_out);
    p1p0 = _mm_and_si128(flat, flat_p1p0);
    *p1p0_out = _mm_or_si128(ps1ps0, p1p0);

    work_a = _mm_andnot_si128(flat, pq[2]);
    *p2 = _mm_and_si128(flat, opq2);
    *p2 = _mm_or_si128(work_a, *p2);
    *q2 = _mm_srli_si128(*p2, 8);
  }
}

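// Dual-edge variant of highbd_lpf_internal_8_sse2(), processing 8 lanes.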
static AOM_FORCE_INLINE void highbd_lpf_internal_8_dual_sse2(
    __m128i *p3, __m128i *q3, __m128i *p2, __m128i *q2, __m128i *p1,
    __m128i *q1, __m128i *p0, __m128i *q0, const unsigned char *_blimit0,
    const unsigned char *_limit0, const unsigned char *_thresh0,
    const unsigned char *_blimit1, const unsigned char *_limit1,
    const unsigned char *_thresh1, int bd) {
  __m128i blimit0, limit0, thresh0;
  __m128i t80;
  __m128i mask, flat;
  __m128i work_a, op2, oq2, op1, op0, oq0, oq1;
  __m128i abs_p1q1, abs_p0q0, work0, work1, work2;

  const __m128i zero = _mm_setzero_si128();
  const __m128i four = _mm_set1_epi16(4);
  const __m128i one = _mm_set1_epi16(0x1);
  const __m128i ffff = _mm_cmpeq_epi16(one, one);

  get_limit_dual(_blimit0, _limit0, _thresh0, _blimit1, _limit1, _thresh1, bd,
                 &blimit0, &limit0, &thresh0, &t80);

  abs_p0q0 = abs_diff16(*p0, *q0);
  abs_p1q1 = abs_diff16(*p1, *q1);

  abs_p0q0 = _mm_adds_epu16(abs_p0q0, abs_p0q0);
  abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1);
  mask = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), blimit0);
  mask = _mm_xor_si128(_mm_cmpeq_epi16(mask, zero), ffff);
  // mask |= (abs(*p0 - *q0) * 2 + abs(*p1 - *q1) / 2 > blimit) * -1;

  // So taking maximums continues to work:
  mask = _mm_and_si128(mask, _mm_adds_epu16(limit0, one));

  work0 = _mm_max_epi16(abs_diff16(*p3, *p2), abs_diff16(*p2, *p1));
  // work1 is reused below for the flat mask
  work1 = _mm_max_epi16(abs_diff16(*p1, *p0), abs_diff16(*q1, *q0));
  work0 = _mm_max_epi16(work0, work1);
  work2 = _mm_max_epi16(abs_diff16(*q2, *q1), abs_diff16(*q2, *q3));
  work2 = _mm_max_epi16(work2, work0);
  mask = _mm_max_epi16(work2, mask);

  mask = _mm_subs_epu16(mask, limit0);
  mask = _mm_cmpeq_epi16(mask, zero);

  // lp filter
  __m128i ps[2], qs[2], p[2], q[2];
  {
    p[0] = *p0;
    p[1] = *p1;
    q[0] = *q0;
    q[1] = *q1;
    // filter_mask and hev_mask
    highbd_filter4_dual_sse2(p, q, ps, qs, &mask, &thresh0, bd, &t80);
  }

  flat = _mm_max_epi16(abs_diff16(*p2, *p0), abs_diff16(*q2, *q0));
  flat = _mm_max_epi16(work1, flat);
  work0 = _mm_max_epi16(abs_diff16(*p3, *p0), abs_diff16(*q3, *q0));
  flat = _mm_max_epi16(work0, flat);

  flat = _mm_subs_epu16(flat, _mm_slli_epi16(one, bd - 8));
  flat = _mm_cmpeq_epi16(flat, zero);
  flat = _mm_and_si128(flat, mask);  // flat & mask

  // filter8; need it only if flat != 0
  if (0xffff != _mm_movemask_epi8(_mm_cmpeq_epi16(flat, zero))) {
    __m128i workp_a, workp_b;
    // Added before shift for rounding part of ROUND_POWER_OF_TWO

    // o*p2
    workp_a = _mm_add_epi16(_mm_add_epi16(*p3, *p3), _mm_add_epi16(*p2, *p1));
    workp_a = _mm_add_epi16(_mm_add_epi16(workp_a, four), *p0);
    workp_b = _mm_add_epi16(_mm_add_epi16(*q0, *p2), *p3);
    op2 = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);

    // o*p1
    workp_b = _mm_add_epi16(_mm_add_epi16(*q0, *q1), *p1);
    op1 = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);

    // o*p0
    workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, *p3), *q2);
    workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, *p1), *p0);
    op0 = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);

    // oq0
    workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, *p3), *q3);
    workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, *p0), *q0);
    oq0 = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);

    // oq1
    workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, *p2), *q3);
    workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, *q0), *q1);
    oq1 = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);

    // oq2
    workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, *p1), *q3);
    workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, *q1), *q2);
    oq2 = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);

    qs[0] = _mm_andnot_si128(flat, qs[0]);
    oq0 = _mm_and_si128(flat, oq0);
    *q0 = _mm_or_si128(qs[0], oq0);

    qs[1] = _mm_andnot_si128(flat, qs[1]);
    oq1 = _mm_and_si128(flat, oq1);
    *q1 = _mm_or_si128(qs[1], oq1);

    ps[0] = _mm_andnot_si128(flat, ps[0]);
    op0 = _mm_and_si128(flat, op0);
    *p0 = _mm_or_si128(ps[0], op0);

    ps[1] = _mm_andnot_si128(flat, ps[1]);
    op1 = _mm_and_si128(flat, op1);
    *p1 = _mm_or_si128(ps[1], op1);

    work_a = _mm_andnot_si128(flat, *q2);
    *q2 = _mm_and_si128(flat, oq2);
    *q2 = _mm_or_si128(work_a, *q2);

    work_a = _mm_andnot_si128(flat, *p2);
    *p2 = _mm_and_si128(flat, op2);
    *p2 = _mm_or_si128(work_a, *p2);
  } else {
    *q0 = qs[0];
    *q1 = qs[1];
    *p0 = ps[0];
    *p1 = ps[1];
  }
}

void aom_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p,
                                      const uint8_t *_blimit,
                                      const uint8_t *_limit,
                                      const uint8_t *_thresh, int bd) {
  __m128i p2, p1, p0, q0, q1, q2, p3, q3;
  __m128i q1q0, p1p0;

  p3 = _mm_loadl_epi64((__m128i *)(s - 4 * p));
  q3 = _mm_loadl_epi64((__m128i *)(s + 3 * p));
  p2 = _mm_loadl_epi64((__m128i *)(s - 3 * p));
  q2 = _mm_loadl_epi64((__m128i *)(s + 2 * p));
  p1 = _mm_loadl_epi64((__m128i *)(s - 2 * p));
  q1 = _mm_loadl_epi64((__m128i *)(s + 1 * p));
  p0 = _mm_loadl_epi64((__m128i *)(s - 1 * p));
  q0 = _mm_loadl_epi64((__m128i *)(s + 0 * p));

  highbd_lpf_internal_8_sse2(&p3, &q3, &p2, &q2, &p1, &q1, &p0, &q0, &q1q0,
                             &p1p0, _blimit, _limit, _thresh, bd);

  _mm_storel_epi64((__m128i *)(s - 3 * p), p2);
  _mm_storel_epi64((__m128i *)(s - 2 * p), _mm_srli_si128(p1p0, 8));
  _mm_storel_epi64((__m128i *)(s - 1 * p), p1p0);
  _mm_storel_epi64((__m128i *)(s + 0 * p), q1q0);
  _mm_storel_epi64((__m128i *)(s + 1 * p), _mm_srli_si128(q1q0, 8));
  _mm_storel_epi64((__m128i *)(s + 2 * p), q2);
}

void aom_highbd_lpf_horizontal_8_dual_sse2(
    uint16_t *s, int p, const uint8_t *_blimit0, const uint8_t *_limit0,
    const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1,
    const uint8_t *_thresh1, int bd) {
  __m128i p2, p1, p0, q0, q1, q2, p3, q3;

  p3 = _mm_loadu_si128((__m128i *)(s - 4 * p));
  q3 = _mm_loadu_si128((__m128i *)(s + 3 * p));
  p2 = _mm_loadu_si128((__m128i *)(s - 3 * p));
  q2 = _mm_loadu_si128((__m128i *)(s + 2 * p));
  p1 = _mm_loadu_si128((__m128i *)(s - 2 * p));
  q1 = _mm_loadu_si128((__m128i *)(s + 1 * p));
  p0 = _mm_loadu_si128((__m128i *)(s - 1 * p));
  q0 = _mm_loadu_si128((__m128i *)(s + 0 * p));

  highbd_lpf_internal_8_dual_sse2(&p3, &q3, &p2, &q2, &p1, &q1, &p0, &q0,
                                  _blimit0, _limit0, _thresh0, _blimit1,
                                  _limit1, _thresh1, bd);

  _mm_storeu_si128((__m128i *)(s - 3 * p), p2);
  _mm_storeu_si128((__m128i *)(s - 2 * p), p1);
  _mm_storeu_si128((__m128i *)(s - 1 * p), p0);
  _mm_storeu_si128((__m128i *)(s + 0 * p), q0);
  _mm_storeu_si128((__m128i *)(s + 1 * p), q1);
  _mm_storeu_si128((__m128i *)(s + 2 * p), q2);
}

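// 4-tap loop filter for one 4-pixel edge: just the mask/hev computation
// followed by filter4.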
static AOM_FORCE_INLINE void highbd_lpf_internal_4_sse2(
    __m128i *p1, __m128i *p0, __m128i *q0, __m128i *q1, __m128i *q1q0_out,
    __m128i *p1p0_out, const uint8_t *_blimit, const uint8_t *_limit,
    const uint8_t *_thresh, int bd) {
  __m128i blimit, limit, thresh;
  __m128i mask, hev;
  __m128i p1p0, q1q0;
  __m128i pq[2];

  __m128i abs_p1p0;

  __m128i t80;
  get_limit(_blimit, _limit, _thresh, bd, &blimit, &limit, &thresh, &t80);

  pq[0] = _mm_unpacklo_epi64(*p0, *q0);
  pq[1] = _mm_unpacklo_epi64(*p1, *q1);

  highbd_hev_filter_mask_x_sse2(pq, 2, &p1p0, &q1q0, &abs_p1p0, &limit, &blimit,
                                &thresh, &hev, &mask);

  highbd_filter4_sse2(&p1p0, &q1q0, &hev, &mask, q1q0_out, p1p0_out, &t80, bd);
}

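// Eight-column variant of the 4-tap filter core: builds the combined filter
// mask from the pixel differences and both sets of limits, then applies the
// dual filter4 kernel.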
static AOM_FORCE_INLINE void highbd_lpf_internal_4_dual_sse2(
    __m128i *p1, __m128i *p0, __m128i *q0, __m128i *q1, __m128i *ps,
    __m128i *qs, const uint8_t *_blimit0, const uint8_t *_limit0,
    const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1,
    const uint8_t *_thresh1, int bd) {
  __m128i blimit0, limit0, thresh0;
  __m128i mask, flat;
  __m128i p[2], q[2];

  const __m128i zero = _mm_setzero_si128();
  __m128i abs_p0q0 = abs_diff16(*q0, *p0);
  __m128i abs_p1q1 = abs_diff16(*q1, *p1);

  __m128i abs_p1p0 = abs_diff16(*p1, *p0);
  __m128i abs_q1q0 = abs_diff16(*q1, *q0);

  const __m128i ffff = _mm_cmpeq_epi16(abs_p1p0, abs_p1p0);
  const __m128i one = _mm_set1_epi16(1);

  __m128i t80;

  get_limit_dual(_blimit0, _limit0, _thresh0, _blimit1, _limit1, _thresh1, bd,
                 &blimit0, &limit0, &thresh0, &t80);

  // filter_mask and hev_mask
  flat = _mm_max_epi16(abs_p1p0, abs_q1q0);

  abs_p0q0 = _mm_adds_epu16(abs_p0q0, abs_p0q0);
  abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1);

  mask = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), blimit0);
  mask = _mm_xor_si128(_mm_cmpeq_epi16(mask, zero), ffff);
  // mask |= (abs(*p0 - *q0) * 2 + abs(*p1 - *q1) / 2  > blimit) * -1;
  // So taking maximums continues to work:
  mask = _mm_and_si128(mask, _mm_adds_epu16(limit0, one));
  mask = _mm_max_epi16(flat, mask);

  mask = _mm_subs_epu16(mask, limit0);
  mask = _mm_cmpeq_epi16(mask, zero);

  p[0] = *p0;
  p[1] = *p1;
  q[0] = *q0;
  q[1] = *q1;

  highbd_filter4_dual_sse2(p, q, ps, qs, &mask, &thresh0, bd, &t80);
}

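// Filters a horizontal edge with the 4-tap filter: two rows on each side of
// the edge, four columns at a time.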
void aom_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p,
                                      const uint8_t *_blimit,
                                      const uint8_t *_limit,
                                      const uint8_t *_thresh, int bd) {
  __m128i p1p0, q1q0;
  __m128i p1 = _mm_loadl_epi64((__m128i *)(s - 2 * p));
  __m128i p0 = _mm_loadl_epi64((__m128i *)(s - 1 * p));
  __m128i q0 = _mm_loadl_epi64((__m128i *)(s - 0 * p));
  __m128i q1 = _mm_loadl_epi64((__m128i *)(s + 1 * p));

  highbd_lpf_internal_4_sse2(&p1, &p0, &q0, &q1, &q1q0, &p1p0, _blimit, _limit,
                             _thresh, bd);

  _mm_storel_epi64((__m128i *)(s - 2 * p), _mm_srli_si128(p1p0, 8));
  _mm_storel_epi64((__m128i *)(s - 1 * p), p1p0);
  _mm_storel_epi64((__m128i *)(s + 0 * p), q1q0);
  _mm_storel_epi64((__m128i *)(s + 1 * p), _mm_srli_si128(q1q0, 8));
}

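// Dual variant of the horizontal 4-tap filter: eight columns, with an
// independent parameter set for each group of four.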
void aom_highbd_lpf_horizontal_4_dual_sse2(
    uint16_t *s, int p, const uint8_t *_blimit0, const uint8_t *_limit0,
    const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1,
    const uint8_t *_thresh1, int bd) {
  __m128i p1 = _mm_loadu_si128((__m128i *)(s - 2 * p));
  __m128i p0 = _mm_loadu_si128((__m128i *)(s - 1 * p));
  __m128i q0 = _mm_loadu_si128((__m128i *)(s - 0 * p));
  __m128i q1 = _mm_loadu_si128((__m128i *)(s + 1 * p));
  __m128i ps[2], qs[2];

  highbd_lpf_internal_4_dual_sse2(&p1, &p0, &q0, &q1, ps, qs, _blimit0, _limit0,
                                  _thresh0, _blimit1, _limit1, _thresh1, bd);

  _mm_storeu_si128((__m128i *)(s - 2 * p), ps[1]);
  _mm_storeu_si128((__m128i *)(s - 1 * p), ps[0]);
  _mm_storeu_si128((__m128i *)(s + 0 * p), qs[0]);
  _mm_storeu_si128((__m128i *)(s + 1 * p), qs[1]);
}

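// Vertical edges reuse the horizontal filter cores: a small block around the
// edge is transposed so that pixel columns become SIMD rows, filtered, and
// transposed back before being stored.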
void aom_highbd_lpf_vertical_4_sse2(uint16_t *s, int p, const uint8_t *blimit,
                                    const uint8_t *limit, const uint8_t *thresh,
                                    int bd) {
  __m128i x0, x1, x2, x3, d0, d1, d2, d3;
  __m128i p1p0, q1q0;
  __m128i p1, q1;

  x0 = _mm_loadl_epi64((__m128i *)(s - 2 + 0 * p));
  x1 = _mm_loadl_epi64((__m128i *)(s - 2 + 1 * p));
  x2 = _mm_loadl_epi64((__m128i *)(s - 2 + 2 * p));
  x3 = _mm_loadl_epi64((__m128i *)(s - 2 + 3 * p));

  highbd_transpose4x8_8x4_low_sse2(&x0, &x1, &x2, &x3, &d0, &d1, &d2, &d3);

  highbd_lpf_internal_4_sse2(&d0, &d1, &d2, &d3, &q1q0, &p1p0, blimit, limit,
                             thresh, bd);

  p1 = _mm_srli_si128(p1p0, 8);
  q1 = _mm_srli_si128(q1q0, 8);

  // transpose from 8x4 to 4x8
  highbd_transpose4x8_8x4_low_sse2(&p1, &p1p0, &q1q0, &q1, &d0, &d1, &d2, &d3);

  _mm_storel_epi64((__m128i *)(s - 2 + 0 * p), d0);
  _mm_storel_epi64((__m128i *)(s - 2 + 1 * p), d1);
  _mm_storel_epi64((__m128i *)(s - 2 + 2 * p), d2);
  _mm_storel_epi64((__m128i *)(s - 2 + 3 * p), d3);
}

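// Dual vertical 4-tap filter: transposes eight rows of the 4-wide edge
// neighborhood, filters all eight lanes at once, and transposes back for
// the stores.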
void aom_highbd_lpf_vertical_4_dual_sse2(
    uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0,
    const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
    const uint8_t *thresh1, int bd) {
  __m128i x0, x1, x2, x3, x4, x5, x6, x7;
  __m128i d0, d1, d2, d3, d4, d5, d6, d7;
  __m128i ps[2], qs[2];

  x0 = _mm_loadl_epi64((__m128i *)(s - 2 + 0 * p));
  x1 = _mm_loadl_epi64((__m128i *)(s - 2 + 1 * p));
  x2 = _mm_loadl_epi64((__m128i *)(s - 2 + 2 * p));
  x3 = _mm_loadl_epi64((__m128i *)(s - 2 + 3 * p));
  x4 = _mm_loadl_epi64((__m128i *)(s - 2 + 4 * p));
  x5 = _mm_loadl_epi64((__m128i *)(s - 2 + 5 * p));
  x6 = _mm_loadl_epi64((__m128i *)(s - 2 + 6 * p));
  x7 = _mm_loadl_epi64((__m128i *)(s - 2 + 7 * p));

  highbd_transpose8x8_low_sse2(&x0, &x1, &x2, &x3, &x4, &x5, &x6, &x7, &d0, &d1,
                               &d2, &d3);

  highbd_lpf_internal_4_dual_sse2(&d0, &d1, &d2, &d3, ps, qs, blimit0, limit0,
                                  thresh0, blimit1, limit1, thresh1, bd);

  highbd_transpose4x8_8x4_sse2(&ps[1], &ps[0], &qs[0], &qs[1], &d0, &d1, &d2,
                               &d3, &d4, &d5, &d6, &d7);

  _mm_storel_epi64((__m128i *)(s - 2 + 0 * p), d0);
  _mm_storel_epi64((__m128i *)(s - 2 + 1 * p), d1);
  _mm_storel_epi64((__m128i *)(s - 2 + 2 * p), d2);
  _mm_storel_epi64((__m128i *)(s - 2 + 3 * p), d3);
  _mm_storel_epi64((__m128i *)(s - 2 + 4 * p), d4);
  _mm_storel_epi64((__m128i *)(s - 2 + 5 * p), d5);
  _mm_storel_epi64((__m128i *)(s - 2 + 6 * p), d6);
  _mm_storel_epi64((__m128i *)(s - 2 + 7 * p), d7);
}

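// Vertical 6-tap filter: loads four rows around the edge, transposes so
// p2..q2 line up as registers, filters, and writes back the two modified
// pixels on each side of the edge.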
void aom_highbd_lpf_vertical_6_sse2(uint16_t *s, int p, const uint8_t *blimit,
                                    const uint8_t *limit, const uint8_t *thresh,
                                    int bd) {
  __m128i d0, d1, d2, d3, d4, d5, d6, d7;
  __m128i x3, x2, x1, x0, p0, q0;
  __m128i p1p0, q1q0;

  x3 = _mm_loadu_si128((__m128i *)((s - 3) + 0 * p));
  x2 = _mm_loadu_si128((__m128i *)((s - 3) + 1 * p));
  x1 = _mm_loadu_si128((__m128i *)((s - 3) + 2 * p));
  x0 = _mm_loadu_si128((__m128i *)((s - 3) + 3 * p));

  highbd_transpose4x8_8x4_sse2(&x3, &x2, &x1, &x0, &d0, &d1, &d2, &d3, &d4, &d5,
                               &d6, &d7);

  highbd_lpf_internal_6_sse2(&d0, &d1, &d2, &d3, &d4, &d5, &p1p0, &q1q0, blimit,
                             limit, thresh, bd);

  p0 = _mm_srli_si128(p1p0, 8);
  q0 = _mm_srli_si128(q1q0, 8);

  highbd_transpose4x8_8x4_low_sse2(&p0, &p1p0, &q1q0, &q0, &d0, &d1, &d2, &d3);

  _mm_storel_epi64((__m128i *)(s - 2 + 0 * p), d0);
  _mm_storel_epi64((__m128i *)(s - 2 + 1 * p), d1);
  _mm_storel_epi64((__m128i *)(s - 2 + 2 * p), d2);
  _mm_storel_epi64((__m128i *)(s - 2 + 3 * p), d3);
}

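// Dual vertical 6-tap filter: eight rows via a full 8x8 transpose, with
// independent parameters for each group of four rows.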
void aom_highbd_lpf_vertical_6_dual_sse2(
    uint16_t *s, int p, const uint8_t *_blimit0, const uint8_t *_limit0,
    const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1,
    const uint8_t *_thresh1, int bd) {
  __m128i d0, d1, d2, d3, d4, d5, d6, d7;
  __m128i x0, x1, x2, x3, x4, x5, x6, x7;
  __m128i p0, q0, p1, q1, p2, q2;

  x0 = _mm_loadu_si128((__m128i *)((s - 3) + 0 * p));
  x1 = _mm_loadu_si128((__m128i *)((s - 3) + 1 * p));
  x2 = _mm_loadu_si128((__m128i *)((s - 3) + 2 * p));
  x3 = _mm_loadu_si128((__m128i *)((s - 3) + 3 * p));
  x4 = _mm_loadu_si128((__m128i *)((s - 3) + 4 * p));
  x5 = _mm_loadu_si128((__m128i *)((s - 3) + 5 * p));
  x6 = _mm_loadu_si128((__m128i *)((s - 3) + 6 * p));
  x7 = _mm_loadu_si128((__m128i *)((s - 3) + 7 * p));

  highbd_transpose8x8_sse2(&x0, &x1, &x2, &x3, &x4, &x5, &x6, &x7, &p2, &p1,
                           &p0, &q0, &q1, &q2, &d6, &d7);

  highbd_lpf_internal_6_dual_sse2(&p2, &p1, &p0, &q0, &q1, &q2, _blimit0,
                                  _limit0, _thresh0, _blimit1, _limit1,
                                  _thresh1, bd);

  highbd_transpose4x8_8x4_sse2(&p1, &p0, &q0, &q1, &d0, &d1, &d2, &d3, &d4, &d5,
                               &d6, &d7);

  _mm_storel_epi64((__m128i *)(s - 2 + 0 * p), d0);
  _mm_storel_epi64((__m128i *)(s - 2 + 1 * p), d1);
  _mm_storel_epi64((__m128i *)(s - 2 + 2 * p), d2);
  _mm_storel_epi64((__m128i *)(s - 2 + 3 * p), d3);
  _mm_storel_epi64((__m128i *)(s - 2 + 4 * p), d4);
  _mm_storel_epi64((__m128i *)(s - 2 + 5 * p), d5);
  _mm_storel_epi64((__m128i *)(s - 2 + 6 * p), d6);
  _mm_storel_epi64((__m128i *)(s - 2 + 7 * p), d7);
}

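// Vertical 8-tap filter: transposes a 4x8 block spanning p3..q3, filters,
// and transposes the four rows back before storing.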
void aom_highbd_lpf_vertical_8_sse2(uint16_t *s, int p, const uint8_t *blimit,
                                    const uint8_t *limit, const uint8_t *thresh,
                                    int bd) {
  __m128i d0, d1, d2, d3, d4, d5, d6, d7;
  __m128i p2, p1, p0, p3, q0;
  __m128i q1q0, p1p0;

  p3 = _mm_loadu_si128((__m128i *)((s - 4) + 0 * p));
  p2 = _mm_loadu_si128((__m128i *)((s - 4) + 1 * p));
  p1 = _mm_loadu_si128((__m128i *)((s - 4) + 2 * p));
  p0 = _mm_loadu_si128((__m128i *)((s - 4) + 3 * p));

  highbd_transpose4x8_8x4_sse2(&p3, &p2, &p1, &p0, &d0, &d1, &d2, &d3, &d4, &d5,
                               &d6, &d7);

  // Loop filtering
  highbd_lpf_internal_8_sse2(&d0, &d7, &d1, &d6, &d2, &d5, &d3, &d4, &q1q0,
                             &p1p0, blimit, limit, thresh, bd);

  p0 = _mm_srli_si128(p1p0, 8);
  q0 = _mm_srli_si128(q1q0, 8);

  highbd_transpose8x8_low_sse2(&d0, &d1, &p0, &p1p0, &q1q0, &q0, &d6, &d7, &d0,
                               &d1, &d2, &d3);

  _mm_storeu_si128((__m128i *)(s - 4 + 0 * p), d0);
  _mm_storeu_si128((__m128i *)(s - 4 + 1 * p), d1);
  _mm_storeu_si128((__m128i *)(s - 4 + 2 * p), d2);
  _mm_storeu_si128((__m128i *)(s - 4 + 3 * p), d3);
}

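// Dual vertical 8-tap filter: a full 8x8 transpose, the dual 8-tap core,
// and an 8x8 transpose back before the stores.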
void aom_highbd_lpf_vertical_8_dual_sse2(
    uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0,
    const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
    const uint8_t *thresh1, int bd) {
  __m128i x0, x1, x2, x3, x4, x5, x6, x7;
  __m128i d0, d1, d2, d3, d4, d5, d6, d7;

  x0 = _mm_loadu_si128((__m128i *)(s - 4 + 0 * p));
  x1 = _mm_loadu_si128((__m128i *)(s - 4 + 1 * p));
  x2 = _mm_loadu_si128((__m128i *)(s - 4 + 2 * p));
  x3 = _mm_loadu_si128((__m128i *)(s - 4 + 3 * p));
  x4 = _mm_loadu_si128((__m128i *)(s - 4 + 4 * p));
  x5 = _mm_loadu_si128((__m128i *)(s - 4 + 5 * p));
  x6 = _mm_loadu_si128((__m128i *)(s - 4 + 6 * p));
  x7 = _mm_loadu_si128((__m128i *)(s - 4 + 7 * p));

  highbd_transpose8x8_sse2(&x0, &x1, &x2, &x3, &x4, &x5, &x6, &x7, &d0, &d1,
                           &d2, &d3, &d4, &d5, &d6, &d7);

  highbd_lpf_internal_8_dual_sse2(&d0, &d7, &d1, &d6, &d2, &d5, &d3, &d4,
                                  blimit0, limit0, thresh0, blimit1, limit1,
                                  thresh1, bd);

  highbd_transpose8x8_sse2(&d0, &d1, &d2, &d3, &d4, &d5, &d6, &d7, &x0, &x1,
                           &x2, &x3, &x4, &x5, &x6, &x7);

  _mm_storeu_si128((__m128i *)(s - 4 + 0 * p), x0);
  _mm_storeu_si128((__m128i *)(s - 4 + 1 * p), x1);
  _mm_storeu_si128((__m128i *)(s - 4 + 2 * p), x2);
  _mm_storeu_si128((__m128i *)(s - 4 + 3 * p), x3);
  _mm_storeu_si128((__m128i *)(s - 4 + 4 * p), x4);
  _mm_storeu_si128((__m128i *)(s - 4 + 5 * p), x5);
  _mm_storeu_si128((__m128i *)(s - 4 + 6 * p), x6);
  _mm_storeu_si128((__m128i *)(s - 4 + 7 * p), x7);
}

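// Vertical 14-tap filter: the eight pixels on each side of the edge are
// loaded and transposed as separate 8-wide halves, the shared 14-tap core
// filters four rows, and each half is transposed back before storing.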
void aom_highbd_lpf_vertical_14_sse2(uint16_t *s, int pitch,
                                     const uint8_t *blimit,
                                     const uint8_t *limit,
                                     const uint8_t *thresh, int bd) {
  __m128i q[7], p[7], pq[7];
  __m128i p6, p5, p4, p3;
  __m128i p6_2, p5_2, p4_2, p3_2;
  __m128i d0, d1, d2, d3;
  __m128i d0_2, d1_2, d2_2, d3_2, d7_2;

  p6 = _mm_loadu_si128((__m128i *)((s - 8) + 0 * pitch));
  p5 = _mm_loadu_si128((__m128i *)((s - 8) + 1 * pitch));
  p4 = _mm_loadu_si128((__m128i *)((s - 8) + 2 * pitch));
  p3 = _mm_loadu_si128((__m128i *)((s - 8) + 3 * pitch));

  highbd_transpose4x8_8x4_sse2(&p6, &p5, &p4, &p3, &d0, &p[6], &p[5], &p[4],
                               &p[3], &p[2], &p[1], &p[0]);

  p6_2 = _mm_loadu_si128((__m128i *)(s + 0 * pitch));
  p5_2 = _mm_loadu_si128((__m128i *)(s + 1 * pitch));
  p4_2 = _mm_loadu_si128((__m128i *)(s + 2 * pitch));
  p3_2 = _mm_loadu_si128((__m128i *)(s + 3 * pitch));

  highbd_transpose4x8_8x4_sse2(&p6_2, &p5_2, &p4_2, &p3_2, &q[0], &q[1], &q[2],
                               &q[3], &q[4], &q[5], &q[6], &d7_2);

  highbd_lpf_internal_14_sse2(p, q, pq, blimit, limit, thresh, bd);

  highbd_transpose8x8_low_sse2(&d0, &p[6], &pq[5], &pq[4], &pq[3], &pq[2],
                               &pq[1], &pq[0], &d0, &d1, &d2, &d3);

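  // The filter core returns each p/q pair packed into one pq[] register;
  // pull the q-side rows out of the high halves for the second transpose.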
  q[0] = _mm_srli_si128(pq[0], 8);
  q[1] = _mm_srli_si128(pq[1], 8);
  q[2] = _mm_srli_si128(pq[2], 8);
  q[3] = _mm_srli_si128(pq[3], 8);
  q[4] = _mm_srli_si128(pq[4], 8);
  q[5] = _mm_srli_si128(pq[5], 8);

  highbd_transpose8x8_low_sse2(&q[0], &q[1], &q[2], &q[3], &q[4], &q[5], &q[6],
                               &d7_2, &d0_2, &d1_2, &d2_2, &d3_2);

  _mm_storeu_si128((__m128i *)(s - 8 + 0 * pitch), d0);
  _mm_storeu_si128((__m128i *)(s + 0 * pitch), d0_2);

  _mm_storeu_si128((__m128i *)(s - 8 + 1 * pitch), d1);
  _mm_storeu_si128((__m128i *)(s + 1 * pitch), d1_2);

  _mm_storeu_si128((__m128i *)(s - 8 + 2 * pitch), d2);
  _mm_storeu_si128((__m128i *)(s + 2 * pitch), d2_2);

  _mm_storeu_si128((__m128i *)(s - 8 + 3 * pitch), d3);
  _mm_storeu_si128((__m128i *)(s + 3 * pitch), d3_2);
}

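// Dual vertical 14-tap filter: same split into p-side and q-side halves,
// but over eight rows with independent parameters for each group of four.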
void aom_highbd_lpf_vertical_14_dual_sse2(
    uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0,
    const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
    const uint8_t *thresh1, int bd) {
  __m128i q[7], p[7];
  __m128i p6, p5, p4, p3, p2, p1, p0, q0;
  __m128i p6_2, p5_2, p4_2, p3_2, p2_2, p1_2, q0_2, p0_2;
  __m128i d0, d7;
  __m128i d0_out, d1_out, d2_out, d3_out, d4_out, d5_out, d6_out, d7_out;

  p6 = _mm_loadu_si128((__m128i *)((s - 8) + 0 * pitch));
  p5 = _mm_loadu_si128((__m128i *)((s - 8) + 1 * pitch));
  p4 = _mm_loadu_si128((__m128i *)((s - 8) + 2 * pitch));
  p3 = _mm_loadu_si128((__m128i *)((s - 8) + 3 * pitch));
  p2 = _mm_loadu_si128((__m128i *)((s - 8) + 4 * pitch));
  p1 = _mm_loadu_si128((__m128i *)((s - 8) + 5 * pitch));
  p0 = _mm_loadu_si128((__m128i *)((s - 8) + 6 * pitch));
  q0 = _mm_loadu_si128((__m128i *)((s - 8) + 7 * pitch));

  highbd_transpose8x8_sse2(&p6, &p5, &p4, &p3, &p2, &p1, &p0, &q0, &d0, &p[6],
                           &p[5], &p[4], &p[3], &p[2], &p[1], &p[0]);

  p6_2 = _mm_loadu_si128((__m128i *)(s + 0 * pitch));
  p5_2 = _mm_loadu_si128((__m128i *)(s + 1 * pitch));
  p4_2 = _mm_loadu_si128((__m128i *)(s + 2 * pitch));
  p3_2 = _mm_loadu_si128((__m128i *)(s + 3 * pitch));
  p2_2 = _mm_loadu_si128((__m128i *)(s + 4 * pitch));
  p1_2 = _mm_loadu_si128((__m128i *)(s + 5 * pitch));
  p0_2 = _mm_loadu_si128((__m128i *)(s + 6 * pitch));
  q0_2 = _mm_loadu_si128((__m128i *)(s + 7 * pitch));

  highbd_transpose8x8_sse2(&p6_2, &p5_2, &p4_2, &p3_2, &p2_2, &p1_2, &p0_2,
                           &q0_2, &q[0], &q[1], &q[2], &q[3], &q[4], &q[5],
                           &q[6], &d7);

  highbd_lpf_internal_14_dual_sse2(p, q, blimit0, limit0, thresh0, blimit1,
                                   limit1, thresh1, bd);

  highbd_transpose8x8_sse2(&d0, &p[6], &p[5], &p[4], &p[3], &p[2], &p[1], &p[0],
                           &d0_out, &d1_out, &d2_out, &d3_out, &d4_out, &d5_out,
                           &d6_out, &d7_out);

  _mm_storeu_si128((__m128i *)(s - 8 + 0 * pitch), d0_out);
  _mm_storeu_si128((__m128i *)(s - 8 + 1 * pitch), d1_out);
  _mm_storeu_si128((__m128i *)(s - 8 + 2 * pitch), d2_out);
  _mm_storeu_si128((__m128i *)(s - 8 + 3 * pitch), d3_out);
  _mm_storeu_si128((__m128i *)(s - 8 + 4 * pitch), d4_out);
  _mm_storeu_si128((__m128i *)(s - 8 + 5 * pitch), d5_out);
  _mm_storeu_si128((__m128i *)(s - 8 + 6 * pitch), d6_out);
  _mm_storeu_si128((__m128i *)(s - 8 + 7 * pitch), d7_out);

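  // Transpose the q-side results back and store the right half of the block.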
  highbd_transpose8x8_sse2(&q[0], &q[1], &q[2], &q[3], &q[4], &q[5], &q[6], &d7,
                           &d0_out, &d1_out, &d2_out, &d3_out, &d4_out, &d5_out,
                           &d6_out, &d7_out);

  _mm_storeu_si128((__m128i *)(s + 0 * pitch), d0_out);
  _mm_storeu_si128((__m128i *)(s + 1 * pitch), d1_out);
  _mm_storeu_si128((__m128i *)(s + 2 * pitch), d2_out);
  _mm_storeu_si128((__m128i *)(s + 3 * pitch), d3_out);
  _mm_storeu_si128((__m128i *)(s + 4 * pitch), d4_out);
  _mm_storeu_si128((__m128i *)(s + 5 * pitch), d5_out);
  _mm_storeu_si128((__m128i *)(s + 6 * pitch), d6_out);
  _mm_storeu_si128((__m128i *)(s + 7 * pitch), d7_out);
}