/*
 * Copyright (c) 2018, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <arm_neon.h>
#include <assert.h>

#include "config/aom_config.h"
#include "config/av1_rtcd.h"

#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/txfm_common.h"
#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
#include "av1/common/common.h"
#include "av1/common/onyxc_int.h"
#include "av1/common/resize.h"
#include "av1/common/restoration.h"
#include "av1/common/arm/mem_neon.h"
#include "av1/common/arm/transpose_neon.h"

// Right-shift amounts used in the final_filter calculations: log2 of the
// total cross-sum weights (32 for full and even rows, 16 for odd rows).
#define NB_EVEN 5
#define NB_ODD 4

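// Computes the intermediate (a, b) pair of the self-guided filter for a 4x4
// block. s0-s3 hold the box sums of squares and s4-s7 (sr4-sr7 as signed)
// the box sums. The variance n * sum_sq - sum * sum is clamped to zero,
// scaled by the sgr parameter s and saturated to [0, 255] before the
// x_by_xplus1[] lookup yields a; b is then (SGRPROJ_SGR - a) * sum *
// one_by_x[n - 1], rounded down to SGRPROJ_RECIP_BITS precision. a is stored
// to dst_A16 and b to src2.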
static INLINE void calc_ab_fast_internal_common(
    uint32x4_t s0, uint32x4_t s1, uint32x4_t s2, uint32x4_t s3, uint32x4_t s4,
    uint32x4_t s5, uint32x4_t s6, uint32x4_t s7, int32x4_t sr4, int32x4_t sr5,
    int32x4_t sr6, int32x4_t sr7, uint32x4_t const_n_val, uint32x4_t s_vec,
    uint32x4_t const_val, uint32x4_t one_by_n_minus_1_vec,
    uint16x4_t sgrproj_sgr, int32_t *src1, uint16_t *dst_A16, int32_t *src2,
    const int buf_stride) {
  uint32x4_t q0, q1, q2, q3;
  uint32x4_t p0, p1, p2, p3;
  uint16x4_t d0, d1, d2, d3;

  s0 = vmulq_u32(s0, const_n_val);
  s1 = vmulq_u32(s1, const_n_val);
  s2 = vmulq_u32(s2, const_n_val);
  s3 = vmulq_u32(s3, const_n_val);

  q0 = vmulq_u32(s4, s4);
  q1 = vmulq_u32(s5, s5);
  q2 = vmulq_u32(s6, s6);
  q3 = vmulq_u32(s7, s7);

  p0 = vcleq_u32(q0, s0);
  p1 = vcleq_u32(q1, s1);
  p2 = vcleq_u32(q2, s2);
  p3 = vcleq_u32(q3, s3);

  q0 = vsubq_u32(s0, q0);
  q1 = vsubq_u32(s1, q1);
  q2 = vsubq_u32(s2, q2);
  q3 = vsubq_u32(s3, q3);

  p0 = vandq_u32(p0, q0);
  p1 = vandq_u32(p1, q1);
  p2 = vandq_u32(p2, q2);
  p3 = vandq_u32(p3, q3);

  p0 = vmulq_u32(p0, s_vec);
  p1 = vmulq_u32(p1, s_vec);
  p2 = vmulq_u32(p2, s_vec);
  p3 = vmulq_u32(p3, s_vec);

  p0 = vrshrq_n_u32(p0, SGRPROJ_MTABLE_BITS);
  p1 = vrshrq_n_u32(p1, SGRPROJ_MTABLE_BITS);
  p2 = vrshrq_n_u32(p2, SGRPROJ_MTABLE_BITS);
  p3 = vrshrq_n_u32(p3, SGRPROJ_MTABLE_BITS);

  p0 = vminq_u32(p0, const_val);
  p1 = vminq_u32(p1, const_val);
  p2 = vminq_u32(p2, const_val);
  p3 = vminq_u32(p3, const_val);

  {
    store_u32_4x4((uint32_t *)src1, buf_stride, p0, p1, p2, p3);

    for (int x = 0; x < 4; x++) {
      for (int y = 0; y < 4; y++) {
        dst_A16[x * buf_stride + y] = x_by_xplus1[src1[x * buf_stride + y]];
      }
    }
    load_u16_4x4(dst_A16, buf_stride, &d0, &d1, &d2, &d3);
  }
  p0 = vsubl_u16(sgrproj_sgr, d0);
  p1 = vsubl_u16(sgrproj_sgr, d1);
  p2 = vsubl_u16(sgrproj_sgr, d2);
  p3 = vsubl_u16(sgrproj_sgr, d3);

  s4 = vmulq_u32(vreinterpretq_u32_s32(sr4), one_by_n_minus_1_vec);
  s5 = vmulq_u32(vreinterpretq_u32_s32(sr5), one_by_n_minus_1_vec);
  s6 = vmulq_u32(vreinterpretq_u32_s32(sr6), one_by_n_minus_1_vec);
  s7 = vmulq_u32(vreinterpretq_u32_s32(sr7), one_by_n_minus_1_vec);

  s4 = vmulq_u32(s4, p0);
  s5 = vmulq_u32(s5, p1);
  s6 = vmulq_u32(s6, p2);
  s7 = vmulq_u32(s7, p3);

  p0 = vrshrq_n_u32(s4, SGRPROJ_RECIP_BITS);
  p1 = vrshrq_n_u32(s5, SGRPROJ_RECIP_BITS);
  p2 = vrshrq_n_u32(s6, SGRPROJ_RECIP_BITS);
  p3 = vrshrq_n_u32(s7, SGRPROJ_RECIP_BITS);

  store_s32_4x4(src2, buf_stride, vreinterpretq_s32_u32(p0),
                vreinterpretq_s32_u32(p1), vreinterpretq_s32_u32(p2),
                vreinterpretq_s32_u32(p3));
}
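// 8x4 variant of the (a, b) calculation used by the r == 1 path. s0-s7 hold
// the box sums of squares, s16_0-s16_3 the 16-bit box sums and s16_4-s16_7
// their bit-depth-normalized copies used for the variance term; a is written
// to dst_A16 and b to dst2.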
static INLINE void calc_ab_internal_common(
    uint32x4_t s0, uint32x4_t s1, uint32x4_t s2, uint32x4_t s3, uint32x4_t s4,
    uint32x4_t s5, uint32x4_t s6, uint32x4_t s7, uint16x8_t s16_0,
    uint16x8_t s16_1, uint16x8_t s16_2, uint16x8_t s16_3, uint16x8_t s16_4,
    uint16x8_t s16_5, uint16x8_t s16_6, uint16x8_t s16_7,
    uint32x4_t const_n_val, uint32x4_t s_vec, uint32x4_t const_val,
    uint16x4_t one_by_n_minus_1_vec, uint16x8_t sgrproj_sgr, int32_t *src1,
    uint16_t *dst_A16, int32_t *dst2, const int buf_stride) {
  uint16x4_t d0, d1, d2, d3, d4, d5, d6, d7;
  uint32x4_t q0, q1, q2, q3, q4, q5, q6, q7;
  uint32x4_t p0, p1, p2, p3, p4, p5, p6, p7;

  s0 = vmulq_u32(s0, const_n_val);
  s1 = vmulq_u32(s1, const_n_val);
  s2 = vmulq_u32(s2, const_n_val);
  s3 = vmulq_u32(s3, const_n_val);
  s4 = vmulq_u32(s4, const_n_val);
  s5 = vmulq_u32(s5, const_n_val);
  s6 = vmulq_u32(s6, const_n_val);
  s7 = vmulq_u32(s7, const_n_val);

  d0 = vget_low_u16(s16_4);
  d1 = vget_low_u16(s16_5);
  d2 = vget_low_u16(s16_6);
  d3 = vget_low_u16(s16_7);
  d4 = vget_high_u16(s16_4);
  d5 = vget_high_u16(s16_5);
  d6 = vget_high_u16(s16_6);
  d7 = vget_high_u16(s16_7);

  q0 = vmull_u16(d0, d0);
  q1 = vmull_u16(d1, d1);
  q2 = vmull_u16(d2, d2);
  q3 = vmull_u16(d3, d3);
  q4 = vmull_u16(d4, d4);
  q5 = vmull_u16(d5, d5);
  q6 = vmull_u16(d6, d6);
  q7 = vmull_u16(d7, d7);

  p0 = vcleq_u32(q0, s0);
  p1 = vcleq_u32(q1, s1);
  p2 = vcleq_u32(q2, s2);
  p3 = vcleq_u32(q3, s3);
  p4 = vcleq_u32(q4, s4);
  p5 = vcleq_u32(q5, s5);
  p6 = vcleq_u32(q6, s6);
  p7 = vcleq_u32(q7, s7);

  q0 = vsubq_u32(s0, q0);
  q1 = vsubq_u32(s1, q1);
  q2 = vsubq_u32(s2, q2);
  q3 = vsubq_u32(s3, q3);
  q4 = vsubq_u32(s4, q4);
  q5 = vsubq_u32(s5, q5);
  q6 = vsubq_u32(s6, q6);
  q7 = vsubq_u32(s7, q7);

  p0 = vandq_u32(p0, q0);
  p1 = vandq_u32(p1, q1);
  p2 = vandq_u32(p2, q2);
  p3 = vandq_u32(p3, q3);
  p4 = vandq_u32(p4, q4);
  p5 = vandq_u32(p5, q5);
  p6 = vandq_u32(p6, q6);
  p7 = vandq_u32(p7, q7);

  p0 = vmulq_u32(p0, s_vec);
  p1 = vmulq_u32(p1, s_vec);
  p2 = vmulq_u32(p2, s_vec);
  p3 = vmulq_u32(p3, s_vec);
  p4 = vmulq_u32(p4, s_vec);
  p5 = vmulq_u32(p5, s_vec);
  p6 = vmulq_u32(p6, s_vec);
  p7 = vmulq_u32(p7, s_vec);

  p0 = vrshrq_n_u32(p0, SGRPROJ_MTABLE_BITS);
  p1 = vrshrq_n_u32(p1, SGRPROJ_MTABLE_BITS);
  p2 = vrshrq_n_u32(p2, SGRPROJ_MTABLE_BITS);
  p3 = vrshrq_n_u32(p3, SGRPROJ_MTABLE_BITS);
  p4 = vrshrq_n_u32(p4, SGRPROJ_MTABLE_BITS);
  p5 = vrshrq_n_u32(p5, SGRPROJ_MTABLE_BITS);
  p6 = vrshrq_n_u32(p6, SGRPROJ_MTABLE_BITS);
  p7 = vrshrq_n_u32(p7, SGRPROJ_MTABLE_BITS);

  p0 = vminq_u32(p0, const_val);
  p1 = vminq_u32(p1, const_val);
  p2 = vminq_u32(p2, const_val);
  p3 = vminq_u32(p3, const_val);
  p4 = vminq_u32(p4, const_val);
  p5 = vminq_u32(p5, const_val);
  p6 = vminq_u32(p6, const_val);
  p7 = vminq_u32(p7, const_val);

  {
    store_u32_4x4((uint32_t *)src1, buf_stride, p0, p1, p2, p3);
    store_u32_4x4((uint32_t *)src1 + 4, buf_stride, p4, p5, p6, p7);

    for (int x = 0; x < 4; x++) {
      for (int y = 0; y < 8; y++) {
        dst_A16[x * buf_stride + y] = x_by_xplus1[src1[x * buf_stride + y]];
      }
    }
    load_u16_8x4(dst_A16, buf_stride, &s16_4, &s16_5, &s16_6, &s16_7);
  }

  s16_4 = vsubq_u16(sgrproj_sgr, s16_4);
  s16_5 = vsubq_u16(sgrproj_sgr, s16_5);
  s16_6 = vsubq_u16(sgrproj_sgr, s16_6);
  s16_7 = vsubq_u16(sgrproj_sgr, s16_7);

  s0 = vmull_u16(vget_low_u16(s16_0), one_by_n_minus_1_vec);
  s1 = vmull_u16(vget_low_u16(s16_1), one_by_n_minus_1_vec);
  s2 = vmull_u16(vget_low_u16(s16_2), one_by_n_minus_1_vec);
  s3 = vmull_u16(vget_low_u16(s16_3), one_by_n_minus_1_vec);
  s4 = vmull_u16(vget_high_u16(s16_0), one_by_n_minus_1_vec);
  s5 = vmull_u16(vget_high_u16(s16_1), one_by_n_minus_1_vec);
  s6 = vmull_u16(vget_high_u16(s16_2), one_by_n_minus_1_vec);
  s7 = vmull_u16(vget_high_u16(s16_3), one_by_n_minus_1_vec);

  s0 = vmulq_u32(s0, vmovl_u16(vget_low_u16(s16_4)));
  s1 = vmulq_u32(s1, vmovl_u16(vget_low_u16(s16_5)));
  s2 = vmulq_u32(s2, vmovl_u16(vget_low_u16(s16_6)));
  s3 = vmulq_u32(s3, vmovl_u16(vget_low_u16(s16_7)));
  s4 = vmulq_u32(s4, vmovl_u16(vget_high_u16(s16_4)));
  s5 = vmulq_u32(s5, vmovl_u16(vget_high_u16(s16_5)));
  s6 = vmulq_u32(s6, vmovl_u16(vget_high_u16(s16_6)));
  s7 = vmulq_u32(s7, vmovl_u16(vget_high_u16(s16_7)));

  p0 = vrshrq_n_u32(s0, SGRPROJ_RECIP_BITS);
  p1 = vrshrq_n_u32(s1, SGRPROJ_RECIP_BITS);
  p2 = vrshrq_n_u32(s2, SGRPROJ_RECIP_BITS);
  p3 = vrshrq_n_u32(s3, SGRPROJ_RECIP_BITS);
  p4 = vrshrq_n_u32(s4, SGRPROJ_RECIP_BITS);
  p5 = vrshrq_n_u32(s5, SGRPROJ_RECIP_BITS);
  p6 = vrshrq_n_u32(s6, SGRPROJ_RECIP_BITS);
  p7 = vrshrq_n_u32(s7, SGRPROJ_RECIP_BITS);

  store_s32_4x4(dst2, buf_stride, vreinterpretq_s32_u32(p0),
                vreinterpretq_s32_u32(p1), vreinterpretq_s32_u32(p2),
                vreinterpretq_s32_u32(p3));
  store_s32_4x4(dst2 + 4, buf_stride, vreinterpretq_s32_u32(p4),
                vreinterpretq_s32_u32(p5), vreinterpretq_s32_u32(p6),
                vreinterpretq_s32_u32(p7));
}

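// Helper for boxsum2: squares eleven input rows and returns the four
// vertical 5-tap sums starting at rows 1, 3, 5 and 7 (one output per two
// input rows, matching the fast path).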
static INLINE void boxsum2_square_sum_calc(
    int16x4_t t1, int16x4_t t2, int16x4_t t3, int16x4_t t4, int16x4_t t5,
    int16x4_t t6, int16x4_t t7, int16x4_t t8, int16x4_t t9, int16x4_t t10,
    int16x4_t t11, int32x4_t *r0, int32x4_t *r1, int32x4_t *r2, int32x4_t *r3) {
  int32x4_t d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11;
  int32x4_t r12, r34, r67, r89, r1011;
  int32x4_t r345, r6789, r789;

  d1 = vmull_s16(t1, t1);
  d2 = vmull_s16(t2, t2);
  d3 = vmull_s16(t3, t3);
  d4 = vmull_s16(t4, t4);
  d5 = vmull_s16(t5, t5);
  d6 = vmull_s16(t6, t6);
  d7 = vmull_s16(t7, t7);
  d8 = vmull_s16(t8, t8);
  d9 = vmull_s16(t9, t9);
  d10 = vmull_s16(t10, t10);
  d11 = vmull_s16(t11, t11);

  r12 = vaddq_s32(d1, d2);
  r34 = vaddq_s32(d3, d4);
  r67 = vaddq_s32(d6, d7);
  r89 = vaddq_s32(d8, d9);
  r1011 = vaddq_s32(d10, d11);
  r345 = vaddq_s32(r34, d5);
  r6789 = vaddq_s32(r67, r89);
  r789 = vsubq_s32(r6789, d6);
  *r0 = vaddq_s32(r12, r345);
  *r1 = vaddq_s32(r67, r345);
  *r2 = vaddq_s32(d5, r6789);
  *r3 = vaddq_s32(r789, r1011);
}

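// Box sums of pixels and of squared pixels over a 5x5 window, as used by the
// r == 2 (fast) filter. The first pass sums five consecutive rows for every
// second output row (pixels to dst16, squares to dst2); the second pass
// transposes 4x4 tiles to accumulate five consecutive columns, writing the
// completed sums to dst32 and dst2.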
static INLINE void boxsum2(int16_t *src, const int src_stride, int16_t *dst16,
                           int32_t *dst32, int32_t *dst2, const int dst_stride,
                           const int width, const int height) {
  assert(width > 2 * SGRPROJ_BORDER_HORZ);
  assert(height > 2 * SGRPROJ_BORDER_VERT);

  int16_t *dst1_16_ptr, *src_ptr;
  int32_t *dst2_ptr;
  int h, w, count = 0;
  const int dst_stride_2 = (dst_stride << 1);
  const int dst_stride_8 = (dst_stride << 3);

  dst1_16_ptr = dst16;
  dst2_ptr = dst2;
  src_ptr = src;
  w = width;
  {
    int16x8_t t1, t2, t3, t4, t5, t6, t7;
    int16x8_t t8, t9, t10, t11, t12;

    int16x8_t q12345, q56789, q34567, q7891011;
    int16x8_t q12, q34, q67, q89, q1011;
    int16x8_t q345, q6789, q789;

    int32x4_t r12345, r56789, r34567, r7891011;

    do {
      h = height;
      dst1_16_ptr = dst16 + (count << 3);
      dst2_ptr = dst2 + (count << 3);
      src_ptr = src + (count << 3);

      dst1_16_ptr += dst_stride_2;
      dst2_ptr += dst_stride_2;
      do {
        load_s16_8x4(src_ptr, src_stride, &t1, &t2, &t3, &t4);
        src_ptr += 4 * src_stride;
        load_s16_8x4(src_ptr, src_stride, &t5, &t6, &t7, &t8);
        src_ptr += 4 * src_stride;
        load_s16_8x4(src_ptr, src_stride, &t9, &t10, &t11, &t12);

        q12 = vaddq_s16(t1, t2);
        q34 = vaddq_s16(t3, t4);
        q67 = vaddq_s16(t6, t7);
        q89 = vaddq_s16(t8, t9);
        q1011 = vaddq_s16(t10, t11);
        q345 = vaddq_s16(q34, t5);
        q6789 = vaddq_s16(q67, q89);
        q789 = vaddq_s16(q89, t7);
        q12345 = vaddq_s16(q12, q345);
        q34567 = vaddq_s16(q67, q345);
        q56789 = vaddq_s16(t5, q6789);
        q7891011 = vaddq_s16(q789, q1011);

        store_s16_8x4(dst1_16_ptr, dst_stride_2, q12345, q34567, q56789,
                      q7891011);
        dst1_16_ptr += dst_stride_8;

        boxsum2_square_sum_calc(
            vget_low_s16(t1), vget_low_s16(t2), vget_low_s16(t3),
            vget_low_s16(t4), vget_low_s16(t5), vget_low_s16(t6),
            vget_low_s16(t7), vget_low_s16(t8), vget_low_s16(t9),
            vget_low_s16(t10), vget_low_s16(t11), &r12345, &r34567, &r56789,
            &r7891011);

        store_s32_4x4(dst2_ptr, dst_stride_2, r12345, r34567, r56789, r7891011);

        boxsum2_square_sum_calc(
            vget_high_s16(t1), vget_high_s16(t2), vget_high_s16(t3),
            vget_high_s16(t4), vget_high_s16(t5), vget_high_s16(t6),
            vget_high_s16(t7), vget_high_s16(t8), vget_high_s16(t9),
            vget_high_s16(t10), vget_high_s16(t11), &r12345, &r34567, &r56789,
            &r7891011);

        store_s32_4x4(dst2_ptr + 4, dst_stride_2, r12345, r34567, r56789,
                      r7891011);
        dst2_ptr += (dst_stride_8);
        h -= 8;
      } while (h > 0);
      w -= 8;
      count++;
    } while (w > 0);
  }

  {
    int16x4_t s1, s2, s3, s4, s5, s6, s7, s8;
    int32x4_t d1, d2, d3, d4, d5, d6, d7, d8;
    int32x4_t q12345, q34567, q23456, q45678;
    int32x4_t q23, q45, q67;
    int32x4_t q2345, q4567;

    int32x4_t r12345, r34567, r23456, r45678;
    int32x4_t r23, r45, r67;
    int32x4_t r2345, r4567;

    int32_t *src2_ptr, *dst1_32_ptr;
    int16_t *src1_ptr;
    count = 0;
    h = height;
    do {
      dst1_32_ptr = dst32 + count * dst_stride_8 + (dst_stride_2);
      dst2_ptr = dst2 + count * dst_stride_8 + (dst_stride_2);
      src1_ptr = dst16 + count * dst_stride_8 + (dst_stride_2);
      src2_ptr = dst2 + count * dst_stride_8 + (dst_stride_2);
      w = width;

      dst1_32_ptr += 2;
      dst2_ptr += 2;
      load_s16_4x4(src1_ptr, dst_stride_2, &s1, &s2, &s3, &s4);
      transpose_s16_4x4d(&s1, &s2, &s3, &s4);
      load_s32_4x4(src2_ptr, dst_stride_2, &d1, &d2, &d3, &d4);
      transpose_s32_4x4(&d1, &d2, &d3, &d4);
      do {
        src1_ptr += 4;
        src2_ptr += 4;
        load_s16_4x4(src1_ptr, dst_stride_2, &s5, &s6, &s7, &s8);
        transpose_s16_4x4d(&s5, &s6, &s7, &s8);
        load_s32_4x4(src2_ptr, dst_stride_2, &d5, &d6, &d7, &d8);
        transpose_s32_4x4(&d5, &d6, &d7, &d8);
        q23 = vaddl_s16(s2, s3);
        q45 = vaddl_s16(s4, s5);
        q67 = vaddl_s16(s6, s7);
        q2345 = vaddq_s32(q23, q45);
        q4567 = vaddq_s32(q45, q67);
        q12345 = vaddq_s32(vmovl_s16(s1), q2345);
        q23456 = vaddq_s32(q2345, vmovl_s16(s6));
        q34567 = vaddq_s32(q4567, vmovl_s16(s3));
        q45678 = vaddq_s32(q4567, vmovl_s16(s8));

        transpose_s32_4x4(&q12345, &q23456, &q34567, &q45678);
        store_s32_4x4(dst1_32_ptr, dst_stride_2, q12345, q23456, q34567,
                      q45678);
        dst1_32_ptr += 4;
        s1 = s5;
        s2 = s6;
        s3 = s7;
        s4 = s8;

        r23 = vaddq_s32(d2, d3);
        r45 = vaddq_s32(d4, d5);
        r67 = vaddq_s32(d6, d7);
        r2345 = vaddq_s32(r23, r45);
        r4567 = vaddq_s32(r45, r67);
        r12345 = vaddq_s32(d1, r2345);
        r23456 = vaddq_s32(r2345, d6);
        r34567 = vaddq_s32(r4567, d3);
        r45678 = vaddq_s32(r4567, d8);

        transpose_s32_4x4(&r12345, &r23456, &r34567, &r45678);
        store_s32_4x4(dst2_ptr, dst_stride_2, r12345, r23456, r34567, r45678);
        dst2_ptr += 4;
        d1 = d5;
        d2 = d6;
        d3 = d7;
        d4 = d8;
        w -= 4;
      } while (w > 0);
      h -= 8;
      count++;
    } while (h > 0);
  }
}

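// 8-bit input wrapper for the r == 1 (a, b) calculation: walks the A/B
// buffers in 8x4 blocks and feeds them unscaled into
// calc_ab_internal_common(). The hbd variant below normalizes the sums for
// bit depth first.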
static INLINE void calc_ab_internal_lbd(int32_t *A, uint16_t *A16,
                                        uint16_t *B16, int32_t *B,
                                        const int buf_stride, const int width,
                                        const int height, const int r,
                                        const int s, const int ht_inc) {
  int32_t *src1, *dst2, count = 0;
  uint16_t *dst_A16, *src2;
  const uint32_t n = (2 * r + 1) * (2 * r + 1);
  const uint32x4_t const_n_val = vdupq_n_u32(n);
  const uint16x8_t sgrproj_sgr = vdupq_n_u16(SGRPROJ_SGR);
  const uint16x4_t one_by_n_minus_1_vec = vdup_n_u16(one_by_x[n - 1]);
  const uint32x4_t const_val = vdupq_n_u32(255);

  uint16x8_t s16_0, s16_1, s16_2, s16_3, s16_4, s16_5, s16_6, s16_7;

  uint32x4_t s0, s1, s2, s3, s4, s5, s6, s7;

  const uint32x4_t s_vec = vdupq_n_u32(s);
  int w, h = height;

  do {
    dst_A16 = A16 + (count << 2) * buf_stride;
    src1 = A + (count << 2) * buf_stride;
    src2 = B16 + (count << 2) * buf_stride;
    dst2 = B + (count << 2) * buf_stride;
    w = width;
    do {
      load_u32_4x4((uint32_t *)src1, buf_stride, &s0, &s1, &s2, &s3);
      load_u32_4x4((uint32_t *)src1 + 4, buf_stride, &s4, &s5, &s6, &s7);
      load_u16_8x4(src2, buf_stride, &s16_0, &s16_1, &s16_2, &s16_3);

      s16_4 = s16_0;
      s16_5 = s16_1;
      s16_6 = s16_2;
      s16_7 = s16_3;

      calc_ab_internal_common(
          s0, s1, s2, s3, s4, s5, s6, s7, s16_0, s16_1, s16_2, s16_3, s16_4,
          s16_5, s16_6, s16_7, const_n_val, s_vec, const_val,
          one_by_n_minus_1_vec, sgrproj_sgr, src1, dst_A16, dst2, buf_stride);

      w -= 8;
      dst2 += 8;
      src1 += 8;
      src2 += 8;
      dst_A16 += 8;
    } while (w > 0);
    count++;
    h -= (ht_inc * 4);
  } while (h > 0);
}

static INLINE void calc_ab_internal_hbd(int32_t *A, uint16_t *A16,
                                        uint16_t *B16, int32_t *B,
                                        const int buf_stride, const int width,
                                        const int height, const int bit_depth,
                                        const int r, const int s,
                                        const int ht_inc) {
  int32_t *src1, *dst2, count = 0;
  uint16_t *dst_A16, *src2;
  const uint32_t n = (2 * r + 1) * (2 * r + 1);
  const int16x8_t bd_min_2_vec = vdupq_n_s16(-(bit_depth - 8));
  const int32x4_t bd_min_1_vec = vdupq_n_s32(-((bit_depth - 8) << 1));
  const uint32x4_t const_n_val = vdupq_n_u32(n);
  const uint16x8_t sgrproj_sgr = vdupq_n_u16(SGRPROJ_SGR);
  const uint16x4_t one_by_n_minus_1_vec = vdup_n_u16(one_by_x[n - 1]);
  const uint32x4_t const_val = vdupq_n_u32(255);

  int32x4_t sr0, sr1, sr2, sr3, sr4, sr5, sr6, sr7;
  uint16x8_t s16_0, s16_1, s16_2, s16_3;
  uint16x8_t s16_4, s16_5, s16_6, s16_7;
  uint32x4_t s0, s1, s2, s3, s4, s5, s6, s7;

  const uint32x4_t s_vec = vdupq_n_u32(s);
  int w, h = height;

  do {
    src1 = A + (count << 2) * buf_stride;
    src2 = B16 + (count << 2) * buf_stride;
    dst2 = B + (count << 2) * buf_stride;
    dst_A16 = A16 + (count << 2) * buf_stride;
    w = width;
    do {
      load_s32_4x4(src1, buf_stride, &sr0, &sr1, &sr2, &sr3);
      load_s32_4x4(src1 + 4, buf_stride, &sr4, &sr5, &sr6, &sr7);
      load_u16_8x4(src2, buf_stride, &s16_0, &s16_1, &s16_2, &s16_3);

      s0 = vrshlq_u32(vreinterpretq_u32_s32(sr0), bd_min_1_vec);
      s1 = vrshlq_u32(vreinterpretq_u32_s32(sr1), bd_min_1_vec);
      s2 = vrshlq_u32(vreinterpretq_u32_s32(sr2), bd_min_1_vec);
      s3 = vrshlq_u32(vreinterpretq_u32_s32(sr3), bd_min_1_vec);
      s4 = vrshlq_u32(vreinterpretq_u32_s32(sr4), bd_min_1_vec);
      s5 = vrshlq_u32(vreinterpretq_u32_s32(sr5), bd_min_1_vec);
      s6 = vrshlq_u32(vreinterpretq_u32_s32(sr6), bd_min_1_vec);
      s7 = vrshlq_u32(vreinterpretq_u32_s32(sr7), bd_min_1_vec);

      s16_4 = vrshlq_u16(s16_0, bd_min_2_vec);
      s16_5 = vrshlq_u16(s16_1, bd_min_2_vec);
      s16_6 = vrshlq_u16(s16_2, bd_min_2_vec);
      s16_7 = vrshlq_u16(s16_3, bd_min_2_vec);

      calc_ab_internal_common(
          s0, s1, s2, s3, s4, s5, s6, s7, s16_0, s16_1, s16_2, s16_3, s16_4,
          s16_5, s16_6, s16_7, const_n_val, s_vec, const_val,
          one_by_n_minus_1_vec, sgrproj_sgr, src1, dst_A16, dst2, buf_stride);

      w -= 8;
      dst2 += 8;
      src1 += 8;
      src2 += 8;
      dst_A16 += 8;
    } while (w > 0);
    count++;
    h -= (ht_inc * 4);
  } while (h > 0);
}

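// r == 2 (fast) equivalents of the wrappers above, processing 4x4 blocks.
// The hbd variant first normalizes the box sums by (bit_depth - 8) bits and
// the sums of squares by 2 * (bit_depth - 8) bits.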
static INLINE void calc_ab_fast_internal_lbd(int32_t *A, uint16_t *A16,
                                             int32_t *B, const int buf_stride,
                                             const int width, const int height,
                                             const int r, const int s,
                                             const int ht_inc) {
  int32_t *src1, *src2, count = 0;
  uint16_t *dst_A16;
  const uint32_t n = (2 * r + 1) * (2 * r + 1);
  const uint32x4_t const_n_val = vdupq_n_u32(n);
  const uint16x4_t sgrproj_sgr = vdup_n_u16(SGRPROJ_SGR);
  const uint32x4_t one_by_n_minus_1_vec = vdupq_n_u32(one_by_x[n - 1]);
  const uint32x4_t const_val = vdupq_n_u32(255);

  int32x4_t sr0, sr1, sr2, sr3, sr4, sr5, sr6, sr7;
  uint32x4_t s0, s1, s2, s3, s4, s5, s6, s7;

  const uint32x4_t s_vec = vdupq_n_u32(s);
  int w, h = height;

  do {
    src1 = A + (count << 2) * buf_stride;
    src2 = B + (count << 2) * buf_stride;
    dst_A16 = A16 + (count << 2) * buf_stride;
    w = width;
    do {
      load_s32_4x4(src1, buf_stride, &sr0, &sr1, &sr2, &sr3);
      load_s32_4x4(src2, buf_stride, &sr4, &sr5, &sr6, &sr7);

      s0 = vreinterpretq_u32_s32(sr0);
      s1 = vreinterpretq_u32_s32(sr1);
      s2 = vreinterpretq_u32_s32(sr2);
      s3 = vreinterpretq_u32_s32(sr3);
      s4 = vreinterpretq_u32_s32(sr4);
      s5 = vreinterpretq_u32_s32(sr5);
      s6 = vreinterpretq_u32_s32(sr6);
      s7 = vreinterpretq_u32_s32(sr7);

      calc_ab_fast_internal_common(s0, s1, s2, s3, s4, s5, s6, s7, sr4, sr5,
                                   sr6, sr7, const_n_val, s_vec, const_val,
                                   one_by_n_minus_1_vec, sgrproj_sgr, src1,
                                   dst_A16, src2, buf_stride);

      w -= 4;
      src1 += 4;
      src2 += 4;
      dst_A16 += 4;
    } while (w > 0);
    count++;
    h -= (ht_inc * 4);
  } while (h > 0);
}

static INLINE void calc_ab_fast_internal_hbd(int32_t *A, uint16_t *A16,
                                             int32_t *B, const int buf_stride,
                                             const int width, const int height,
                                             const int bit_depth, const int r,
                                             const int s, const int ht_inc) {
  int32_t *src1, *src2, count = 0;
  uint16_t *dst_A16;
  const uint32_t n = (2 * r + 1) * (2 * r + 1);
  const int32x4_t bd_min_2_vec = vdupq_n_s32(-(bit_depth - 8));
  const int32x4_t bd_min_1_vec = vdupq_n_s32(-((bit_depth - 8) << 1));
  const uint32x4_t const_n_val = vdupq_n_u32(n);
  const uint16x4_t sgrproj_sgr = vdup_n_u16(SGRPROJ_SGR);
  const uint32x4_t one_by_n_minus_1_vec = vdupq_n_u32(one_by_x[n - 1]);
  const uint32x4_t const_val = vdupq_n_u32(255);

  int32x4_t sr0, sr1, sr2, sr3, sr4, sr5, sr6, sr7;
  uint32x4_t s0, s1, s2, s3, s4, s5, s6, s7;

  const uint32x4_t s_vec = vdupq_n_u32(s);
  int w, h = height;

  do {
    src1 = A + (count << 2) * buf_stride;
    src2 = B + (count << 2) * buf_stride;
    dst_A16 = A16 + (count << 2) * buf_stride;
    w = width;
    do {
      load_s32_4x4(src1, buf_stride, &sr0, &sr1, &sr2, &sr3);
      load_s32_4x4(src2, buf_stride, &sr4, &sr5, &sr6, &sr7);

      s0 = vrshlq_u32(vreinterpretq_u32_s32(sr0), bd_min_1_vec);
      s1 = vrshlq_u32(vreinterpretq_u32_s32(sr1), bd_min_1_vec);
      s2 = vrshlq_u32(vreinterpretq_u32_s32(sr2), bd_min_1_vec);
      s3 = vrshlq_u32(vreinterpretq_u32_s32(sr3), bd_min_1_vec);
      s4 = vrshlq_u32(vreinterpretq_u32_s32(sr4), bd_min_2_vec);
      s5 = vrshlq_u32(vreinterpretq_u32_s32(sr5), bd_min_2_vec);
      s6 = vrshlq_u32(vreinterpretq_u32_s32(sr6), bd_min_2_vec);
      s7 = vrshlq_u32(vreinterpretq_u32_s32(sr7), bd_min_2_vec);

      calc_ab_fast_internal_common(s0, s1, s2, s3, s4, s5, s6, s7, sr4, sr5,
                                   sr6, sr7, const_n_val, s_vec, const_val,
                                   one_by_n_minus_1_vec, sgrproj_sgr, src1,
                                   dst_A16, src2, buf_stride);

      w -= 4;
      src1 += 4;
      src2 += 4;
      dst_A16 += 4;
    } while (w > 0);
    count++;
    h -= (ht_inc * 4);
  } while (h > 0);
}

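// 3x3 box sums for the r == 1 filter: the first pass keeps running 3-row
// sums of pixels (dst1) and of their squares (dst2); the second pass
// transposes 4x4 tiles to accumulate the 3-column sums in place.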
static INLINE void boxsum1(int16_t *src, const int src_stride, uint16_t *dst1,
                           int32_t *dst2, const int dst_stride, const int width,
                           const int height) {
  assert(width > 2 * SGRPROJ_BORDER_HORZ);
  assert(height > 2 * SGRPROJ_BORDER_VERT);

  int16_t *src_ptr;
  int32_t *dst2_ptr;
  uint16_t *dst1_ptr;
  int h, w, count = 0;

  w = width;
  {
    int16x8_t s1, s2, s3, s4, s5, s6, s7, s8;
    int16x8_t q23, q34, q56, q234, q345, q456, q567;
    int32x4_t r23, r56, r345, r456, r567, r78, r678;
    int32x4_t r4_low, r4_high, r34_low, r34_high, r234_low, r234_high;
    int32x4_t r2, r3, r5, r6, r7, r8;
    int16x8_t q678, q78;

    do {
      dst1_ptr = dst1 + (count << 3);
      dst2_ptr = dst2 + (count << 3);
      src_ptr = src + (count << 3);
      h = height;

      load_s16_8x4(src_ptr, src_stride, &s1, &s2, &s3, &s4);
      src_ptr += 4 * src_stride;

      q23 = vaddq_s16(s2, s3);
      q234 = vaddq_s16(q23, s4);
      q34 = vaddq_s16(s3, s4);
      dst1_ptr += (dst_stride << 1);

      r2 = vmull_s16(vget_low_s16(s2), vget_low_s16(s2));
      r3 = vmull_s16(vget_low_s16(s3), vget_low_s16(s3));
      r4_low = vmull_s16(vget_low_s16(s4), vget_low_s16(s4));
      r23 = vaddq_s32(r2, r3);
      r234_low = vaddq_s32(r23, r4_low);
      r34_low = vaddq_s32(r3, r4_low);

      r2 = vmull_s16(vget_high_s16(s2), vget_high_s16(s2));
      r3 = vmull_s16(vget_high_s16(s3), vget_high_s16(s3));
      r4_high = vmull_s16(vget_high_s16(s4), vget_high_s16(s4));
      r23 = vaddq_s32(r2, r3);
      r234_high = vaddq_s32(r23, r4_high);
      r34_high = vaddq_s32(r3, r4_high);

      dst2_ptr += (dst_stride << 1);

      do {
        load_s16_8x4(src_ptr, src_stride, &s5, &s6, &s7, &s8);
        src_ptr += 4 * src_stride;

        q345 = vaddq_s16(s5, q34);
        q56 = vaddq_s16(s5, s6);
        q456 = vaddq_s16(s4, q56);
        q567 = vaddq_s16(s7, q56);
        q78 = vaddq_s16(s7, s8);
        q678 = vaddq_s16(s6, q78);

        store_s16_8x4((int16_t *)dst1_ptr, dst_stride, q234, q345, q456, q567);
        dst1_ptr += (dst_stride << 2);

        s4 = s8;
        q34 = q78;
        q234 = q678;

        r5 = vmull_s16(vget_low_s16(s5), vget_low_s16(s5));
        r6 = vmull_s16(vget_low_s16(s6), vget_low_s16(s6));
        r7 = vmull_s16(vget_low_s16(s7), vget_low_s16(s7));
        r8 = vmull_s16(vget_low_s16(s8), vget_low_s16(s8));

        r345 = vaddq_s32(r5, r34_low);
        r56 = vaddq_s32(r5, r6);
        r456 = vaddq_s32(r4_low, r56);
        r567 = vaddq_s32(r7, r56);
        r78 = vaddq_s32(r7, r8);
        r678 = vaddq_s32(r6, r78);
        store_s32_4x4(dst2_ptr, dst_stride, r234_low, r345, r456, r567);

        r4_low = r8;
        r34_low = r78;
        r234_low = r678;

        r5 = vmull_s16(vget_high_s16(s5), vget_high_s16(s5));
        r6 = vmull_s16(vget_high_s16(s6), vget_high_s16(s6));
        r7 = vmull_s16(vget_high_s16(s7), vget_high_s16(s7));
        r8 = vmull_s16(vget_high_s16(s8), vget_high_s16(s8));

        r345 = vaddq_s32(r5, r34_high);
        r56 = vaddq_s32(r5, r6);
        r456 = vaddq_s32(r4_high, r56);
        r567 = vaddq_s32(r7, r56);
        r78 = vaddq_s32(r7, r8);
        r678 = vaddq_s32(r6, r78);
        store_s32_4x4((dst2_ptr + 4), dst_stride, r234_high, r345, r456, r567);
        dst2_ptr += (dst_stride << 2);

        r4_high = r8;
        r34_high = r78;
        r234_high = r678;

        h -= 4;
      } while (h > 0);
      w -= 8;
      count++;
    } while (w > 0);
  }

  {
    int16x4_t d1, d2, d3, d4, d5, d6, d7, d8;
    int16x4_t q23, q34, q56, q234, q345, q456, q567;
    int32x4_t r23, r56, r234, r345, r456, r567, r34, r78, r678;
    int32x4_t r1, r2, r3, r4, r5, r6, r7, r8;
    int16x4_t q678, q78;

    int32_t *src2_ptr;
    uint16_t *src1_ptr;
    count = 0;
    h = height;
    w = width;
    do {
      dst1_ptr = dst1 + (count << 2) * dst_stride;
      dst2_ptr = dst2 + (count << 2) * dst_stride;
      src1_ptr = dst1 + (count << 2) * dst_stride;
      src2_ptr = dst2 + (count << 2) * dst_stride;
      w = width;

      load_s16_4x4((int16_t *)src1_ptr, dst_stride, &d1, &d2, &d3, &d4);
      transpose_s16_4x4d(&d1, &d2, &d3, &d4);
      load_s32_4x4(src2_ptr, dst_stride, &r1, &r2, &r3, &r4);
      transpose_s32_4x4(&r1, &r2, &r3, &r4);
      src1_ptr += 4;
      src2_ptr += 4;

      q23 = vadd_s16(d2, d3);
      q234 = vadd_s16(q23, d4);
      q34 = vadd_s16(d3, d4);
      dst1_ptr += 2;
      r23 = vaddq_s32(r2, r3);
      r234 = vaddq_s32(r23, r4);
      r34 = vaddq_s32(r3, r4);
      dst2_ptr += 2;

      do {
        load_s16_4x4((int16_t *)src1_ptr, dst_stride, &d5, &d6, &d7, &d8);
        transpose_s16_4x4d(&d5, &d6, &d7, &d8);
        load_s32_4x4(src2_ptr, dst_stride, &r5, &r6, &r7, &r8);
        transpose_s32_4x4(&r5, &r6, &r7, &r8);
        src1_ptr += 4;
        src2_ptr += 4;

        q345 = vadd_s16(d5, q34);
        q56 = vadd_s16(d5, d6);
        q456 = vadd_s16(d4, q56);
        q567 = vadd_s16(d7, q56);
        q78 = vadd_s16(d7, d8);
        q678 = vadd_s16(d6, q78);
        transpose_s16_4x4d(&q234, &q345, &q456, &q567);
        store_s16_4x4((int16_t *)dst1_ptr, dst_stride, q234, q345, q456, q567);
        dst1_ptr += 4;

        d4 = d8;
        q34 = q78;
        q234 = q678;

        r345 = vaddq_s32(r5, r34);
        r56 = vaddq_s32(r5, r6);
        r456 = vaddq_s32(r4, r56);
        r567 = vaddq_s32(r7, r56);
        r78 = vaddq_s32(r7, r8);
        r678 = vaddq_s32(r6, r78);
        transpose_s32_4x4(&r234, &r345, &r456, &r567);
        store_s32_4x4(dst2_ptr, dst_stride, r234, r345, r456, r567);
        dst2_ptr += 4;

        r4 = r8;
        r34 = r78;
        r234 = r678;
        w -= 4;
      } while (w > 0);
      h -= 4;
      count++;
    } while (h > 0);
  }
}

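// Weighted 3x3 cross sum of b values for the r == 1 filter: the centre and
// edge neighbours get weight 4 and the corners weight 3, for a weight total
// of 32 (hence the NB_EVEN = 5 shift in final_filter_internal()).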
static INLINE int32x4_t cross_sum_inp_s32(int32_t *buf, int buf_stride) {
  int32x4_t xtr, xt, xtl, xl, x, xr, xbr, xb, xbl;
  int32x4_t fours, threes, res;

  xtl = vld1q_s32(buf - buf_stride - 1);
  xt = vld1q_s32(buf - buf_stride);
  xtr = vld1q_s32(buf - buf_stride + 1);
  xl = vld1q_s32(buf - 1);
  x = vld1q_s32(buf);
  xr = vld1q_s32(buf + 1);
  xbl = vld1q_s32(buf + buf_stride - 1);
  xb = vld1q_s32(buf + buf_stride);
  xbr = vld1q_s32(buf + buf_stride + 1);

  fours = vaddq_s32(xl, vaddq_s32(xt, vaddq_s32(xr, vaddq_s32(xb, x))));
  threes = vaddq_s32(xtl, vaddq_s32(xtr, vaddq_s32(xbr, xbl)));
  res = vsubq_s32(vshlq_n_s32(vaddq_s32(fours, threes), 2), threes);
  return res;
}

static INLINE void cross_sum_inp_u16(uint16_t *buf, int buf_stride,
                                     int32x4_t *a0, int32x4_t *a1) {
  uint16x8_t xtr, xt, xtl, xl, x, xr, xbr, xb, xbl;
  uint16x8_t r0, r1;

  xtl = vld1q_u16(buf - buf_stride - 1);
  xt = vld1q_u16(buf - buf_stride);
  xtr = vld1q_u16(buf - buf_stride + 1);
  xl = vld1q_u16(buf - 1);
  x = vld1q_u16(buf);
  xr = vld1q_u16(buf + 1);
  xbl = vld1q_u16(buf + buf_stride - 1);
  xb = vld1q_u16(buf + buf_stride);
  xbr = vld1q_u16(buf + buf_stride + 1);

  xb = vaddq_u16(xb, x);
  xt = vaddq_u16(xt, xr);
  xl = vaddq_u16(xl, xb);
  xl = vaddq_u16(xl, xt);

  r0 = vshlq_n_u16(xl, 2);

  xbl = vaddq_u16(xbl, xbr);
  xtl = vaddq_u16(xtl, xtr);
  xtl = vaddq_u16(xtl, xbl);

  r1 = vshlq_n_u16(xtl, 2);
  r1 = vsubq_u16(r1, xtl);

  *a0 = vreinterpretq_s32_u32(
      vaddq_u32(vmovl_u16(vget_low_u16(r0)), vmovl_u16(vget_low_u16(r1))));
  *a1 = vreinterpretq_s32_u32(
      vaddq_u32(vmovl_u16(vget_high_u16(r0)), vmovl_u16(vget_high_u16(r1))));
}

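// Cross sums for the r == 2 (fast) filter, where (a, b) exist only on every
// second row: even output rows combine the rows above and below with weights
// 5 (corners) and 6 (middle columns), totalling 32 (NB_EVEN = 5); odd rows
// use their own row with the same 5/6 weights, totalling 16 (NB_ODD = 4).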
static INLINE int32x4_t cross_sum_fast_even_row(int32_t *buf, int buf_stride) {
  int32x4_t xtr, xt, xtl, xbr, xb, xbl;
  int32x4_t fives, sixes, fives_plus_sixes;

  xtl = vld1q_s32(buf - buf_stride - 1);
  xt = vld1q_s32(buf - buf_stride);
  xtr = vld1q_s32(buf - buf_stride + 1);
  xbl = vld1q_s32(buf + buf_stride - 1);
  xb = vld1q_s32(buf + buf_stride);
  xbr = vld1q_s32(buf + buf_stride + 1);

  fives = vaddq_s32(xtl, vaddq_s32(xtr, vaddq_s32(xbr, xbl)));
  sixes = vaddq_s32(xt, xb);
  fives_plus_sixes = vaddq_s32(fives, sixes);

  return vaddq_s32(
      vaddq_s32(vshlq_n_s32(fives_plus_sixes, 2), fives_plus_sixes), sixes);
}

static INLINE void cross_sum_fast_even_row_inp16(uint16_t *buf, int buf_stride,
                                                 int32x4_t *a0, int32x4_t *a1) {
  uint16x8_t xtr, xt, xtl, xbr, xb, xbl, xb0;

  xtl = vld1q_u16(buf - buf_stride - 1);
  xt = vld1q_u16(buf - buf_stride);
  xtr = vld1q_u16(buf - buf_stride + 1);
  xbl = vld1q_u16(buf + buf_stride - 1);
  xb = vld1q_u16(buf + buf_stride);
  xbr = vld1q_u16(buf + buf_stride + 1);

  xbr = vaddq_u16(xbr, xbl);
  xtr = vaddq_u16(xtr, xtl);
  xbr = vaddq_u16(xbr, xtr);
  xtl = vshlq_n_u16(xbr, 2);
  xbr = vaddq_u16(xtl, xbr);

  xb = vaddq_u16(xb, xt);
  xb0 = vshlq_n_u16(xb, 1);
  xb = vshlq_n_u16(xb, 2);
  xb = vaddq_u16(xb, xb0);

  *a0 = vreinterpretq_s32_u32(
      vaddq_u32(vmovl_u16(vget_low_u16(xbr)), vmovl_u16(vget_low_u16(xb))));
  *a1 = vreinterpretq_s32_u32(
      vaddq_u32(vmovl_u16(vget_high_u16(xbr)), vmovl_u16(vget_high_u16(xb))));
}

static INLINE int32x4_t cross_sum_fast_odd_row(int32_t *buf) {
  int32x4_t xl, x, xr;
  int32x4_t fives, sixes, fives_plus_sixes;

  xl = vld1q_s32(buf - 1);
  x = vld1q_s32(buf);
  xr = vld1q_s32(buf + 1);
  fives = vaddq_s32(xl, xr);
  sixes = x;
  fives_plus_sixes = vaddq_s32(fives, sixes);

  return vaddq_s32(
      vaddq_s32(vshlq_n_s32(fives_plus_sixes, 2), fives_plus_sixes), sixes);
}

static INLINE void cross_sum_fast_odd_row_inp16(uint16_t *buf, int32x4_t *a0,
                                                int32x4_t *a1) {
  uint16x8_t xl, x, xr;
  uint16x8_t x0;

  xl = vld1q_u16(buf - 1);
  x = vld1q_u16(buf);
  xr = vld1q_u16(buf + 1);
  xl = vaddq_u16(xl, xr);
  x0 = vshlq_n_u16(xl, 2);
  xl = vaddq_u16(xl, x0);

  x0 = vshlq_n_u16(x, 1);
  x = vshlq_n_u16(x, 2);
  x = vaddq_u16(x, x0);

  *a0 = vreinterpretq_s32_u32(
      vaddq_u32(vmovl_u16(vget_low_u16(xl)), vmovl_u16(vget_low_u16(x))));
  *a1 = vreinterpretq_s32_u32(
      vaddq_u32(vmovl_u16(vget_high_u16(xl)), vmovl_u16(vget_high_u16(x))));
}

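// Final filter for the r == 2 path: each output pixel is
// (cross_sum(a) * src + cross_sum(b)) rounded right by
// SGRPROJ_SGR_BITS + NB_* - SGRPROJ_RST_BITS, where even rows read the
// neighbouring (a, b) rows and odd rows read their own row.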
static void final_filter_fast_internal(uint16_t *A, int32_t *B,
                                       const int buf_stride, int16_t *src,
                                       const int src_stride, int32_t *dst,
                                       const int dst_stride, const int width,
                                       const int height) {
  int16x8_t s0;
  int32_t *B_tmp, *dst_ptr;
  uint16_t *A_tmp;
  int16_t *src_ptr;
  int32x4_t a_res0, a_res1, b_res0, b_res1;
  int w, h, count = 0;
  assert(SGRPROJ_SGR_BITS == 8);
  assert(SGRPROJ_RST_BITS == 4);

  A_tmp = A;
  B_tmp = B;
  src_ptr = src;
  dst_ptr = dst;
  h = height;
  do {
    A_tmp = (A + count * buf_stride);
    B_tmp = (B + count * buf_stride);
    src_ptr = (src + count * src_stride);
    dst_ptr = (dst + count * dst_stride);
    w = width;
    if (!(count & 1)) {
      do {
        s0 = vld1q_s16(src_ptr);
        cross_sum_fast_even_row_inp16(A_tmp, buf_stride, &a_res0, &a_res1);
        a_res0 = vmulq_s32(vmovl_s16(vget_low_s16(s0)), a_res0);
        a_res1 = vmulq_s32(vmovl_s16(vget_high_s16(s0)), a_res1);

        b_res0 = cross_sum_fast_even_row(B_tmp, buf_stride);
        b_res1 = cross_sum_fast_even_row(B_tmp + 4, buf_stride);
        a_res0 = vaddq_s32(a_res0, b_res0);
        a_res1 = vaddq_s32(a_res1, b_res1);

        a_res0 =
            vrshrq_n_s32(a_res0, SGRPROJ_SGR_BITS + NB_EVEN - SGRPROJ_RST_BITS);
        a_res1 =
            vrshrq_n_s32(a_res1, SGRPROJ_SGR_BITS + NB_EVEN - SGRPROJ_RST_BITS);

        vst1q_s32(dst_ptr, a_res0);
        vst1q_s32(dst_ptr + 4, a_res1);

        A_tmp += 8;
        B_tmp += 8;
        src_ptr += 8;
        dst_ptr += 8;
        w -= 8;
      } while (w > 0);
    } else {
      do {
        s0 = vld1q_s16(src_ptr);
        cross_sum_fast_odd_row_inp16(A_tmp, &a_res0, &a_res1);
        a_res0 = vmulq_s32(vmovl_s16(vget_low_s16(s0)), a_res0);
        a_res1 = vmulq_s32(vmovl_s16(vget_high_s16(s0)), a_res1);

        b_res0 = cross_sum_fast_odd_row(B_tmp);
        b_res1 = cross_sum_fast_odd_row(B_tmp + 4);
        a_res0 = vaddq_s32(a_res0, b_res0);
        a_res1 = vaddq_s32(a_res1, b_res1);

        a_res0 =
            vrshrq_n_s32(a_res0, SGRPROJ_SGR_BITS + NB_ODD - SGRPROJ_RST_BITS);
        a_res1 =
            vrshrq_n_s32(a_res1, SGRPROJ_SGR_BITS + NB_ODD - SGRPROJ_RST_BITS);

        vst1q_s32(dst_ptr, a_res0);
        vst1q_s32(dst_ptr + 4, a_res1);

        A_tmp += 8;
        B_tmp += 8;
        src_ptr += 8;
        dst_ptr += 8;
        w -= 8;
      } while (w > 0);
    }
    count++;
    h -= 1;
  } while (h > 0);
}

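// r == 1 counterpart of the above: every output row uses the full 3x3 cross
// sums, so only the NB_EVEN shift applies.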
void final_filter_internal(uint16_t *A, int32_t *B, const int buf_stride,
                           int16_t *src, const int src_stride, int32_t *dst,
                           const int dst_stride, const int width,
                           const int height) {
  int16x8_t s0;
  int32_t *B_tmp, *dst_ptr;
  uint16_t *A_tmp;
  int16_t *src_ptr;
  int32x4_t a_res0, a_res1, b_res0, b_res1;
  int w, h, count = 0;

  assert(SGRPROJ_SGR_BITS == 8);
  assert(SGRPROJ_RST_BITS == 4);
  h = height;

  do {
    A_tmp = (A + count * buf_stride);
    B_tmp = (B + count * buf_stride);
    src_ptr = (src + count * src_stride);
    dst_ptr = (dst + count * dst_stride);
    w = width;
    do {
      s0 = vld1q_s16(src_ptr);
      cross_sum_inp_u16(A_tmp, buf_stride, &a_res0, &a_res1);
      a_res0 = vmulq_s32(vmovl_s16(vget_low_s16(s0)), a_res0);
      a_res1 = vmulq_s32(vmovl_s16(vget_high_s16(s0)), a_res1);

      b_res0 = cross_sum_inp_s32(B_tmp, buf_stride);
      b_res1 = cross_sum_inp_s32(B_tmp + 4, buf_stride);
      a_res0 = vaddq_s32(a_res0, b_res0);
      a_res1 = vaddq_s32(a_res1, b_res1);

      a_res0 =
          vrshrq_n_s32(a_res0, SGRPROJ_SGR_BITS + NB_EVEN - SGRPROJ_RST_BITS);
      a_res1 =
          vrshrq_n_s32(a_res1, SGRPROJ_SGR_BITS + NB_EVEN - SGRPROJ_RST_BITS);
      vst1q_s32(dst_ptr, a_res0);
      vst1q_s32(dst_ptr + 4, a_res1);

      A_tmp += 8;
      B_tmp += 8;
      src_ptr += 8;
      dst_ptr += 8;
      w -= 8;
    } while (w > 0);
    count++;
    h -= 1;
  } while (h > 0);
}

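// One restoration pass with r == 2: 5x5 box sums via boxsum2(), the (a, b)
// calculation on every second row (note the buf_stride * 2 below), then the
// fast final filter.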
static INLINE void restoration_fast_internal(uint16_t *dgd16, int width,
                                             int height, int dgd_stride,
                                             int32_t *dst, int dst_stride,
                                             int bit_depth, int sgr_params_idx,
                                             int radius_idx) {
  const sgr_params_type *const params = &sgr_params[sgr_params_idx];
  const int r = params->r[radius_idx];
  const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ;
  const int height_ext = height + 2 * SGRPROJ_BORDER_VERT;

  const int buf_stride = ((width_ext + 3) & ~3) + 16;
  int32_t A_[RESTORATION_PROC_UNIT_PELS];
  uint16_t A16_[RESTORATION_PROC_UNIT_PELS];
  int32_t B_[RESTORATION_PROC_UNIT_PELS];
  int32_t *square_sum_buf = A_;
  int32_t *sum_buf = B_;
  uint16_t *tmp16_buf = A16_;

  assert(r <= MAX_RADIUS && "Need MAX_RADIUS >= r");
  assert(r <= SGRPROJ_BORDER_VERT - 1 && r <= SGRPROJ_BORDER_HORZ - 1 &&
         "Need SGRPROJ_BORDER_* >= r+1");

  assert(radius_idx == 0);
  assert(r == 2);

  // The input (dgd16) is 16-bit. The first-stage sum of pixels is produced in
  // 16 bits (tmp16_buf) and the final sum of pixels in 32 bits (sum_buf);
  // the sum of squares is kept in a 32-bit buffer (square_sum_buf).
  boxsum2((int16_t *)(dgd16 - dgd_stride * SGRPROJ_BORDER_VERT -
                      SGRPROJ_BORDER_HORZ),
          dgd_stride, (int16_t *)tmp16_buf, sum_buf, square_sum_buf, buf_stride,
          width_ext, height_ext);

  square_sum_buf += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ;
  sum_buf += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ;
  tmp16_buf += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ;

  // Calculation of a, b. The a output is 16-bit (tmp16_buf) and lies in
  // [1, 256] for all bit depths; the b output is kept in a 32-bit buffer.

  if (8 == bit_depth) {
    calc_ab_fast_internal_lbd(
        (square_sum_buf - buf_stride - 1), (tmp16_buf - buf_stride - 1),
        (sum_buf - buf_stride - 1), buf_stride * 2, width + 2, height + 2, r,
        params->s[radius_idx], 2);
  } else {
    calc_ab_fast_internal_hbd(
        (square_sum_buf - buf_stride - 1), (tmp16_buf - buf_stride - 1),
        (sum_buf - buf_stride - 1), buf_stride * 2, width + 2, height + 2,
        bit_depth, r, params->s[radius_idx], 2);
  }
  final_filter_fast_internal(tmp16_buf, sum_buf, buf_stride, (int16_t *)dgd16,
                             dgd_stride, dst, dst_stride, width, height);
}

// One restoration pass with r == 1: 3x3 box sums via boxsum1(), then the
// (a, b) calculation and the full final filter on every row.
static INLINE void restoration_internal(uint16_t *dgd16, int width, int height,
                                        int dgd_stride, int32_t *dst,
                                        int dst_stride, int bit_depth,
                                        int sgr_params_idx, int radius_idx) {
  const sgr_params_type *const params = &sgr_params[sgr_params_idx];
  const int r = params->r[radius_idx];
  const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ;
  const int height_ext = height + 2 * SGRPROJ_BORDER_VERT;

  int buf_stride = ((width_ext + 3) & ~3) + 16;
  int32_t A_[RESTORATION_PROC_UNIT_PELS];
  uint16_t A16_[RESTORATION_PROC_UNIT_PELS];
  uint16_t B16_[RESTORATION_PROC_UNIT_PELS];
  int32_t B_[RESTORATION_PROC_UNIT_PELS];
  int32_t *square_sum_buf = A_;
  uint16_t *sum_buf = B16_;
  uint16_t *A16 = A16_;
  int32_t *B = B_;

  assert(r <= MAX_RADIUS && "Need MAX_RADIUS >= r");
  assert(r <= SGRPROJ_BORDER_VERT - 1 && r <= SGRPROJ_BORDER_HORZ - 1 &&
         "Need SGRPROJ_BORDER_* >= r+1");

  assert(radius_idx == 1);
  assert(r == 1);

  // The input (dgd16) is 16-bit. The sum of pixels is produced in 16 bits
  // (sum_buf) and the sum of squares in a 32-bit buffer (square_sum_buf).
  boxsum1((int16_t *)(dgd16 - dgd_stride * SGRPROJ_BORDER_VERT -
                      SGRPROJ_BORDER_HORZ),
          dgd_stride, sum_buf, square_sum_buf, buf_stride, width_ext,
          height_ext);

  square_sum_buf += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ;
  B += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ;
  A16 += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ;
  sum_buf += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ;

  // Calculation of a, b. The a output is 16-bit (A16) and lies in [1, 256]
  // for all bit depths; the b output is kept in a 32-bit buffer (B).
  if (8 == bit_depth) {
    calc_ab_internal_lbd((square_sum_buf - buf_stride - 1),
                         (A16 - buf_stride - 1), (sum_buf - buf_stride - 1),
                         (B - buf_stride - 1), buf_stride, width + 2,
                         height + 2, r, params->s[radius_idx], 1);
  } else {
    calc_ab_internal_hbd((square_sum_buf - buf_stride - 1),
                         (A16 - buf_stride - 1), (sum_buf - buf_stride - 1),
                         (B - buf_stride - 1), buf_stride, width + 2,
                         height + 2, bit_depth, r, params->s[radius_idx], 1);
  }
  final_filter_internal(A16, B, buf_stride, (int16_t *)dgd16, dgd_stride, dst,
                        dst_stride, width, height);
}

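// Widens the 8-bit source (including the SGRPROJ_BORDER_* surround) into the
// 16-bit working buffer: full 8x4 blocks go through NEON, the right and
// bottom remainders through scalar copies. src_convert_hbd_copy() below does
// the same for sources that are already 16-bit.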
static INLINE void src_convert_u8_to_u16(const uint8_t *src,
                                         const int src_stride, uint16_t *dst,
                                         const int dst_stride, const int width,
                                         const int height) {
  const uint8_t *src_ptr;
  uint16_t *dst_ptr;
  int h, w, count = 0;

  uint8x8_t t1, t2, t3, t4;
  uint16x8_t s1, s2, s3, s4;
  h = height;
  do {
    src_ptr = src + (count << 2) * src_stride;
    dst_ptr = dst + (count << 2) * dst_stride;
    w = width;
    if (w >= 7) {
      do {
        load_u8_8x4(src_ptr, src_stride, &t1, &t2, &t3, &t4);
        s1 = vmovl_u8(t1);
        s2 = vmovl_u8(t2);
        s3 = vmovl_u8(t3);
        s4 = vmovl_u8(t4);
        store_u16_8x4(dst_ptr, dst_stride, s1, s2, s3, s4);

        src_ptr += 8;
        dst_ptr += 8;
        w -= 8;
      } while (w > 7);
    }

    for (int y = 0; y < w; y++) {
      dst_ptr[y] = src_ptr[y];
      dst_ptr[y + 1 * dst_stride] = src_ptr[y + 1 * src_stride];
      dst_ptr[y + 2 * dst_stride] = src_ptr[y + 2 * src_stride];
      dst_ptr[y + 3 * dst_stride] = src_ptr[y + 3 * src_stride];
    }
    count++;
    h -= 4;
  } while (h > 3);

  src_ptr = src + (count << 2) * src_stride;
  dst_ptr = dst + (count << 2) * dst_stride;
  for (int x = 0; x < h; x++) {
    for (int y = 0; y < width; y++) {
      dst_ptr[y + x * dst_stride] = src_ptr[y + x * src_stride];
    }
  }
}

static INLINE void src_convert_hbd_copy(const uint16_t *src, int src_stride,
                                        uint16_t *dst, const int dst_stride,
                                        int width, int height) {
  const uint16_t *src_ptr;
  uint16_t *dst_ptr;
  int h, w, count = 0;
  uint16x8_t s1, s2, s3, s4;

  h = height;
  do {
    src_ptr = src + (count << 2) * src_stride;
    dst_ptr = dst + (count << 2) * dst_stride;
    w = width;
    do {
      load_u16_8x4(src_ptr, src_stride, &s1, &s2, &s3, &s4);
      store_u16_8x4(dst_ptr, dst_stride, s1, s2, s3, s4);
      src_ptr += 8;
      dst_ptr += 8;
      w -= 8;
    } while (w > 7);

    for (int y = 0; y < w; y++) {
      dst_ptr[y] = src_ptr[y];
      dst_ptr[y + 1 * dst_stride] = src_ptr[y + 1 * src_stride];
      dst_ptr[y + 2 * dst_stride] = src_ptr[y + 2 * src_stride];
      dst_ptr[y + 3 * dst_stride] = src_ptr[y + 3 * src_stride];
    }
    count++;
    h -= 4;
  } while (h > 3);

  src_ptr = src + (count << 2) * src_stride;
  dst_ptr = dst + (count << 2) * dst_stride;

  for (int x = 0; x < h; x++) {
    memcpy((dst_ptr + x * dst_stride), (src_ptr + x * src_stride),
           sizeof(uint16_t) * width);
  }
}

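// Self-guided restoration for one stripe: the source (with its
// SGRPROJ_BORDER_* surround) is first widened/copied into a 16-bit working
// buffer, then the r[0] pass fills flt0 via restoration_fast_internal() and
// the r[1] pass fills flt1 via restoration_internal().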
int av1_selfguided_restoration_neon(const uint8_t *dat8, int width, int height,
                                    int stride, int32_t *flt0, int32_t *flt1,
                                    int flt_stride, int sgr_params_idx,
                                    int bit_depth, int highbd) {
  const sgr_params_type *const params = &sgr_params[sgr_params_idx];
  assert(!(params->r[0] == 0 && params->r[1] == 0));

  uint16_t dgd16_[RESTORATION_PROC_UNIT_PELS];
  const int dgd16_stride = width + 2 * SGRPROJ_BORDER_HORZ;
  uint16_t *dgd16 =
      dgd16_ + dgd16_stride * SGRPROJ_BORDER_VERT + SGRPROJ_BORDER_HORZ;
  const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ;
  const int height_ext = height + 2 * SGRPROJ_BORDER_VERT;
  const int dgd_stride = stride;

  if (highbd) {
    const uint16_t *dgd16_tmp = CONVERT_TO_SHORTPTR(dat8);
    src_convert_hbd_copy(
        dgd16_tmp - SGRPROJ_BORDER_VERT * dgd_stride - SGRPROJ_BORDER_HORZ,
        dgd_stride,
        dgd16 - SGRPROJ_BORDER_VERT * dgd16_stride - SGRPROJ_BORDER_HORZ,
        dgd16_stride, width_ext, height_ext);
  } else {
    src_convert_u8_to_u16(
        dat8 - SGRPROJ_BORDER_VERT * dgd_stride - SGRPROJ_BORDER_HORZ,
        dgd_stride,
        dgd16 - SGRPROJ_BORDER_VERT * dgd16_stride - SGRPROJ_BORDER_HORZ,
        dgd16_stride, width_ext, height_ext);
  }

  if (params->r[0] > 0)
    restoration_fast_internal(dgd16, width, height, dgd16_stride, flt0,
                              flt_stride, bit_depth, sgr_params_idx, 0);
  if (params->r[1] > 0)
    restoration_internal(dgd16, width, height, dgd16_stride, flt1, flt_stride,
                         bit_depth, sgr_params_idx, 1);
  return 0;
}

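// Runs both self-guided filters and blends the results with the source:
// v = (u << SGRPROJ_PRJ_BITS) + xq[0] * (flt0 - u) + xq[1] * (flt1 - u)
// with u = src << SGRPROJ_RST_BITS, then rounds v back down by
// SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS and clamps to the pixel range before
// storing 8- or 16-bit output.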
void apply_selfguided_restoration_neon(const uint8_t *dat8, int width,
                                       int height, int stride, int eps,
                                       const int *xqd, uint8_t *dst8,
                                       int dst_stride, int32_t *tmpbuf,
                                       int bit_depth, int highbd) {
  int32_t *flt0 = tmpbuf;
  int32_t *flt1 = flt0 + RESTORATION_UNITPELS_MAX;
  assert(width * height <= RESTORATION_UNITPELS_MAX);
  uint16_t dgd16_[RESTORATION_PROC_UNIT_PELS];
  const int dgd16_stride = width + 2 * SGRPROJ_BORDER_HORZ;
  uint16_t *dgd16 =
      dgd16_ + dgd16_stride * SGRPROJ_BORDER_VERT + SGRPROJ_BORDER_HORZ;
  const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ;
  const int height_ext = height + 2 * SGRPROJ_BORDER_VERT;
  const int dgd_stride = stride;
  const sgr_params_type *const params = &sgr_params[eps];
  int xq[2];

  assert(!(params->r[0] == 0 && params->r[1] == 0));

  if (highbd) {
    const uint16_t *dgd16_tmp = CONVERT_TO_SHORTPTR(dat8);
    src_convert_hbd_copy(
        dgd16_tmp - SGRPROJ_BORDER_VERT * dgd_stride - SGRPROJ_BORDER_HORZ,
        dgd_stride,
        dgd16 - SGRPROJ_BORDER_VERT * dgd16_stride - SGRPROJ_BORDER_HORZ,
        dgd16_stride, width_ext, height_ext);
  } else {
    src_convert_u8_to_u16(
        dat8 - SGRPROJ_BORDER_VERT * dgd_stride - SGRPROJ_BORDER_HORZ,
        dgd_stride,
        dgd16 - SGRPROJ_BORDER_VERT * dgd16_stride - SGRPROJ_BORDER_HORZ,
        dgd16_stride, width_ext, height_ext);
  }

  if (params->r[0] > 0)
    restoration_fast_internal(dgd16, width, height, dgd16_stride, flt0, width,
                              bit_depth, eps, 0);
  if (params->r[1] > 0)
    restoration_internal(dgd16, width, height, dgd16_stride, flt1, width,
                         bit_depth, eps, 1);

  decode_xq(xqd, xq, params);

  {
    int16_t *src_ptr;
    uint8_t *dst_ptr;
    uint16_t *dst16_ptr;
    int16x4_t d0, d4;
    int16x8_t r0, s0;
    uint16x8_t r4;
    int32x4_t u0, u4, v0, v4, f00, f10;
    uint8x8_t t0;
    int count = 0, w = width, h = height, rc = 0;

    const int32x4_t xq0_vec = vdupq_n_s32(xq[0]);
    const int32x4_t xq1_vec = vdupq_n_s32(xq[1]);
    const int16x8_t zero = vdupq_n_s16(0);
    const uint16x8_t max = vdupq_n_u16((1 << bit_depth) - 1);
    uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst8);
    dst_ptr = dst8;
    src_ptr = (int16_t *)dgd16;
    do {
      w = width;
      count = 0;
      dst_ptr = dst8 + rc * dst_stride;
      dst16_ptr = dst16 + rc * dst_stride;
      do {
        s0 = vld1q_s16(src_ptr + count);

        u0 = vshll_n_s16(vget_low_s16(s0), SGRPROJ_RST_BITS);
        u4 = vshll_n_s16(vget_high_s16(s0), SGRPROJ_RST_BITS);

        v0 = vshlq_n_s32(u0, SGRPROJ_PRJ_BITS);
        v4 = vshlq_n_s32(u4, SGRPROJ_PRJ_BITS);

        if (params->r[0] > 0) {
          f00 = vld1q_s32(flt0 + count);
          f10 = vld1q_s32(flt0 + count + 4);

          f00 = vsubq_s32(f00, u0);
          f10 = vsubq_s32(f10, u4);

          v0 = vmlaq_s32(v0, xq0_vec, f00);
          v4 = vmlaq_s32(v4, xq0_vec, f10);
        }

        if (params->r[1] > 0) {
          f00 = vld1q_s32(flt1 + count);
          f10 = vld1q_s32(flt1 + count + 4);

          f00 = vsubq_s32(f00, u0);
          f10 = vsubq_s32(f10, u4);

          v0 = vmlaq_s32(v0, xq1_vec, f00);
          v4 = vmlaq_s32(v4, xq1_vec, f10);
        }

        d0 = vqrshrn_n_s32(v0, SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);
        d4 = vqrshrn_n_s32(v4, SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);

        r0 = vcombine_s16(d0, d4);

        r4 = vreinterpretq_u16_s16(vmaxq_s16(r0, zero));

        if (highbd) {
          r4 = vminq_u16(r4, max);
          vst1q_u16(dst16_ptr, r4);
        } else {
          t0 = vqmovn_u16(r4);
          vst1_u8(dst_ptr, t0);
        }
        w -= 8;
        count += 8;
        dst_ptr += 8;
        dst16_ptr += 8;
      } while (w > 0);

      src_ptr += dgd16_stride;
      flt1 += width;
      flt0 += width;
      rc++;
      h--;
    } while (h > 0);
  }
}