/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

#include "./vpx_config.h"
#include "./vp8_rtcd.h"
#include "vpx_ports/arm.h"

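/*
 * Two variants of the narrow load/store helpers are provided.  The paths
 * guarded by VPX_INCOMPATIBLE_GCC avoid the multi-vector lane intrinsics
 * (vld4_lane_u8 / vst2_lane_u8), which some GCC versions mishandle, and
 * build the same access patterns from plain loads/stores plus vtrn
 * transposes.
 */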
#ifdef VPX_INCOMPATIBLE_GCC
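/* Store two filtered columns (p0/q0) over eight rows.  vtrn_u8 pairs the
 * bytes row by row so each row can be written with a single 16-bit store. */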
static INLINE void write_2x4(unsigned char *dst, int pitch,
                             const uint8x8x2_t result) {
  /*
   * uint8x8x2_t result
  00 01 02 03 | 04 05 06 07
  10 11 12 13 | 14 15 16 17
  ---
  * after vtrn_u8
  00 10 02 12 | 04 14 06 16
  01 11 03 13 | 05 15 07 17
  */
  const uint8x8x2_t r01_u8 = vtrn_u8(result.val[0], result.val[1]);
  const uint16x4_t x_0_4 = vreinterpret_u16_u8(r01_u8.val[0]);
  const uint16x4_t x_1_5 = vreinterpret_u16_u8(r01_u8.val[1]);
  vst1_lane_u16((uint16_t *)dst, x_0_4, 0);
  dst += pitch;
  vst1_lane_u16((uint16_t *)dst, x_1_5, 0);
  dst += pitch;
  vst1_lane_u16((uint16_t *)dst, x_0_4, 1);
  dst += pitch;
  vst1_lane_u16((uint16_t *)dst, x_1_5, 1);
  dst += pitch;
  vst1_lane_u16((uint16_t *)dst, x_0_4, 2);
  dst += pitch;
  vst1_lane_u16((uint16_t *)dst, x_1_5, 2);
  dst += pitch;
  vst1_lane_u16((uint16_t *)dst, x_0_4, 3);
  dst += pitch;
  vst1_lane_u16((uint16_t *)dst, x_1_5, 3);
}

static INLINE void write_2x8(unsigned char *dst, int pitch,
                             const uint8x8x2_t result,
                             const uint8x8x2_t result2) {
  write_2x4(dst, pitch, result);
  dst += pitch * 8;
  write_2x4(dst, pitch, result2);
}
#else
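/* With working lane intrinsics, vst2_lane_u8 interleaves the two columns and
 * writes both bytes of each row directly. */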
static INLINE void write_2x8(unsigned char *dst, int pitch,
                             const uint8x8x2_t result,
                             const uint8x8x2_t result2) {
  vst2_lane_u8(dst, result, 0);
  dst += pitch;
  vst2_lane_u8(dst, result, 1);
  dst += pitch;
  vst2_lane_u8(dst, result, 2);
  dst += pitch;
  vst2_lane_u8(dst, result, 3);
  dst += pitch;
  vst2_lane_u8(dst, result, 4);
  dst += pitch;
  vst2_lane_u8(dst, result, 5);
  dst += pitch;
  vst2_lane_u8(dst, result, 6);
  dst += pitch;
  vst2_lane_u8(dst, result, 7);
  dst += pitch;

  vst2_lane_u8(dst, result2, 0);
  dst += pitch;
  vst2_lane_u8(dst, result2, 1);
  dst += pitch;
  vst2_lane_u8(dst, result2, 2);
  dst += pitch;
  vst2_lane_u8(dst, result2, 3);
  dst += pitch;
  vst2_lane_u8(dst, result2, 4);
  dst += pitch;
  vst2_lane_u8(dst, result2, 5);
  dst += pitch;
  vst2_lane_u8(dst, result2, 6);
  dst += pitch;
  vst2_lane_u8(dst, result2, 7);
}
#endif  // VPX_INCOMPATIBLE_GCC

#ifdef VPX_INCOMPATIBLE_GCC
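/* Load a 4-wide, 8-tall block starting at src and return it as four column
 * vectors (x.val[i] holds column i for all eight rows).  Each row load reads
 * eight bytes, of which only the first four are used; the columns are then
 * separated with a 32/16/8-bit transpose. */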
static INLINE uint8x8x4_t read_4x8(unsigned char *src, int pitch) {
  uint8x8x4_t x;
  const uint8x8_t a = vld1_u8(src);
  const uint8x8_t b = vld1_u8(src + pitch * 1);
  const uint8x8_t c = vld1_u8(src + pitch * 2);
  const uint8x8_t d = vld1_u8(src + pitch * 3);
  const uint8x8_t e = vld1_u8(src + pitch * 4);
  const uint8x8_t f = vld1_u8(src + pitch * 5);
  const uint8x8_t g = vld1_u8(src + pitch * 6);
  const uint8x8_t h = vld1_u8(src + pitch * 7);
  const uint32x2x2_t r04_u32 =
      vtrn_u32(vreinterpret_u32_u8(a), vreinterpret_u32_u8(e));
  const uint32x2x2_t r15_u32 =
      vtrn_u32(vreinterpret_u32_u8(b), vreinterpret_u32_u8(f));
  const uint32x2x2_t r26_u32 =
      vtrn_u32(vreinterpret_u32_u8(c), vreinterpret_u32_u8(g));
  const uint32x2x2_t r37_u32 =
      vtrn_u32(vreinterpret_u32_u8(d), vreinterpret_u32_u8(h));
  const uint16x4x2_t r02_u16 = vtrn_u16(vreinterpret_u16_u32(r04_u32.val[0]),
                                        vreinterpret_u16_u32(r26_u32.val[0]));
  const uint16x4x2_t r13_u16 = vtrn_u16(vreinterpret_u16_u32(r15_u32.val[0]),
                                        vreinterpret_u16_u32(r37_u32.val[0]));
  const uint8x8x2_t r01_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[0]),
                                     vreinterpret_u8_u16(r13_u16.val[0]));
  const uint8x8x2_t r23_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[1]),
                                     vreinterpret_u8_u16(r13_u16.val[1]));
  /*
   * after vtrn_u32
  00 01 02 03 | 40 41 42 43
  10 11 12 13 | 50 51 52 53
  20 21 22 23 | 60 61 62 63
  30 31 32 33 | 70 71 72 73
  ---
  * after vtrn_u16
  00 01 20 21 | 40 41 60 61
  02 03 22 23 | 42 43 62 63
  10 11 30 31 | 50 51 70 71
  12 13 32 33 | 52 53 72 73

  00 01 20 21 | 40 41 60 61
  10 11 30 31 | 50 51 70 71
  02 03 22 23 | 42 43 62 63
  12 13 32 33 | 52 53 72 73
  ---
  * after vtrn_u8
  00 10 20 30 | 40 50 60 70
  01 11 21 31 | 41 51 61 71
  02 12 22 32 | 42 52 62 72
  03 13 23 33 | 43 53 63 73
  */
  x.val[0] = r01_u8.val[0];
  x.val[1] = r01_u8.val[1];
  x.val[2] = r23_u8.val[0];
  x.val[3] = r23_u8.val[1];

  return x;
}
#else
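/* vld4_lane_u8 de-interleaves the four bytes of each row directly into the
 * four column vectors, one row (lane) at a time. */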
static INLINE uint8x8x4_t read_4x8(unsigned char *src, int pitch) {
  uint8x8x4_t x;
  x.val[0] = x.val[1] = x.val[2] = x.val[3] = vdup_n_u8(0);
  x = vld4_lane_u8(src, x, 0);
  src += pitch;
  x = vld4_lane_u8(src, x, 1);
  src += pitch;
  x = vld4_lane_u8(src, x, 2);
  src += pitch;
  x = vld4_lane_u8(src, x, 3);
  src += pitch;
  x = vld4_lane_u8(src, x, 4);
  src += pitch;
  x = vld4_lane_u8(src, x, 5);
  src += pitch;
  x = vld4_lane_u8(src, x, 6);
  src += pitch;
  x = vld4_lane_u8(src, x, 7);
  return x;
}
#endif  // VPX_INCOMPATIBLE_GCC

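/*
 * VP8 "simple" loop filter across a vertical edge, 16 rows at a time.
 * With pixels biased to the signed range, the per-pixel scalar operation is:
 *   mask    = (2 * |p0 - q0| + |p1 - q1| / 2) <= blimit
 *   F       = clamp(clamp(p1 - q1) + 3 * (q0 - p0)) & mask
 *   Filter1 = clamp(F + 4) >> 3;  q0 -= Filter1
 *   Filter2 = clamp(F + 3) >> 3;  p0 += Filter2
 */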
static INLINE void vp8_loop_filter_simple_vertical_edge_neon(
    unsigned char *s, int p, const unsigned char *blimit) {
  unsigned char *src1;
  uint8x16_t qblimit, q0u8;
  uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8, q11u8, q12u8, q14u8, q15u8;
  int16x8_t q2s16, q13s16, q11s16;
  int8x8_t d28s8, d29s8;
  int8x16_t q2s8, q3s8, q10s8, q11s8, q14s8;
  uint8x8x4_t d0u8x4;  // d6, d7, d8, d9
  uint8x8x4_t d1u8x4;  // d10, d11, d12, d13
  uint8x8x2_t d2u8x2;  // d12, d13
  uint8x8x2_t d3u8x2;  // d14, d15

  qblimit = vdupq_n_u8(*blimit);

  src1 = s - 2;
  d0u8x4 = read_4x8(src1, p);
  src1 += p * 8;
  d1u8x4 = read_4x8(src1, p);

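  /* Regroup the two transposed 8-row halves into 16-row vectors: q3 = p1,
   * q5 = p0, q4 = q0, q6 = q1 (the load started two pixels left of the
   * edge). */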
  q3u8 = vcombine_u8(d0u8x4.val[0], d1u8x4.val[0]);  // d6 d10
  q4u8 = vcombine_u8(d0u8x4.val[2], d1u8x4.val[2]);  // d8 d12
  q5u8 = vcombine_u8(d0u8x4.val[1], d1u8x4.val[1]);  // d7 d11
  q6u8 = vcombine_u8(d0u8x4.val[3], d1u8x4.val[3]);  // d9 d13

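  /* Filter mask: 2 * |p0 - q0| + |p1 - q1| / 2 <= blimit (saturating). */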
  q15u8 = vabdq_u8(q5u8, q4u8);
  q14u8 = vabdq_u8(q3u8, q6u8);

  q15u8 = vqaddq_u8(q15u8, q15u8);
  q14u8 = vshrq_n_u8(q14u8, 1);
  q0u8 = vdupq_n_u8(0x80);
  q11s16 = vdupq_n_s16(3);
  q15u8 = vqaddq_u8(q15u8, q14u8);

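  /* Bias pixels to the signed range (x ^ 0x80 == x - 128). */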
  q3u8 = veorq_u8(q3u8, q0u8);
  q4u8 = veorq_u8(q4u8, q0u8);
  q5u8 = veorq_u8(q5u8, q0u8);
  q6u8 = veorq_u8(q6u8, q0u8);

  q15u8 = vcgeq_u8(qblimit, q15u8);

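  /* F = clamp(3 * (q0 - p0) + clamp(p1 - q1)): the difference is widened to
   * 16 bits for the multiply, then saturated back down to 8 bits. */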
  q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q4u8)),
                   vget_low_s8(vreinterpretq_s8_u8(q5u8)));
  q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q4u8)),
                    vget_high_s8(vreinterpretq_s8_u8(q5u8)));

  q14s8 = vqsubq_s8(vreinterpretq_s8_u8(q3u8), vreinterpretq_s8_u8(q6u8));

  q2s16 = vmulq_s16(q2s16, q11s16);
  q13s16 = vmulq_s16(q13s16, q11s16);

  q11u8 = vdupq_n_u8(3);
  q12u8 = vdupq_n_u8(4);

  q2s16 = vaddw_s8(q2s16, vget_low_s8(q14s8));
  q13s16 = vaddw_s8(q13s16, vget_high_s8(q14s8));

  d28s8 = vqmovn_s16(q2s16);
  d29s8 = vqmovn_s16(q13s16);
  q14s8 = vcombine_s8(d28s8, d29s8);

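  /* Keep the filter value only where the mask passed. */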
  q14s8 = vandq_s8(q14s8, vreinterpretq_s8_u8(q15u8));

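  /* Filter2 = clamp(F + 3) >> 3 is added to p0;
   * Filter1 = clamp(F + 4) >> 3 is subtracted from q0. */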
  q2s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q11u8));
  q3s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q12u8));
  q2s8 = vshrq_n_s8(q2s8, 3);
  q14s8 = vshrq_n_s8(q3s8, 3);

  q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q5u8), q2s8);
  q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q4u8), q14s8);

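  /* Convert back to the unsigned pixel range. */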
  q6u8 = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8);
  q7u8 = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8);

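  /* Pair the new p0/q0 values and store two bytes per row, starting at
   * s - 1 (the p0 column). */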
  d2u8x2.val[0] = vget_low_u8(q6u8);   // d12
  d2u8x2.val[1] = vget_low_u8(q7u8);   // d14
  d3u8x2.val[0] = vget_high_u8(q6u8);  // d13
  d3u8x2.val[1] = vget_high_u8(q7u8);  // d15

  src1 = s - 1;
  write_2x8(src1, p, d2u8x2, d3u8x2);
}

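/* Filter the three vertical block edges inside a macroblock (4, 8 and 12
 * pixels in from its left edge). */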
void vp8_loop_filter_bvs_neon(unsigned char *y_ptr, int y_stride,
                              const unsigned char *blimit) {
  y_ptr += 4;
  vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
  y_ptr += 4;
  vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
  y_ptr += 4;
  vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
  return;
}

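/* Filter the vertical edge on the left side of a macroblock. */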
void vp8_loop_filter_mbvs_neon(unsigned char *y_ptr, int y_stride,
                               const unsigned char *blimit) {
  vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
  return;
}