1 /*
2 * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include <arm_neon.h>
12 #include "./vpx_config.h"
13 #include "vpx_ports/arm.h"
14
15 #ifdef VPX_INCOMPATIBLE_GCC
/* Store eight 2-pixel rows at dst (one row per pitch) from two
 * interleaved source registers.
 *
 *   result.val[0]: 00 01 02 03 | 04 05 06 07
 *   result.val[1]: 10 11 12 13 | 14 15 16 17
 *
 * After vtrn_u8 each 16-bit lane holds one output row's byte pair:
 *   t.val[0]: 00 10 02 12 | 04 14 06 16   (rows 0, 2, 4, 6)
 *   t.val[1]: 01 11 03 13 | 05 15 07 17   (rows 1, 3, 5, 7)
 *
 * The lane index of vst1_lane_u16 must be a compile-time constant, so
 * the eight stores are written out explicitly.
 */
static INLINE void write_2x4(unsigned char *dst, int pitch,
                             const uint8x8x2_t result) {
    const uint8x8x2_t t = vtrn_u8(result.val[0], result.val[1]);
    const uint16x4_t rows_even = vreinterpret_u16_u8(t.val[0]);
    const uint16x4_t rows_odd = vreinterpret_u16_u8(t.val[1]);
    vst1_lane_u16((uint16_t *)(dst + 0 * pitch), rows_even, 0);
    vst1_lane_u16((uint16_t *)(dst + 1 * pitch), rows_odd, 0);
    vst1_lane_u16((uint16_t *)(dst + 2 * pitch), rows_even, 1);
    vst1_lane_u16((uint16_t *)(dst + 3 * pitch), rows_odd, 1);
    vst1_lane_u16((uint16_t *)(dst + 4 * pitch), rows_even, 2);
    vst1_lane_u16((uint16_t *)(dst + 5 * pitch), rows_odd, 2);
    vst1_lane_u16((uint16_t *)(dst + 6 * pitch), rows_even, 3);
    vst1_lane_u16((uint16_t *)(dst + 7 * pitch), rows_odd, 3);
}
47
/* Store sixteen 2-pixel rows: the first eight from |result| starting at
 * dst, the next eight from |result2| starting eight rows further down. */
static INLINE void write_2x8(unsigned char *dst, int pitch,
                             const uint8x8x2_t result,
                             const uint8x8x2_t result2) {
    write_2x4(dst, pitch, result);
    write_2x4(dst + pitch * 8, pitch, result2);
}
55 #else
/* Store sixteen 2-pixel rows (one per pitch): eight from |result|, then
 * eight from |result2|.  vst2_lane_u8 interleaves val[0]/val[1] and
 * writes one byte pair per call; the lane index must be a compile-time
 * constant, so the stores are unrolled explicitly. */
static INLINE void write_2x8(unsigned char *dst, int pitch,
                             const uint8x8x2_t result,
                             const uint8x8x2_t result2) {
    vst2_lane_u8(dst + 0 * pitch, result, 0);
    vst2_lane_u8(dst + 1 * pitch, result, 1);
    vst2_lane_u8(dst + 2 * pitch, result, 2);
    vst2_lane_u8(dst + 3 * pitch, result, 3);
    vst2_lane_u8(dst + 4 * pitch, result, 4);
    vst2_lane_u8(dst + 5 * pitch, result, 5);
    vst2_lane_u8(dst + 6 * pitch, result, 6);
    vst2_lane_u8(dst + 7 * pitch, result, 7);

    dst += pitch * 8;

    vst2_lane_u8(dst + 0 * pitch, result2, 0);
    vst2_lane_u8(dst + 1 * pitch, result2, 1);
    vst2_lane_u8(dst + 2 * pitch, result2, 2);
    vst2_lane_u8(dst + 3 * pitch, result2, 3);
    vst2_lane_u8(dst + 4 * pitch, result2, 4);
    vst2_lane_u8(dst + 5 * pitch, result2, 5);
    vst2_lane_u8(dst + 6 * pitch, result2, 6);
    vst2_lane_u8(dst + 7 * pitch, result2, 7);
}
92 #endif // VPX_INCOMPATIBLE_GCC
93
94
95 #ifdef VPX_INCOMPATIBLE_GCC
/* Load the first 4 bytes of 8 consecutive rows and return them
 * transposed: x.val[c] holds column c (8 bytes, one per row).
 *
 * NOTE(review): each vld1_u8 loads 8 bytes per row although only the
 * low 4 survive the transpose (only the .val[0] halves of the u32
 * transposes are consumed), so the function over-reads 4 bytes past
 * column 3 on every row — presumably safe within a frame buffer;
 * confirm against callers.
 *
 * The transpose is done in three stages (32-bit, 16-bit, then 8-bit
 * element transposes); the diagram after the code tracks the layout.
 */
static INLINE
uint8x8x4_t read_4x8(unsigned char *src, int pitch) {
    uint8x8x4_t x;
    const uint8x8_t a = vld1_u8(src);
    const uint8x8_t b = vld1_u8(src + pitch * 1);
    const uint8x8_t c = vld1_u8(src + pitch * 2);
    const uint8x8_t d = vld1_u8(src + pitch * 3);
    const uint8x8_t e = vld1_u8(src + pitch * 4);
    const uint8x8_t f = vld1_u8(src + pitch * 5);
    const uint8x8_t g = vld1_u8(src + pitch * 6);
    const uint8x8_t h = vld1_u8(src + pitch * 7);
    const uint32x2x2_t r04_u32 = vtrn_u32(vreinterpret_u32_u8(a),
                                          vreinterpret_u32_u8(e));
    const uint32x2x2_t r15_u32 = vtrn_u32(vreinterpret_u32_u8(b),
                                          vreinterpret_u32_u8(f));
    const uint32x2x2_t r26_u32 = vtrn_u32(vreinterpret_u32_u8(c),
                                          vreinterpret_u32_u8(g));
    const uint32x2x2_t r37_u32 = vtrn_u32(vreinterpret_u32_u8(d),
                                          vreinterpret_u32_u8(h));
    const uint16x4x2_t r02_u16 = vtrn_u16(vreinterpret_u16_u32(r04_u32.val[0]),
                                          vreinterpret_u16_u32(r26_u32.val[0]));
    const uint16x4x2_t r13_u16 = vtrn_u16(vreinterpret_u16_u32(r15_u32.val[0]),
                                          vreinterpret_u16_u32(r37_u32.val[0]));
    const uint8x8x2_t r01_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[0]),
                                       vreinterpret_u8_u16(r13_u16.val[0]));
    const uint8x8x2_t r23_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[1]),
                                       vreinterpret_u8_u16(r13_u16.val[1]));
    /*
     * after vtrn_u32
     00 01 02 03 | 40 41 42 43
     10 11 12 13 | 50 51 52 53
     20 21 22 23 | 60 61 62 63
     30 31 32 33 | 70 71 72 73
     ---
     * after vtrn_u16
     00 01 20 21 | 40 41 60 61
     02 03 22 23 | 42 43 62 63
     10 11 30 31 | 50 51 70 71
     12 13 32 33 | 52 53 72 73

     00 01 20 21 | 40 41 60 61
     10 11 30 31 | 50 51 70 71
     02 03 22 23 | 42 43 62 63
     12 13 32 33 | 52 53 72 73
     ---
     * after vtrn_u8
     00 10 20 30 | 40 50 60 70
     01 11 21 31 | 41 51 61 71
     02 12 22 32 | 42 52 62 72
     03 13 23 33 | 43 53 63 73
     */
    x.val[0] = r01_u8.val[0];
    x.val[1] = r01_u8.val[1];
    x.val[2] = r23_u8.val[0];
    x.val[3] = r23_u8.val[1];

    return x;
}
154 #else
/* Load the first 4 bytes of 8 consecutive rows and return them
 * transposed: cols.val[c] holds column c (8 bytes, one per row).
 * vld4_lane_u8 deinterleaves one 4-byte row into lane |i| of each of
 * the four output vectors; the lane index must be a compile-time
 * constant, hence the unrolled loads. */
static INLINE
uint8x8x4_t read_4x8(unsigned char *src, int pitch) {
    uint8x8x4_t cols;
    cols.val[0] = cols.val[1] = cols.val[2] = cols.val[3] = vdup_n_u8(0);
    cols = vld4_lane_u8(src + 0 * pitch, cols, 0);
    cols = vld4_lane_u8(src + 1 * pitch, cols, 1);
    cols = vld4_lane_u8(src + 2 * pitch, cols, 2);
    cols = vld4_lane_u8(src + 3 * pitch, cols, 3);
    cols = vld4_lane_u8(src + 4 * pitch, cols, 4);
    cols = vld4_lane_u8(src + 5 * pitch, cols, 5);
    cols = vld4_lane_u8(src + 6 * pitch, cols, 6);
    cols = vld4_lane_u8(src + 7 * pitch, cols, 7);
    return cols;
}
176 #endif // VPX_INCOMPATIBLE_GCC
177
/* VP8 "simple" loop filter applied across a vertical edge, 16 rows at
 * a time.  The two pixels on each side of the edge (p1 p0 | q0 q1,
 * where |s| points at q0) are loaded with a transpose, p0 and q0 are
 * adjusted, and the two modified columns are written back transposed.
 *
 *   s       pointer to the first row's pixel just right of the edge (q0)
 *   p       row stride in bytes
 *   blimit  edge-limit threshold byte, broadcast to all lanes
 */
static INLINE void vp8_loop_filter_simple_vertical_edge_neon(
        unsigned char *s,
        int p,
        const unsigned char *blimit) {
    unsigned char *src1;
    uint8x16_t qblimit, q0u8;
    uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8, q11u8, q12u8, q14u8, q15u8;
    int16x8_t q2s16, q13s16, q11s16;
    int8x8_t d28s8, d29s8;
    int8x16_t q2s8, q3s8, q10s8, q11s8, q14s8;
    uint8x8x4_t d0u8x4;  // d6, d7, d8, d9
    uint8x8x4_t d1u8x4;  // d10, d11, d12, d13
    uint8x8x2_t d2u8x2;  // d12, d13
    uint8x8x2_t d3u8x2;  // d14, d15

    qblimit = vdupq_n_u8(*blimit);

    /* Transposed load of 16 rows x 4 columns centered on the edge:
     * read_4x8 returns one column per val[] entry. */
    src1 = s - 2;
    d0u8x4 = read_4x8(src1, p);  // rows 0-7
    src1 += p * 8;
    d1u8x4 = read_4x8(src1, p);  // rows 8-15

    q3u8 = vcombine_u8(d0u8x4.val[0], d1u8x4.val[0]);  // d6 d10: p1
    q4u8 = vcombine_u8(d0u8x4.val[2], d1u8x4.val[2]);  // d8 d12: q0
    q5u8 = vcombine_u8(d0u8x4.val[1], d1u8x4.val[1]);  // d7 d11: p0
    q6u8 = vcombine_u8(d0u8x4.val[3], d1u8x4.val[3]);  // d9 d13: q1

    /* Filter mask: rows filter iff |p0 - q0| * 2 + |p1 - q1| / 2 <= blimit
     * (saturating arithmetic keeps the sum from wrapping). */
    q15u8 = vabdq_u8(q5u8, q4u8);     // |p0 - q0|
    q14u8 = vabdq_u8(q3u8, q6u8);     // |p1 - q1|

    q15u8 = vqaddq_u8(q15u8, q15u8);  // saturating * 2
    q14u8 = vshrq_n_u8(q14u8, 1);     // / 2
    q0u8 = vdupq_n_u8(0x80);
    q11s16 = vdupq_n_s16(3);
    q15u8 = vqaddq_u8(q15u8, q14u8);

    /* Convert pixels to signed representation (bias by 0x80) so the
     * filter arithmetic can saturate symmetrically. */
    q3u8 = veorq_u8(q3u8, q0u8);
    q4u8 = veorq_u8(q4u8, q0u8);
    q5u8 = veorq_u8(q5u8, q0u8);
    q6u8 = veorq_u8(q6u8, q0u8);

    q15u8 = vcgeq_u8(qblimit, q15u8);  // mask: 0xFF where the edge filters

    /* Filter value F = clamp(3 * (q0 - p0) + clamp(p1 - q1)),
     * computed in 16 bits (low/high halves) to avoid overflow before
     * the final saturating narrow. */
    q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q4u8)),
                     vget_low_s8(vreinterpretq_s8_u8(q5u8)));
    q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q4u8)),
                      vget_high_s8(vreinterpretq_s8_u8(q5u8)));

    q14s8 = vqsubq_s8(vreinterpretq_s8_u8(q3u8),
                      vreinterpretq_s8_u8(q6u8));  // clamp(p1 - q1)

    q2s16 = vmulq_s16(q2s16, q11s16);    // 3 * (q0 - p0), rows 0-7
    q13s16 = vmulq_s16(q13s16, q11s16);  // 3 * (q0 - p0), rows 8-15

    q11u8 = vdupq_n_u8(3);
    q12u8 = vdupq_n_u8(4);

    q2s16 = vaddw_s8(q2s16, vget_low_s8(q14s8));
    q13s16 = vaddw_s8(q13s16, vget_high_s8(q14s8));

    d28s8 = vqmovn_s16(q2s16);  // narrow back to s8 with saturation
    d29s8 = vqmovn_s16(q13s16);
    q14s8 = vcombine_s8(d28s8, d29s8);

    q14s8 = vandq_s8(q14s8, vreinterpretq_s8_u8(q15u8));  // zero masked-out rows

    /* Rounding split: p0 += (F + 3) >> 3;  q0 -= (F + 4) >> 3. */
    q2s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q11u8));
    q3s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q12u8));
    q2s8 = vshrq_n_s8(q2s8, 3);
    q14s8 = vshrq_n_s8(q3s8, 3);

    q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q5u8), q2s8);   // new p0
    q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q4u8), q14s8);  // new q0

    /* Undo the 0x80 bias to return to unsigned pixels. */
    q6u8 = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8);
    q7u8 = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8);

    d2u8x2.val[0] = vget_low_u8(q6u8);   // d12: p0, rows 0-7
    d2u8x2.val[1] = vget_low_u8(q7u8);   // d14: q0, rows 0-7
    d3u8x2.val[0] = vget_high_u8(q6u8);  // d13: p0, rows 8-15
    d3u8x2.val[1] = vget_high_u8(q7u8);  // d15: q0, rows 8-15

    /* Store the two filtered columns back, transposed. */
    src1 = s - 1;
    write_2x8(src1, p, d2u8x2, d3u8x2);
}
263
/* Simple loop filter for the three internal vertical block edges of a
 * macroblock (columns 4, 8 and 12). */
void vp8_loop_filter_bvs_neon(
        unsigned char *y_ptr,
        int y_stride,
        const unsigned char *blimit) {
    int edge;

    for (edge = 0; edge < 3; edge++) {
        y_ptr += 4;
        vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
    }
}
276
/* Simple loop filter for the left (macroblock) vertical edge. */
void vp8_loop_filter_mbvs_neon(
        unsigned char *y_ptr,
        int y_stride,
        const unsigned char *blimit) {
    vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
}
284