/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

/*
 * 4x4 forward DCT of a block of prediction residuals (NEON).
 *
 * input:  top-left sample of the 4x4 source block; rows are `pitch`
 *         BYTES apart (halved below to step in int16_t elements).
 * output: 16 transform coefficients, written contiguously.
 *
 * Two passes, as in the scalar reference: part one transforms the rows,
 * part two transforms the columns and applies the final rounding.
 */
void vp8_short_fdct4x4_neon(
        int16_t *input,
        int16_t *output,
        int pitch) {
    int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;
    int16x4_t d16s16, d17s16, d26s16, dEmptys16;
    uint16x4_t d4u16;
    int16x8_t q0s16, q1s16;
    int32x4_t q9s32, q10s32, q11s32, q12s32;
    int16x4x2_t v2tmp0, v2tmp1;
    int32x2x2_t v2tmp2, v2tmp3;

    // fdct multiplier constants and per-pass rounding biases.
    d16s16 = vdup_n_s16(5352);
    d17s16 = vdup_n_s16(2217);
    q9s32 = vdupq_n_s32(14500);
    q10s32 = vdupq_n_s32(7500);
    q11s32 = vdupq_n_s32(12000);
    q12s32 = vdupq_n_s32(51000);

    // Part one: row transform.
    pitch >>= 1;  // bytes -> int16_t elements
    d0s16 = vld1_s16(input);
    input += pitch;
    d1s16 = vld1_s16(input);
    input += pitch;
    d2s16 = vld1_s16(input);
    input += pitch;
    d3s16 = vld1_s16(input);

    // Transpose the 4x4 tile: afterwards v2tmp0.val[0], v2tmp0.val[1],
    // v2tmp1.val[0], v2tmp1.val[1] hold columns 0..3, one source row
    // per lane, so each lane below processes one row independently.
    v2tmp2 = vtrn_s32(vreinterpret_s32_s16(d0s16),
                      vreinterpret_s32_s16(d2s16));
    v2tmp3 = vtrn_s32(vreinterpret_s32_s16(d1s16),
                      vreinterpret_s32_s16(d3s16));
    v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[0]),   // d0
                      vreinterpret_s16_s32(v2tmp3.val[0]));  // d1
    v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[1]),   // d2
                      vreinterpret_s16_s32(v2tmp3.val[1]));  // d3

    // Butterfly: a1 = x0 + x3, b1 = x1 + x2, c1 = x1 - x2, d1 = x0 - x3.
    d4s16 = vadd_s16(v2tmp0.val[0], v2tmp1.val[1]);
    d5s16 = vadd_s16(v2tmp0.val[1], v2tmp1.val[0]);
    d6s16 = vsub_s16(v2tmp0.val[1], v2tmp1.val[0]);
    d7s16 = vsub_s16(v2tmp0.val[0], v2tmp1.val[1]);

    // Scale by 8 to keep precision through the second pass.
    d4s16 = vshl_n_s16(d4s16, 3);
    d5s16 = vshl_n_s16(d5s16, 3);
    d6s16 = vshl_n_s16(d6s16, 3);
    d7s16 = vshl_n_s16(d7s16, 3);

    // Even outputs: op0 = a1 + b1, op2 = a1 - b1.
    d0s16 = vadd_s16(d4s16, d5s16);
    d2s16 = vsub_s16(d4s16, d5s16);

    // Odd outputs: op1 = (c1*2217 + d1*5352 + 14500) >> 12,
    //              op3 = (d1*2217 - c1*5352 +  7500) >> 12.
    q9s32 = vmlal_s16(q9s32, d7s16, d16s16);
    q10s32 = vmlal_s16(q10s32, d7s16, d17s16);
    q9s32 = vmlal_s16(q9s32, d6s16, d17s16);
    q10s32 = vmlsl_s16(q10s32, d6s16, d16s16);

    d1s16 = vshrn_n_s32(q9s32, 12);
    d3s16 = vshrn_n_s32(q10s32, 12);

    // Part two: column transform (same transpose trick as above).
    v2tmp2 = vtrn_s32(vreinterpret_s32_s16(d0s16),
                      vreinterpret_s32_s16(d2s16));
    v2tmp3 = vtrn_s32(vreinterpret_s32_s16(d1s16),
                      vreinterpret_s32_s16(d3s16));
    v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[0]),   // d0
                      vreinterpret_s16_s32(v2tmp3.val[0]));  // d1
    v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[1]),   // d2
                      vreinterpret_s16_s32(v2tmp3.val[1]));  // d3

    d4s16 = vadd_s16(v2tmp0.val[0], v2tmp1.val[1]);
    d5s16 = vadd_s16(v2tmp0.val[1], v2tmp1.val[0]);
    d6s16 = vsub_s16(v2tmp0.val[1], v2tmp1.val[0]);
    d7s16 = vsub_s16(v2tmp0.val[0], v2tmp1.val[1]);

    // Rounding bias for the even outputs: op0 = (a1 + b1 + 7) >> 4,
    // op8 = (a1 - b1 + 7) >> 4; adding 7 to a1 covers both.
    d26s16 = vdup_n_s16(7);
    d4s16 = vadd_s16(d4s16, d26s16);

    d0s16 = vadd_s16(d4s16, d5s16);
    d2s16 = vsub_s16(d4s16, d5s16);

    q11s32 = vmlal_s16(q11s32, d7s16, d16s16);
    q12s32 = vmlal_s16(q12s32, d7s16, d17s16);

    // Flag lanes where d1 (here d7s16) is zero; used below to add 1 to
    // op4 in every lane where d1 is nonzero.
    dEmptys16 = vdup_n_s16(0);
    d4u16 = vceq_s16(d7s16, dEmptys16);

    d0s16 = vshr_n_s16(d0s16, 4);
    d2s16 = vshr_n_s16(d2s16, 4);

    // op4  = (c1*2217 + d1*5352 + 12000) >> 16  (+1 if d1 != 0),
    // op12 = (d1*2217 - c1*5352 + 51000) >> 16.
    q11s32 = vmlal_s16(q11s32, d6s16, d17s16);
    q12s32 = vmlsl_s16(q12s32, d6s16, d16s16);

    // vmvn turns the equal-to-zero mask into 0xffff (-1) exactly in the
    // nonzero-d1 lanes; subtracting -1 adds 1 there.
    d4u16 = vmvn_u16(d4u16);
    d1s16 = vshrn_n_s32(q11s32, 16);
    d1s16 = vsub_s16(d1s16, vreinterpret_s16_u16(d4u16));
    d3s16 = vshrn_n_s32(q12s32, 16);

    q0s16 = vcombine_s16(d0s16, d1s16);
    q1s16 = vcombine_s16(d2s16, d3s16);

    vst1q_s16(output, q0s16);
    vst1q_s16(output + 8, q1s16);
}
117
/*
 * Forward DCT of two horizontally adjacent 4x4 residual blocks (NEON).
 *
 * input:  top-left sample of an 8-wide, 4-high region; rows are `pitch`
 *         BYTES apart (halved below to step in int16_t elements).
 * output: 32 coefficients: the left 4x4 block in output[0..15], the
 *         right 4x4 block in output[16..31].
 *
 * Same math as vp8_short_fdct4x4_neon, but the low halves of the q
 * registers carry the left block and the high halves the right block.
 */
void vp8_short_fdct8x4_neon(
        int16_t *input,
        int16_t *output,
        int pitch) {
    int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;
    int16x4_t d16s16, d17s16, d26s16, d27s16, d28s16, d29s16;
    uint16x4_t d28u16, d29u16;
    uint16x8_t q14u16;
    int16x8_t q0s16, q1s16, q2s16, q3s16;
    int16x8_t q11s16, q12s16, q13s16, q14s16, q15s16, qEmptys16;
    int32x4_t q9s32, q10s32, q11s32, q12s32;
    int16x8x2_t v2tmp0, v2tmp1;
    int32x4x2_t v2tmp2, v2tmp3;

    // fdct multiplier constants and part-one rounding biases.
    d16s16 = vdup_n_s16(5352);
    d17s16 = vdup_n_s16(2217);
    q9s32 = vdupq_n_s32(14500);
    q10s32 = vdupq_n_s32(7500);

    // Part one: row transform of both blocks at once.
    pitch >>= 1;  // bytes -> int16_t elements
    q0s16 = vld1q_s16(input);
    input += pitch;
    q1s16 = vld1q_s16(input);
    input += pitch;
    q2s16 = vld1q_s16(input);
    input += pitch;
    q3s16 = vld1q_s16(input);

    // Transpose each 4x4 half in place: afterwards every v2tmp vector
    // holds one column of the left block (lanes 0-3) and the matching
    // column of the right block (lanes 4-7).
    v2tmp2 = vtrnq_s32(vreinterpretq_s32_s16(q0s16),
                       vreinterpretq_s32_s16(q2s16));
    v2tmp3 = vtrnq_s32(vreinterpretq_s32_s16(q1s16),
                       vreinterpretq_s32_s16(q3s16));
    v2tmp0 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[0]),   // q0
                       vreinterpretq_s16_s32(v2tmp3.val[0]));  // q1
    v2tmp1 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[1]),   // q2
                       vreinterpretq_s16_s32(v2tmp3.val[1]));  // q3

    // Butterfly: a1 = x0 + x3, b1 = x1 + x2, c1 = x1 - x2, d1 = x0 - x3.
    q11s16 = vaddq_s16(v2tmp0.val[0], v2tmp1.val[1]);
    q12s16 = vaddq_s16(v2tmp0.val[1], v2tmp1.val[0]);
    q13s16 = vsubq_s16(v2tmp0.val[1], v2tmp1.val[0]);
    q14s16 = vsubq_s16(v2tmp0.val[0], v2tmp1.val[1]);

    // Scale by 8 to keep precision through the second pass.
    q11s16 = vshlq_n_s16(q11s16, 3);
    q12s16 = vshlq_n_s16(q12s16, 3);
    q13s16 = vshlq_n_s16(q13s16, 3);
    q14s16 = vshlq_n_s16(q14s16, 3);

    // Even outputs: op0 = a1 + b1, op2 = a1 - b1.
    q0s16 = vaddq_s16(q11s16, q12s16);
    q2s16 = vsubq_s16(q11s16, q12s16);

    // Duplicate the bias accumulators: q9/q10 process the low halves
    // (left block), q11/q12 the high halves (right block).
    q11s32 = q9s32;
    q12s32 = q10s32;

    d26s16 = vget_low_s16(q13s16);
    d27s16 = vget_high_s16(q13s16);
    d28s16 = vget_low_s16(q14s16);
    d29s16 = vget_high_s16(q14s16);

    // Odd outputs: op1 = (c1*2217 + d1*5352 + 14500) >> 12,
    //              op3 = (d1*2217 - c1*5352 +  7500) >> 12.
    q9s32 = vmlal_s16(q9s32, d28s16, d16s16);
    q10s32 = vmlal_s16(q10s32, d28s16, d17s16);
    q11s32 = vmlal_s16(q11s32, d29s16, d16s16);
    q12s32 = vmlal_s16(q12s32, d29s16, d17s16);

    q9s32 = vmlal_s16(q9s32, d26s16, d17s16);
    q10s32 = vmlsl_s16(q10s32, d26s16, d16s16);
    q11s32 = vmlal_s16(q11s32, d27s16, d17s16);
    q12s32 = vmlsl_s16(q12s32, d27s16, d16s16);

    d2s16 = vshrn_n_s32(q9s32, 12);
    d6s16 = vshrn_n_s32(q10s32, 12);
    d3s16 = vshrn_n_s32(q11s32, 12);
    d7s16 = vshrn_n_s32(q12s32, 12);
    q1s16 = vcombine_s16(d2s16, d3s16);
    q3s16 = vcombine_s16(d6s16, d7s16);

    // Part two: column transform plus final rounding.
    q9s32 = vdupq_n_s32(12000);
    q10s32 = vdupq_n_s32(51000);

    v2tmp2 = vtrnq_s32(vreinterpretq_s32_s16(q0s16),
                       vreinterpretq_s32_s16(q2s16));
    v2tmp3 = vtrnq_s32(vreinterpretq_s32_s16(q1s16),
                       vreinterpretq_s32_s16(q3s16));
    v2tmp0 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[0]),   // q0
                       vreinterpretq_s16_s32(v2tmp3.val[0]));  // q1
    v2tmp1 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[1]),   // q2
                       vreinterpretq_s16_s32(v2tmp3.val[1]));  // q3

    q11s16 = vaddq_s16(v2tmp0.val[0], v2tmp1.val[1]);
    q12s16 = vaddq_s16(v2tmp0.val[1], v2tmp1.val[0]);
    q13s16 = vsubq_s16(v2tmp0.val[1], v2tmp1.val[0]);
    q14s16 = vsubq_s16(v2tmp0.val[0], v2tmp1.val[1]);

    // op0 = (a1 + b1 + 7) >> 4, op8 = (a1 - b1 + 7) >> 4; adding the
    // bias 7 to a1 covers both sums.
    q15s16 = vdupq_n_s16(7);
    q11s16 = vaddq_s16(q11s16, q15s16);
    q0s16 = vaddq_s16(q11s16, q12s16);
    q1s16 = vsubq_s16(q11s16, q12s16);

    q11s32 = q9s32;
    q12s32 = q10s32;

    d0s16 = vget_low_s16(q0s16);
    d1s16 = vget_high_s16(q0s16);
    d2s16 = vget_low_s16(q1s16);
    d3s16 = vget_high_s16(q1s16);

    d0s16 = vshr_n_s16(d0s16, 4);
    d4s16 = vshr_n_s16(d1s16, 4);
    d2s16 = vshr_n_s16(d2s16, 4);
    d6s16 = vshr_n_s16(d3s16, 4);

    d26s16 = vget_low_s16(q13s16);
    d27s16 = vget_high_s16(q13s16);
    d28s16 = vget_low_s16(q14s16);
    d29s16 = vget_high_s16(q14s16);

    // op4  = (c1*2217 + d1*5352 + 12000) >> 16  (+1 if d1 != 0),
    // op12 = (d1*2217 - c1*5352 + 51000) >> 16.
    q9s32 = vmlal_s16(q9s32, d28s16, d16s16);
    q10s32 = vmlal_s16(q10s32, d28s16, d17s16);
    q11s32 = vmlal_s16(q11s32, d29s16, d16s16);
    q12s32 = vmlal_s16(q12s32, d29s16, d17s16);

    q9s32 = vmlal_s16(q9s32, d26s16, d17s16);
    q10s32 = vmlsl_s16(q10s32, d26s16, d16s16);
    q11s32 = vmlal_s16(q11s32, d27s16, d17s16);
    q12s32 = vmlsl_s16(q12s32, d27s16, d16s16);

    d1s16 = vshrn_n_s32(q9s32, 16);
    d3s16 = vshrn_n_s32(q10s32, 16);
    d5s16 = vshrn_n_s32(q11s32, 16);
    d7s16 = vshrn_n_s32(q12s32, 16);

    // vmvn of the equal-to-zero mask is 0xffff (-1) exactly in the
    // nonzero-d1 lanes; subtracting it adds 1 to op4 there.
    qEmptys16 = vdupq_n_s16(0);
    q14u16 = vceqq_s16(q14s16, qEmptys16);
    q14u16 = vmvnq_u16(q14u16);

    d28u16 = vget_low_u16(q14u16);
    d29u16 = vget_high_u16(q14u16);
    d1s16 = vsub_s16(d1s16, vreinterpret_s16_u16(d28u16));
    d5s16 = vsub_s16(d5s16, vreinterpret_s16_u16(d29u16));

    // Left block coefficients first, then the right block.
    q0s16 = vcombine_s16(d0s16, d1s16);
    q1s16 = vcombine_s16(d2s16, d3s16);
    q2s16 = vcombine_s16(d4s16, d5s16);
    q3s16 = vcombine_s16(d6s16, d7s16);

    vst1q_s16(output, q0s16);
    vst1q_s16(output + 8, q1s16);
    vst1q_s16(output + 16, q2s16);
    vst1q_s16(output + 24, q3s16);
}
270