/*
 * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <emmintrin.h>  // SSE2

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/txfm_common.h"
#include "vpx_dsp/x86/fwd_txfm_sse2.h"
#include "vpx_dsp/x86/txfm_common_sse2.h"
#include "vpx_ports/mem.h"

// TODO(jingning) The high bit-depth functions need rework for performance.
// After we properly fix the high bit-depth function implementations, this
// file's dependency should be substantially simplified.
#if DCT_HIGH_BIT_DEPTH
#define ADD_EPI16 _mm_adds_epi16
#define SUB_EPI16 _mm_subs_epi16

#else
#define ADD_EPI16 _mm_add_epi16
#define SUB_EPI16 _mm_sub_epi16
#endif
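// In the high bit-depth build the saturating forms are used so that any
// intermediate value that would leave the int16 range sticks at the
// saturation point; the check_epi16_overflow_*() helpers used below can then
// detect this and fall back to the C implementations.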

void FDCT4x4_2D(const int16_t *input, tran_low_t *output, int stride) {
  // This 2D transform implements 4 vertical 1D transforms followed
  // by 4 horizontal 1D transforms. The multiplies and adds are as given
  // by Chen, Smith and Fralick ('77). The commands for moving the data
  // around have been minimized by hand.
  // For the purposes of the comments, the 16 inputs are referred to as i0
  // through iF (in raster order), intermediate variables are a0, b0, c0
  // through f, and correspond to the in-place computations mapped to input
  // locations. The outputs, o0 through oF, are labeled according to the
  // output locations.

  // Constants
  // These are the coefficients used for the multiplies.
  // In the comments, pN means cos(N pi /64) and mN is -cos(N pi /64),
  // where cospi_N_64 = cos(N pi /64)
  const __m128i k__cospi_A =
      octa_set_epi16(cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64,
                     cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64);
  const __m128i k__cospi_B =
      octa_set_epi16(cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64,
                     cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64);
  const __m128i k__cospi_C =
      octa_set_epi16(cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64,
                     cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64);
  const __m128i k__cospi_D =
      octa_set_epi16(cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64,
                     cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64);
  const __m128i k__cospi_E =
      octa_set_epi16(cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64,
                     cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64);
  const __m128i k__cospi_F =
      octa_set_epi16(cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64,
                     cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64);
  const __m128i k__cospi_G =
      octa_set_epi16(cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64,
                     -cospi_8_64, -cospi_24_64, -cospi_8_64, -cospi_24_64);
  const __m128i k__cospi_H =
      octa_set_epi16(cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64,
                     -cospi_24_64, cospi_8_64, -cospi_24_64, cospi_8_64);

  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
  // This second rounding constant saves doing some extra adds at the end
  const __m128i k__DCT_CONST_ROUNDING2 =
      _mm_set1_epi32(DCT_CONST_ROUNDING + (DCT_CONST_ROUNDING << 1));
  const int DCT_CONST_BITS2 = DCT_CONST_BITS + 2;
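  // Note: DCT_CONST_ROUNDING2 == 3 * DCT_CONST_ROUNDING. Since
  // DCT_CONST_ROUNDING is 1 << (DCT_CONST_BITS - 1), adding this and shifting
  // right by DCT_CONST_BITS + 2 reproduces the reference code's
  // dct_const_round_shift() followed by the trailing (x + 1) >> 2 in one step.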
  const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1);
  const __m128i k__nonzero_bias_b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0);
  __m128i in0, in1;
#if DCT_HIGH_BIT_DEPTH
  __m128i cmp0, cmp1;
  int test, overflow;
#endif

  // Load inputs.
  in0 = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
  in1 = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
  in1 = _mm_unpacklo_epi64(
      in1, _mm_loadl_epi64((const __m128i *)(input + 2 * stride)));
  in0 = _mm_unpacklo_epi64(
      in0, _mm_loadl_epi64((const __m128i *)(input + 3 * stride)));
  // in0 = [i0 i1 i2 i3 iC iD iE iF]
  // in1 = [i4 i5 i6 i7 i8 i9 iA iB]
#if DCT_HIGH_BIT_DEPTH
  // Check that the inputs are small enough to use the optimised code path.
  cmp0 = _mm_xor_si128(_mm_cmpgt_epi16(in0, _mm_set1_epi16(0x3ff)),
                       _mm_cmplt_epi16(in0, _mm_set1_epi16((int16_t)0xfc00)));
  cmp1 = _mm_xor_si128(_mm_cmpgt_epi16(in1, _mm_set1_epi16(0x3ff)),
                       _mm_cmplt_epi16(in1, _mm_set1_epi16((int16_t)0xfc00)));
  test = _mm_movemask_epi8(_mm_or_si128(cmp0, cmp1));
  if (test) {
    vpx_highbd_fdct4x4_c(input, output, stride);
    return;
  }
#endif  // DCT_HIGH_BIT_DEPTH

  // multiply by 16 to give some extra precision
  in0 = _mm_slli_epi16(in0, 4);
  in1 = _mm_slli_epi16(in1, 4);
  // if (i == 0 && input[0]) input[0] += 1;
  // add 1 to the upper left pixel if it is non-zero, which helps reduce
  // the round-trip error
  {
    // The mask will only contain whether the first value is zero; all
    // other comparisons will fail as something shifted by 4 (above << 4)
    // can never be equal to one. To increment in the non-zero case, we
    // add the mask and one for the first element:
    // - if zero, mask = -1, v = v - 1 + 1 = v
    // - if non-zero, mask = 0, v = v + 0 + 1 = v + 1
    __m128i mask = _mm_cmpeq_epi16(in0, k__nonzero_bias_a);
    in0 = _mm_add_epi16(in0, mask);
    in0 = _mm_add_epi16(in0, k__nonzero_bias_b);
  }
  // There are 4 total stages, alternating between an add/subtract stage
  // followed by a multiply-and-add stage.
  {
    // Stage 1: Add/subtract

    // in0 = [i0 i1 i2 i3 iC iD iE iF]
    // in1 = [i4 i5 i6 i7 i8 i9 iA iB]
    const __m128i r0 = _mm_unpacklo_epi16(in0, in1);
    const __m128i r1 = _mm_unpackhi_epi16(in0, in1);
    // r0 = [i0 i4 i1 i5 i2 i6 i3 i7]
    // r1 = [iC i8 iD i9 iE iA iF iB]
    const __m128i r2 = _mm_shuffle_epi32(r0, 0xB4);
    const __m128i r3 = _mm_shuffle_epi32(r1, 0xB4);
    // r2 = [i0 i4 i1 i5 i3 i7 i2 i6]
    // r3 = [iC i8 iD i9 iF iB iE iA]
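    // 0xB4 selects the 32-bit words (0, 1, 3, 2): the low two dwords stay in
    // place and the high two are swapped, giving the orderings shown above.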

    const __m128i t0 = _mm_add_epi16(r2, r3);
    const __m128i t1 = _mm_sub_epi16(r2, r3);
    // t0 = [a0 a4 a1 a5 a3 a7 a2 a6]
    // t1 = [aC a8 aD a9 aF aB aE aA]

    // Stage 2: multiply by constants (which gets us into 32 bits).
    // The constants needed here are:
    // k__cospi_A = [p16 p16 p16 p16 p16 m16 p16 m16]
    // k__cospi_B = [p16 m16 p16 m16 p16 p16 p16 p16]
    // k__cospi_C = [p08 p24 p08 p24 p24 m08 p24 m08]
    // k__cospi_D = [p24 m08 p24 m08 p08 p24 p08 p24]
    const __m128i u0 = _mm_madd_epi16(t0, k__cospi_A);
    const __m128i u2 = _mm_madd_epi16(t0, k__cospi_B);
    const __m128i u1 = _mm_madd_epi16(t1, k__cospi_C);
    const __m128i u3 = _mm_madd_epi16(t1, k__cospi_D);
    // Then add and right-shift to get back to 16-bit range
    const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
    const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
    const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
    const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
    const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
    const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
    const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
    const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
    // w0 = [b0 b1 b7 b6]
    // w1 = [b8 b9 bF bE]
    // w2 = [b4 b5 b3 b2]
    // w3 = [bC bD bB bA]
    const __m128i x0 = _mm_packs_epi32(w0, w1);
    const __m128i x1 = _mm_packs_epi32(w2, w3);
#if DCT_HIGH_BIT_DEPTH
    overflow = check_epi16_overflow_x2(&x0, &x1);
    if (overflow) {
      vpx_highbd_fdct4x4_c(input, output, stride);
      return;
    }
#endif  // DCT_HIGH_BIT_DEPTH
    // x0 = [b0 b1 b7 b6 b8 b9 bF bE]
    // x1 = [b4 b5 b3 b2 bC bD bB bA]
    in0 = _mm_shuffle_epi32(x0, 0xD8);
    in1 = _mm_shuffle_epi32(x1, 0x8D);
    // in0 = [b0 b1 b8 b9 b7 b6 bF bE]
    // in1 = [b3 b2 bB bA b4 b5 bC bD]
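    // 0xD8 selects dwords (0, 2, 1, 3) and 0x8D selects dwords (1, 3, 0, 2),
    // producing the b-orderings shown above for the next add/subtract stage.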
  }
  {
    // vertical DCTs finished. Now we do the horizontal DCTs.
    // Stage 3: Add/subtract

    const __m128i t0 = ADD_EPI16(in0, in1);
    const __m128i t1 = SUB_EPI16(in0, in1);
    // t0 = [c0 c1 c8 c9 c4 c5 cC cD]
    // t1 = [c3 c2 cB cA -c7 -c6 -cF -cE]
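    // The upper half of t1 comes out negated (-c7 .. -cE); rather than
    // negating it here, the sign is compensated by the sign pattern in the
    // upper halves of k__cospi_G and k__cospi_H used in stage 4 below.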
#if DCT_HIGH_BIT_DEPTH
    overflow = check_epi16_overflow_x2(&t0, &t1);
    if (overflow) {
      vpx_highbd_fdct4x4_c(input, output, stride);
      return;
    }
#endif  // DCT_HIGH_BIT_DEPTH

    // Stage 4: multiply by constants (which gets us into 32 bits).
    {
      // The constants needed here are:
      // k__cospi_E = [p16 p16 p16 p16 p16 p16 p16 p16]
      // k__cospi_F = [p16 m16 p16 m16 p16 m16 p16 m16]
      // k__cospi_G = [p08 p24 p08 p24 m08 m24 m08 m24]
      // k__cospi_H = [p24 m08 p24 m08 m24 p08 m24 p08]
      const __m128i u0 = _mm_madd_epi16(t0, k__cospi_E);
      const __m128i u1 = _mm_madd_epi16(t0, k__cospi_F);
      const __m128i u2 = _mm_madd_epi16(t1, k__cospi_G);
      const __m128i u3 = _mm_madd_epi16(t1, k__cospi_H);
      // Then add and right-shift to get back to 16-bit range,
      // but this combines the final right-shift as well to save operations.
      // This unusual rounding operation is to maintain bit-accurate
      // compatibility with the C version of this function, which has two
      // rounding steps in a row.
      const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING2);
      const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING2);
      const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING2);
      const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING2);
      const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS2);
      const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS2);
      const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS2);
      const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS2);
      // w0 = [o0 o4 o8 oC]
      // w1 = [o2 o6 oA oE]
      // w2 = [o1 o5 o9 oD]
      // w3 = [o3 o7 oB oF]
      // remember the o's are numbered according to the correct output location
      const __m128i x0 = _mm_packs_epi32(w0, w1);
      const __m128i x1 = _mm_packs_epi32(w2, w3);
#if DCT_HIGH_BIT_DEPTH
      overflow = check_epi16_overflow_x2(&x0, &x1);
      if (overflow) {
        vpx_highbd_fdct4x4_c(input, output, stride);
        return;
      }
#endif  // DCT_HIGH_BIT_DEPTH
      {
        // x0 = [o0 o4 o8 oC o2 o6 oA oE]
        // x1 = [o1 o5 o9 oD o3 o7 oB oF]
        const __m128i y0 = _mm_unpacklo_epi16(x0, x1);
        const __m128i y1 = _mm_unpackhi_epi16(x0, x1);
        // y0 = [o0 o1 o4 o5 o8 o9 oC oD]
        // y1 = [o2 o3 o6 o7 oA oB oE oF]
        in0 = _mm_unpacklo_epi32(y0, y1);
        // in0 = [o0 o1 o2 o3 o4 o5 o6 o7]
        in1 = _mm_unpackhi_epi32(y0, y1);
        // in1 = [o8 o9 oA oB oC oD oE oF]
      }
    }
  }
  // Post-condition: the (v + 1) >> 2 step is now incorporated into the
  // previous add and right-shift. Only two store instructions are needed
  // because output row 1 is stored directly after row 0, and row 3 after
  // row 2.
  storeu_output(&in0, output + 0 * 4);
  storeu_output(&in1, output + 2 * 4);
}

void FDCT8x8_2D(const int16_t *input, tran_low_t *output, int stride) {
  int pass;
  // Constants
  // In one case the constants are all the same. In all other cases it is a
  // pair of constants that needs to be repeated four times, which is done by
  // constructing the 32-bit constant corresponding to that pair.
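  // pair_set_epi16(a, b) lays the pair out as [a b a b a b a b], so that
  // _mm_madd_epi16 on data interleaved with _mm_unpack{lo,hi}_epi16 computes
  // a * x + b * y in each 32-bit lane.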
  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
  const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
  const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
  const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
  const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
  const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
  const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
#if DCT_HIGH_BIT_DEPTH
  int overflow;
#endif
  // Load input
  __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
  __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
  __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
  __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));
  __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride));
  __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride));
  __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride));
  __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride));
  // Pre-condition input (shift by two)
  in0 = _mm_slli_epi16(in0, 2);
  in1 = _mm_slli_epi16(in1, 2);
  in2 = _mm_slli_epi16(in2, 2);
  in3 = _mm_slli_epi16(in3, 2);
  in4 = _mm_slli_epi16(in4, 2);
  in5 = _mm_slli_epi16(in5, 2);
  in6 = _mm_slli_epi16(in6, 2);
  in7 = _mm_slli_epi16(in7, 2);

  // We do two passes, first the columns, then the rows. The results of the
  // first pass are transposed so that the same column code can be reused. The
  // results of the second pass are also transposed so that the rows (processed
  // as columns) are put back in row positions.
  for (pass = 0; pass < 2; pass++) {
    // To store results of each pass before the transpose.
    __m128i res0, res1, res2, res3, res4, res5, res6, res7;
    // Add/subtract
    const __m128i q0 = ADD_EPI16(in0, in7);
    const __m128i q1 = ADD_EPI16(in1, in6);
    const __m128i q2 = ADD_EPI16(in2, in5);
    const __m128i q3 = ADD_EPI16(in3, in4);
    const __m128i q4 = SUB_EPI16(in3, in4);
    const __m128i q5 = SUB_EPI16(in2, in5);
    const __m128i q6 = SUB_EPI16(in1, in6);
    const __m128i q7 = SUB_EPI16(in0, in7);
#if DCT_HIGH_BIT_DEPTH
    if (pass == 1) {
      overflow =
          check_epi16_overflow_x8(&q0, &q1, &q2, &q3, &q4, &q5, &q6, &q7);
      if (overflow) {
        vpx_highbd_fdct8x8_c(input, output, stride);
        return;
      }
    }
#endif  // DCT_HIGH_BIT_DEPTH
    // Work on first four results
    {
      // Add/subtract
      const __m128i r0 = ADD_EPI16(q0, q3);
      const __m128i r1 = ADD_EPI16(q1, q2);
      const __m128i r2 = SUB_EPI16(q1, q2);
      const __m128i r3 = SUB_EPI16(q0, q3);
#if DCT_HIGH_BIT_DEPTH
      overflow = check_epi16_overflow_x4(&r0, &r1, &r2, &r3);
      if (overflow) {
        vpx_highbd_fdct8x8_c(input, output, stride);
        return;
      }
#endif  // DCT_HIGH_BIT_DEPTH
      // Interleave to do the multiply by constants which gets us into 32bits
      {
        const __m128i t0 = _mm_unpacklo_epi16(r0, r1);
        const __m128i t1 = _mm_unpackhi_epi16(r0, r1);
        const __m128i t2 = _mm_unpacklo_epi16(r2, r3);
        const __m128i t3 = _mm_unpackhi_epi16(r2, r3);
        const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16);
        const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_p16);
        const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16);
        const __m128i u3 = _mm_madd_epi16(t1, k__cospi_p16_m16);
        const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p24_p08);
        const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p24_p08);
        const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m08_p24);
        const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m08_p24);
        // dct_const_round_shift
        const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
        const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
        const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
        const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
        const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
        const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
        const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
        const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
        const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
        const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
        const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
        const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
        const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
        const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
        const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
        const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
        // Combine
        res0 = _mm_packs_epi32(w0, w1);
        res4 = _mm_packs_epi32(w2, w3);
        res2 = _mm_packs_epi32(w4, w5);
        res6 = _mm_packs_epi32(w6, w7);
#if DCT_HIGH_BIT_DEPTH
        overflow = check_epi16_overflow_x4(&res0, &res4, &res2, &res6);
        if (overflow) {
          vpx_highbd_fdct8x8_c(input, output, stride);
          return;
        }
#endif  // DCT_HIGH_BIT_DEPTH
      }
    }
    // Work on next four results
    {
      // Interleave to do the multiply by constants which gets us into 32bits
      const __m128i d0 = _mm_unpacklo_epi16(q6, q5);
      const __m128i d1 = _mm_unpackhi_epi16(q6, q5);
      const __m128i e0 = _mm_madd_epi16(d0, k__cospi_p16_m16);
      const __m128i e1 = _mm_madd_epi16(d1, k__cospi_p16_m16);
      const __m128i e2 = _mm_madd_epi16(d0, k__cospi_p16_p16);
      const __m128i e3 = _mm_madd_epi16(d1, k__cospi_p16_p16);
      // dct_const_round_shift
      const __m128i f0 = _mm_add_epi32(e0, k__DCT_CONST_ROUNDING);
      const __m128i f1 = _mm_add_epi32(e1, k__DCT_CONST_ROUNDING);
      const __m128i f2 = _mm_add_epi32(e2, k__DCT_CONST_ROUNDING);
      const __m128i f3 = _mm_add_epi32(e3, k__DCT_CONST_ROUNDING);
      const __m128i s0 = _mm_srai_epi32(f0, DCT_CONST_BITS);
      const __m128i s1 = _mm_srai_epi32(f1, DCT_CONST_BITS);
      const __m128i s2 = _mm_srai_epi32(f2, DCT_CONST_BITS);
      const __m128i s3 = _mm_srai_epi32(f3, DCT_CONST_BITS);
      // Combine
      const __m128i r0 = _mm_packs_epi32(s0, s1);
      const __m128i r1 = _mm_packs_epi32(s2, s3);
#if DCT_HIGH_BIT_DEPTH
      overflow = check_epi16_overflow_x2(&r0, &r1);
      if (overflow) {
        vpx_highbd_fdct8x8_c(input, output, stride);
        return;
      }
#endif  // DCT_HIGH_BIT_DEPTH
      {
        // Add/subtract
        const __m128i x0 = ADD_EPI16(q4, r0);
        const __m128i x1 = SUB_EPI16(q4, r0);
        const __m128i x2 = SUB_EPI16(q7, r1);
        const __m128i x3 = ADD_EPI16(q7, r1);
#if DCT_HIGH_BIT_DEPTH
        overflow = check_epi16_overflow_x4(&x0, &x1, &x2, &x3);
        if (overflow) {
          vpx_highbd_fdct8x8_c(input, output, stride);
          return;
        }
#endif  // DCT_HIGH_BIT_DEPTH
        // Interleave to do the multiply by constants which gets us into 32bits
        {
          const __m128i t0 = _mm_unpacklo_epi16(x0, x3);
          const __m128i t1 = _mm_unpackhi_epi16(x0, x3);
          const __m128i t2 = _mm_unpacklo_epi16(x1, x2);
          const __m128i t3 = _mm_unpackhi_epi16(x1, x2);
          const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p28_p04);
          const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p28_p04);
          const __m128i u2 = _mm_madd_epi16(t0, k__cospi_m04_p28);
          const __m128i u3 = _mm_madd_epi16(t1, k__cospi_m04_p28);
          const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p12_p20);
          const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p12_p20);
          const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m20_p12);
          const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m20_p12);
          // dct_const_round_shift
          const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
          const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
          const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
          const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
          const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
          const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
          const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
          const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
          const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
          const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
          const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
          const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
          const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
          const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
          const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
          const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
          // Combine
          res1 = _mm_packs_epi32(w0, w1);
          res7 = _mm_packs_epi32(w2, w3);
          res5 = _mm_packs_epi32(w4, w5);
          res3 = _mm_packs_epi32(w6, w7);
#if DCT_HIGH_BIT_DEPTH
          overflow = check_epi16_overflow_x4(&res1, &res7, &res5, &res3);
          if (overflow) {
            vpx_highbd_fdct8x8_c(input, output, stride);
            return;
          }
#endif  // DCT_HIGH_BIT_DEPTH
        }
      }
    }
    // Transpose the 8x8.
    {
      // 00 01 02 03 04 05 06 07
      // 10 11 12 13 14 15 16 17
      // 20 21 22 23 24 25 26 27
      // 30 31 32 33 34 35 36 37
      // 40 41 42 43 44 45 46 47
      // 50 51 52 53 54 55 56 57
      // 60 61 62 63 64 65 66 67
      // 70 71 72 73 74 75 76 77
      const __m128i tr0_0 = _mm_unpacklo_epi16(res0, res1);
      const __m128i tr0_1 = _mm_unpacklo_epi16(res2, res3);
      const __m128i tr0_2 = _mm_unpackhi_epi16(res0, res1);
      const __m128i tr0_3 = _mm_unpackhi_epi16(res2, res3);
      const __m128i tr0_4 = _mm_unpacklo_epi16(res4, res5);
      const __m128i tr0_5 = _mm_unpacklo_epi16(res6, res7);
      const __m128i tr0_6 = _mm_unpackhi_epi16(res4, res5);
      const __m128i tr0_7 = _mm_unpackhi_epi16(res6, res7);
      // 00 10 01 11 02 12 03 13
      // 20 30 21 31 22 32 23 33
      // 04 14 05 15 06 16 07 17
      // 24 34 25 35 26 36 27 37
      // 40 50 41 51 42 52 43 53
      // 60 70 61 71 62 72 63 73
      // 44 54 45 55 46 56 47 57
      // 64 74 65 75 66 76 67 77
      const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
      const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
      const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
      const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);
      const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
      const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
      const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);
      const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
      // 00 10 20 30 01 11 21 31
      // 40 50 60 70 41 51 61 71
      // 02 12 22 32 03 13 23 33
      // 42 52 62 72 43 53 63 73
      // 04 14 24 34 05 15 25 35
      // 44 54 64 74 45 55 65 75
      // 06 16 26 36 07 17 27 37
      // 46 56 66 76 47 57 67 77
      in0 = _mm_unpacklo_epi64(tr1_0, tr1_4);
      in1 = _mm_unpackhi_epi64(tr1_0, tr1_4);
      in2 = _mm_unpacklo_epi64(tr1_2, tr1_6);
      in3 = _mm_unpackhi_epi64(tr1_2, tr1_6);
      in4 = _mm_unpacklo_epi64(tr1_1, tr1_5);
      in5 = _mm_unpackhi_epi64(tr1_1, tr1_5);
      in6 = _mm_unpacklo_epi64(tr1_3, tr1_7);
      in7 = _mm_unpackhi_epi64(tr1_3, tr1_7);
      // 00 10 20 30 40 50 60 70
      // 01 11 21 31 41 51 61 71
      // 02 12 22 32 42 52 62 72
      // 03 13 23 33 43 53 63 73
      // 04 14 24 34 44 54 64 74
      // 05 15 25 35 45 55 65 75
      // 06 16 26 36 46 56 66 76
      // 07 17 27 37 47 57 67 77
    }
  }
  // Post-condition output and store it
  {
    // Post-condition (division by two)
    // division by two of 16-bit signed numbers using shifts
    // n / 2 = (n - (n >> 15)) >> 1
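    // The arithmetic right shift rounds toward negative infinity; subtracting
    // the sign (n >> 15 is -1 for negative n, 0 otherwise) before shifting
    // makes the result round toward zero, matching C integer division by two.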
    const __m128i sign_in0 = _mm_srai_epi16(in0, 15);
    const __m128i sign_in1 = _mm_srai_epi16(in1, 15);
    const __m128i sign_in2 = _mm_srai_epi16(in2, 15);
    const __m128i sign_in3 = _mm_srai_epi16(in3, 15);
    const __m128i sign_in4 = _mm_srai_epi16(in4, 15);
    const __m128i sign_in5 = _mm_srai_epi16(in5, 15);
    const __m128i sign_in6 = _mm_srai_epi16(in6, 15);
    const __m128i sign_in7 = _mm_srai_epi16(in7, 15);
    in0 = _mm_sub_epi16(in0, sign_in0);
    in1 = _mm_sub_epi16(in1, sign_in1);
    in2 = _mm_sub_epi16(in2, sign_in2);
    in3 = _mm_sub_epi16(in3, sign_in3);
    in4 = _mm_sub_epi16(in4, sign_in4);
    in5 = _mm_sub_epi16(in5, sign_in5);
    in6 = _mm_sub_epi16(in6, sign_in6);
    in7 = _mm_sub_epi16(in7, sign_in7);
    in0 = _mm_srai_epi16(in0, 1);
    in1 = _mm_srai_epi16(in1, 1);
    in2 = _mm_srai_epi16(in2, 1);
    in3 = _mm_srai_epi16(in3, 1);
    in4 = _mm_srai_epi16(in4, 1);
    in5 = _mm_srai_epi16(in5, 1);
    in6 = _mm_srai_epi16(in6, 1);
    in7 = _mm_srai_epi16(in7, 1);
    // store results
    store_output(&in0, (output + 0 * 8));
    store_output(&in1, (output + 1 * 8));
    store_output(&in2, (output + 2 * 8));
    store_output(&in3, (output + 3 * 8));
    store_output(&in4, (output + 4 * 8));
    store_output(&in5, (output + 5 * 8));
    store_output(&in6, (output + 6 * 8));
    store_output(&in7, (output + 7 * 8));
  }
}

void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) {
  // The 2D transform is done with two passes which are actually pretty
  // similar. In the first one, we transform the columns and transpose
  // the results. In the second one, we transform the rows. To achieve that,
  // as the first pass results are transposed, we transpose the columns (that
  // is the transposed rows) and transpose the results (so that it goes back
  // in normal/row positions).
  int pass;
  // We need an intermediate buffer between passes.
  DECLARE_ALIGNED(16, int16_t, intermediate[256]);
  const int16_t *in = input;
  int16_t *out0 = intermediate;
  tran_low_t *out1 = output;
  // Constants
  // In one case the constants are all the same. In all other cases it is a
  // pair of constants that needs to be repeated four times, which is done by
  // constructing the 32-bit constant corresponding to that pair.
  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
  const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
  const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
  const __m128i k__cospi_p08_m24 = pair_set_epi16(cospi_8_64, -cospi_24_64);
  const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
  const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
  const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
  const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
  const __m128i k__cospi_p30_p02 = pair_set_epi16(cospi_30_64, cospi_2_64);
  const __m128i k__cospi_p14_p18 = pair_set_epi16(cospi_14_64, cospi_18_64);
  const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64, cospi_30_64);
  const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64, cospi_14_64);
  const __m128i k__cospi_p22_p10 = pair_set_epi16(cospi_22_64, cospi_10_64);
  const __m128i k__cospi_p06_p26 = pair_set_epi16(cospi_6_64, cospi_26_64);
  const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64, cospi_22_64);
  const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64, cospi_6_64);
  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
  const __m128i kOne = _mm_set1_epi16(1);
  // Do the two transform/transpose passes
  for (pass = 0; pass < 2; ++pass) {
    // We process eight columns (transposed rows in second pass) at a time.
    int column_start;
#if DCT_HIGH_BIT_DEPTH
    int overflow;
#endif
    for (column_start = 0; column_start < 16; column_start += 8) {
      __m128i in00, in01, in02, in03, in04, in05, in06, in07;
      __m128i in08, in09, in10, in11, in12, in13, in14, in15;
      __m128i input0, input1, input2, input3, input4, input5, input6, input7;
      __m128i step1_0, step1_1, step1_2, step1_3;
      __m128i step1_4, step1_5, step1_6, step1_7;
      __m128i step2_1, step2_2, step2_3, step2_4, step2_5, step2_6;
      __m128i step3_0, step3_1, step3_2, step3_3;
      __m128i step3_4, step3_5, step3_6, step3_7;
      __m128i res00, res01, res02, res03, res04, res05, res06, res07;
      __m128i res08, res09, res10, res11, res12, res13, res14, res15;
      // Load and pre-condition input.
      if (0 == pass) {
        in00 = _mm_load_si128((const __m128i *)(in + 0 * stride));
        in01 = _mm_load_si128((const __m128i *)(in + 1 * stride));
        in02 = _mm_load_si128((const __m128i *)(in + 2 * stride));
        in03 = _mm_load_si128((const __m128i *)(in + 3 * stride));
        in04 = _mm_load_si128((const __m128i *)(in + 4 * stride));
        in05 = _mm_load_si128((const __m128i *)(in + 5 * stride));
        in06 = _mm_load_si128((const __m128i *)(in + 6 * stride));
        in07 = _mm_load_si128((const __m128i *)(in + 7 * stride));
        in08 = _mm_load_si128((const __m128i *)(in + 8 * stride));
        in09 = _mm_load_si128((const __m128i *)(in + 9 * stride));
        in10 = _mm_load_si128((const __m128i *)(in + 10 * stride));
        in11 = _mm_load_si128((const __m128i *)(in + 11 * stride));
        in12 = _mm_load_si128((const __m128i *)(in + 12 * stride));
        in13 = _mm_load_si128((const __m128i *)(in + 13 * stride));
        in14 = _mm_load_si128((const __m128i *)(in + 14 * stride));
        in15 = _mm_load_si128((const __m128i *)(in + 15 * stride));
        // x = x << 2
        in00 = _mm_slli_epi16(in00, 2);
        in01 = _mm_slli_epi16(in01, 2);
        in02 = _mm_slli_epi16(in02, 2);
        in03 = _mm_slli_epi16(in03, 2);
        in04 = _mm_slli_epi16(in04, 2);
        in05 = _mm_slli_epi16(in05, 2);
        in06 = _mm_slli_epi16(in06, 2);
        in07 = _mm_slli_epi16(in07, 2);
        in08 = _mm_slli_epi16(in08, 2);
        in09 = _mm_slli_epi16(in09, 2);
        in10 = _mm_slli_epi16(in10, 2);
        in11 = _mm_slli_epi16(in11, 2);
        in12 = _mm_slli_epi16(in12, 2);
        in13 = _mm_slli_epi16(in13, 2);
        in14 = _mm_slli_epi16(in14, 2);
        in15 = _mm_slli_epi16(in15, 2);
      } else {
        in00 = _mm_load_si128((const __m128i *)(in + 0 * 16));
        in01 = _mm_load_si128((const __m128i *)(in + 1 * 16));
        in02 = _mm_load_si128((const __m128i *)(in + 2 * 16));
        in03 = _mm_load_si128((const __m128i *)(in + 3 * 16));
        in04 = _mm_load_si128((const __m128i *)(in + 4 * 16));
        in05 = _mm_load_si128((const __m128i *)(in + 5 * 16));
        in06 = _mm_load_si128((const __m128i *)(in + 6 * 16));
        in07 = _mm_load_si128((const __m128i *)(in + 7 * 16));
        in08 = _mm_load_si128((const __m128i *)(in + 8 * 16));
        in09 = _mm_load_si128((const __m128i *)(in + 9 * 16));
        in10 = _mm_load_si128((const __m128i *)(in + 10 * 16));
        in11 = _mm_load_si128((const __m128i *)(in + 11 * 16));
        in12 = _mm_load_si128((const __m128i *)(in + 12 * 16));
        in13 = _mm_load_si128((const __m128i *)(in + 13 * 16));
        in14 = _mm_load_si128((const __m128i *)(in + 14 * 16));
        in15 = _mm_load_si128((const __m128i *)(in + 15 * 16));
        // x = (x + 1) >> 2
        in00 = _mm_add_epi16(in00, kOne);
        in01 = _mm_add_epi16(in01, kOne);
        in02 = _mm_add_epi16(in02, kOne);
        in03 = _mm_add_epi16(in03, kOne);
        in04 = _mm_add_epi16(in04, kOne);
        in05 = _mm_add_epi16(in05, kOne);
        in06 = _mm_add_epi16(in06, kOne);
        in07 = _mm_add_epi16(in07, kOne);
        in08 = _mm_add_epi16(in08, kOne);
        in09 = _mm_add_epi16(in09, kOne);
        in10 = _mm_add_epi16(in10, kOne);
        in11 = _mm_add_epi16(in11, kOne);
        in12 = _mm_add_epi16(in12, kOne);
        in13 = _mm_add_epi16(in13, kOne);
        in14 = _mm_add_epi16(in14, kOne);
        in15 = _mm_add_epi16(in15, kOne);
        in00 = _mm_srai_epi16(in00, 2);
        in01 = _mm_srai_epi16(in01, 2);
        in02 = _mm_srai_epi16(in02, 2);
        in03 = _mm_srai_epi16(in03, 2);
        in04 = _mm_srai_epi16(in04, 2);
        in05 = _mm_srai_epi16(in05, 2);
        in06 = _mm_srai_epi16(in06, 2);
        in07 = _mm_srai_epi16(in07, 2);
        in08 = _mm_srai_epi16(in08, 2);
        in09 = _mm_srai_epi16(in09, 2);
        in10 = _mm_srai_epi16(in10, 2);
        in11 = _mm_srai_epi16(in11, 2);
        in12 = _mm_srai_epi16(in12, 2);
        in13 = _mm_srai_epi16(in13, 2);
        in14 = _mm_srai_epi16(in14, 2);
        in15 = _mm_srai_epi16(in15, 2);
      }
      in += 8;
      // Calculate input for the first 8 results.
      {
        input0 = ADD_EPI16(in00, in15);
        input1 = ADD_EPI16(in01, in14);
        input2 = ADD_EPI16(in02, in13);
        input3 = ADD_EPI16(in03, in12);
        input4 = ADD_EPI16(in04, in11);
        input5 = ADD_EPI16(in05, in10);
        input6 = ADD_EPI16(in06, in09);
        input7 = ADD_EPI16(in07, in08);
#if DCT_HIGH_BIT_DEPTH
        overflow = check_epi16_overflow_x8(&input0, &input1, &input2, &input3,
                                           &input4, &input5, &input6, &input7);
        if (overflow) {
          vpx_highbd_fdct16x16_c(input, output, stride);
          return;
        }
#endif  // DCT_HIGH_BIT_DEPTH
      }
      // Calculate input for the next 8 results.
      {
        step1_0 = SUB_EPI16(in07, in08);
        step1_1 = SUB_EPI16(in06, in09);
        step1_2 = SUB_EPI16(in05, in10);
        step1_3 = SUB_EPI16(in04, in11);
        step1_4 = SUB_EPI16(in03, in12);
        step1_5 = SUB_EPI16(in02, in13);
        step1_6 = SUB_EPI16(in01, in14);
        step1_7 = SUB_EPI16(in00, in15);
#if DCT_HIGH_BIT_DEPTH
        overflow =
            check_epi16_overflow_x8(&step1_0, &step1_1, &step1_2, &step1_3,
                                    &step1_4, &step1_5, &step1_6, &step1_7);
        if (overflow) {
          vpx_highbd_fdct16x16_c(input, output, stride);
          return;
        }
#endif  // DCT_HIGH_BIT_DEPTH
      }
      // Work on the first eight values; fdct8(input, even_results);
      {
        // Add/subtract
        const __m128i q0 = ADD_EPI16(input0, input7);
        const __m128i q1 = ADD_EPI16(input1, input6);
        const __m128i q2 = ADD_EPI16(input2, input5);
        const __m128i q3 = ADD_EPI16(input3, input4);
        const __m128i q4 = SUB_EPI16(input3, input4);
        const __m128i q5 = SUB_EPI16(input2, input5);
        const __m128i q6 = SUB_EPI16(input1, input6);
        const __m128i q7 = SUB_EPI16(input0, input7);
#if DCT_HIGH_BIT_DEPTH
        overflow =
            check_epi16_overflow_x8(&q0, &q1, &q2, &q3, &q4, &q5, &q6, &q7);
        if (overflow) {
          vpx_highbd_fdct16x16_c(input, output, stride);
          return;
        }
#endif  // DCT_HIGH_BIT_DEPTH
        // Work on first four results
        {
          // Add/subtract
          const __m128i r0 = ADD_EPI16(q0, q3);
          const __m128i r1 = ADD_EPI16(q1, q2);
          const __m128i r2 = SUB_EPI16(q1, q2);
          const __m128i r3 = SUB_EPI16(q0, q3);
#if DCT_HIGH_BIT_DEPTH
          overflow = check_epi16_overflow_x4(&r0, &r1, &r2, &r3);
          if (overflow) {
            vpx_highbd_fdct16x16_c(input, output, stride);
            return;
          }
#endif  // DCT_HIGH_BIT_DEPTH

          // Interleave to do the multiply by constants which gets us
          // into 32 bits.
          {
            const __m128i t0 = _mm_unpacklo_epi16(r0, r1);
            const __m128i t1 = _mm_unpackhi_epi16(r0, r1);
            const __m128i t2 = _mm_unpacklo_epi16(r2, r3);
            const __m128i t3 = _mm_unpackhi_epi16(r2, r3);
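            // mult_round_shift() (fwd_txfm_sse2.h) wraps the madd, rounding
            // add, arithmetic shift and pack sequence that the 8x8 transform
            // above spells out inline, returning one packed 16-bit vector.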
            res00 = mult_round_shift(&t0, &t1, &k__cospi_p16_p16,
                                     &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
            res08 = mult_round_shift(&t0, &t1, &k__cospi_p16_m16,
                                     &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
            res04 = mult_round_shift(&t2, &t3, &k__cospi_p24_p08,
                                     &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
            res12 = mult_round_shift(&t2, &t3, &k__cospi_m08_p24,
                                     &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
#if DCT_HIGH_BIT_DEPTH
            overflow = check_epi16_overflow_x4(&res00, &res08, &res04, &res12);
            if (overflow) {
              vpx_highbd_fdct16x16_c(input, output, stride);
              return;
            }
#endif  // DCT_HIGH_BIT_DEPTH
          }
        }
        // Work on next four results
        {
          // Interleave to do the multiply by constants which gets us
          // into 32 bits.
          const __m128i d0 = _mm_unpacklo_epi16(q6, q5);
          const __m128i d1 = _mm_unpackhi_epi16(q6, q5);
          const __m128i r0 =
              mult_round_shift(&d0, &d1, &k__cospi_p16_m16,
                               &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
          const __m128i r1 =
              mult_round_shift(&d0, &d1, &k__cospi_p16_p16,
                               &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
#if DCT_HIGH_BIT_DEPTH
          overflow = check_epi16_overflow_x2(&r0, &r1);
          if (overflow) {
            vpx_highbd_fdct16x16_c(input, output, stride);
            return;
          }
#endif  // DCT_HIGH_BIT_DEPTH
          {
            // Add/subtract
            const __m128i x0 = ADD_EPI16(q4, r0);
            const __m128i x1 = SUB_EPI16(q4, r0);
            const __m128i x2 = SUB_EPI16(q7, r1);
            const __m128i x3 = ADD_EPI16(q7, r1);
#if DCT_HIGH_BIT_DEPTH
            overflow = check_epi16_overflow_x4(&x0, &x1, &x2, &x3);
            if (overflow) {
              vpx_highbd_fdct16x16_c(input, output, stride);
              return;
            }
#endif  // DCT_HIGH_BIT_DEPTH

            // Interleave to do the multiply by constants which gets us
            // into 32 bits.
            {
              const __m128i t0 = _mm_unpacklo_epi16(x0, x3);
              const __m128i t1 = _mm_unpackhi_epi16(x0, x3);
              const __m128i t2 = _mm_unpacklo_epi16(x1, x2);
              const __m128i t3 = _mm_unpackhi_epi16(x1, x2);
              res02 = mult_round_shift(&t0, &t1, &k__cospi_p28_p04,
                                       &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
              res14 = mult_round_shift(&t0, &t1, &k__cospi_m04_p28,
                                       &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
              res10 = mult_round_shift(&t2, &t3, &k__cospi_p12_p20,
                                       &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
              res06 = mult_round_shift(&t2, &t3, &k__cospi_m20_p12,
                                       &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
#if DCT_HIGH_BIT_DEPTH
              overflow =
                  check_epi16_overflow_x4(&res02, &res14, &res10, &res06);
              if (overflow) {
                vpx_highbd_fdct16x16_c(input, output, stride);
                return;
              }
#endif  // DCT_HIGH_BIT_DEPTH
            }
          }
        }
      }
      // Work on the next eight values; step1 -> odd_results
      {
        // step 2
        {
          const __m128i t0 = _mm_unpacklo_epi16(step1_5, step1_2);
          const __m128i t1 = _mm_unpackhi_epi16(step1_5, step1_2);
          const __m128i t2 = _mm_unpacklo_epi16(step1_4, step1_3);
          const __m128i t3 = _mm_unpackhi_epi16(step1_4, step1_3);
          step2_2 = mult_round_shift(&t0, &t1, &k__cospi_p16_m16,
                                     &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
          step2_3 = mult_round_shift(&t2, &t3, &k__cospi_p16_m16,
                                     &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
          step2_5 = mult_round_shift(&t0, &t1, &k__cospi_p16_p16,
                                     &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
          step2_4 = mult_round_shift(&t2, &t3, &k__cospi_p16_p16,
                                     &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
#if DCT_HIGH_BIT_DEPTH
          overflow =
              check_epi16_overflow_x4(&step2_2, &step2_3, &step2_5, &step2_4);
          if (overflow) {
            vpx_highbd_fdct16x16_c(input, output, stride);
            return;
          }
#endif  // DCT_HIGH_BIT_DEPTH
        }
        // step 3
        {
          step3_0 = ADD_EPI16(step1_0, step2_3);
          step3_1 = ADD_EPI16(step1_1, step2_2);
          step3_2 = SUB_EPI16(step1_1, step2_2);
          step3_3 = SUB_EPI16(step1_0, step2_3);
          step3_4 = SUB_EPI16(step1_7, step2_4);
          step3_5 = SUB_EPI16(step1_6, step2_5);
          step3_6 = ADD_EPI16(step1_6, step2_5);
          step3_7 = ADD_EPI16(step1_7, step2_4);
#if DCT_HIGH_BIT_DEPTH
          overflow =
              check_epi16_overflow_x8(&step3_0, &step3_1, &step3_2, &step3_3,
                                      &step3_4, &step3_5, &step3_6, &step3_7);
          if (overflow) {
            vpx_highbd_fdct16x16_c(input, output, stride);
            return;
          }
#endif  // DCT_HIGH_BIT_DEPTH
        }
        // step 4
        {
          const __m128i t0 = _mm_unpacklo_epi16(step3_1, step3_6);
          const __m128i t1 = _mm_unpackhi_epi16(step3_1, step3_6);
          const __m128i t2 = _mm_unpacklo_epi16(step3_2, step3_5);
          const __m128i t3 = _mm_unpackhi_epi16(step3_2, step3_5);
          step2_1 = mult_round_shift(&t0, &t1, &k__cospi_m08_p24,
                                     &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
          step2_2 = mult_round_shift(&t2, &t3, &k__cospi_p24_p08,
                                     &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
          step2_6 = mult_round_shift(&t0, &t1, &k__cospi_p24_p08,
                                     &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
          step2_5 = mult_round_shift(&t2, &t3, &k__cospi_p08_m24,
                                     &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
#if DCT_HIGH_BIT_DEPTH
          overflow =
              check_epi16_overflow_x4(&step2_1, &step2_2, &step2_6, &step2_5);
          if (overflow) {
            vpx_highbd_fdct16x16_c(input, output, stride);
            return;
          }
#endif  // DCT_HIGH_BIT_DEPTH
        }
        // step 5
        {
          step1_0 = ADD_EPI16(step3_0, step2_1);
          step1_1 = SUB_EPI16(step3_0, step2_1);
          step1_2 = ADD_EPI16(step3_3, step2_2);
          step1_3 = SUB_EPI16(step3_3, step2_2);
          step1_4 = SUB_EPI16(step3_4, step2_5);
          step1_5 = ADD_EPI16(step3_4, step2_5);
          step1_6 = SUB_EPI16(step3_7, step2_6);
          step1_7 = ADD_EPI16(step3_7, step2_6);
#if DCT_HIGH_BIT_DEPTH
          overflow =
              check_epi16_overflow_x8(&step1_0, &step1_1, &step1_2, &step1_3,
                                      &step1_4, &step1_5, &step1_6, &step1_7);
          if (overflow) {
            vpx_highbd_fdct16x16_c(input, output, stride);
            return;
          }
#endif  // DCT_HIGH_BIT_DEPTH
        }
        // step 6
        {
          const __m128i t0 = _mm_unpacklo_epi16(step1_0, step1_7);
          const __m128i t1 = _mm_unpackhi_epi16(step1_0, step1_7);
          const __m128i t2 = _mm_unpacklo_epi16(step1_1, step1_6);
          const __m128i t3 = _mm_unpackhi_epi16(step1_1, step1_6);
          res01 = mult_round_shift(&t0, &t1, &k__cospi_p30_p02,
                                   &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
          res09 = mult_round_shift(&t2, &t3, &k__cospi_p14_p18,
                                   &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
          res15 = mult_round_shift(&t0, &t1, &k__cospi_m02_p30,
                                   &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
          res07 = mult_round_shift(&t2, &t3, &k__cospi_m18_p14,
                                   &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
#if DCT_HIGH_BIT_DEPTH
          overflow = check_epi16_overflow_x4(&res01, &res09, &res15, &res07);
          if (overflow) {
            vpx_highbd_fdct16x16_c(input, output, stride);
            return;
          }
#endif  // DCT_HIGH_BIT_DEPTH
        }
        {
          const __m128i t0 = _mm_unpacklo_epi16(step1_2, step1_5);
          const __m128i t1 = _mm_unpackhi_epi16(step1_2, step1_5);
          const __m128i t2 = _mm_unpacklo_epi16(step1_3, step1_4);
          const __m128i t3 = _mm_unpackhi_epi16(step1_3, step1_4);
          res05 = mult_round_shift(&t0, &t1, &k__cospi_p22_p10,
                                   &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
          res13 = mult_round_shift(&t2, &t3, &k__cospi_p06_p26,
                                   &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
          res11 = mult_round_shift(&t0, &t1, &k__cospi_m10_p22,
                                   &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
          res03 = mult_round_shift(&t2, &t3, &k__cospi_m26_p06,
                                   &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
#if DCT_HIGH_BIT_DEPTH
          overflow = check_epi16_overflow_x4(&res05, &res13, &res11, &res03);
          if (overflow) {
            vpx_highbd_fdct16x16_c(input, output, stride);
            return;
          }
#endif  // DCT_HIGH_BIT_DEPTH
        }
      }
      // Transpose the results, do it as two 8x8 transposes.
      transpose_and_output8x8(&res00, &res01, &res02, &res03, &res04, &res05,
                              &res06, &res07, pass, out0, out1);
      transpose_and_output8x8(&res08, &res09, &res10, &res11, &res12, &res13,
                              &res14, &res15, pass, out0 + 8, out1 + 8);
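      // transpose_and_output8x8() transposes each 8x8 block of results and,
      // depending on 'pass', writes it either to the int16_t intermediate
      // buffer (out0, first pass) or to the tran_low_t output (out1, second).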
      if (pass == 0) {
        out0 += 8 * 16;
      } else {
        out1 += 8 * 16;
      }
    }
    // Setup in/out for next pass.
    in = intermediate;
  }
}

#undef ADD_EPI16
#undef SUB_EPI16