/*
 * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/x86/highbd_inv_txfm_sse2.h"
#include "vpx_dsp/x86/inv_txfm_sse2.h"
#include "vpx_dsp/x86/transpose_sse2.h"
#include "vpx_dsp/x86/txfm_common_sse2.h"

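// Note: highbd_butterfly_sse2(in0, in1, c0, c1, &out0, &out1) is assumed to
// implement the rounded rotation used by the scalar reference idct32_c() in
// vpx_dsp/inv_txfm.c:
//   out0 = dct_const_round_shift(in0 * c0 - in1 * c1)
//   out1 = dct_const_round_shift(in0 * c1 + in1 * c0)
// This can be checked against the first stage-1 call further below, which
// corresponds to the scalar
//   step1[16] = input[1] * cospi_31_64 - input[31] * cospi_1_64;
//   step1[31] = input[1] * cospi_1_64 + input[31] * cospi_31_64;
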
static INLINE void highbd_idct32_4x32_quarter_2_stage_4_to_6(
    __m128i *const step1 /*step1[16]*/, __m128i *const out /*out[16]*/) {
  __m128i step2[32];

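  // Sign convention: the callers hand in step1[10] and step1[13] already
  // negated (see the "step1[10] = -step1[10]" notes in the quarter_2
  // functions). The second butterfly below compensates by swapping its cosine
  // constants and its output order, so no explicit negation is needed here.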
  // stage 4
  step2[8] = step1[8];
  step2[15] = step1[15];
  highbd_butterfly_sse2(step1[14], step1[9], cospi_24_64, cospi_8_64, &step2[9],
                        &step2[14]);
  highbd_butterfly_sse2(step1[10], step1[13], cospi_8_64, cospi_24_64,
                        &step2[13], &step2[10]);
  step2[11] = step1[11];
  step2[12] = step1[12];

  // stage 5
  step1[8] = _mm_add_epi32(step2[8], step2[11]);
  step1[9] = _mm_add_epi32(step2[9], step2[10]);
  step1[10] = _mm_sub_epi32(step2[9], step2[10]);
  step1[11] = _mm_sub_epi32(step2[8], step2[11]);
  step1[12] = _mm_sub_epi32(step2[15], step2[12]);
  step1[13] = _mm_sub_epi32(step2[14], step2[13]);
  step1[14] = _mm_add_epi32(step2[14], step2[13]);
  step1[15] = _mm_add_epi32(step2[15], step2[12]);

  // stage 6
  out[8] = step1[8];
  out[9] = step1[9];
  highbd_butterfly_sse2(step1[13], step1[10], cospi_16_64, cospi_16_64,
                        &out[10], &out[13]);
  highbd_butterfly_sse2(step1[12], step1[11], cospi_16_64, cospi_16_64,
                        &out[11], &out[12]);
  out[14] = step1[14];
  out[15] = step1[15];
}

static INLINE void highbd_idct32_4x32_quarter_3_4_stage_4_to_7(
    __m128i *const step1 /*step1[32]*/, __m128i *const out /*out[32]*/) {
  __m128i step2[32];

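  // Stage 4 below stores step2[20], step2[21], step2[26] and step2[27]
  // negated (noted inline); the stage 5 cospi_8/cospi_24 butterflies absorb
  // the sign by swapping their constants and outputs, as in quarter_2 above.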
  // stage 4
  step2[16] = _mm_add_epi32(step1[16], step1[19]);
  step2[17] = _mm_add_epi32(step1[17], step1[18]);
  step2[18] = _mm_sub_epi32(step1[17], step1[18]);
  step2[19] = _mm_sub_epi32(step1[16], step1[19]);
  step2[20] = _mm_sub_epi32(step1[20], step1[23]);  // step2[20] = -step2[20]
  step2[21] = _mm_sub_epi32(step1[21], step1[22]);  // step2[21] = -step2[21]
  step2[22] = _mm_add_epi32(step1[21], step1[22]);
  step2[23] = _mm_add_epi32(step1[20], step1[23]);

  step2[24] = _mm_add_epi32(step1[27], step1[24]);
  step2[25] = _mm_add_epi32(step1[26], step1[25]);
  step2[26] = _mm_sub_epi32(step1[26], step1[25]);  // step2[26] = -step2[26]
  step2[27] = _mm_sub_epi32(step1[27], step1[24]);  // step2[27] = -step2[27]
  step2[28] = _mm_sub_epi32(step1[31], step1[28]);
  step2[29] = _mm_sub_epi32(step1[30], step1[29]);
  step2[30] = _mm_add_epi32(step1[29], step1[30]);
  step2[31] = _mm_add_epi32(step1[28], step1[31]);

  // stage 5
  step1[16] = step2[16];
  step1[17] = step2[17];
  highbd_butterfly_sse2(step2[29], step2[18], cospi_24_64, cospi_8_64,
                        &step1[18], &step1[29]);
  highbd_butterfly_sse2(step2[28], step2[19], cospi_24_64, cospi_8_64,
                        &step1[19], &step1[28]);
  highbd_butterfly_sse2(step2[20], step2[27], cospi_8_64, cospi_24_64,
                        &step1[27], &step1[20]);
  highbd_butterfly_sse2(step2[21], step2[26], cospi_8_64, cospi_24_64,
                        &step1[26], &step1[21]);
  step1[22] = step2[22];
  step1[23] = step2[23];
  step1[24] = step2[24];
  step1[25] = step2[25];
  step1[30] = step2[30];
  step1[31] = step2[31];

  // stage 6
  step2[16] = _mm_add_epi32(step1[16], step1[23]);
  step2[17] = _mm_add_epi32(step1[17], step1[22]);
  step2[18] = _mm_add_epi32(step1[18], step1[21]);
  step2[19] = _mm_add_epi32(step1[19], step1[20]);
  step2[20] = _mm_sub_epi32(step1[19], step1[20]);
  step2[21] = _mm_sub_epi32(step1[18], step1[21]);
  step2[22] = _mm_sub_epi32(step1[17], step1[22]);
  step2[23] = _mm_sub_epi32(step1[16], step1[23]);

  step2[24] = _mm_sub_epi32(step1[31], step1[24]);
  step2[25] = _mm_sub_epi32(step1[30], step1[25]);
  step2[26] = _mm_sub_epi32(step1[29], step1[26]);
  step2[27] = _mm_sub_epi32(step1[28], step1[27]);
  step2[28] = _mm_add_epi32(step1[27], step1[28]);
  step2[29] = _mm_add_epi32(step1[26], step1[29]);
  step2[30] = _mm_add_epi32(step1[25], step1[30]);
  step2[31] = _mm_add_epi32(step1[24], step1[31]);

  // stage 7
  out[16] = step2[16];
  out[17] = step2[17];
  out[18] = step2[18];
  out[19] = step2[19];
  highbd_butterfly_sse2(step2[27], step2[20], cospi_16_64, cospi_16_64,
                        &out[20], &out[27]);
  highbd_butterfly_sse2(step2[26], step2[21], cospi_16_64, cospi_16_64,
                        &out[21], &out[26]);
  highbd_butterfly_sse2(step2[25], step2[22], cospi_16_64, cospi_16_64,
                        &out[22], &out[25]);
  highbd_butterfly_sse2(step2[24], step2[23], cospi_16_64, cospi_16_64,
                        &out[23], &out[24]);
  out[28] = step2[28];
  out[29] = step2[29];
  out[30] = step2[30];
  out[31] = step2[31];
}

// Group the coefficient calculation into smaller functions to prevent stack
// spillover in 32x32 idct optimizations:
// quarter_1: 0-7
// quarter_2: 8-15
// quarter_3_4: 16-23, 24-31
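//
// The halves are recombined with highbd_add_sub_butterfly(temp, out, size),
// which is assumed to perform the standard closing idct stage on 32-bit
// lanes, as in the scalar reference:
//   out[i]            = temp[i] + temp[size - 1 - i]
//   out[size - 1 - i] = temp[i] - temp[size - 1 - i],  i = 0 .. size / 2 - 1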

// For each 4x32 block __m128i in[32]:
// inputs with index 0, 4, 8, 12, 16, 20, 24, 28;
// output pixels 0-7 in __m128i out[32].
static INLINE void highbd_idct32_1024_4x32_quarter_1(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[8]*/) {
  __m128i step1[8], step2[8];

  // stage 3
  highbd_butterfly_sse2(in[4], in[28], cospi_28_64, cospi_4_64, &step1[4],
                        &step1[7]);
  highbd_butterfly_sse2(in[20], in[12], cospi_12_64, cospi_20_64, &step1[5],
                        &step1[6]);

  // stage 4
  highbd_butterfly_sse2(in[0], in[16], cospi_16_64, cospi_16_64, &step2[1],
                        &step2[0]);
  highbd_butterfly_sse2(in[8], in[24], cospi_24_64, cospi_8_64, &step2[2],
                        &step2[3]);
  step2[4] = _mm_add_epi32(step1[4], step1[5]);
  step2[5] = _mm_sub_epi32(step1[4], step1[5]);
  step2[6] = _mm_sub_epi32(step1[7], step1[6]);
  step2[7] = _mm_add_epi32(step1[7], step1[6]);

  // stage 5
  step1[0] = _mm_add_epi32(step2[0], step2[3]);
  step1[1] = _mm_add_epi32(step2[1], step2[2]);
  step1[2] = _mm_sub_epi32(step2[1], step2[2]);
  step1[3] = _mm_sub_epi32(step2[0], step2[3]);
  step1[4] = step2[4];
  highbd_butterfly_sse2(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5],
                        &step1[6]);
  step1[7] = step2[7];

  // stage 6
  out[0] = _mm_add_epi32(step1[0], step1[7]);
  out[1] = _mm_add_epi32(step1[1], step1[6]);
  out[2] = _mm_add_epi32(step1[2], step1[5]);
  out[3] = _mm_add_epi32(step1[3], step1[4]);
  out[4] = _mm_sub_epi32(step1[3], step1[4]);
  out[5] = _mm_sub_epi32(step1[2], step1[5]);
  out[6] = _mm_sub_epi32(step1[1], step1[6]);
  out[7] = _mm_sub_epi32(step1[0], step1[7]);
}

// For each 4x32 block __m128i in[32]:
// inputs with index 2, 6, 10, 14, 18, 22, 26, 30;
// output pixels 8-15 in __m128i out[32].
static INLINE void highbd_idct32_1024_4x32_quarter_2(
    const __m128i *in /*in[32]*/, __m128i *out /*out[16]*/) {
  __m128i step1[32], step2[32];

  // stage 2
  highbd_butterfly_sse2(in[2], in[30], cospi_30_64, cospi_2_64, &step2[8],
                        &step2[15]);
  highbd_butterfly_sse2(in[18], in[14], cospi_14_64, cospi_18_64, &step2[9],
                        &step2[14]);
  highbd_butterfly_sse2(in[10], in[22], cospi_22_64, cospi_10_64, &step2[10],
                        &step2[13]);
  highbd_butterfly_sse2(in[26], in[6], cospi_6_64, cospi_26_64, &step2[11],
                        &step2[12]);

  // stage 3
  step1[8] = _mm_add_epi32(step2[8], step2[9]);
  step1[9] = _mm_sub_epi32(step2[8], step2[9]);
  step1[14] = _mm_sub_epi32(step2[15], step2[14]);
  step1[15] = _mm_add_epi32(step2[15], step2[14]);
  step1[10] = _mm_sub_epi32(step2[10], step2[11]);  // step1[10] = -step1[10]
  step1[11] = _mm_add_epi32(step2[10], step2[11]);
  step1[12] = _mm_add_epi32(step2[13], step2[12]);
  step1[13] = _mm_sub_epi32(step2[13], step2[12]);  // step1[13] = -step1[13]

  highbd_idct32_4x32_quarter_2_stage_4_to_6(step1, out);
}

static INLINE void highbd_idct32_1024_4x32_quarter_1_2(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i temp[16];
  highbd_idct32_1024_4x32_quarter_1(in, temp);
  highbd_idct32_1024_4x32_quarter_2(in, temp);
  // stage 7
  highbd_add_sub_butterfly(temp, out, 16);
}

// For each 4x32 block __m128i in[32]:
// inputs with odd index,
// 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31;
// output pixels 16-23, 24-31 in __m128i out[32].
static INLINE void highbd_idct32_1024_4x32_quarter_3_4(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i step1[32], step2[32];

  // stage 1
  highbd_butterfly_sse2(in[1], in[31], cospi_31_64, cospi_1_64, &step1[16],
                        &step1[31]);
  highbd_butterfly_sse2(in[17], in[15], cospi_15_64, cospi_17_64, &step1[17],
                        &step1[30]);
  highbd_butterfly_sse2(in[9], in[23], cospi_23_64, cospi_9_64, &step1[18],
                        &step1[29]);
  highbd_butterfly_sse2(in[25], in[7], cospi_7_64, cospi_25_64, &step1[19],
                        &step1[28]);

  highbd_butterfly_sse2(in[5], in[27], cospi_27_64, cospi_5_64, &step1[20],
                        &step1[27]);
  highbd_butterfly_sse2(in[21], in[11], cospi_11_64, cospi_21_64, &step1[21],
                        &step1[26]);

  highbd_butterfly_sse2(in[13], in[19], cospi_19_64, cospi_13_64, &step1[22],
                        &step1[25]);
  highbd_butterfly_sse2(in[29], in[3], cospi_3_64, cospi_29_64, &step1[23],
                        &step1[24]);

  // stage 2
  step2[16] = _mm_add_epi32(step1[16], step1[17]);
  step2[17] = _mm_sub_epi32(step1[16], step1[17]);
  step2[18] = _mm_sub_epi32(step1[18], step1[19]);  // step2[18] = -step2[18]
  step2[19] = _mm_add_epi32(step1[18], step1[19]);
  step2[20] = _mm_add_epi32(step1[20], step1[21]);
  step2[21] = _mm_sub_epi32(step1[20], step1[21]);
  step2[22] = _mm_sub_epi32(step1[22], step1[23]);  // step2[22] = -step2[22]
  step2[23] = _mm_add_epi32(step1[22], step1[23]);

  step2[24] = _mm_add_epi32(step1[25], step1[24]);
  step2[25] = _mm_sub_epi32(step1[25], step1[24]);  // step2[25] = -step2[25]
  step2[26] = _mm_sub_epi32(step1[27], step1[26]);
  step2[27] = _mm_add_epi32(step1[27], step1[26]);
  step2[28] = _mm_add_epi32(step1[29], step1[28]);
  step2[29] = _mm_sub_epi32(step1[29], step1[28]);  // step2[29] = -step2[29]
  step2[30] = _mm_sub_epi32(step1[31], step1[30]);
  step2[31] = _mm_add_epi32(step1[31], step1[30]);

  // stage 3
  step1[16] = step2[16];
  step1[31] = step2[31];
  highbd_butterfly_sse2(step2[30], step2[17], cospi_28_64, cospi_4_64,
                        &step1[17], &step1[30]);
  highbd_butterfly_sse2(step2[18], step2[29], cospi_4_64, cospi_28_64,
                        &step1[29], &step1[18]);
  step1[19] = step2[19];
  step1[20] = step2[20];
  highbd_butterfly_sse2(step2[26], step2[21], cospi_12_64, cospi_20_64,
                        &step1[21], &step1[26]);
  highbd_butterfly_sse2(step2[22], step2[25], cospi_20_64, cospi_12_64,
                        &step1[25], &step1[22]);
  step1[23] = step2[23];
  step1[24] = step2[24];
  step1[27] = step2[27];
  step1[28] = step2[28];

  highbd_idct32_4x32_quarter_3_4_stage_4_to_7(step1, out);
}

static void highbd_idct32_1024_4x32(__m128i *const io /*io[32]*/) {
  __m128i temp[32];

  highbd_idct32_1024_4x32_quarter_1_2(io, temp);
  highbd_idct32_1024_4x32_quarter_3_4(io, temp);
  // final stage
  highbd_add_sub_butterfly(temp, io, 32);
}

void vpx_highbd_idct32x32_1024_add_sse2(const tran_low_t *input, uint16_t *dest,
                                        int stride, int bd) {
  int i, j;

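  // Two paths: for bd == 8 the intermediate values fit in 16 bits, so the
  // rows are packed to 16-bit lanes and run through the non-highbd 8x32 idct;
  // otherwise the transform stays in 32-bit lanes, four columns per pass.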
  if (bd == 8) {
    __m128i col[4][32], io[32];

    // rows
    for (i = 0; i < 4; i++) {
      highbd_load_pack_transpose_32bit_8x8(&input[0], 32, &io[0]);
      highbd_load_pack_transpose_32bit_8x8(&input[8], 32, &io[8]);
      highbd_load_pack_transpose_32bit_8x8(&input[16], 32, &io[16]);
      highbd_load_pack_transpose_32bit_8x8(&input[24], 32, &io[24]);
      idct32_1024_8x32(io, col[i]);
      input += 32 << 3;
    }

    // columns
    for (i = 0; i < 32; i += 8) {
      // Transpose 32x8 block to 8x32 block
      transpose_16bit_8x8(col[0] + i, io);
      transpose_16bit_8x8(col[1] + i, io + 8);
      transpose_16bit_8x8(col[2] + i, io + 16);
      transpose_16bit_8x8(col[3] + i, io + 24);
      idct32_1024_8x32(io, io);
      for (j = 0; j < 32; ++j) {
        highbd_write_buffer_8(dest + j * stride, io[j], bd);
      }
      dest += 8;
    }
  } else {
    __m128i all[8][32], out[32], *in;

    for (i = 0; i < 8; i++) {
      in = all[i];
      highbd_load_transpose_32bit_8x4(&input[0], 32, &in[0]);
      highbd_load_transpose_32bit_8x4(&input[8], 32, &in[8]);
      highbd_load_transpose_32bit_8x4(&input[16], 32, &in[16]);
      highbd_load_transpose_32bit_8x4(&input[24], 32, &in[24]);
      highbd_idct32_1024_4x32(in);
      input += 4 * 32;
    }

    for (i = 0; i < 32; i += 4) {
      transpose_32bit_4x4(all[0] + i, out + 0);
      transpose_32bit_4x4(all[1] + i, out + 4);
      transpose_32bit_4x4(all[2] + i, out + 8);
      transpose_32bit_4x4(all[3] + i, out + 12);
      transpose_32bit_4x4(all[4] + i, out + 16);
      transpose_32bit_4x4(all[5] + i, out + 20);
      transpose_32bit_4x4(all[6] + i, out + 24);
      transpose_32bit_4x4(all[7] + i, out + 28);
      highbd_idct32_1024_4x32(out);

      for (j = 0; j < 32; ++j) {
        highbd_write_buffer_4(dest + j * stride, out[j], bd);
      }
      dest += 4;
    }
  }
}

// -----------------------------------------------------------------------------

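// When one butterfly input is known to be zero, the rotation degenerates to
// two multiplications of the remaining input. The helpers used below are
// assumed to implement these degenerate forms:
//   highbd_partial_butterfly_sse2(in, c0, c1, &out0, &out1):
//     out0 = dct_const_round_shift(in * c0)
//     out1 = dct_const_round_shift(in * c1)     (full butterfly with in1 = 0)
//   highbd_partial_butterfly_neg_sse2(in, c0, c1, &out0, &out1):
//     out0 = dct_const_round_shift(-in * c1)
//     out1 = dct_const_round_shift(in * c0)     (full butterfly with in0 = 0)
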
// For each 4x32 block __m128i in[32]:
// inputs with index 0, 4, 8, 12;
// output pixels 0-7 in __m128i out[32].
static INLINE void highbd_idct32_135_4x32_quarter_1(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[8]*/) {
  __m128i step1[8], step2[8];

  // stage 3
  highbd_partial_butterfly_sse2(in[4], cospi_28_64, cospi_4_64, &step1[4],
                                &step1[7]);
  highbd_partial_butterfly_neg_sse2(in[12], cospi_12_64, cospi_20_64, &step1[5],
                                    &step1[6]);

  // stage 4
  highbd_partial_butterfly_sse2(in[0], cospi_16_64, cospi_16_64, &step2[1],
                                &step2[0]);
  highbd_partial_butterfly_sse2(in[8], cospi_24_64, cospi_8_64, &step2[2],
                                &step2[3]);
  step2[4] = _mm_add_epi32(step1[4], step1[5]);
  step2[5] = _mm_sub_epi32(step1[4], step1[5]);
  step2[6] = _mm_sub_epi32(step1[7], step1[6]);
  step2[7] = _mm_add_epi32(step1[7], step1[6]);

  // stage 5
  step1[0] = _mm_add_epi32(step2[0], step2[3]);
  step1[1] = _mm_add_epi32(step2[1], step2[2]);
  step1[2] = _mm_sub_epi32(step2[1], step2[2]);
  step1[3] = _mm_sub_epi32(step2[0], step2[3]);
  step1[4] = step2[4];
  highbd_butterfly_sse2(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5],
                        &step1[6]);
  step1[7] = step2[7];

  // stage 6
  out[0] = _mm_add_epi32(step1[0], step1[7]);
  out[1] = _mm_add_epi32(step1[1], step1[6]);
  out[2] = _mm_add_epi32(step1[2], step1[5]);
  out[3] = _mm_add_epi32(step1[3], step1[4]);
  out[4] = _mm_sub_epi32(step1[3], step1[4]);
  out[5] = _mm_sub_epi32(step1[2], step1[5]);
  out[6] = _mm_sub_epi32(step1[1], step1[6]);
  out[7] = _mm_sub_epi32(step1[0], step1[7]);
}

// For each 4x32 block __m128i in[32]:
// inputs with index 2, 6, 10, 14;
// output pixels 8-15 in __m128i out[32].
static INLINE void highbd_idct32_135_4x32_quarter_2(
    const __m128i *in /*in[32]*/, __m128i *out /*out[16]*/) {
  __m128i step1[32], step2[32];

  // stage 2
  highbd_partial_butterfly_sse2(in[2], cospi_30_64, cospi_2_64, &step2[8],
                                &step2[15]);
  highbd_partial_butterfly_neg_sse2(in[14], cospi_14_64, cospi_18_64, &step2[9],
                                    &step2[14]);
  highbd_partial_butterfly_sse2(in[10], cospi_22_64, cospi_10_64, &step2[10],
                                &step2[13]);
  highbd_partial_butterfly_neg_sse2(in[6], cospi_6_64, cospi_26_64, &step2[11],
                                    &step2[12]);

  // stage 3
  step1[8] = _mm_add_epi32(step2[8], step2[9]);
  step1[9] = _mm_sub_epi32(step2[8], step2[9]);
  step1[14] = _mm_sub_epi32(step2[15], step2[14]);
  step1[15] = _mm_add_epi32(step2[15], step2[14]);
  step1[10] = _mm_sub_epi32(step2[10], step2[11]);  // step1[10] = -step1[10]
  step1[11] = _mm_add_epi32(step2[10], step2[11]);
  step1[12] = _mm_add_epi32(step2[13], step2[12]);
  step1[13] = _mm_sub_epi32(step2[13], step2[12]);  // step1[13] = -step1[13]

  highbd_idct32_4x32_quarter_2_stage_4_to_6(step1, out);
}

static INLINE void highbd_idct32_135_4x32_quarter_1_2(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i temp[16];
  highbd_idct32_135_4x32_quarter_1(in, temp);
  highbd_idct32_135_4x32_quarter_2(in, temp);
  // stage 7
  highbd_add_sub_butterfly(temp, out, 16);
}

// For each 4x32 block __m128i in[32]:
// inputs with odd index,
// 1, 3, 5, 7, 9, 11, 13, 15;
// output pixels 16-23, 24-31 in __m128i out[32].
static INLINE void highbd_idct32_135_4x32_quarter_3_4(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i step1[32], step2[32];

  // stage 1
  highbd_partial_butterfly_sse2(in[1], cospi_31_64, cospi_1_64, &step1[16],
                                &step1[31]);
  highbd_partial_butterfly_neg_sse2(in[15], cospi_15_64, cospi_17_64,
                                    &step1[17], &step1[30]);
  highbd_partial_butterfly_sse2(in[9], cospi_23_64, cospi_9_64, &step1[18],
                                &step1[29]);
  highbd_partial_butterfly_neg_sse2(in[7], cospi_7_64, cospi_25_64, &step1[19],
                                    &step1[28]);

  highbd_partial_butterfly_sse2(in[5], cospi_27_64, cospi_5_64, &step1[20],
                                &step1[27]);
  highbd_partial_butterfly_neg_sse2(in[11], cospi_11_64, cospi_21_64,
                                    &step1[21], &step1[26]);

  highbd_partial_butterfly_sse2(in[13], cospi_19_64, cospi_13_64, &step1[22],
                                &step1[25]);
  highbd_partial_butterfly_neg_sse2(in[3], cospi_3_64, cospi_29_64, &step1[23],
                                    &step1[24]);

  // stage 2
  step2[16] = _mm_add_epi32(step1[16], step1[17]);
  step2[17] = _mm_sub_epi32(step1[16], step1[17]);
  step2[18] = _mm_sub_epi32(step1[18], step1[19]);  // step2[18] = -step2[18]
  step2[19] = _mm_add_epi32(step1[18], step1[19]);
  step2[20] = _mm_add_epi32(step1[20], step1[21]);
  step2[21] = _mm_sub_epi32(step1[20], step1[21]);
  step2[22] = _mm_sub_epi32(step1[22], step1[23]);  // step2[22] = -step2[22]
  step2[23] = _mm_add_epi32(step1[22], step1[23]);

  step2[24] = _mm_add_epi32(step1[25], step1[24]);
  step2[25] = _mm_sub_epi32(step1[25], step1[24]);  // step2[25] = -step2[25]
  step2[26] = _mm_sub_epi32(step1[27], step1[26]);
  step2[27] = _mm_add_epi32(step1[27], step1[26]);
  step2[28] = _mm_add_epi32(step1[29], step1[28]);
  step2[29] = _mm_sub_epi32(step1[29], step1[28]);  // step2[29] = -step2[29]
  step2[30] = _mm_sub_epi32(step1[31], step1[30]);
  step2[31] = _mm_add_epi32(step1[31], step1[30]);

  // stage 3
  step1[16] = step2[16];
  step1[31] = step2[31];
  highbd_butterfly_sse2(step2[30], step2[17], cospi_28_64, cospi_4_64,
                        &step1[17], &step1[30]);
  highbd_butterfly_sse2(step2[18], step2[29], cospi_4_64, cospi_28_64,
                        &step1[29], &step1[18]);
  step1[19] = step2[19];
  step1[20] = step2[20];
  highbd_butterfly_sse2(step2[26], step2[21], cospi_12_64, cospi_20_64,
                        &step1[21], &step1[26]);
  highbd_butterfly_sse2(step2[22], step2[25], cospi_20_64, cospi_12_64,
                        &step1[25], &step1[22]);
  step1[23] = step2[23];
  step1[24] = step2[24];
  step1[27] = step2[27];
  step1[28] = step2[28];

  highbd_idct32_4x32_quarter_3_4_stage_4_to_7(step1, out);
}

static void highbd_idct32_135_4x32(__m128i *const io /*io[32]*/) {
  __m128i temp[32];

  highbd_idct32_135_4x32_quarter_1_2(io, temp);
  highbd_idct32_135_4x32_quarter_3_4(io, temp);
  // final stage
  highbd_add_sub_butterfly(temp, io, 32);
}

void vpx_highbd_idct32x32_135_add_sse2(const tran_low_t *input, uint16_t *dest,
                                       int stride, int bd) {
  int i, j;

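  // The _135 variant assumes the nonzero coefficients are confined to the
  // upper-left 16x16 corner of the 32x32 block, so only the first 16 rows
  // and 16 columns of input are loaded; the remaining lanes are treated as
  // zero.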
  if (bd == 8) {
    __m128i col[2][32], in[32], out[32];

    for (i = 16; i < 32; i++) {
      in[i] = _mm_setzero_si128();
    }

    // rows
    for (i = 0; i < 2; i++) {
      highbd_load_pack_transpose_32bit_8x8(&input[0], 32, &in[0]);
      highbd_load_pack_transpose_32bit_8x8(&input[8], 32, &in[8]);
      idct32_1024_8x32(in, col[i]);
      input += 32 << 3;
    }

    // columns
    for (i = 0; i < 32; i += 8) {
      transpose_16bit_8x8(col[0] + i, in);
      transpose_16bit_8x8(col[1] + i, in + 8);
      idct32_1024_8x32(in, out);
      for (j = 0; j < 32; ++j) {
        highbd_write_buffer_8(dest + j * stride, out[j], bd);
      }
      dest += 8;
    }
  } else {
    __m128i all[8][32], out[32], *in;

    for (i = 0; i < 4; i++) {
      in = all[i];
      highbd_load_transpose_32bit_8x4(&input[0], 32, &in[0]);
      highbd_load_transpose_32bit_8x4(&input[8], 32, &in[8]);
      highbd_idct32_135_4x32(in);
      input += 4 * 32;
    }

    for (i = 0; i < 32; i += 4) {
      transpose_32bit_4x4(all[0] + i, out + 0);
      transpose_32bit_4x4(all[1] + i, out + 4);
      transpose_32bit_4x4(all[2] + i, out + 8);
      transpose_32bit_4x4(all[3] + i, out + 12);
      highbd_idct32_135_4x32(out);

      for (j = 0; j < 32; ++j) {
        highbd_write_buffer_4(dest + j * stride, out[j], bd);
      }
      dest += 4;
    }
  }
}

// -----------------------------------------------------------------------------

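// With at most 34 nonzero coefficients, even more terms vanish: stages that
// would add or subtract a zero operand collapse into plain register copies,
// which is why several stages in the functions below only assign (e.g. stage
// 4 of quarter_1 and stage 2 of quarter_3_4).
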
// For each 4x32 block __m128i in[32]:
// inputs with index 0, 4;
// output pixels 0-7 in __m128i out[32].
static INLINE void highbd_idct32_34_4x32_quarter_1(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[8]*/) {
  __m128i step1[8], step2[8];

  // stage 3
  highbd_partial_butterfly_sse2(in[4], cospi_28_64, cospi_4_64, &step1[4],
                                &step1[7]);

  // stage 4
  highbd_partial_butterfly_sse2(in[0], cospi_16_64, cospi_16_64, &step2[1],
                                &step2[0]);
  step2[4] = step1[4];
  step2[5] = step1[4];
  step2[6] = step1[7];
  step2[7] = step1[7];

  // stage 5
  step1[0] = step2[0];
  step1[1] = step2[1];
  step1[2] = step2[1];
  step1[3] = step2[0];
  step1[4] = step2[4];
  highbd_butterfly_sse2(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5],
                        &step1[6]);
  step1[7] = step2[7];

  // stage 6
  out[0] = _mm_add_epi32(step1[0], step1[7]);
  out[1] = _mm_add_epi32(step1[1], step1[6]);
  out[2] = _mm_add_epi32(step1[2], step1[5]);
  out[3] = _mm_add_epi32(step1[3], step1[4]);
  out[4] = _mm_sub_epi32(step1[3], step1[4]);
  out[5] = _mm_sub_epi32(step1[2], step1[5]);
  out[6] = _mm_sub_epi32(step1[1], step1[6]);
  out[7] = _mm_sub_epi32(step1[0], step1[7]);
}

// For each 4x32 block __m128i in[32]:
// inputs with index 2, 6;
// output pixels 8-15 in __m128i out[32].
static INLINE void highbd_idct32_34_4x32_quarter_2(const __m128i *in /*in[32]*/,
                                                   __m128i *out /*out[16]*/) {
  __m128i step1[32], step2[32];

  // stage 2
  highbd_partial_butterfly_sse2(in[2], cospi_30_64, cospi_2_64, &step2[8],
                                &step2[15]);
  highbd_partial_butterfly_neg_sse2(in[6], cospi_6_64, cospi_26_64, &step2[11],
                                    &step2[12]);

  // stage 3
  step1[8] = step2[8];
  step1[9] = step2[8];
  step1[14] = step2[15];
  step1[15] = step2[15];
  step1[10] = step2[11];
  step1[11] = step2[11];
  step1[12] = step2[12];
  step1[13] = step2[12];

  step1[10] =
      _mm_sub_epi32(_mm_setzero_si128(), step1[10]);  // step1[10] = -step1[10]
  step1[13] =
      _mm_sub_epi32(_mm_setzero_si128(), step1[13]);  // step1[13] = -step1[13]
  highbd_idct32_4x32_quarter_2_stage_4_to_6(step1, out);
}

static INLINE void highbd_idct32_34_4x32_quarter_1_2(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i temp[16];
  highbd_idct32_34_4x32_quarter_1(in, temp);
  highbd_idct32_34_4x32_quarter_2(in, temp);
  // stage 7
  highbd_add_sub_butterfly(temp, out, 16);
}

// For each 4x32 block __m128i in[32]:
// inputs with odd index,
// 1, 3, 5, 7;
// output pixels 16-23, 24-31 in __m128i out[32].
static INLINE void highbd_idct32_34_4x32_quarter_3_4(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i step1[32], step2[32];

  // stage 1
  highbd_partial_butterfly_sse2(in[1], cospi_31_64, cospi_1_64, &step1[16],
                                &step1[31]);
  highbd_partial_butterfly_neg_sse2(in[7], cospi_7_64, cospi_25_64, &step1[19],
                                    &step1[28]);

  highbd_partial_butterfly_sse2(in[5], cospi_27_64, cospi_5_64, &step1[20],
                                &step1[27]);
  highbd_partial_butterfly_neg_sse2(in[3], cospi_3_64, cospi_29_64, &step1[23],
                                    &step1[24]);

  // stage 2
  step2[16] = step1[16];
  step2[17] = step1[16];
  step2[18] = step1[19];
  step2[19] = step1[19];
  step2[20] = step1[20];
  step2[21] = step1[20];
  step2[22] = step1[23];
  step2[23] = step1[23];

  step2[24] = step1[24];
  step2[25] = step1[24];
  step2[26] = step1[27];
  step2[27] = step1[27];
  step2[28] = step1[28];
  step2[29] = step1[28];
  step2[30] = step1[31];
  step2[31] = step1[31];

  // stage 3
  step2[18] =
      _mm_sub_epi32(_mm_setzero_si128(), step2[18]);  // step2[18] = -step2[18]
  step2[22] =
      _mm_sub_epi32(_mm_setzero_si128(), step2[22]);  // step2[22] = -step2[22]
  step2[25] =
      _mm_sub_epi32(_mm_setzero_si128(), step2[25]);  // step2[25] = -step2[25]
  step2[29] =
      _mm_sub_epi32(_mm_setzero_si128(), step2[29]);  // step2[29] = -step2[29]
  step1[16] = step2[16];
  step1[31] = step2[31];
  highbd_butterfly_sse2(step2[30], step2[17], cospi_28_64, cospi_4_64,
                        &step1[17], &step1[30]);
  highbd_butterfly_sse2(step2[18], step2[29], cospi_4_64, cospi_28_64,
                        &step1[29], &step1[18]);
  step1[19] = step2[19];
  step1[20] = step2[20];
  highbd_butterfly_sse2(step2[26], step2[21], cospi_12_64, cospi_20_64,
                        &step1[21], &step1[26]);
  highbd_butterfly_sse2(step2[22], step2[25], cospi_20_64, cospi_12_64,
                        &step1[25], &step1[22]);
  step1[23] = step2[23];
  step1[24] = step2[24];
  step1[27] = step2[27];
  step1[28] = step2[28];

  highbd_idct32_4x32_quarter_3_4_stage_4_to_7(step1, out);
}

static void highbd_idct32_34_4x32(__m128i *const io /*io[32]*/) {
  __m128i temp[32];

  highbd_idct32_34_4x32_quarter_1_2(io, temp);
  highbd_idct32_34_4x32_quarter_3_4(io, temp);
  // final stage
  highbd_add_sub_butterfly(temp, io, 32);
}

void vpx_highbd_idct32x32_34_add_sse2(const tran_low_t *input, uint16_t *dest,
                                      int stride, int bd) {
  int i, j;

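  // With at most 34 nonzero coefficients, the bd == 8 path needs only a
  // single packed 8x8 load and one row pass; the high bit depth path reuses
  // the same 16-column loads as the _135 variant but runs the cheaper
  // 34-coefficient 4x32 idct.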
  if (bd == 8) {
    __m128i col[32], in[32], out[32];

    // rows
    highbd_load_pack_transpose_32bit_8x8(&input[0], 32, &in[0]);
    idct32_34_8x32_sse2(in, col);

    // columns
    for (i = 0; i < 32; i += 8) {
      transpose_16bit_8x8(col + i, in);
      idct32_34_8x32_sse2(in, out);
      for (j = 0; j < 32; ++j) {
        highbd_write_buffer_8(dest + j * stride, out[j], bd);
      }
      dest += 8;
    }
  } else {
    __m128i all[8][32], out[32], *in;

    for (i = 0; i < 4; i++) {
      in = all[i];
      highbd_load_transpose_32bit_8x4(&input[0], 32, &in[0]);
      highbd_load_transpose_32bit_8x4(&input[8], 32, &in[8]);
      highbd_idct32_34_4x32(in);
      input += 4 * 32;
    }

    for (i = 0; i < 32; i += 4) {
      transpose_32bit_4x4(all[0] + i, out + 0);
      transpose_32bit_4x4(all[1] + i, out + 4);
      transpose_32bit_4x4(all[2] + i, out + 8);
      transpose_32bit_4x4(all[3] + i, out + 12);
      highbd_idct32_34_4x32(out);

      for (j = 0; j < 32; ++j) {
        highbd_write_buffer_4(dest + j * stride, out[j], bd);
      }
      dest += 4;
    }
  }
}

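// DC only: every output pixel receives the same offset, derived from
// input[0]. The shared kernel is assumed to compute, as in the scalar
// reference vpx_highbd_idct32x32_1_add_c(),
//   a  = dct_const_round_shift(input[0] * cospi_16_64)
//   dc = ROUND_POWER_OF_TWO(dct_const_round_shift(a * cospi_16_64), 6)
// and to add dc to all 32x32 pixels of dest, clamping to [0, (1 << bd) - 1].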
void vpx_highbd_idct32x32_1_add_sse2(const tran_low_t *input, uint16_t *dest,
                                     int stride, int bd) {
  highbd_idct_1_add_kernel(input, dest, stride, bd, 32);
}