/*
 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <tmmintrin.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/x86/inv_txfm_sse2.h"
#include "vpx_dsp/x86/inv_txfm_ssse3.h"
#include "vpx_dsp/x86/transpose_sse2.h"
#include "vpx_dsp/x86/txfm_common_sse2.h"

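// When one of the two butterfly inputs is known to be zero, the butterfly
// degenerates into two independent scalings of the surviving input. The
// helpers below implement this "partial" butterfly with _mm_mulhrs_epi16,
// which effectively computes (a * b + (1 << 14)) >> 15 per 16-bit lane.
// Because the cospi_*_64 constants are scaled by 2^14, passing 2 * c makes
// each lane match the scalar reference rounding,
//   out = ROUND_POWER_OF_TWO(in * c, 14);
// (ignoring 16-bit overflow). A negated constant accounts for the
// subtraction that the full butterfly would have performed.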
static INLINE void partial_butterfly_ssse3(const __m128i in, const int c0,
                                           const int c1, __m128i *const out0,
                                           __m128i *const out1) {
  const __m128i cst0 = _mm_set1_epi16(2 * c0);
  const __m128i cst1 = _mm_set1_epi16(2 * c1);
  *out0 = _mm_mulhrs_epi16(in, cst0);
  *out1 = _mm_mulhrs_epi16(in, cst1);
}

static INLINE __m128i partial_butterfly_cospi16_ssse3(const __m128i in) {
  const __m128i coef_pair = _mm_set1_epi16(2 * cospi_16_64);
  return _mm_mulhrs_epi16(in, coef_pair);
}

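// Handles 8x8 blocks whose non-zero coefficients are confined to the
// top-left 4x4 corner (eob <= 12): only the first four entries of each of
// the first four rows are loaded.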
void vpx_idct8x8_12_add_ssse3(const tran_low_t *input, uint8_t *dest,
                              int stride) {
  __m128i io[8];

  io[0] = load_input_data4(input + 0 * 8);
  io[1] = load_input_data4(input + 1 * 8);
  io[2] = load_input_data4(input + 2 * 8);
  io[3] = load_input_data4(input + 3 * 8);

  idct8x8_12_add_kernel_ssse3(io);
  write_buffer_8x8(io, dest, stride);
}

// Group the coefficient calculation into smaller functions to prevent stack
// spillover in 32x32 idct optimizations:
// quarter_1: 0-7
// quarter_2: 8-15
// quarter_3_4: 16-23, 24-31
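//
// quarter_1_2 combines the quarter_1 and quarter_2 results with the stage 7
// add/sub butterfly; the final stage then combines that half with the
// quarter_3_4 results.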

// For each 8x32 block __m128i in[32],
// Input with index: 0, 4
// output pixels: 0-7 in __m128i out[32]
static INLINE void idct32_34_8x32_quarter_1(const __m128i *const in /*in[32]*/,
                                            __m128i *const out /*out[8]*/) {
  __m128i step1[8], step2[8];

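  // Only in[0] and in[4] contribute in the 34-coefficient case, so the
  // add/sub pairs of the full flow that involve the missing inputs collapse
  // to plain copies below.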
  // stage 3
  partial_butterfly_ssse3(in[4], cospi_28_64, cospi_4_64, &step1[4], &step1[7]);

  // stage 4
  step2[0] = partial_butterfly_cospi16_ssse3(in[0]);
  step2[4] = step1[4];
  step2[5] = step1[4];
  step2[6] = step1[7];
  step2[7] = step1[7];

  // stage 5
  step1[0] = step2[0];
  step1[1] = step2[0];
  step1[2] = step2[0];
  step1[3] = step2[0];
  step1[4] = step2[4];
  butterfly(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);
  step1[7] = step2[7];

  // stage 6
  out[0] = _mm_add_epi16(step1[0], step1[7]);
  out[1] = _mm_add_epi16(step1[1], step1[6]);
  out[2] = _mm_add_epi16(step1[2], step1[5]);
  out[3] = _mm_add_epi16(step1[3], step1[4]);
  out[4] = _mm_sub_epi16(step1[3], step1[4]);
  out[5] = _mm_sub_epi16(step1[2], step1[5]);
  out[6] = _mm_sub_epi16(step1[1], step1[6]);
  out[7] = _mm_sub_epi16(step1[0], step1[7]);
}

// For each 8x32 block __m128i in[32],
// Input with index: 2, 6
// output pixels: 8-15 in __m128i out[32]
static INLINE void idct32_34_8x32_quarter_2(const __m128i *const in /*in[32]*/,
                                            __m128i *const out /*out[16]*/) {
  __m128i step1[16], step2[16];

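  // Only in[2] and in[6] contribute here, so the stage 3 add/sub pairs
  // reduce to copies of the stage 2 outputs.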
  // stage 2
  partial_butterfly_ssse3(in[2], cospi_30_64, cospi_2_64, &step2[8],
                          &step2[15]);
  partial_butterfly_ssse3(in[6], -cospi_26_64, cospi_6_64, &step2[11],
                          &step2[12]);

  // stage 3
  step1[8] = step2[8];
  step1[9] = step2[8];
  step1[14] = step2[15];
  step1[15] = step2[15];
  step1[10] = step2[11];
  step1[11] = step2[11];
  step1[12] = step2[12];
  step1[13] = step2[12];

  idct32_8x32_quarter_2_stage_4_to_6(step1, out);
}

static INLINE void idct32_34_8x32_quarter_1_2(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i temp[16];
  idct32_34_8x32_quarter_1(in, temp);
  idct32_34_8x32_quarter_2(in, temp);
  // stage 7
  add_sub_butterfly(temp, out, 16);
}

// For each 8x32 block __m128i in[32],
// Input with odd index: 1, 3, 5, 7
// output pixels: 16-23, 24-31 in __m128i out[32]
static INLINE void idct32_34_8x32_quarter_3_4(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i step1[32];

  // stage 1
  partial_butterfly_ssse3(in[1], cospi_31_64, cospi_1_64, &step1[16],
                          &step1[31]);
  partial_butterfly_ssse3(in[7], -cospi_25_64, cospi_7_64, &step1[19],
                          &step1[28]);
  partial_butterfly_ssse3(in[5], cospi_27_64, cospi_5_64, &step1[20],
                          &step1[27]);
  partial_butterfly_ssse3(in[3], -cospi_29_64, cospi_3_64, &step1[23],
                          &step1[24]);

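  // Stage 2 is folded away: in[9], in[11], in[13] and in[15] are zero in the
  // 34-coefficient case, so each step2 term would equal its step1
  // counterpart.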
  // stage 3
  butterfly(step1[31], step1[16], cospi_28_64, cospi_4_64, &step1[17],
            &step1[30]);
  butterfly(step1[28], step1[19], -cospi_4_64, cospi_28_64, &step1[18],
            &step1[29]);
  butterfly(step1[27], step1[20], cospi_12_64, cospi_20_64, &step1[21],
            &step1[26]);
  butterfly(step1[24], step1[23], -cospi_20_64, cospi_12_64, &step1[22],
            &step1[25]);

  idct32_8x32_quarter_3_4_stage_4_to_7(step1, out);
}

void idct32_34_8x32_ssse3(const __m128i *const in /*in[32]*/,
                          __m128i *const out /*out[32]*/) {
  __m128i temp[32];

  idct32_34_8x32_quarter_1_2(in, temp);
  idct32_34_8x32_quarter_3_4(in, temp);
  // final stage
  add_sub_butterfly(temp, out, 32);
}

// Only upper-left 8x8 has non-zero coeff
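// (eob <= 34): a single load/transpose of that 8x8 block and one row
// transform pass are enough, while the column transform still covers all 32
// columns in four 8-column passes.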
void vpx_idct32x32_34_add_ssse3(const tran_low_t *input, uint8_t *dest,
                                int stride) {
  __m128i io[32], col[32];
  int i;

  // Load input data. Only need to load the top left 8x8 block.
  load_transpose_16bit_8x8(input, 32, io);
  idct32_34_8x32_ssse3(io, col);

  for (i = 0; i < 32; i += 8) {
    int j;
    transpose_16bit_8x8(col + i, io);
    idct32_34_8x32_ssse3(io, io);

    for (j = 0; j < 32; ++j) {
      write_buffer_8x1(dest + j * stride, io[j]);
    }

    dest += 8;
  }
}

// For each 8x32 block __m128i in[32],
// Input with index: 0, 4, 8, 12
// output pixels: 0-7 in __m128i out[32]
static INLINE void idct32_135_8x32_quarter_1(const __m128i *const in /*in[32]*/,
                                             __m128i *const out /*out[8]*/) {
  __m128i step1[8], step2[8];

  // stage 3
  partial_butterfly_ssse3(in[4], cospi_28_64, cospi_4_64, &step1[4], &step1[7]);
  partial_butterfly_ssse3(in[12], -cospi_20_64, cospi_12_64, &step1[5],
                          &step1[6]);

  // stage 4
  step2[0] = partial_butterfly_cospi16_ssse3(in[0]);
  partial_butterfly_ssse3(in[8], cospi_24_64, cospi_8_64, &step2[2], &step2[3]);
  step2[4] = _mm_add_epi16(step1[4], step1[5]);
  step2[5] = _mm_sub_epi16(step1[4], step1[5]);
  step2[6] = _mm_sub_epi16(step1[7], step1[6]);
  step2[7] = _mm_add_epi16(step1[7], step1[6]);

  // stage 5
  step1[0] = _mm_add_epi16(step2[0], step2[3]);
  step1[1] = _mm_add_epi16(step2[0], step2[2]);
  step1[2] = _mm_sub_epi16(step2[0], step2[2]);
  step1[3] = _mm_sub_epi16(step2[0], step2[3]);
  step1[4] = step2[4];
  butterfly(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);
  step1[7] = step2[7];

  // stage 6
  out[0] = _mm_add_epi16(step1[0], step1[7]);
  out[1] = _mm_add_epi16(step1[1], step1[6]);
  out[2] = _mm_add_epi16(step1[2], step1[5]);
  out[3] = _mm_add_epi16(step1[3], step1[4]);
  out[4] = _mm_sub_epi16(step1[3], step1[4]);
  out[5] = _mm_sub_epi16(step1[2], step1[5]);
  out[6] = _mm_sub_epi16(step1[1], step1[6]);
  out[7] = _mm_sub_epi16(step1[0], step1[7]);
}

// For each 8x32 block __m128i in[32],
// Input with index: 2, 6, 10, 14
// output pixels: 8-15 in __m128i out[32]
static INLINE void idct32_135_8x32_quarter_2(const __m128i *const in /*in[32]*/,
                                             __m128i *const out /*out[16]*/) {
  __m128i step1[16], step2[16];

  // stage 2
  partial_butterfly_ssse3(in[2], cospi_30_64, cospi_2_64, &step2[8],
                          &step2[15]);
  partial_butterfly_ssse3(in[14], -cospi_18_64, cospi_14_64, &step2[9],
                          &step2[14]);
  partial_butterfly_ssse3(in[10], cospi_22_64, cospi_10_64, &step2[10],
                          &step2[13]);
  partial_butterfly_ssse3(in[6], -cospi_26_64, cospi_6_64, &step2[11],
                          &step2[12]);

  // stage 3
  step1[8] = _mm_add_epi16(step2[8], step2[9]);
  step1[9] = _mm_sub_epi16(step2[8], step2[9]);
  step1[10] = _mm_sub_epi16(step2[11], step2[10]);
  step1[11] = _mm_add_epi16(step2[11], step2[10]);
  step1[12] = _mm_add_epi16(step2[12], step2[13]);
  step1[13] = _mm_sub_epi16(step2[12], step2[13]);
  step1[14] = _mm_sub_epi16(step2[15], step2[14]);
  step1[15] = _mm_add_epi16(step2[15], step2[14]);

  idct32_8x32_quarter_2_stage_4_to_6(step1, out);
}

static INLINE void idct32_135_8x32_quarter_1_2(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i temp[16];
  idct32_135_8x32_quarter_1(in, temp);
  idct32_135_8x32_quarter_2(in, temp);
  // stage 7
  add_sub_butterfly(temp, out, 16);
}

// For each 8x32 block __m128i in[32],
// Input with odd index:
// 1, 3, 5, 7, 9, 11, 13, 15
// output pixels: 16-23, 24-31 in __m128i out[32]
static INLINE void idct32_135_8x32_quarter_3_4(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i step1[32], step2[32];

  // stage 1
  partial_butterfly_ssse3(in[1], cospi_31_64, cospi_1_64, &step1[16],
                          &step1[31]);
  partial_butterfly_ssse3(in[15], -cospi_17_64, cospi_15_64, &step1[17],
                          &step1[30]);
  partial_butterfly_ssse3(in[9], cospi_23_64, cospi_9_64, &step1[18],
                          &step1[29]);
  partial_butterfly_ssse3(in[7], -cospi_25_64, cospi_7_64, &step1[19],
                          &step1[28]);

  partial_butterfly_ssse3(in[5], cospi_27_64, cospi_5_64, &step1[20],
                          &step1[27]);
  partial_butterfly_ssse3(in[11], -cospi_21_64, cospi_11_64, &step1[21],
                          &step1[26]);

  partial_butterfly_ssse3(in[13], cospi_19_64, cospi_13_64, &step1[22],
                          &step1[25]);
  partial_butterfly_ssse3(in[3], -cospi_29_64, cospi_3_64, &step1[23],
                          &step1[24]);

  // stage 2
  step2[16] = _mm_add_epi16(step1[16], step1[17]);
  step2[17] = _mm_sub_epi16(step1[16], step1[17]);
  step2[18] = _mm_sub_epi16(step1[19], step1[18]);
  step2[19] = _mm_add_epi16(step1[19], step1[18]);
  step2[20] = _mm_add_epi16(step1[20], step1[21]);
  step2[21] = _mm_sub_epi16(step1[20], step1[21]);
  step2[22] = _mm_sub_epi16(step1[23], step1[22]);
  step2[23] = _mm_add_epi16(step1[23], step1[22]);

  step2[24] = _mm_add_epi16(step1[24], step1[25]);
  step2[25] = _mm_sub_epi16(step1[24], step1[25]);
  step2[26] = _mm_sub_epi16(step1[27], step1[26]);
  step2[27] = _mm_add_epi16(step1[27], step1[26]);
  step2[28] = _mm_add_epi16(step1[28], step1[29]);
  step2[29] = _mm_sub_epi16(step1[28], step1[29]);
  step2[30] = _mm_sub_epi16(step1[31], step1[30]);
  step2[31] = _mm_add_epi16(step1[31], step1[30]);

  // stage 3
  step1[16] = step2[16];
  step1[31] = step2[31];
  butterfly(step2[30], step2[17], cospi_28_64, cospi_4_64, &step1[17],
            &step1[30]);
  butterfly(step2[29], step2[18], -cospi_4_64, cospi_28_64, &step1[18],
            &step1[29]);
  step1[19] = step2[19];
  step1[20] = step2[20];
  butterfly(step2[26], step2[21], cospi_12_64, cospi_20_64, &step1[21],
            &step1[26]);
  butterfly(step2[25], step2[22], -cospi_20_64, cospi_12_64, &step1[22],
            &step1[25]);
  step1[23] = step2[23];
  step1[24] = step2[24];
  step1[27] = step2[27];
  step1[28] = step2[28];

  idct32_8x32_quarter_3_4_stage_4_to_7(step1, out);
}

void idct32_135_8x32_ssse3(const __m128i *const in /*in[32]*/,
                           __m128i *const out /*out[32]*/) {
  __m128i temp[32];
  idct32_135_8x32_quarter_1_2(in, temp);
  idct32_135_8x32_quarter_3_4(in, temp);
  // final stage
  add_sub_butterfly(temp, out, 32);
}

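// Only the upper-left 16x16 block has non-zero coefficients (eob <= 135):
// the row transform runs twice over 8 rows each (two transposed 8x8 loads
// per pass), followed by four 8-column passes of the column transform.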
void vpx_idct32x32_135_add_ssse3(const tran_low_t *input, uint8_t *dest,
                                 int stride) {
  __m128i col[2][32], io[32];
  int i;

  // rows
  for (i = 0; i < 2; i++) {
    load_transpose_16bit_8x8(&input[0], 32, &io[0]);
    load_transpose_16bit_8x8(&input[8], 32, &io[8]);
    idct32_135_8x32_ssse3(io, col[i]);
    input += 32 << 3;
  }

  // columns
  for (i = 0; i < 32; i += 8) {
    transpose_16bit_8x8(col[0] + i, io);
    transpose_16bit_8x8(col[1] + i, io + 8);
    idct32_135_8x32_ssse3(io, io);
    store_buffer_8x32(io, dest, stride);
    dest += 8;
  }
}