/*
 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <tmmintrin.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"
#include "vpx_dsp/x86/bitdepth_conversion_sse2.h"
#include "vpx_dsp/x86/quantize_sse2.h"
#include "vpx_dsp/x86/quantize_ssse3.h"

void vpx_quantize_b_ssse3(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                          int skip_block, const int16_t *zbin_ptr,
                          const int16_t *round_ptr, const int16_t *quant_ptr,
                          const int16_t *quant_shift_ptr,
                          tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
                          const int16_t *dequant_ptr, uint16_t *eob_ptr,
                          const int16_t *scan, const int16_t *iscan) {
  const __m128i zero = _mm_setzero_si128();
  int index = 16;

  __m128i zbin, round, quant, dequant, shift;
  __m128i coeff0, coeff1;
  __m128i qcoeff0, qcoeff1;
  __m128i cmp_mask0, cmp_mask1;
  __m128i eob, eob0;

  (void)scan;
  (void)skip_block;
  assert(!skip_block);

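  // load_b_values() (from quantize_sse2.h) is expected to load each quantizer
  // parameter as 8 int16 lanes, with the DC value in lane 0 and the AC value
  // in the remaining lanes; the _mm_unpackhi_epi64() calls below then
  // broadcast the AC half once the DC lane has been used.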
  load_b_values(zbin_ptr, &zbin, round_ptr, &round, quant_ptr, &quant,
                dequant_ptr, &dequant, quant_shift_ptr, &shift);

  // Do DC and first 15 AC.
  coeff0 = load_tran_low(coeff_ptr);
  coeff1 = load_tran_low(coeff_ptr + 8);

  qcoeff0 = _mm_abs_epi16(coeff0);
  qcoeff1 = _mm_abs_epi16(coeff1);

  cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);
  zbin = _mm_unpackhi_epi64(zbin, zbin);  // Switch DC to AC.
  cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);

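  // calculate_qcoeff() presumably mirrors the scalar reference in
  // vpx_dsp/quantize.c: tmp = abs_coeff + round, then
  // qcoeff = (((tmp * quant) >> 16) + tmp) * shift >> 16, all on the
  // absolute values.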
  calculate_qcoeff(&qcoeff0, round, quant, shift);
  round = _mm_unpackhi_epi64(round, round);
  quant = _mm_unpackhi_epi64(quant, quant);
  shift = _mm_unpackhi_epi64(shift, shift);
  calculate_qcoeff(&qcoeff1, round, quant, shift);

  // Reinsert signs.
  qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
  qcoeff1 = _mm_sign_epi16(qcoeff1, coeff1);

  // Mask out zbin threshold coeffs.
  qcoeff0 = _mm_and_si128(qcoeff0, cmp_mask0);
  qcoeff1 = _mm_and_si128(qcoeff1, cmp_mask1);

  store_tran_low(qcoeff0, qcoeff_ptr);
  store_tran_low(qcoeff1, qcoeff_ptr + 8);

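  // Dequantize: dqcoeff = qcoeff * dequant, with the DC dequant value used
  // only for the first lane before switching to the AC value.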
  calculate_dqcoeff_and_store(qcoeff0, dequant, dqcoeff_ptr);
  dequant = _mm_unpackhi_epi64(dequant, dequant);
  calculate_dqcoeff_and_store(qcoeff1, dequant, dqcoeff_ptr + 8);

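  // Track the end of block: scan_for_eob() presumably records, per lane, the
  // highest iscan position holding a nonzero quantized coefficient.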
  eob = scan_for_eob(&qcoeff0, &qcoeff1, cmp_mask0, cmp_mask1, iscan, 0, zero);

  // AC only loop.
  while (index < n_coeffs) {
    coeff0 = load_tran_low(coeff_ptr + index);
    coeff1 = load_tran_low(coeff_ptr + index + 8);

    qcoeff0 = _mm_abs_epi16(coeff0);
    qcoeff1 = _mm_abs_epi16(coeff1);

    cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);
    cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);

    calculate_qcoeff(&qcoeff0, round, quant, shift);
    calculate_qcoeff(&qcoeff1, round, quant, shift);

    qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
    qcoeff1 = _mm_sign_epi16(qcoeff1, coeff1);

    qcoeff0 = _mm_and_si128(qcoeff0, cmp_mask0);
    qcoeff1 = _mm_and_si128(qcoeff1, cmp_mask1);

    store_tran_low(qcoeff0, qcoeff_ptr + index);
    store_tran_low(qcoeff1, qcoeff_ptr + index + 8);

    calculate_dqcoeff_and_store(qcoeff0, dequant, dqcoeff_ptr + index);
    calculate_dqcoeff_and_store(qcoeff1, dequant, dqcoeff_ptr + index + 8);

    eob0 = scan_for_eob(&qcoeff0, &qcoeff1, cmp_mask0, cmp_mask1, iscan, index,
                        zero);
    eob = _mm_max_epi16(eob, eob0);

    index += 16;
  }

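  // Reduce the per-lane maxima to a single scalar end-of-block index.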
  *eob_ptr = accumulate_eob(eob);
}

void vpx_quantize_b_32x32_ssse3(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                                int skip_block, const int16_t *zbin_ptr,
                                const int16_t *round_ptr,
                                const int16_t *quant_ptr,
                                const int16_t *quant_shift_ptr,
                                tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
                                const int16_t *dequant_ptr, uint16_t *eob_ptr,
                                const int16_t *scan, const int16_t *iscan) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i one = _mm_set1_epi16(1);
  int index;

  __m128i zbin, round, quant, dequant, shift;
  __m128i coeff0, coeff1;
  __m128i qcoeff0, qcoeff1;
  __m128i cmp_mask0, cmp_mask1;
  __m128i all_zero;
  __m128i eob = zero, eob0;

  (void)scan;
  (void)n_coeffs;
  (void)skip_block;
  assert(!skip_block);

  // Set up global values.
  // The 32x32 quantizer halves zbin and round.
  zbin = _mm_load_si128((const __m128i *)zbin_ptr);
  // Shift with rounding.
  zbin = _mm_add_epi16(zbin, one);
  zbin = _mm_srli_epi16(zbin, 1);
  // x86 has no "greater *or equal*" comparison. Subtract 1 from zbin so
  // it is a strict "greater" comparison.
  zbin = _mm_sub_epi16(zbin, one);
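  // Net effect: cmpgt(abs_coeff, (zbin + 1) / 2 - 1) is equivalent to
  // abs_coeff >= ROUND_POWER_OF_TWO(zbin, 1), matching the 32x32 C reference.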

  round = _mm_load_si128((const __m128i *)round_ptr);
  round = _mm_add_epi16(round, one);
  round = _mm_srli_epi16(round, 1);

  quant = _mm_load_si128((const __m128i *)quant_ptr);
  dequant = _mm_load_si128((const __m128i *)dequant_ptr);
  shift = _mm_load_si128((const __m128i *)quant_shift_ptr);
  // I suspect this is not technically OK because quant_shift can be up
  // to 1 << 16 and shifting up again will outrange that, but the test is not
  // comprehensive enough to catch that and "it's been that way forever".
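  // Doubling quant_shift presumably stands in for the 32x32 C reference,
  // which shifts the final product by 15 instead of 16.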
  shift = _mm_slli_epi16(shift, 1);

  // Do DC and first 15 AC.
  coeff0 = load_tran_low(coeff_ptr);
  coeff1 = load_tran_low(coeff_ptr + 8);

  qcoeff0 = _mm_abs_epi16(coeff0);
  qcoeff1 = _mm_abs_epi16(coeff1);

  cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);
  zbin = _mm_unpackhi_epi64(zbin, zbin);  // Switch DC to AC.
  cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);

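  // If no coefficient in this group of 16 exceeds the zbin threshold, every
  // output is zero: store zeros and skip the quantization math entirely.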
  all_zero = _mm_or_si128(cmp_mask0, cmp_mask1);
  if (_mm_movemask_epi8(all_zero) == 0) {
    _mm_store_si128((__m128i *)(qcoeff_ptr), zero);
    _mm_store_si128((__m128i *)(qcoeff_ptr + 8), zero);
    _mm_store_si128((__m128i *)(dqcoeff_ptr), zero);
    _mm_store_si128((__m128i *)(dqcoeff_ptr + 8), zero);
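    // With CONFIG_VP9_HIGHBITDEPTH, tran_low_t is 32 bits wide, so zeroing 16
    // coefficients takes four 128-bit stores instead of two.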
#if CONFIG_VP9_HIGHBITDEPTH
    _mm_store_si128((__m128i *)(qcoeff_ptr + 4), zero);
    _mm_store_si128((__m128i *)(qcoeff_ptr + 12), zero);
    _mm_store_si128((__m128i *)(dqcoeff_ptr + 4), zero);
    _mm_store_si128((__m128i *)(dqcoeff_ptr + 12), zero);
#endif  // CONFIG_VP9_HIGHBITDEPTH

    round = _mm_unpackhi_epi64(round, round);
    quant = _mm_unpackhi_epi64(quant, quant);
    shift = _mm_unpackhi_epi64(shift, shift);
    dequant = _mm_unpackhi_epi64(dequant, dequant);
  } else {
    calculate_qcoeff(&qcoeff0, round, quant, shift);
    round = _mm_unpackhi_epi64(round, round);
    quant = _mm_unpackhi_epi64(quant, quant);
    shift = _mm_unpackhi_epi64(shift, shift);
    calculate_qcoeff(&qcoeff1, round, quant, shift);

    // Reinsert signs.
    qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
    qcoeff1 = _mm_sign_epi16(qcoeff1, coeff1);

    // Mask out zbin threshold coeffs.
    qcoeff0 = _mm_and_si128(qcoeff0, cmp_mask0);
    qcoeff1 = _mm_and_si128(qcoeff1, cmp_mask1);

    store_tran_low(qcoeff0, qcoeff_ptr);
    store_tran_low(qcoeff1, qcoeff_ptr + 8);

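    // The 32x32 dequantizer presumably halves the product as well:
    // dqcoeff = (qcoeff * dequant) / 2, per the 32x32 C reference.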
    calculate_dqcoeff_and_store_32x32(qcoeff0, dequant, zero, dqcoeff_ptr);
    dequant = _mm_unpackhi_epi64(dequant, dequant);
    calculate_dqcoeff_and_store_32x32(qcoeff1, dequant, zero, dqcoeff_ptr + 8);

    eob =
        scan_for_eob(&qcoeff0, &qcoeff1, cmp_mask0, cmp_mask1, iscan, 0, zero);
  }

  // AC only loop.
  for (index = 16; index < 32 * 32; index += 16) {
    coeff0 = load_tran_low(coeff_ptr + index);
    coeff1 = load_tran_low(coeff_ptr + index + 8);

    qcoeff0 = _mm_abs_epi16(coeff0);
    qcoeff1 = _mm_abs_epi16(coeff1);

    cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);
    cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);

    all_zero = _mm_or_si128(cmp_mask0, cmp_mask1);
    if (_mm_movemask_epi8(all_zero) == 0) {
      _mm_store_si128((__m128i *)(qcoeff_ptr + index), zero);
      _mm_store_si128((__m128i *)(qcoeff_ptr + index + 8), zero);
      _mm_store_si128((__m128i *)(dqcoeff_ptr + index), zero);
      _mm_store_si128((__m128i *)(dqcoeff_ptr + index + 8), zero);
#if CONFIG_VP9_HIGHBITDEPTH
      _mm_store_si128((__m128i *)(qcoeff_ptr + index + 4), zero);
      _mm_store_si128((__m128i *)(qcoeff_ptr + index + 12), zero);
      _mm_store_si128((__m128i *)(dqcoeff_ptr + index + 4), zero);
      _mm_store_si128((__m128i *)(dqcoeff_ptr + index + 12), zero);
#endif  // CONFIG_VP9_HIGHBITDEPTH
      continue;
    }

    calculate_qcoeff(&qcoeff0, round, quant, shift);
    calculate_qcoeff(&qcoeff1, round, quant, shift);

    qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
    qcoeff1 = _mm_sign_epi16(qcoeff1, coeff1);

    qcoeff0 = _mm_and_si128(qcoeff0, cmp_mask0);
    qcoeff1 = _mm_and_si128(qcoeff1, cmp_mask1);

    store_tran_low(qcoeff0, qcoeff_ptr + index);
    store_tran_low(qcoeff1, qcoeff_ptr + index + 8);

    calculate_dqcoeff_and_store_32x32(qcoeff0, dequant, zero,
                                      dqcoeff_ptr + index);
    calculate_dqcoeff_and_store_32x32(qcoeff1, dequant, zero,
                                      dqcoeff_ptr + 8 + index);

    eob0 = scan_for_eob(&qcoeff0, &qcoeff1, cmp_mask0, cmp_mask1, iscan, index,
                        zero);
    eob = _mm_max_epi16(eob, eob0);
  }

  *eob_ptr = accumulate_eob(eob);
}