/*
 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

/*
 * The core AEC algorithm, SSE2 version of speed-critical functions.
 */

#include "typedefs.h"

#if defined(WEBRTC_USE_SSE2)
#include <emmintrin.h>
#include <math.h>
#include <string.h>  // memset

#include "aec_core.h"
#include "aec_rdft.h"

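// Scalar helpers for one complex multiply. For a = aRe + i*aIm and
// b = bRe + i*bIm:
//   re(a * b) = aRe * bRe - aIm * bIm
//   im(a * b) = aRe * bIm + aIm * bRe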
__inline static float MulRe(float aRe, float aIm, float bRe, float bIm)
{
  return aRe * bRe - aIm * bIm;
}

__inline static float MulIm(float aRe, float aIm, float bRe, float bIm)
{
  return aRe * bIm + aIm * bRe;
}

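// Filters the far-end spectrum through the adaptive filter, one partition
// per outer-loop iteration: yf += xfBuf * wfBuf. Four complex bins are
// handled per SSE2 iteration; the odd 65th bin (PART_LEN1 bins in total)
// falls through to the scalar tail loop.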
static void FilterFarSSE2(aec_t *aec, float yf[2][PART_LEN1])
{
  int i;
  for (i = 0; i < NR_PART; i++) {
    int j;
    int xPos = (i + aec->xfBufBlockPos) * PART_LEN1;
    int pos = i * PART_LEN1;
    // Check for wrap
    if (i + aec->xfBufBlockPos >= NR_PART) {
      xPos -= NR_PART * PART_LEN1;
    }

    // vectorized code (four at once)
    for (j = 0; j + 3 < PART_LEN1; j += 4) {
      const __m128 xfBuf_re = _mm_loadu_ps(&aec->xfBuf[0][xPos + j]);
      const __m128 xfBuf_im = _mm_loadu_ps(&aec->xfBuf[1][xPos + j]);
      const __m128 wfBuf_re = _mm_loadu_ps(&aec->wfBuf[0][pos + j]);
      const __m128 wfBuf_im = _mm_loadu_ps(&aec->wfBuf[1][pos + j]);
      const __m128 yf_re = _mm_loadu_ps(&yf[0][j]);
      const __m128 yf_im = _mm_loadu_ps(&yf[1][j]);
      const __m128 a = _mm_mul_ps(xfBuf_re, wfBuf_re);
      const __m128 b = _mm_mul_ps(xfBuf_im, wfBuf_im);
      const __m128 c = _mm_mul_ps(xfBuf_re, wfBuf_im);
      const __m128 d = _mm_mul_ps(xfBuf_im, wfBuf_re);
      const __m128 e = _mm_sub_ps(a, b);
      const __m128 f = _mm_add_ps(c, d);
      const __m128 g = _mm_add_ps(yf_re, e);
      const __m128 h = _mm_add_ps(yf_im, f);
      _mm_storeu_ps(&yf[0][j], g);
      _mm_storeu_ps(&yf[1][j], h);
    }
    // scalar code for the remaining items.
    for (; j < PART_LEN1; j++) {
      yf[0][j] += MulRe(aec->xfBuf[0][xPos + j], aec->xfBuf[1][xPos + j],
                        aec->wfBuf[0][ pos + j], aec->wfBuf[1][ pos + j]);
      yf[1][j] += MulIm(aec->xfBuf[0][xPos + j], aec->xfBuf[1][xPos + j],
                        aec->wfBuf[0][ pos + j], aec->wfBuf[1][ pos + j]);
    }
  }
}

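// Normalizes the error spectrum by the far-end power estimate xPow, caps
// the magnitude of each normalized bin at errThresh, and applies the step
// size mu. The cmpgt/and/andnot/or sequence in the vector loop is the
// branchless SSE2 equivalent of the if-statement in the scalar tail.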
static void ScaleErrorSignalSSE2(aec_t *aec, float ef[2][PART_LEN1])
{
  const __m128 k1e_10f = _mm_set1_ps(1e-10f);
  const __m128 kThresh = _mm_set1_ps(aec->errThresh);
  const __m128 kMu = _mm_set1_ps(aec->mu);

  int i;
  // vectorized code (four at once)
  for (i = 0; i + 3 < PART_LEN1; i += 4) {
    const __m128 xPow = _mm_loadu_ps(&aec->xPow[i]);
    const __m128 ef_re_base = _mm_loadu_ps(&ef[0][i]);
    const __m128 ef_im_base = _mm_loadu_ps(&ef[1][i]);

    const __m128 xPowPlus = _mm_add_ps(xPow, k1e_10f);
    __m128 ef_re = _mm_div_ps(ef_re_base, xPowPlus);
    __m128 ef_im = _mm_div_ps(ef_im_base, xPowPlus);
    const __m128 ef_re2 = _mm_mul_ps(ef_re, ef_re);
    const __m128 ef_im2 = _mm_mul_ps(ef_im, ef_im);
    const __m128 ef_sum2 = _mm_add_ps(ef_re2, ef_im2);
    const __m128 absEf = _mm_sqrt_ps(ef_sum2);
    const __m128 bigger = _mm_cmpgt_ps(absEf, kThresh);
    __m128 absEfPlus = _mm_add_ps(absEf, k1e_10f);
    const __m128 absEfInv = _mm_div_ps(kThresh, absEfPlus);
    __m128 ef_re_if = _mm_mul_ps(ef_re, absEfInv);
    __m128 ef_im_if = _mm_mul_ps(ef_im, absEfInv);
    ef_re_if = _mm_and_ps(bigger, ef_re_if);
    ef_im_if = _mm_and_ps(bigger, ef_im_if);
    ef_re = _mm_andnot_ps(bigger, ef_re);
    ef_im = _mm_andnot_ps(bigger, ef_im);
    ef_re = _mm_or_ps(ef_re, ef_re_if);
    ef_im = _mm_or_ps(ef_im, ef_im_if);
    ef_re = _mm_mul_ps(ef_re, kMu);
    ef_im = _mm_mul_ps(ef_im, kMu);

    _mm_storeu_ps(&ef[0][i], ef_re);
    _mm_storeu_ps(&ef[1][i], ef_im);
  }
  // scalar code for the remaining items.
  for (; i < PART_LEN1; i++) {
    float absEf;
    ef[0][i] /= (aec->xPow[i] + 1e-10f);
    ef[1][i] /= (aec->xPow[i] + 1e-10f);
    absEf = sqrtf(ef[0][i] * ef[0][i] + ef[1][i] * ef[1][i]);

    if (absEf > aec->errThresh) {
      absEf = aec->errThresh / (absEf + 1e-10f);
      ef[0][i] *= absEf;
      ef[1][i] *= absEf;
    }

    // Stepsize factor
    ef[0][i] *= aec->mu;
    ef[1][i] *= aec->mu;
  }
}

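// Adapts the filter partitions: correlates conjugate(xfBuf) with the scaled
// error ef, applies the standard constraint (inverse FFT, zero the second
// half of the impulse response, forward FFT), and accumulates the result
// into wfBuf.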
static void FilterAdaptationSSE2(aec_t *aec, float *fft,
                                 float ef[2][PART_LEN1]) {
  int i, j;
  for (i = 0; i < NR_PART; i++) {
    int xPos = (i + aec->xfBufBlockPos) * PART_LEN1;
    int pos = i * PART_LEN1;
    // Check for wrap
    if (i + aec->xfBufBlockPos >= NR_PART) {
      xPos -= NR_PART * PART_LEN1;
    }

    // Process the whole array...
    for (j = 0; j < PART_LEN; j += 4) {
      // Load xfBuf and ef.
      const __m128 xfBuf_re = _mm_loadu_ps(&aec->xfBuf[0][xPos + j]);
      const __m128 xfBuf_im = _mm_loadu_ps(&aec->xfBuf[1][xPos + j]);
      const __m128 ef_re = _mm_loadu_ps(&ef[0][j]);
      const __m128 ef_im = _mm_loadu_ps(&ef[1][j]);
      // Calculate the product of conjugate(xfBuf) by ef.
      //   re(conjugate(a) * b) = aRe * bRe + aIm * bIm
      //   im(conjugate(a) * b) = aRe * bIm - aIm * bRe
      const __m128 a = _mm_mul_ps(xfBuf_re, ef_re);
      const __m128 b = _mm_mul_ps(xfBuf_im, ef_im);
      const __m128 c = _mm_mul_ps(xfBuf_re, ef_im);
      const __m128 d = _mm_mul_ps(xfBuf_im, ef_re);
      const __m128 e = _mm_add_ps(a, b);
      const __m128 f = _mm_sub_ps(c, d);
      // Interleave real and imaginary parts.
      const __m128 g = _mm_unpacklo_ps(e, f);
      const __m128 h = _mm_unpackhi_ps(e, f);
      // Store
      _mm_storeu_ps(&fft[2 * j + 0], g);
      _mm_storeu_ps(&fft[2 * j + 4], h);
    }
    // ... and fixup the first imaginary entry.
    fft[1] = MulRe(aec->xfBuf[0][xPos + PART_LEN],
                   -aec->xfBuf[1][xPos + PART_LEN],
                   ef[0][PART_LEN], ef[1][PART_LEN]);

    aec_rdft_inverse_128(fft);
    memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN);

    // fft scaling
    {
      float scale = 2.0f / PART_LEN2;
      const __m128 scale_ps = _mm_load_ps1(&scale);
      for (j = 0; j < PART_LEN; j += 4) {
        const __m128 fft_ps = _mm_loadu_ps(&fft[j]);
        const __m128 fft_scale = _mm_mul_ps(fft_ps, scale_ps);
        _mm_storeu_ps(&fft[j], fft_scale);
      }
    }
    aec_rdft_forward_128(fft);

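    // After the forward transform, fft[1] holds the real coefficient of the
    // Nyquist bin, not im(bin 0). It is added to its proper slot just below;
    // wt1 saves wfBuf[1][pos] so that the stray contribution the vector loop
    // adds there from fft[1] can be undone afterwards.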
    {
      float wt1 = aec->wfBuf[1][pos];
      aec->wfBuf[0][pos + PART_LEN] += fft[1];
      for (j = 0; j < PART_LEN; j += 4) {
        __m128 wtBuf_re = _mm_loadu_ps(&aec->wfBuf[0][pos + j]);
        __m128 wtBuf_im = _mm_loadu_ps(&aec->wfBuf[1][pos + j]);
        const __m128 fft0 = _mm_loadu_ps(&fft[2 * j + 0]);
        const __m128 fft4 = _mm_loadu_ps(&fft[2 * j + 4]);
        const __m128 fft_re = _mm_shuffle_ps(fft0, fft4,
                                             _MM_SHUFFLE(2, 0, 2, 0));
        const __m128 fft_im = _mm_shuffle_ps(fft0, fft4,
                                             _MM_SHUFFLE(3, 1, 3, 1));
        wtBuf_re = _mm_add_ps(wtBuf_re, fft_re);
        wtBuf_im = _mm_add_ps(wtBuf_im, fft_im);
        _mm_storeu_ps(&aec->wfBuf[0][pos + j], wtBuf_re);
        _mm_storeu_ps(&aec->wfBuf[1][pos + j], wtBuf_im);
      }
      aec->wfBuf[1][pos] = wt1;
    }
  }
}

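// Vectorized approximation of powf() over four lanes, used by
// OverdriveAndSuppressSSE2 below. Intended for non-negative bases such as
// the hNl gains; negative bases give meaningless results.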
static __m128 mm_pow_ps(__m128 a, __m128 b)
{
  // a^b = exp2(b * log2(a))
  //   exp2(x) and log2(x) are calculated using polynomial approximations.
  __m128 log2_a, b_log2_a, a_exp_b;

  // Calculate log2(x), x = a.
  {
    // To calculate log2(x), we decompose x like this:
    //   x = y * 2^n
    //     n is an integer
    //     y is in the [1.0, 2.0) range
    //
    //   log2(x) = log2(y) + n
    //     n can be evaluated by playing with float representation.
    //     log2(y) in a small range can be approximated; this code uses an
    //       order five polynomial approximation. The coefficients have been
    //       estimated with the Remez algorithm and the resulting polynomial
    //       has a maximum relative error of 0.00086%.

    // Compute n.
    //   This is done by masking out the exponent, shifting it into the top
    //   bits of the mantissa, putting eight into the biased exponent (to
    //   compensate for the exponent having been shifted into the fractional
    //   part), and finally getting rid of the implicit leading one from the
    //   mantissa by subtracting it out.
    static const ALIGN16_BEG int float_exponent_mask[4] ALIGN16_END =
        {0x7F800000, 0x7F800000, 0x7F800000, 0x7F800000};
    static const ALIGN16_BEG int eight_biased_exponent[4] ALIGN16_END =
        {0x43800000, 0x43800000, 0x43800000, 0x43800000};
    static const ALIGN16_BEG int implicit_leading_one[4] ALIGN16_END =
        {0x43BF8000, 0x43BF8000, 0x43BF8000, 0x43BF8000};
    static const int shift_exponent_into_top_mantissa = 8;
    const __m128 two_n = _mm_and_ps(a, *((__m128 *)float_exponent_mask));
    const __m128 n_1 = _mm_castsi128_ps(_mm_srli_epi32(
        _mm_castps_si128(two_n), shift_exponent_into_top_mantissa));
    const __m128 n_0 = _mm_or_ps(n_1, *((__m128 *)eight_biased_exponent));
    const __m128 n = _mm_sub_ps(n_0, *((__m128 *)implicit_leading_one));

    // Compute y.
    static const ALIGN16_BEG int mantissa_mask[4] ALIGN16_END =
        {0x007FFFFF, 0x007FFFFF, 0x007FFFFF, 0x007FFFFF};
    static const ALIGN16_BEG int zero_biased_exponent_is_one[4] ALIGN16_END =
        {0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000};
    const __m128 mantissa = _mm_and_ps(a, *((__m128 *)mantissa_mask));
    const __m128 y = _mm_or_ps(
        mantissa, *((__m128 *)zero_biased_exponent_is_one));

    // Approximate log2(y) ~= (y - 1) * pol5(y).
    //   pol5(y) = C5 * y^5 + C4 * y^4 + C3 * y^3 + C2 * y^2 + C1 * y + C0
    static const ALIGN16_BEG float C5[4] ALIGN16_END =
        {-3.4436006e-2f, -3.4436006e-2f, -3.4436006e-2f, -3.4436006e-2f};
    static const ALIGN16_BEG float C4[4] ALIGN16_END =
        {3.1821337e-1f, 3.1821337e-1f, 3.1821337e-1f, 3.1821337e-1f};
    static const ALIGN16_BEG float C3[4] ALIGN16_END =
        {-1.2315303f, -1.2315303f, -1.2315303f, -1.2315303f};
    static const ALIGN16_BEG float C2[4] ALIGN16_END =
        {2.5988452f, 2.5988452f, 2.5988452f, 2.5988452f};
    static const ALIGN16_BEG float C1[4] ALIGN16_END =
        {-3.3241990f, -3.3241990f, -3.3241990f, -3.3241990f};
    static const ALIGN16_BEG float C0[4] ALIGN16_END =
        {3.1157899f, 3.1157899f, 3.1157899f, 3.1157899f};
    const __m128 pol5_y_0 = _mm_mul_ps(y, *((__m128 *)C5));
    const __m128 pol5_y_1 = _mm_add_ps(pol5_y_0, *((__m128 *)C4));
    const __m128 pol5_y_2 = _mm_mul_ps(pol5_y_1, y);
    const __m128 pol5_y_3 = _mm_add_ps(pol5_y_2, *((__m128 *)C3));
    const __m128 pol5_y_4 = _mm_mul_ps(pol5_y_3, y);
    const __m128 pol5_y_5 = _mm_add_ps(pol5_y_4, *((__m128 *)C2));
    const __m128 pol5_y_6 = _mm_mul_ps(pol5_y_5, y);
    const __m128 pol5_y_7 = _mm_add_ps(pol5_y_6, *((__m128 *)C1));
    const __m128 pol5_y_8 = _mm_mul_ps(pol5_y_7, y);
    const __m128 pol5_y = _mm_add_ps(pol5_y_8, *((__m128 *)C0));
    const __m128 y_minus_one = _mm_sub_ps(
        y, *((__m128 *)zero_biased_exponent_is_one));
    const __m128 log2_y = _mm_mul_ps(y_minus_one, pol5_y);

    // Combine parts.
    log2_a = _mm_add_ps(n, log2_y);
  }

  // b * log2(a)
  b_log2_a = _mm_mul_ps(b, log2_a);

  // Calculate exp2(x), x = b * log2(a).
  {
    // To calculate 2^x, we decompose x like this:
    //   x = n + y
    //     n is an integer, the value of x - 0.5 rounded down, therefore
    //     y is in the [0.5, 1.5) range
    //
    //   2^x = 2^n * 2^y
    //     2^n can be evaluated by playing with float representation.
    //     2^y in a small range can be approximated; this code uses an order
    //       two polynomial approximation. The coefficients have been
    //       estimated with the Remez algorithm and the resulting polynomial
    //       has a maximum relative error of 0.17%.

    // To avoid over/underflow, we reduce the range of input to ]-127, 129].
    static const ALIGN16_BEG float max_input[4] ALIGN16_END =
        {129.f, 129.f, 129.f, 129.f};
    static const ALIGN16_BEG float min_input[4] ALIGN16_END =
        {-126.99999f, -126.99999f, -126.99999f, -126.99999f};
    const __m128 x_min = _mm_min_ps(b_log2_a, *((__m128 *)max_input));
    const __m128 x_max = _mm_max_ps(x_min, *((__m128 *)min_input));
    // Compute n.
    static const ALIGN16_BEG float half[4] ALIGN16_END =
        {0.5f, 0.5f, 0.5f, 0.5f};
    const __m128 x_minus_half = _mm_sub_ps(x_max, *((__m128 *)half));
    const __m128i x_minus_half_floor = _mm_cvtps_epi32(x_minus_half);
    // Compute 2^n.
    static const ALIGN16_BEG int float_exponent_bias[4] ALIGN16_END =
        {127, 127, 127, 127};
    static const int float_exponent_shift = 23;
    const __m128i two_n_exponent = _mm_add_epi32(
        x_minus_half_floor, *((__m128i *)float_exponent_bias));
    const __m128 two_n = _mm_castsi128_ps(_mm_slli_epi32(
        two_n_exponent, float_exponent_shift));
    // Compute y.
    const __m128 y = _mm_sub_ps(x_max, _mm_cvtepi32_ps(x_minus_half_floor));
    // Approximate 2^y ~= C2 * y^2 + C1 * y + C0.
    static const ALIGN16_BEG float C2[4] ALIGN16_END =
        {3.3718944e-1f, 3.3718944e-1f, 3.3718944e-1f, 3.3718944e-1f};
    static const ALIGN16_BEG float C1[4] ALIGN16_END =
        {6.5763628e-1f, 6.5763628e-1f, 6.5763628e-1f, 6.5763628e-1f};
    static const ALIGN16_BEG float C0[4] ALIGN16_END =
        {1.0017247f, 1.0017247f, 1.0017247f, 1.0017247f};
    const __m128 exp2_y_0 = _mm_mul_ps(y, *((__m128 *)C2));
    const __m128 exp2_y_1 = _mm_add_ps(exp2_y_0, *((__m128 *)C1));
    const __m128 exp2_y_2 = _mm_mul_ps(exp2_y_1, y);
    const __m128 exp2_y = _mm_add_ps(exp2_y_2, *((__m128 *)C0));

    // Combine parts.
    a_exp_b = _mm_mul_ps(exp2_y, two_n);
  }
  return a_exp_b;
}

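// For reference, the scalar equivalent of mm_pow_ps for one lane and a > 0
// (a sketch for sanity checking, not part of the production path):
//   float pow_ref(float a, float b) { return exp2f(b * log2f(a)); }
// The vector code above replaces log2f/exp2f with the two Remez polynomials.
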
extern const float WebRtcAec_weightCurve[65];
extern const float WebRtcAec_overDriveCurve[65];

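// Applies the echo suppression gain hNl to the error spectrum: each subband
// gain is first weighted against the feedback level hNlFb, then raised to
// the overdrive-scaled power curve, and the sign of the imaginary part is
// flipped to compensate for the Ooura FFT convention noted below.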
static void OverdriveAndSuppressSSE2(aec_t *aec, float hNl[PART_LEN1],
                                     const float hNlFb,
                                     float efw[2][PART_LEN1]) {
  int i;
  const __m128 vec_hNlFb = _mm_set1_ps(hNlFb);
  const __m128 vec_one = _mm_set1_ps(1.0f);
  const __m128 vec_minus_one = _mm_set1_ps(-1.0f);
  const __m128 vec_overDriveSm = _mm_set1_ps(aec->overDriveSm);
  // vectorized code (four at once)
  for (i = 0; i + 3 < PART_LEN1; i += 4) {
    // Weight subbands
    __m128 vec_hNl = _mm_loadu_ps(&hNl[i]);
    const __m128 vec_weightCurve = _mm_loadu_ps(&WebRtcAec_weightCurve[i]);
    const __m128 bigger = _mm_cmpgt_ps(vec_hNl, vec_hNlFb);
    const __m128 vec_weightCurve_hNlFb = _mm_mul_ps(
        vec_weightCurve, vec_hNlFb);
    const __m128 vec_one_weightCurve = _mm_sub_ps(vec_one, vec_weightCurve);
    const __m128 vec_one_weightCurve_hNl = _mm_mul_ps(
        vec_one_weightCurve, vec_hNl);
    const __m128 vec_if0 = _mm_andnot_ps(bigger, vec_hNl);
    const __m128 vec_if1 = _mm_and_ps(
        bigger, _mm_add_ps(vec_weightCurve_hNlFb, vec_one_weightCurve_hNl));
    vec_hNl = _mm_or_ps(vec_if0, vec_if1);

    {
      const __m128 vec_overDriveCurve = _mm_loadu_ps(
          &WebRtcAec_overDriveCurve[i]);
      const __m128 vec_overDriveSm_overDriveCurve = _mm_mul_ps(
          vec_overDriveSm, vec_overDriveCurve);
      vec_hNl = mm_pow_ps(vec_hNl, vec_overDriveSm_overDriveCurve);
      _mm_storeu_ps(&hNl[i], vec_hNl);
    }

    // Suppress error signal
    {
      __m128 vec_efw_re = _mm_loadu_ps(&efw[0][i]);
      __m128 vec_efw_im = _mm_loadu_ps(&efw[1][i]);
      vec_efw_re = _mm_mul_ps(vec_efw_re, vec_hNl);
      vec_efw_im = _mm_mul_ps(vec_efw_im, vec_hNl);

      // Ooura fft returns incorrect sign on imaginary component. It matters
      // here because we are making an additive change with comfort noise.
      vec_efw_im = _mm_mul_ps(vec_efw_im, vec_minus_one);
      _mm_storeu_ps(&efw[0][i], vec_efw_re);
      _mm_storeu_ps(&efw[1][i], vec_efw_im);
    }
  }
  // scalar code for the remaining items.
  for (; i < PART_LEN1; i++) {
    // Weight subbands
    if (hNl[i] > hNlFb) {
      hNl[i] = WebRtcAec_weightCurve[i] * hNlFb +
               (1 - WebRtcAec_weightCurve[i]) * hNl[i];
    }
    hNl[i] = powf(hNl[i], aec->overDriveSm * WebRtcAec_overDriveCurve[i]);

    // Suppress error signal
    efw[0][i] *= hNl[i];
    efw[1][i] *= hNl[i];

    // Ooura fft returns incorrect sign on imaginary component. It matters
    // here because we are making an additive change with comfort noise.
    efw[1][i] *= -1;
  }
}

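// Installs the SSE2 implementations into the AEC's function pointers;
// intended to be called once at initialization when run-time CPU detection
// finds SSE2 support.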
void WebRtcAec_InitAec_SSE2(void) {
  WebRtcAec_FilterFar = FilterFarSSE2;
  WebRtcAec_ScaleErrorSignal = ScaleErrorSignalSSE2;
  WebRtcAec_FilterAdaptation = FilterAdaptationSSE2;
  WebRtcAec_OverdriveAndSuppress = OverdriveAndSuppressSSE2;
}

#endif  // WEBRTC_USE_SSE2