/*
 *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

/*
 * The core AEC algorithm, SSE2 version of speed-critical functions.
 */

#if defined(__SSE2__)
#include <emmintrin.h>
#include <math.h>
#include <string.h>  // memset

#include "aec_core.h"
#include "aec_rdft.h"

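// Complex multiply helpers: for a = aRe + i*aIm and b = bRe + i*bIm,
// MulRe() and MulIm() return the real and imaginary parts of a * b.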
__inline static float MulRe(float aRe, float aIm, float bRe, float bIm)
{
  return aRe * bRe - aIm * bIm;
}

__inline static float MulIm(float aRe, float aIm, float bRe, float bIm)
{
  return aRe * bIm + aIm * bRe;
}

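// Frequency-domain echo estimation: for each of the NR_PART partitions of
// the adaptive filter, accumulate the complex product of the far-end
// spectrum (xfBuf, a circular buffer indexed via xfBufBlockPos) and the
// filter taps (wfBuf) into yf.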
static void FilterFarSSE2(aec_t *aec, float yf[2][PART_LEN1])
{
  int i;
  for (i = 0; i < NR_PART; i++) {
    int j;
    int xPos = (i + aec->xfBufBlockPos) * PART_LEN1;
    int pos = i * PART_LEN1;
    // Check for wrap
    if (i + aec->xfBufBlockPos >= NR_PART) {
      xPos -= NR_PART * PART_LEN1;
    }

    // vectorized code (four at once)
    for (j = 0; j + 3 < PART_LEN1; j += 4) {
      const __m128 xfBuf_re = _mm_loadu_ps(&aec->xfBuf[0][xPos + j]);
      const __m128 xfBuf_im = _mm_loadu_ps(&aec->xfBuf[1][xPos + j]);
      const __m128 wfBuf_re = _mm_loadu_ps(&aec->wfBuf[0][pos + j]);
      const __m128 wfBuf_im = _mm_loadu_ps(&aec->wfBuf[1][pos + j]);
      const __m128 yf_re = _mm_loadu_ps(&yf[0][j]);
      const __m128 yf_im = _mm_loadu_ps(&yf[1][j]);
      const __m128 a = _mm_mul_ps(xfBuf_re, wfBuf_re);
      const __m128 b = _mm_mul_ps(xfBuf_im, wfBuf_im);
      const __m128 c = _mm_mul_ps(xfBuf_re, wfBuf_im);
      const __m128 d = _mm_mul_ps(xfBuf_im, wfBuf_re);
      const __m128 e = _mm_sub_ps(a, b);  // re(xf * wf)
      const __m128 f = _mm_add_ps(c, d);  // im(xf * wf)
      const __m128 g = _mm_add_ps(yf_re, e);
      const __m128 h = _mm_add_ps(yf_im, f);
      _mm_storeu_ps(&yf[0][j], g);
      _mm_storeu_ps(&yf[1][j], h);
    }
    // scalar code for the remaining items.
    for (; j < PART_LEN1; j++) {
      yf[0][j] += MulRe(aec->xfBuf[0][xPos + j], aec->xfBuf[1][xPos + j],
                        aec->wfBuf[0][ pos + j], aec->wfBuf[1][ pos + j]);
      yf[1][j] += MulIm(aec->xfBuf[0][xPos + j], aec->xfBuf[1][xPos + j],
                        aec->wfBuf[0][ pos + j], aec->wfBuf[1][ pos + j]);
    }
  }
}

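// Normalize the error spectrum by the far-end power estimate (the NLMS
// step), clamp its magnitude to aec->errThresh, and scale by the step
// size aec->mu.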
static void ScaleErrorSignalSSE2(aec_t *aec, float ef[2][PART_LEN1])
{
  const __m128 k1e_10f = _mm_set1_ps(1e-10f);
  const __m128 kThresh = _mm_set1_ps(aec->errThresh);
  const __m128 kMu = _mm_set1_ps(aec->mu);

  int i;
  // vectorized code (four at once)
  for (i = 0; i + 3 < PART_LEN1; i += 4) {
    const __m128 xPow = _mm_loadu_ps(&aec->xPow[i]);
    const __m128 ef_re_base = _mm_loadu_ps(&ef[0][i]);
    const __m128 ef_im_base = _mm_loadu_ps(&ef[1][i]);

    const __m128 xPowPlus = _mm_add_ps(xPow, k1e_10f);
    __m128 ef_re = _mm_div_ps(ef_re_base, xPowPlus);
    __m128 ef_im = _mm_div_ps(ef_im_base, xPowPlus);
    const __m128 ef_re2 = _mm_mul_ps(ef_re, ef_re);
    const __m128 ef_im2 = _mm_mul_ps(ef_im, ef_im);
    const __m128 ef_sum2 = _mm_add_ps(ef_re2, ef_im2);
    const __m128 absEf = _mm_sqrt_ps(ef_sum2);
    // Branchless clamp to the error threshold: SSE2 has no blend
    // instruction, so select between the clamped and unclamped values
    // with a compare mask and and/andnot/or.
    const __m128 bigger = _mm_cmpgt_ps(absEf, kThresh);
    __m128 absEfPlus = _mm_add_ps(absEf, k1e_10f);
    const __m128 absEfInv = _mm_div_ps(kThresh, absEfPlus);
    __m128 ef_re_if = _mm_mul_ps(ef_re, absEfInv);
    __m128 ef_im_if = _mm_mul_ps(ef_im, absEfInv);
    ef_re_if = _mm_and_ps(bigger, ef_re_if);
    ef_im_if = _mm_and_ps(bigger, ef_im_if);
    ef_re = _mm_andnot_ps(bigger, ef_re);
    ef_im = _mm_andnot_ps(bigger, ef_im);
    ef_re = _mm_or_ps(ef_re, ef_re_if);
    ef_im = _mm_or_ps(ef_im, ef_im_if);
    // Stepsize factor
    ef_re = _mm_mul_ps(ef_re, kMu);
    ef_im = _mm_mul_ps(ef_im, kMu);

    _mm_storeu_ps(&ef[0][i], ef_re);
    _mm_storeu_ps(&ef[1][i], ef_im);
  }
  // scalar code for the remaining items.
  for (; i < PART_LEN1; i++) {
    float absEf;
    ef[0][i] /= (aec->xPow[i] + 1e-10f);
    ef[1][i] /= (aec->xPow[i] + 1e-10f);
    absEf = sqrtf(ef[0][i] * ef[0][i] + ef[1][i] * ef[1][i]);

    if (absEf > aec->errThresh) {
      absEf = aec->errThresh / (absEf + 1e-10f);
      ef[0][i] *= absEf;
      ef[1][i] *= absEf;
    }

    // Stepsize factor
    ef[0][i] *= aec->mu;
    ef[1][i] *= aec->mu;
  }
}

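// NLMS filter adaptation: accumulate the gradient conj(xfBuf) * ef into
// each partition of wfBuf. In the default (constrained) build, the
// gradient is first projected back to a causal impulse response: inverse
// FFT, zero the second half of the time-domain block, forward FFT.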
static void FilterAdaptationSSE2(aec_t *aec, float *fft, float ef[2][PART_LEN1]) {
  int i, j;
  for (i = 0; i < NR_PART; i++) {
    int xPos = (i + aec->xfBufBlockPos) * PART_LEN1;
    int pos = i * PART_LEN1;
    // Check for wrap
    if (i + aec->xfBufBlockPos >= NR_PART) {
      xPos -= NR_PART * PART_LEN1;
    }

#ifdef UNCONSTR
    for (j = 0; j < PART_LEN1; j++) {
      aec->wfBuf[0][pos + j] += MulRe(aec->xfBuf[0][xPos + j],
                                      -aec->xfBuf[1][xPos + j],
                                      ef[0][j], ef[1][j]);
      aec->wfBuf[1][pos + j] += MulIm(aec->xfBuf[0][xPos + j],
                                      -aec->xfBuf[1][xPos + j],
                                      ef[0][j], ef[1][j]);
    }
#else
    // Process the whole array...
    for (j = 0; j < PART_LEN; j += 4) {
      // Load xfBuf and ef.
      const __m128 xfBuf_re = _mm_loadu_ps(&aec->xfBuf[0][xPos + j]);
      const __m128 xfBuf_im = _mm_loadu_ps(&aec->xfBuf[1][xPos + j]);
      const __m128 ef_re = _mm_loadu_ps(&ef[0][j]);
      const __m128 ef_im = _mm_loadu_ps(&ef[1][j]);
      // Calculate the product of conjugate(xfBuf) by ef.
      //   re(conjugate(a) * b) = aRe * bRe + aIm * bIm
      //   im(conjugate(a) * b) = aRe * bIm - aIm * bRe
      const __m128 a = _mm_mul_ps(xfBuf_re, ef_re);
      const __m128 b = _mm_mul_ps(xfBuf_im, ef_im);
      const __m128 c = _mm_mul_ps(xfBuf_re, ef_im);
      const __m128 d = _mm_mul_ps(xfBuf_im, ef_re);
      const __m128 e = _mm_add_ps(a, b);
      const __m128 f = _mm_sub_ps(c, d);
      // Interleave real and imaginary parts.
      const __m128 g = _mm_unpacklo_ps(e, f);
      const __m128 h = _mm_unpackhi_ps(e, f);
      // Store
      _mm_storeu_ps(&fft[2*j + 0], g);
      _mm_storeu_ps(&fft[2*j + 4], h);
    }
    // ... and fixup the first imaginary entry, where the rdft packs the
    // real part of the Nyquist bin (index PART_LEN).
    fft[1] = MulRe(aec->xfBuf[0][xPos + PART_LEN],
                   -aec->xfBuf[1][xPos + PART_LEN],
                   ef[0][PART_LEN], ef[1][PART_LEN]);

    aec_rdft_inverse_128(fft);
    memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN);

    // fft scaling
    {
      float scale = 2.0f / PART_LEN2;
      const __m128 scale_ps = _mm_load_ps1(&scale);
      for (j = 0; j < PART_LEN; j += 4) {
        const __m128 fft_ps = _mm_loadu_ps(&fft[j]);
        const __m128 fft_scale = _mm_mul_ps(fft_ps, scale_ps);
        _mm_storeu_ps(&fft[j], fft_scale);
      }
    }
    aec_rdft_forward_128(fft);

    {
      // Save the imaginary part of bin 0: the vector loop below adds the
      // packed Nyquist value fft[1] into it, although that value belongs
      // to wfBuf[0][pos + PART_LEN].
      float wt1 = aec->wfBuf[1][pos];
      aec->wfBuf[0][pos + PART_LEN] += fft[1];
      for (j = 0; j < PART_LEN; j += 4) {
        __m128 wtBuf_re = _mm_loadu_ps(&aec->wfBuf[0][pos + j]);
        __m128 wtBuf_im = _mm_loadu_ps(&aec->wfBuf[1][pos + j]);
        const __m128 fft0 = _mm_loadu_ps(&fft[2 * j + 0]);
        const __m128 fft4 = _mm_loadu_ps(&fft[2 * j + 4]);
        const __m128 fft_re = _mm_shuffle_ps(fft0, fft4, _MM_SHUFFLE(2, 0, 2, 0));
        const __m128 fft_im = _mm_shuffle_ps(fft0, fft4, _MM_SHUFFLE(3, 1, 3, 1));
        wtBuf_re = _mm_add_ps(wtBuf_re, fft_re);
        wtBuf_im = _mm_add_ps(wtBuf_im, fft_im);
        _mm_storeu_ps(&aec->wfBuf[0][pos + j], wtBuf_re);
        _mm_storeu_ps(&aec->wfBuf[1][pos + j], wtBuf_im);
      }
      aec->wfBuf[1][pos] = wt1;
    }
#endif // UNCONSTR
  }
}

#ifdef _MSC_VER /* visual c++ */
# define ALIGN16_BEG __declspec(align(16))
# define ALIGN16_END
#else /* gcc or icc */
# define ALIGN16_BEG
# define ALIGN16_END __attribute__((aligned(16)))
#endif

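/*
 * For reference, a scalar sketch of what mm_pow_ps computes (an assumed
 * equivalent, illustration only, not part of the original code): the
 * vector version below replaces the libm calls with float bit
 * manipulation plus the Remez polynomial approximations documented in
 * the function body.
 */
#if 0
static float scalar_pow(float a, float b) {
  /* Requires a > 0; the suppression gains passed in satisfy this. */
  return exp2f(b * log2f(a));  /* a^b = 2^(b * log2(a)) */
}
#endif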
static __m128 mm_pow_ps(__m128 a, __m128 b)
{
  // a^b = exp2(b * log2(a))
  //   exp2(x) and log2(x) are calculated using polynomial approximations.
  __m128 log2_a, b_log2_a, a_exp_b;

  // Calculate log2(x), x = a.
  {
    // To calculate log2(x), we decompose x like this:
    //   x = y * 2^n
    //     n is an integer
    //     y is in the [1.0, 2.0) range
    //
    //   log2(x) = log2(y) + n
    //     n       can be evaluated by playing with the float representation.
    //     log2(y) in a small range can be approximated; this code uses an
    //             order five polynomial approximation. The coefficients
    //             have been estimated with the Remez algorithm and the
    //             resulting polynomial has a maximum relative error of
    //             0.00086%.
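    //   Example: a = 6.0f decomposes as 1.5 * 2^2, so n = 2, y = 1.5 and
    //   log2(6) = 2 + log2(1.5) ~= 2.585.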

    // Compute n.
    //    This is done by masking the exponent, shifting it into the top
    //    bit of the mantissa, putting eight into the biased exponent (to
    //    compensate for the exponent having been shifted into the top of
    //    the fractional part) and finally getting rid of the implicit
    //    leading one from the mantissa by subtracting it out.
    static const ALIGN16_BEG int float_exponent_mask[4] ALIGN16_END =
        {0x7F800000, 0x7F800000, 0x7F800000, 0x7F800000};
    static const ALIGN16_BEG int eight_biased_exponent[4] ALIGN16_END =
        {0x43800000, 0x43800000, 0x43800000, 0x43800000};
    static const ALIGN16_BEG int implicit_leading_one[4] ALIGN16_END =
        {0x43BF8000, 0x43BF8000, 0x43BF8000, 0x43BF8000};
    static const int shift_exponent_into_top_mantissa = 8;
    const __m128 two_n = _mm_and_ps(a, *((__m128 *)float_exponent_mask));
    const __m128 n_1 = _mm_castsi128_ps(_mm_srli_epi32(
        _mm_castps_si128(two_n), shift_exponent_into_top_mantissa));
    const __m128 n_0 = _mm_or_ps(n_1, *((__m128 *)eight_biased_exponent));
    const __m128 n   = _mm_sub_ps(n_0, *((__m128 *)implicit_leading_one));

    // Compute y.
    static const ALIGN16_BEG int mantissa_mask[4] ALIGN16_END =
        {0x007FFFFF, 0x007FFFFF, 0x007FFFFF, 0x007FFFFF};
    static const ALIGN16_BEG int zero_biased_exponent_is_one[4] ALIGN16_END =
        {0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000};
    const __m128 mantissa = _mm_and_ps(a, *((__m128 *)mantissa_mask));
    const __m128 y        = _mm_or_ps(
        mantissa,  *((__m128 *)zero_biased_exponent_is_one));

    // Approximate log2(y) ~= (y - 1) * pol5(y).
    //    pol5(y) = C5 * y^5 + C4 * y^4 + C3 * y^3 + C2 * y^2 + C1 * y + C0
    static const ALIGN16_BEG float C5[4] ALIGN16_END =
        {-3.4436006e-2f, -3.4436006e-2f, -3.4436006e-2f, -3.4436006e-2f};
    static const ALIGN16_BEG float C4[4] ALIGN16_END =
        {3.1821337e-1f, 3.1821337e-1f, 3.1821337e-1f, 3.1821337e-1f};
    static const ALIGN16_BEG float C3[4] ALIGN16_END =
        {-1.2315303f, -1.2315303f, -1.2315303f, -1.2315303f};
    static const ALIGN16_BEG float C2[4] ALIGN16_END =
        {2.5988452f, 2.5988452f, 2.5988452f, 2.5988452f};
    static const ALIGN16_BEG float C1[4] ALIGN16_END =
        {-3.3241990f, -3.3241990f, -3.3241990f, -3.3241990f};
    static const ALIGN16_BEG float C0[4] ALIGN16_END =
        {3.1157899f, 3.1157899f, 3.1157899f, 3.1157899f};
    // Evaluate pol5(y) with Horner's scheme.
    const __m128 pol5_y_0 = _mm_mul_ps(y,        *((__m128 *)C5));
    const __m128 pol5_y_1 = _mm_add_ps(pol5_y_0, *((__m128 *)C4));
    const __m128 pol5_y_2 = _mm_mul_ps(pol5_y_1, y);
    const __m128 pol5_y_3 = _mm_add_ps(pol5_y_2, *((__m128 *)C3));
    const __m128 pol5_y_4 = _mm_mul_ps(pol5_y_3, y);
    const __m128 pol5_y_5 = _mm_add_ps(pol5_y_4, *((__m128 *)C2));
    const __m128 pol5_y_6 = _mm_mul_ps(pol5_y_5, y);
    const __m128 pol5_y_7 = _mm_add_ps(pol5_y_6, *((__m128 *)C1));
    const __m128 pol5_y_8 = _mm_mul_ps(pol5_y_7, y);
    const __m128 pol5_y   = _mm_add_ps(pol5_y_8, *((__m128 *)C0));
    const __m128 y_minus_one = _mm_sub_ps(
        y, *((__m128 *)zero_biased_exponent_is_one));
    const __m128 log2_y = _mm_mul_ps(y_minus_one, pol5_y);

    // Combine parts.
    log2_a = _mm_add_ps(n, log2_y);
  }

  // b * log2(a)
  b_log2_a = _mm_mul_ps(b, log2_a);

  // Calculate exp2(x), x = b * log2(a).
  {
    // To calculate 2^x, we decompose x like this:
    //   x = n + y
    //     n is an integer, the value of x - 0.5 rounded down, therefore
    //     y is in the [0.5, 1.5) range
    //
    //   2^x = 2^n * 2^y
    //     2^n can be evaluated by playing with the float representation.
    //     2^y in a small range can be approximated; this code uses an
    //         order two polynomial approximation. The coefficients have
    //         been estimated with the Remez algorithm and the resulting
    //         polynomial has a maximum relative error of 0.17%.
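    //   Example: x = 3.75 splits as n = 3, y = 0.75, so
    //   2^3.75 = 2^3 * 2^0.75 ~= 8 * 1.682.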

    // To avoid over/underflow, we reduce the range of input to (-127, 129].
    static const ALIGN16_BEG float max_input[4] ALIGN16_END =
        {129.f, 129.f, 129.f, 129.f};
    static const ALIGN16_BEG float min_input[4] ALIGN16_END =
        {-126.99999f, -126.99999f, -126.99999f, -126.99999f};
    const __m128 x_min = _mm_min_ps(b_log2_a, *((__m128 *)max_input));
    const __m128 x_max = _mm_max_ps(x_min,    *((__m128 *)min_input));
    // Compute n.
    static const ALIGN16_BEG float half[4] ALIGN16_END =
        {0.5f, 0.5f, 0.5f, 0.5f};
    const __m128  x_minus_half = _mm_sub_ps(x_max, *((__m128 *)half));
    const __m128i x_minus_half_floor = _mm_cvtps_epi32(x_minus_half);
    // Compute 2^n by building the float bit pattern directly: add the
    // exponent bias and shift into the exponent field.
    static const ALIGN16_BEG int float_exponent_bias[4] ALIGN16_END =
        {127, 127, 127, 127};
    static const int float_exponent_shift = 23;
    const __m128i two_n_exponent = _mm_add_epi32(
        x_minus_half_floor, *((__m128i *)float_exponent_bias));
    const __m128  two_n = _mm_castsi128_ps(_mm_slli_epi32(
        two_n_exponent, float_exponent_shift));
    // Compute y.
    const __m128 y = _mm_sub_ps(x_max, _mm_cvtepi32_ps(x_minus_half_floor));
    // Approximate 2^y ~= C2 * y^2 + C1 * y + C0.
    static const ALIGN16_BEG float C2[4] ALIGN16_END =
        {3.3718944e-1f, 3.3718944e-1f, 3.3718944e-1f, 3.3718944e-1f};
    static const ALIGN16_BEG float C1[4] ALIGN16_END =
        {6.5763628e-1f, 6.5763628e-1f, 6.5763628e-1f, 6.5763628e-1f};
    static const ALIGN16_BEG float C0[4] ALIGN16_END =
        {1.0017247f, 1.0017247f, 1.0017247f, 1.0017247f};
    const __m128 exp2_y_0 = _mm_mul_ps(y,        *((__m128 *)C2));
    const __m128 exp2_y_1 = _mm_add_ps(exp2_y_0, *((__m128 *)C1));
    const __m128 exp2_y_2 = _mm_mul_ps(exp2_y_1, y);
    const __m128 exp2_y   = _mm_add_ps(exp2_y_2, *((__m128 *)C0));

    // Combine parts.
    a_exp_b = _mm_mul_ps(exp2_y, two_n);
  }
  return a_exp_b;
}

extern const float WebRtcAec_weightCurve[65];
extern const float WebRtcAec_overDriveCurve[65];

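// Apply suppression to the error spectrum: where the suppression gain hNl
// exceeds the feedback-band value hNlFb, pull it toward hNlFb using
// weightCurve; then sharpen it as hNl^(overDriveSm * overDriveCurve) and
// scale the error spectrum by the result.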
static void OverdriveAndSuppressSSE2(aec_t *aec, float hNl[PART_LEN1],
                                     const float hNlFb,
                                     float efw[2][PART_LEN1]) {
  int i;
  const __m128 vec_hNlFb = _mm_set1_ps(hNlFb);
  const __m128 vec_one = _mm_set1_ps(1.0f);
  const __m128 vec_minus_one = _mm_set1_ps(-1.0f);
  const __m128 vec_overDriveSm = _mm_set1_ps(aec->overDriveSm);
  // vectorized code (four at once)
  for (i = 0; i + 3 < PART_LEN1; i += 4) {
    // Weight subbands
    __m128 vec_hNl = _mm_loadu_ps(&hNl[i]);
    const __m128 vec_weightCurve = _mm_loadu_ps(&WebRtcAec_weightCurve[i]);
    const __m128 bigger = _mm_cmpgt_ps(vec_hNl, vec_hNlFb);
    const __m128 vec_weightCurve_hNlFb = _mm_mul_ps(
        vec_weightCurve, vec_hNlFb);
    const __m128 vec_one_weightCurve = _mm_sub_ps(vec_one, vec_weightCurve);
    const __m128 vec_one_weightCurve_hNl = _mm_mul_ps(
        vec_one_weightCurve, vec_hNl);
    // Branchless select between the weighted and original values.
    const __m128 vec_if0 = _mm_andnot_ps(bigger, vec_hNl);
    const __m128 vec_if1 = _mm_and_ps(
        bigger, _mm_add_ps(vec_weightCurve_hNlFb, vec_one_weightCurve_hNl));
    vec_hNl = _mm_or_ps(vec_if0, vec_if1);

    {
      const __m128 vec_overDriveCurve = _mm_loadu_ps(
          &WebRtcAec_overDriveCurve[i]);
      const __m128 vec_overDriveSm_overDriveCurve = _mm_mul_ps(
          vec_overDriveSm, vec_overDriveCurve);
      vec_hNl = mm_pow_ps(vec_hNl, vec_overDriveSm_overDriveCurve);
      _mm_storeu_ps(&hNl[i], vec_hNl);
    }

    // Suppress error signal
    {
      __m128 vec_efw_re = _mm_loadu_ps(&efw[0][i]);
      __m128 vec_efw_im = _mm_loadu_ps(&efw[1][i]);
      vec_efw_re = _mm_mul_ps(vec_efw_re, vec_hNl);
      vec_efw_im = _mm_mul_ps(vec_efw_im, vec_hNl);

      // Ooura fft returns incorrect sign on imaginary component. It matters
      // here because we are making an additive change with comfort noise.
      vec_efw_im = _mm_mul_ps(vec_efw_im, vec_minus_one);
      _mm_storeu_ps(&efw[0][i], vec_efw_re);
      _mm_storeu_ps(&efw[1][i], vec_efw_im);
    }
  }
  // scalar code for the remaining items.
  for (; i < PART_LEN1; i++) {
    // Weight subbands
    if (hNl[i] > hNlFb) {
      hNl[i] = WebRtcAec_weightCurve[i] * hNlFb +
          (1 - WebRtcAec_weightCurve[i]) * hNl[i];
    }
    hNl[i] = powf(hNl[i], aec->overDriveSm * WebRtcAec_overDriveCurve[i]);

    // Suppress error signal
    efw[0][i] *= hNl[i];
    efw[1][i] *= hNl[i];

    // Ooura fft returns incorrect sign on imaginary component. It matters
    // here because we are making an additive change with comfort noise.
    efw[1][i] *= -1;
  }
}

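// Rebind the AEC function pointers (declared in aec_core.h) to the SSE2
// implementations above. Expected to be called once during AEC
// initialization, after runtime detection of SSE2 support.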
void WebRtcAec_InitAec_SSE2(void) {
  WebRtcAec_FilterFar = FilterFarSSE2;
  WebRtcAec_ScaleErrorSignal = ScaleErrorSignalSSE2;
  WebRtcAec_FilterAdaptation = FilterAdaptationSSE2;
  WebRtcAec_OverdriveAndSuppress = OverdriveAndSuppressSSE2;
}

#endif  // __SSE2__