/*
 *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

/*
 * The core AEC algorithm, SSE2 version of speed-critical functions.
 */

#include <emmintrin.h>
#include <math.h>
#include <string.h>  // memset

#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_processing/aec/aec_common.h"
#include "webrtc/modules/audio_processing/aec/aec_core_internal.h"
#include "webrtc/modules/audio_processing/aec/aec_rdft.h"

__inline static float MulRe(float aRe, float aIm, float bRe, float bIm) {
  return aRe * bRe - aIm * bIm;
}

__inline static float MulIm(float aRe, float aIm, float bRe, float bIm) {
  return aRe * bIm + aIm * bRe;
}
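
/* MulRe and MulIm compute the real and imaginary parts of the complex product
 * (aRe + i*aIm)(bRe + i*bIm) = (aRe*bRe - aIm*bIm) + i*(aRe*bIm + aIm*bRe).
 * A quick illustrative check (not part of the original source):
 * (1 + 2i)(3 + 4i) = -5 + 10i, so
 *   MulRe(1.f, 2.f, 3.f, 4.f)  ==  1*3 - 2*4  == -5
 *   MulIm(1.f, 2.f, 3.f, 4.f)  ==  1*4 + 2*3  == 10
 */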

static void FilterFarSSE2(
    int num_partitions,
    int x_fft_buf_block_pos,
    float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
    float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
    float y_fft[2][PART_LEN1]) {
  int i;
  for (i = 0; i < num_partitions; i++) {
    int j;
    int xPos = (i + x_fft_buf_block_pos) * PART_LEN1;
    int pos = i * PART_LEN1;
    // Check for wrap
    if (i + x_fft_buf_block_pos >= num_partitions) {
      xPos -= num_partitions * PART_LEN1;
    }

    // vectorized code (four at once)
    for (j = 0; j + 3 < PART_LEN1; j += 4) {
      const __m128 x_fft_buf_re = _mm_loadu_ps(&x_fft_buf[0][xPos + j]);
      const __m128 x_fft_buf_im = _mm_loadu_ps(&x_fft_buf[1][xPos + j]);
      const __m128 h_fft_buf_re = _mm_loadu_ps(&h_fft_buf[0][pos + j]);
      const __m128 h_fft_buf_im = _mm_loadu_ps(&h_fft_buf[1][pos + j]);
      const __m128 y_fft_re = _mm_loadu_ps(&y_fft[0][j]);
      const __m128 y_fft_im = _mm_loadu_ps(&y_fft[1][j]);
      const __m128 a = _mm_mul_ps(x_fft_buf_re, h_fft_buf_re);
      const __m128 b = _mm_mul_ps(x_fft_buf_im, h_fft_buf_im);
      const __m128 c = _mm_mul_ps(x_fft_buf_re, h_fft_buf_im);
      const __m128 d = _mm_mul_ps(x_fft_buf_im, h_fft_buf_re);
      const __m128 e = _mm_sub_ps(a, b);
      const __m128 f = _mm_add_ps(c, d);
      const __m128 g = _mm_add_ps(y_fft_re, e);
      const __m128 h = _mm_add_ps(y_fft_im, f);
      _mm_storeu_ps(&y_fft[0][j], g);
      _mm_storeu_ps(&y_fft[1][j], h);
    }
    // scalar code for the remaining items.
    for (; j < PART_LEN1; j++) {
      y_fft[0][j] += MulRe(x_fft_buf[0][xPos + j],
                           x_fft_buf[1][xPos + j],
                           h_fft_buf[0][pos + j],
                           h_fft_buf[1][pos + j]);
      y_fft[1][j] += MulIm(x_fft_buf[0][xPos + j],
                           x_fft_buf[1][xPos + j],
                           h_fft_buf[0][pos + j],
                           h_fft_buf[1][pos + j]);
    }
  }
}
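
/* A worked example of the wrap logic above (illustrative, not part of the
 * original source): the far-end spectrum history x_fft_buf is a circular
 * buffer of num_partitions blocks. With num_partitions = 12 and
 * x_fft_buf_block_pos = 10, partition i = 5 would index block 15, which is
 * past the end, so it wraps to block 15 - 12 = 3. The filtered output y_fft
 * accumulates the complex product of each history block with the matching
 * filter partition in h_fft_buf. */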

static void ScaleErrorSignalSSE2(int extended_filter_enabled,
                                 float normal_mu,
                                 float normal_error_threshold,
                                 float x_pow[PART_LEN1],
                                 float ef[2][PART_LEN1]) {
  const __m128 k1e_10f = _mm_set1_ps(1e-10f);
  const __m128 kMu = extended_filter_enabled ? _mm_set1_ps(kExtendedMu)
                                             : _mm_set1_ps(normal_mu);
  const __m128 kThresh = extended_filter_enabled
                             ? _mm_set1_ps(kExtendedErrorThreshold)
                             : _mm_set1_ps(normal_error_threshold);

  int i;
  // vectorized code (four at once)
  for (i = 0; i + 3 < PART_LEN1; i += 4) {
    const __m128 x_pow_local = _mm_loadu_ps(&x_pow[i]);
    const __m128 ef_re_base = _mm_loadu_ps(&ef[0][i]);
    const __m128 ef_im_base = _mm_loadu_ps(&ef[1][i]);

    const __m128 xPowPlus = _mm_add_ps(x_pow_local, k1e_10f);
    __m128 ef_re = _mm_div_ps(ef_re_base, xPowPlus);
    __m128 ef_im = _mm_div_ps(ef_im_base, xPowPlus);
    const __m128 ef_re2 = _mm_mul_ps(ef_re, ef_re);
    const __m128 ef_im2 = _mm_mul_ps(ef_im, ef_im);
    const __m128 ef_sum2 = _mm_add_ps(ef_re2, ef_im2);
    const __m128 absEf = _mm_sqrt_ps(ef_sum2);
    const __m128 bigger = _mm_cmpgt_ps(absEf, kThresh);
    __m128 absEfPlus = _mm_add_ps(absEf, k1e_10f);
    const __m128 absEfInv = _mm_div_ps(kThresh, absEfPlus);
    __m128 ef_re_if = _mm_mul_ps(ef_re, absEfInv);
    __m128 ef_im_if = _mm_mul_ps(ef_im, absEfInv);
    ef_re_if = _mm_and_ps(bigger, ef_re_if);
    ef_im_if = _mm_and_ps(bigger, ef_im_if);
    ef_re = _mm_andnot_ps(bigger, ef_re);
    ef_im = _mm_andnot_ps(bigger, ef_im);
    ef_re = _mm_or_ps(ef_re, ef_re_if);
    ef_im = _mm_or_ps(ef_im, ef_im_if);
    ef_re = _mm_mul_ps(ef_re, kMu);
    ef_im = _mm_mul_ps(ef_im, kMu);

    _mm_storeu_ps(&ef[0][i], ef_re);
    _mm_storeu_ps(&ef[1][i], ef_im);
  }
  // scalar code for the remaining items.
  {
    const float mu =
        extended_filter_enabled ? kExtendedMu : normal_mu;
    const float error_threshold = extended_filter_enabled
                                      ? kExtendedErrorThreshold
                                      : normal_error_threshold;
    for (; i < PART_LEN1; i++) {
      float abs_ef;
      ef[0][i] /= (x_pow[i] + 1e-10f);
      ef[1][i] /= (x_pow[i] + 1e-10f);
      abs_ef = sqrtf(ef[0][i] * ef[0][i] + ef[1][i] * ef[1][i]);

      if (abs_ef > error_threshold) {
        abs_ef = error_threshold / (abs_ef + 1e-10f);
        ef[0][i] *= abs_ef;
        ef[1][i] *= abs_ef;
      }

      // Stepsize factor
      ef[0][i] *= mu;
      ef[1][i] *= mu;
    }
  }
}
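
/* The threshold clamp above uses the standard SSE2 branchless-select idiom:
 * _mm_cmpgt_ps produces an all-ones/all-zeros mask per lane, and
 * and/andnot/or blend the two candidates. A minimal sketch of the idiom
 * (hypothetical helper, not part of the original source):
 *
 *   static __m128 SelectPs(__m128 mask, __m128 if_true, __m128 if_false) {
 *     return _mm_or_ps(_mm_and_ps(mask, if_true),
 *                      _mm_andnot_ps(mask, if_false));
 *   }
 *
 * With mask = _mm_cmpgt_ps(absEf, kThresh), this picks the rescaled error
 * where |ef| exceeds the threshold and the unmodified error elsewhere,
 * exactly as the scalar remainder loop does with its if-statement. */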

static void FilterAdaptationSSE2(
    int num_partitions,
    int x_fft_buf_block_pos,
    float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
    float e_fft[2][PART_LEN1],
    float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1]) {
  float fft[PART_LEN2];
  int i, j;
  for (i = 0; i < num_partitions; i++) {
    int xPos = (i + x_fft_buf_block_pos) * PART_LEN1;
    int pos = i * PART_LEN1;
    // Check for wrap
    if (i + x_fft_buf_block_pos >= num_partitions) {
      xPos -= num_partitions * PART_LEN1;
    }

    // Process the whole array...
    for (j = 0; j < PART_LEN; j += 4) {
      // Load x_fft_buf and e_fft.
      const __m128 x_fft_buf_re = _mm_loadu_ps(&x_fft_buf[0][xPos + j]);
      const __m128 x_fft_buf_im = _mm_loadu_ps(&x_fft_buf[1][xPos + j]);
      const __m128 e_fft_re = _mm_loadu_ps(&e_fft[0][j]);
      const __m128 e_fft_im = _mm_loadu_ps(&e_fft[1][j]);
      // Calculate the product of conjugate(x_fft_buf) by e_fft.
      //   re(conjugate(a) * b) = aRe * bRe + aIm * bIm
      //   im(conjugate(a) * b) = aRe * bIm - aIm * bRe
      const __m128 a = _mm_mul_ps(x_fft_buf_re, e_fft_re);
      const __m128 b = _mm_mul_ps(x_fft_buf_im, e_fft_im);
      const __m128 c = _mm_mul_ps(x_fft_buf_re, e_fft_im);
      const __m128 d = _mm_mul_ps(x_fft_buf_im, e_fft_re);
      const __m128 e = _mm_add_ps(a, b);
      const __m128 f = _mm_sub_ps(c, d);
      // Interleave real and imaginary parts.
      const __m128 g = _mm_unpacklo_ps(e, f);
      const __m128 h = _mm_unpackhi_ps(e, f);
      // Store
      _mm_storeu_ps(&fft[2 * j + 0], g);
      _mm_storeu_ps(&fft[2 * j + 4], h);
    }
    // ... and fixup the first imaginary entry.
    fft[1] = MulRe(x_fft_buf[0][xPos + PART_LEN],
                   -x_fft_buf[1][xPos + PART_LEN],
                   e_fft[0][PART_LEN],
                   e_fft[1][PART_LEN]);

    aec_rdft_inverse_128(fft);
    memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN);

    // fft scaling
    {
      float scale = 2.0f / PART_LEN2;
      const __m128 scale_ps = _mm_load_ps1(&scale);
      for (j = 0; j < PART_LEN; j += 4) {
        const __m128 fft_ps = _mm_loadu_ps(&fft[j]);
        const __m128 fft_scale = _mm_mul_ps(fft_ps, scale_ps);
        _mm_storeu_ps(&fft[j], fft_scale);
      }
    }
    aec_rdft_forward_128(fft);

    {
      float wt1 = h_fft_buf[1][pos];
      h_fft_buf[0][pos + PART_LEN] += fft[1];
      for (j = 0; j < PART_LEN; j += 4) {
        __m128 wtBuf_re = _mm_loadu_ps(&h_fft_buf[0][pos + j]);
        __m128 wtBuf_im = _mm_loadu_ps(&h_fft_buf[1][pos + j]);
        const __m128 fft0 = _mm_loadu_ps(&fft[2 * j + 0]);
        const __m128 fft4 = _mm_loadu_ps(&fft[2 * j + 4]);
        const __m128 fft_re =
            _mm_shuffle_ps(fft0, fft4, _MM_SHUFFLE(2, 0, 2, 0));
        const __m128 fft_im =
            _mm_shuffle_ps(fft0, fft4, _MM_SHUFFLE(3, 1, 3, 1));
        wtBuf_re = _mm_add_ps(wtBuf_re, fft_re);
        wtBuf_im = _mm_add_ps(wtBuf_im, fft_im);
        _mm_storeu_ps(&h_fft_buf[0][pos + j], wtBuf_re);
        _mm_storeu_ps(&h_fft_buf[1][pos + j], wtBuf_im);
      }
      h_fft_buf[1][pos] = wt1;
    }
  }
}
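
/* The inverse-FFT / zero / forward-FFT sequence inside the loop above is the
 * gradient constraint of block-frequency-domain LMS: the frequency-domain
 * correlation is circular, so the time-domain gradient is computed, its
 * second half discarded, and the result transformed back before being added
 * to the filter. A scalar sketch of that constraint step (illustrative, not
 * part of the original source):
 *
 *   aec_rdft_inverse_128(fft);
 *   memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN);  // drop wrap-around
 *   for (j = 0; j < PART_LEN; j++)
 *     fft[j] *= 2.0f / PART_LEN2;  // inverse-rdft scaling
 *   aec_rdft_forward_128(fft);
 */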

static __m128 mm_pow_ps(__m128 a, __m128 b) {
  // a^b = exp2(b * log2(a))
  //   exp2(x) and log2(x) are calculated using polynomial approximations.
  __m128 log2_a, b_log2_a, a_exp_b;

  // Calculate log2(x), x = a.
  {
    // To calculate log2(x), we decompose x like this:
    //   x = y * 2^n
    //     n is an integer
    //     y is in the [1.0, 2.0) range
    //
    //   log2(x) = log2(y) + n
    //     n       can be evaluated by playing with float representation.
    //     log2(y) in a small range can be approximated; this code uses an
    //             order-five polynomial approximation. The coefficients have
    //             been estimated with the Remez algorithm and the resulting
    //             polynomial has a maximum relative error of 0.00086%.

    // Compute n.
    //    This is done by masking the exponent, shifting it into the top bits
    //    of the mantissa, putting eight into the biased exponent (to
    //    compensate for the fact that the exponent has been shifted into the
    //    top/fractional part) and finally getting rid of the implicit leading
    //    one from the mantissa by subtracting it out.
    static const ALIGN16_BEG int float_exponent_mask[4] ALIGN16_END = {
        0x7F800000, 0x7F800000, 0x7F800000, 0x7F800000};
    static const ALIGN16_BEG int eight_biased_exponent[4] ALIGN16_END = {
        0x43800000, 0x43800000, 0x43800000, 0x43800000};
    static const ALIGN16_BEG int implicit_leading_one[4] ALIGN16_END = {
        0x43BF8000, 0x43BF8000, 0x43BF8000, 0x43BF8000};
    static const int shift_exponent_into_top_mantissa = 8;
    const __m128 two_n = _mm_and_ps(a, *((__m128*)float_exponent_mask));
    const __m128 n_1 = _mm_castsi128_ps(_mm_srli_epi32(
        _mm_castps_si128(two_n), shift_exponent_into_top_mantissa));
    const __m128 n_0 = _mm_or_ps(n_1, *((__m128*)eight_biased_exponent));
    const __m128 n = _mm_sub_ps(n_0, *((__m128*)implicit_leading_one));

    // Compute y.
    static const ALIGN16_BEG int mantissa_mask[4] ALIGN16_END = {
        0x007FFFFF, 0x007FFFFF, 0x007FFFFF, 0x007FFFFF};
    static const ALIGN16_BEG int zero_biased_exponent_is_one[4] ALIGN16_END = {
        0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000};
    const __m128 mantissa = _mm_and_ps(a, *((__m128*)mantissa_mask));
    const __m128 y =
        _mm_or_ps(mantissa, *((__m128*)zero_biased_exponent_is_one));

    // Approximate log2(y) ~= (y - 1) * pol5(y).
    //    pol5(y) = C5 * y^5 + C4 * y^4 + C3 * y^3 + C2 * y^2 + C1 * y + C0
    static const ALIGN16_BEG float ALIGN16_END C5[4] = {
        -3.4436006e-2f, -3.4436006e-2f, -3.4436006e-2f, -3.4436006e-2f};
    static const ALIGN16_BEG float ALIGN16_END
        C4[4] = {3.1821337e-1f, 3.1821337e-1f, 3.1821337e-1f, 3.1821337e-1f};
    static const ALIGN16_BEG float ALIGN16_END
        C3[4] = {-1.2315303f, -1.2315303f, -1.2315303f, -1.2315303f};
    static const ALIGN16_BEG float ALIGN16_END
        C2[4] = {2.5988452f, 2.5988452f, 2.5988452f, 2.5988452f};
    static const ALIGN16_BEG float ALIGN16_END
        C1[4] = {-3.3241990f, -3.3241990f, -3.3241990f, -3.3241990f};
    static const ALIGN16_BEG float ALIGN16_END
        C0[4] = {3.1157899f, 3.1157899f, 3.1157899f, 3.1157899f};
    const __m128 pol5_y_0 = _mm_mul_ps(y, *((__m128*)C5));
    const __m128 pol5_y_1 = _mm_add_ps(pol5_y_0, *((__m128*)C4));
    const __m128 pol5_y_2 = _mm_mul_ps(pol5_y_1, y);
    const __m128 pol5_y_3 = _mm_add_ps(pol5_y_2, *((__m128*)C3));
    const __m128 pol5_y_4 = _mm_mul_ps(pol5_y_3, y);
    const __m128 pol5_y_5 = _mm_add_ps(pol5_y_4, *((__m128*)C2));
    const __m128 pol5_y_6 = _mm_mul_ps(pol5_y_5, y);
    const __m128 pol5_y_7 = _mm_add_ps(pol5_y_6, *((__m128*)C1));
    const __m128 pol5_y_8 = _mm_mul_ps(pol5_y_7, y);
    const __m128 pol5_y = _mm_add_ps(pol5_y_8, *((__m128*)C0));
    const __m128 y_minus_one =
        _mm_sub_ps(y, *((__m128*)zero_biased_exponent_is_one));
    const __m128 log2_y = _mm_mul_ps(y_minus_one, pol5_y);

    // Combine parts.
    log2_a = _mm_add_ps(n, log2_y);
  }

  // b * log2(a)
  b_log2_a = _mm_mul_ps(b, log2_a);

  // Calculate exp2(x), x = b * log2(a).
  {
    // To calculate 2^x, we decompose x like this:
    //   x = n + y
    //     n is an integer, the value of x - 0.5 rounded down, therefore
    //     y is in the [0.5, 1.5) range
    //
    //   2^x = 2^n * 2^y
    //     2^n can be evaluated by playing with float representation.
    //     2^y in a small range can be approximated; this code uses an
    //         order-two polynomial approximation. The coefficients have been
    //         estimated with the Remez algorithm and the resulting polynomial
    //         has a maximum relative error of 0.17%.

    // To avoid over/underflow, we reduce the range of input to (-127, 129].
    static const ALIGN16_BEG float max_input[4] ALIGN16_END = {129.f, 129.f,
                                                               129.f, 129.f};
    static const ALIGN16_BEG float min_input[4] ALIGN16_END = {
        -126.99999f, -126.99999f, -126.99999f, -126.99999f};
    const __m128 x_min = _mm_min_ps(b_log2_a, *((__m128*)max_input));
    const __m128 x_max = _mm_max_ps(x_min, *((__m128*)min_input));
    // Compute n.
    static const ALIGN16_BEG float half[4] ALIGN16_END = {0.5f, 0.5f,
                                                          0.5f, 0.5f};
    const __m128 x_minus_half = _mm_sub_ps(x_max, *((__m128*)half));
    const __m128i x_minus_half_floor = _mm_cvtps_epi32(x_minus_half);
    // Compute 2^n.
    static const ALIGN16_BEG int float_exponent_bias[4] ALIGN16_END = {
        127, 127, 127, 127};
    static const int float_exponent_shift = 23;
    const __m128i two_n_exponent =
        _mm_add_epi32(x_minus_half_floor, *((__m128i*)float_exponent_bias));
    const __m128 two_n =
        _mm_castsi128_ps(_mm_slli_epi32(two_n_exponent, float_exponent_shift));
    // Compute y.
    const __m128 y = _mm_sub_ps(x_max, _mm_cvtepi32_ps(x_minus_half_floor));
    // Approximate 2^y ~= C2 * y^2 + C1 * y + C0.
    static const ALIGN16_BEG float C2[4] ALIGN16_END = {
        3.3718944e-1f, 3.3718944e-1f, 3.3718944e-1f, 3.3718944e-1f};
    static const ALIGN16_BEG float C1[4] ALIGN16_END = {
        6.5763628e-1f, 6.5763628e-1f, 6.5763628e-1f, 6.5763628e-1f};
    static const ALIGN16_BEG float C0[4] ALIGN16_END = {1.0017247f, 1.0017247f,
                                                        1.0017247f, 1.0017247f};
    const __m128 exp2_y_0 = _mm_mul_ps(y, *((__m128*)C2));
    const __m128 exp2_y_1 = _mm_add_ps(exp2_y_0, *((__m128*)C1));
    const __m128 exp2_y_2 = _mm_mul_ps(exp2_y_1, y);
    const __m128 exp2_y = _mm_add_ps(exp2_y_2, *((__m128*)C0));

    // Combine parts.
    a_exp_b = _mm_mul_ps(exp2_y, two_n);
  }
  return a_exp_b;
}
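
/* A scalar cross-check of the decomposition used by mm_pow_ps (a sketch
 * assuming a C99 <math.h>; not part of the original source):
 *
 *   static float PowPsReference(float a, float b) {
 *     return exp2f(b * log2f(a));  // a^b for a > 0
 *   }
 *
 * mm_pow_ps trades accuracy for speed: per the comments above, the order-5
 * log2 polynomial is accurate to 0.00086% and the order-2 exp2 polynomial to
 * 0.17%, which is ample for the suppression gains it feeds below. */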

static void OverdriveAndSuppressSSE2(AecCore* aec,
                                     float hNl[PART_LEN1],
                                     const float hNlFb,
                                     float efw[2][PART_LEN1]) {
  int i;
  const __m128 vec_hNlFb = _mm_set1_ps(hNlFb);
  const __m128 vec_one = _mm_set1_ps(1.0f);
  const __m128 vec_minus_one = _mm_set1_ps(-1.0f);
  const __m128 vec_overDriveSm = _mm_set1_ps(aec->overDriveSm);
  // vectorized code (four at once)
  for (i = 0; i + 3 < PART_LEN1; i += 4) {
    // Weight subbands
    __m128 vec_hNl = _mm_loadu_ps(&hNl[i]);
    const __m128 vec_weightCurve = _mm_loadu_ps(&WebRtcAec_weightCurve[i]);
    const __m128 bigger = _mm_cmpgt_ps(vec_hNl, vec_hNlFb);
    const __m128 vec_weightCurve_hNlFb = _mm_mul_ps(vec_weightCurve, vec_hNlFb);
    const __m128 vec_one_weightCurve = _mm_sub_ps(vec_one, vec_weightCurve);
    const __m128 vec_one_weightCurve_hNl =
        _mm_mul_ps(vec_one_weightCurve, vec_hNl);
    const __m128 vec_if0 = _mm_andnot_ps(bigger, vec_hNl);
    const __m128 vec_if1 = _mm_and_ps(
        bigger, _mm_add_ps(vec_weightCurve_hNlFb, vec_one_weightCurve_hNl));
    vec_hNl = _mm_or_ps(vec_if0, vec_if1);

    {
      const __m128 vec_overDriveCurve =
          _mm_loadu_ps(&WebRtcAec_overDriveCurve[i]);
      const __m128 vec_overDriveSm_overDriveCurve =
          _mm_mul_ps(vec_overDriveSm, vec_overDriveCurve);
      vec_hNl = mm_pow_ps(vec_hNl, vec_overDriveSm_overDriveCurve);
      _mm_storeu_ps(&hNl[i], vec_hNl);
    }

    // Suppress error signal
    {
      __m128 vec_efw_re = _mm_loadu_ps(&efw[0][i]);
      __m128 vec_efw_im = _mm_loadu_ps(&efw[1][i]);
      vec_efw_re = _mm_mul_ps(vec_efw_re, vec_hNl);
      vec_efw_im = _mm_mul_ps(vec_efw_im, vec_hNl);

      // Ooura fft returns incorrect sign on imaginary component. It matters
      // here because we are making an additive change with comfort noise.
      vec_efw_im = _mm_mul_ps(vec_efw_im, vec_minus_one);
      _mm_storeu_ps(&efw[0][i], vec_efw_re);
      _mm_storeu_ps(&efw[1][i], vec_efw_im);
    }
  }
  // scalar code for the remaining items.
  for (; i < PART_LEN1; i++) {
    // Weight subbands
    if (hNl[i] > hNlFb) {
      hNl[i] = WebRtcAec_weightCurve[i] * hNlFb +
               (1 - WebRtcAec_weightCurve[i]) * hNl[i];
    }
    hNl[i] = powf(hNl[i], aec->overDriveSm * WebRtcAec_overDriveCurve[i]);

    // Suppress error signal
    efw[0][i] *= hNl[i];
    efw[1][i] *= hNl[i];

    // Ooura fft returns incorrect sign on imaginary component. It matters
    // here because we are making an additive change with comfort noise.
    efw[1][i] *= -1;
  }
}

__inline static void _mm_add_ps_4x1(__m128 sum, float* dst) {
  // lanes [0, 1] become [A+C, B+D] (A..D = input lanes 0..3)
  sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(0, 0, 3, 2)));
  // lane 0 becomes A+B+C+D
  sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(1, 1, 1, 1)));
  _mm_store_ss(dst, sum);
}
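
/* Usage sketch for the horizontal sum above (illustrative, not part of the
 * original source), e.g. accumulating a sum of squares:
 *
 *   __m128 acc = _mm_setzero_ps();
 *   // ... acc = _mm_add_ps(acc, _mm_mul_ps(v, v)); per four-float chunk ...
 *   float total;
 *   _mm_add_ps_4x1(acc, &total);  // total = sum of the four lanes of acc
 */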

static int PartitionDelaySSE2(const AecCore* aec) {
  // Measures the energy in each filter partition and returns the partition
  // with the highest energy.
  // TODO(bjornv): Spread computational cost by computing one partition per
  // block?
  float wfEnMax = 0;
  int i;
  int delay = 0;

  for (i = 0; i < aec->num_partitions; i++) {
    int j;
    int pos = i * PART_LEN1;
    float wfEn = 0;
    __m128 vec_wfEn = _mm_set1_ps(0.0f);
    // vectorized code (four at once)
    for (j = 0; j + 3 < PART_LEN1; j += 4) {
      const __m128 vec_wfBuf0 = _mm_loadu_ps(&aec->wfBuf[0][pos + j]);
      const __m128 vec_wfBuf1 = _mm_loadu_ps(&aec->wfBuf[1][pos + j]);
      vec_wfEn = _mm_add_ps(vec_wfEn, _mm_mul_ps(vec_wfBuf0, vec_wfBuf0));
      vec_wfEn = _mm_add_ps(vec_wfEn, _mm_mul_ps(vec_wfBuf1, vec_wfBuf1));
    }
    _mm_add_ps_4x1(vec_wfEn, &wfEn);

    // scalar code for the remaining items.
    for (; j < PART_LEN1; j++) {
      wfEn += aec->wfBuf[0][pos + j] * aec->wfBuf[0][pos + j] +
              aec->wfBuf[1][pos + j] * aec->wfBuf[1][pos + j];
    }

    if (wfEn > wfEnMax) {
      wfEnMax = wfEn;
      delay = i;
    }
  }
  return delay;
}
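
/* Interpretation note (not part of the original source): the returned index
 * is a delay in whole filter partitions. Since each partition covers
 * PART_LEN time-domain samples, the dominant echo-path delay is roughly
 * delay * PART_LEN samples. */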

// Updates the following smoothed Power Spectral Densities (PSDs):
//  - sd  : near-end
//  - se  : residual echo
//  - sx  : far-end
//  - sde : cross-PSD of near-end and residual echo
//  - sxd : cross-PSD of near-end and far-end
//
// In addition to updating the PSDs, the filter divergence state is
// determined, upon which actions are taken.
static void SmoothedPSD(AecCore* aec,
                        float efw[2][PART_LEN1],
                        float dfw[2][PART_LEN1],
                        float xfw[2][PART_LEN1],
                        int* extreme_filter_divergence) {
  // Power estimate smoothing coefficients.
  const float* ptrGCoh = aec->extended_filter_enabled
      ? WebRtcAec_kExtendedSmoothingCoefficients[aec->mult - 1]
      : WebRtcAec_kNormalSmoothingCoefficients[aec->mult - 1];
  int i;
  float sdSum = 0, seSum = 0;
  const __m128 vec_15 = _mm_set1_ps(WebRtcAec_kMinFarendPSD);
  const __m128 vec_GCoh0 = _mm_set1_ps(ptrGCoh[0]);
  const __m128 vec_GCoh1 = _mm_set1_ps(ptrGCoh[1]);
  __m128 vec_sdSum = _mm_set1_ps(0.0f);
  __m128 vec_seSum = _mm_set1_ps(0.0f);

  for (i = 0; i + 3 < PART_LEN1; i += 4) {
    const __m128 vec_dfw0 = _mm_loadu_ps(&dfw[0][i]);
    const __m128 vec_dfw1 = _mm_loadu_ps(&dfw[1][i]);
    const __m128 vec_efw0 = _mm_loadu_ps(&efw[0][i]);
    const __m128 vec_efw1 = _mm_loadu_ps(&efw[1][i]);
    const __m128 vec_xfw0 = _mm_loadu_ps(&xfw[0][i]);
    const __m128 vec_xfw1 = _mm_loadu_ps(&xfw[1][i]);
    __m128 vec_sd = _mm_mul_ps(_mm_loadu_ps(&aec->sd[i]), vec_GCoh0);
    __m128 vec_se = _mm_mul_ps(_mm_loadu_ps(&aec->se[i]), vec_GCoh0);
    __m128 vec_sx = _mm_mul_ps(_mm_loadu_ps(&aec->sx[i]), vec_GCoh0);
    __m128 vec_dfw_sumsq = _mm_mul_ps(vec_dfw0, vec_dfw0);
    __m128 vec_efw_sumsq = _mm_mul_ps(vec_efw0, vec_efw0);
    __m128 vec_xfw_sumsq = _mm_mul_ps(vec_xfw0, vec_xfw0);
    vec_dfw_sumsq = _mm_add_ps(vec_dfw_sumsq, _mm_mul_ps(vec_dfw1, vec_dfw1));
    vec_efw_sumsq = _mm_add_ps(vec_efw_sumsq, _mm_mul_ps(vec_efw1, vec_efw1));
    vec_xfw_sumsq = _mm_add_ps(vec_xfw_sumsq, _mm_mul_ps(vec_xfw1, vec_xfw1));
    vec_xfw_sumsq = _mm_max_ps(vec_xfw_sumsq, vec_15);
    vec_sd = _mm_add_ps(vec_sd, _mm_mul_ps(vec_dfw_sumsq, vec_GCoh1));
    vec_se = _mm_add_ps(vec_se, _mm_mul_ps(vec_efw_sumsq, vec_GCoh1));
    vec_sx = _mm_add_ps(vec_sx, _mm_mul_ps(vec_xfw_sumsq, vec_GCoh1));
    _mm_storeu_ps(&aec->sd[i], vec_sd);
    _mm_storeu_ps(&aec->se[i], vec_se);
    _mm_storeu_ps(&aec->sx[i], vec_sx);

    {
      const __m128 vec_3210 = _mm_loadu_ps(&aec->sde[i][0]);
      const __m128 vec_7654 = _mm_loadu_ps(&aec->sde[i + 2][0]);
      __m128 vec_a = _mm_shuffle_ps(vec_3210, vec_7654,
                                    _MM_SHUFFLE(2, 0, 2, 0));
      __m128 vec_b = _mm_shuffle_ps(vec_3210, vec_7654,
                                    _MM_SHUFFLE(3, 1, 3, 1));
      __m128 vec_dfwefw0011 = _mm_mul_ps(vec_dfw0, vec_efw0);
      __m128 vec_dfwefw0110 = _mm_mul_ps(vec_dfw0, vec_efw1);
      vec_a = _mm_mul_ps(vec_a, vec_GCoh0);
      vec_b = _mm_mul_ps(vec_b, vec_GCoh0);
      vec_dfwefw0011 = _mm_add_ps(vec_dfwefw0011,
                                  _mm_mul_ps(vec_dfw1, vec_efw1));
      vec_dfwefw0110 = _mm_sub_ps(vec_dfwefw0110,
                                  _mm_mul_ps(vec_dfw1, vec_efw0));
      vec_a = _mm_add_ps(vec_a, _mm_mul_ps(vec_dfwefw0011, vec_GCoh1));
      vec_b = _mm_add_ps(vec_b, _mm_mul_ps(vec_dfwefw0110, vec_GCoh1));
      _mm_storeu_ps(&aec->sde[i][0], _mm_unpacklo_ps(vec_a, vec_b));
      _mm_storeu_ps(&aec->sde[i + 2][0], _mm_unpackhi_ps(vec_a, vec_b));
    }

    {
      const __m128 vec_3210 = _mm_loadu_ps(&aec->sxd[i][0]);
      const __m128 vec_7654 = _mm_loadu_ps(&aec->sxd[i + 2][0]);
      __m128 vec_a = _mm_shuffle_ps(vec_3210, vec_7654,
                                    _MM_SHUFFLE(2, 0, 2, 0));
      __m128 vec_b = _mm_shuffle_ps(vec_3210, vec_7654,
                                    _MM_SHUFFLE(3, 1, 3, 1));
      __m128 vec_dfwxfw0011 = _mm_mul_ps(vec_dfw0, vec_xfw0);
      __m128 vec_dfwxfw0110 = _mm_mul_ps(vec_dfw0, vec_xfw1);
      vec_a = _mm_mul_ps(vec_a, vec_GCoh0);
      vec_b = _mm_mul_ps(vec_b, vec_GCoh0);
      vec_dfwxfw0011 = _mm_add_ps(vec_dfwxfw0011,
                                  _mm_mul_ps(vec_dfw1, vec_xfw1));
      vec_dfwxfw0110 = _mm_sub_ps(vec_dfwxfw0110,
                                  _mm_mul_ps(vec_dfw1, vec_xfw0));
      vec_a = _mm_add_ps(vec_a, _mm_mul_ps(vec_dfwxfw0011, vec_GCoh1));
      vec_b = _mm_add_ps(vec_b, _mm_mul_ps(vec_dfwxfw0110, vec_GCoh1));
      _mm_storeu_ps(&aec->sxd[i][0], _mm_unpacklo_ps(vec_a, vec_b));
      _mm_storeu_ps(&aec->sxd[i + 2][0], _mm_unpackhi_ps(vec_a, vec_b));
    }

    vec_sdSum = _mm_add_ps(vec_sdSum, vec_sd);
    vec_seSum = _mm_add_ps(vec_seSum, vec_se);
  }

  _mm_add_ps_4x1(vec_sdSum, &sdSum);
  _mm_add_ps_4x1(vec_seSum, &seSum);

  for (; i < PART_LEN1; i++) {
    aec->sd[i] = ptrGCoh[0] * aec->sd[i] +
                 ptrGCoh[1] * (dfw[0][i] * dfw[0][i] + dfw[1][i] * dfw[1][i]);
    aec->se[i] = ptrGCoh[0] * aec->se[i] +
                 ptrGCoh[1] * (efw[0][i] * efw[0][i] + efw[1][i] * efw[1][i]);
    // We threshold here to protect against the ill-effects of a zero farend.
    // The threshold is not arbitrarily chosen, but balances protection and
    // adverse interaction with the algorithm's tuning.
    // TODO(bjornv): investigate further why this is so sensitive.
    aec->sx[i] =
        ptrGCoh[0] * aec->sx[i] +
        ptrGCoh[1] * WEBRTC_SPL_MAX(
            xfw[0][i] * xfw[0][i] + xfw[1][i] * xfw[1][i],
            WebRtcAec_kMinFarendPSD);

    aec->sde[i][0] =
        ptrGCoh[0] * aec->sde[i][0] +
        ptrGCoh[1] * (dfw[0][i] * efw[0][i] + dfw[1][i] * efw[1][i]);
    aec->sde[i][1] =
        ptrGCoh[0] * aec->sde[i][1] +
        ptrGCoh[1] * (dfw[0][i] * efw[1][i] - dfw[1][i] * efw[0][i]);

    aec->sxd[i][0] =
        ptrGCoh[0] * aec->sxd[i][0] +
        ptrGCoh[1] * (dfw[0][i] * xfw[0][i] + dfw[1][i] * xfw[1][i]);
    aec->sxd[i][1] =
        ptrGCoh[0] * aec->sxd[i][1] +
        ptrGCoh[1] * (dfw[0][i] * xfw[1][i] - dfw[1][i] * xfw[0][i]);

    sdSum += aec->sd[i];
    seSum += aec->se[i];
  }

  // Divergent filter safeguard update.
  aec->divergeState = (aec->divergeState ? 1.05f : 1.0f) * seSum > sdSum;

  // Signal extreme filter divergence if the error is significantly larger
  // than the nearend (13 dB).
  *extreme_filter_divergence = (seSum > (19.95f * sdSum));
}
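
/* Sanity check of the constant above (not part of the original source):
 * 13 dB in power terms is 10^(13/10) = 10^1.3 ~= 19.95, so
 * seSum > 19.95f * sdSum reads "residual echo power exceeds near-end power
 * by about 13 dB". */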

// Window time-domain data to be used by the fft.
static void WindowDataSSE2(float* x_windowed, const float* x) {
  int i;
  for (i = 0; i < PART_LEN; i += 4) {
    const __m128 vec_Buf1 = _mm_loadu_ps(&x[i]);
    const __m128 vec_Buf2 = _mm_loadu_ps(&x[PART_LEN + i]);
    const __m128 vec_sqrtHanning = _mm_load_ps(&WebRtcAec_sqrtHanning[i]);
    // A B C D
    __m128 vec_sqrtHanning_rev =
        _mm_loadu_ps(&WebRtcAec_sqrtHanning[PART_LEN - i - 3]);
    // D C B A
    vec_sqrtHanning_rev =
        _mm_shuffle_ps(vec_sqrtHanning_rev, vec_sqrtHanning_rev,
                       _MM_SHUFFLE(0, 1, 2, 3));
    _mm_storeu_ps(&x_windowed[i], _mm_mul_ps(vec_Buf1, vec_sqrtHanning));
    _mm_storeu_ps(&x_windowed[PART_LEN + i],
                  _mm_mul_ps(vec_Buf2, vec_sqrtHanning_rev));
  }
}
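
/* Scalar equivalent of the windowing above (a sketch mirroring the generic C
 * path; it assumes WebRtcAec_sqrtHanning holds PART_LEN1 ascending window
 * values, so indexing it backwards yields the falling half):
 *
 *   for (i = 0; i < PART_LEN; i++) {
 *     x_windowed[i] = x[i] * WebRtcAec_sqrtHanning[i];
 *     x_windowed[PART_LEN + i] =
 *         x[PART_LEN + i] * WebRtcAec_sqrtHanning[PART_LEN - i];
 *   }
 *
 * The vector version obtains the descending half by loading four ascending
 * values and reversing them with _mm_shuffle_ps. */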

// Puts fft output data into a complex-valued array.
static void StoreAsComplexSSE2(const float* data,
                               float data_complex[2][PART_LEN1]) {
  int i;
  for (i = 0; i < PART_LEN; i += 4) {
    const __m128 vec_fft0 = _mm_loadu_ps(&data[2 * i]);
    const __m128 vec_fft4 = _mm_loadu_ps(&data[2 * i + 4]);
    const __m128 vec_a = _mm_shuffle_ps(vec_fft0, vec_fft4,
                                        _MM_SHUFFLE(2, 0, 2, 0));
    const __m128 vec_b = _mm_shuffle_ps(vec_fft0, vec_fft4,
                                        _MM_SHUFFLE(3, 1, 3, 1));
    _mm_storeu_ps(&data_complex[0][i], vec_a);
    _mm_storeu_ps(&data_complex[1][i], vec_b);
  }
  // fix beginning/end values
  data_complex[1][0] = 0;
  data_complex[1][PART_LEN] = 0;
  data_complex[0][0] = data[0];
  data_complex[0][PART_LEN] = data[1];
}

static void SubbandCoherenceSSE2(AecCore* aec,
                                 float efw[2][PART_LEN1],
                                 float dfw[2][PART_LEN1],
                                 float xfw[2][PART_LEN1],
                                 float* fft,
                                 float* cohde,
                                 float* cohxd,
                                 int* extreme_filter_divergence) {
  int i;

  SmoothedPSD(aec, efw, dfw, xfw, extreme_filter_divergence);

  {
    const __m128 vec_1eminus10 = _mm_set1_ps(1e-10f);

    // Subband coherence
    for (i = 0; i + 3 < PART_LEN1; i += 4) {
      const __m128 vec_sd = _mm_loadu_ps(&aec->sd[i]);
      const __m128 vec_se = _mm_loadu_ps(&aec->se[i]);
      const __m128 vec_sx = _mm_loadu_ps(&aec->sx[i]);
      const __m128 vec_sdse = _mm_add_ps(vec_1eminus10,
                                         _mm_mul_ps(vec_sd, vec_se));
      const __m128 vec_sdsx = _mm_add_ps(vec_1eminus10,
                                         _mm_mul_ps(vec_sd, vec_sx));
      const __m128 vec_sde_3210 = _mm_loadu_ps(&aec->sde[i][0]);
      const __m128 vec_sde_7654 = _mm_loadu_ps(&aec->sde[i + 2][0]);
      const __m128 vec_sxd_3210 = _mm_loadu_ps(&aec->sxd[i][0]);
      const __m128 vec_sxd_7654 = _mm_loadu_ps(&aec->sxd[i + 2][0]);
      const __m128 vec_sde_0 = _mm_shuffle_ps(vec_sde_3210, vec_sde_7654,
                                              _MM_SHUFFLE(2, 0, 2, 0));
      const __m128 vec_sde_1 = _mm_shuffle_ps(vec_sde_3210, vec_sde_7654,
                                              _MM_SHUFFLE(3, 1, 3, 1));
      const __m128 vec_sxd_0 = _mm_shuffle_ps(vec_sxd_3210, vec_sxd_7654,
                                              _MM_SHUFFLE(2, 0, 2, 0));
      const __m128 vec_sxd_1 = _mm_shuffle_ps(vec_sxd_3210, vec_sxd_7654,
                                              _MM_SHUFFLE(3, 1, 3, 1));
      __m128 vec_cohde = _mm_mul_ps(vec_sde_0, vec_sde_0);
      __m128 vec_cohxd = _mm_mul_ps(vec_sxd_0, vec_sxd_0);
      vec_cohde = _mm_add_ps(vec_cohde, _mm_mul_ps(vec_sde_1, vec_sde_1));
      vec_cohde = _mm_div_ps(vec_cohde, vec_sdse);
      vec_cohxd = _mm_add_ps(vec_cohxd, _mm_mul_ps(vec_sxd_1, vec_sxd_1));
      vec_cohxd = _mm_div_ps(vec_cohxd, vec_sdsx);
      _mm_storeu_ps(&cohde[i], vec_cohde);
      _mm_storeu_ps(&cohxd[i], vec_cohxd);
    }

    // scalar code for the remaining items.
    for (; i < PART_LEN1; i++) {
      cohde[i] =
          (aec->sde[i][0] * aec->sde[i][0] + aec->sde[i][1] * aec->sde[i][1]) /
          (aec->sd[i] * aec->se[i] + 1e-10f);
      cohxd[i] =
          (aec->sxd[i][0] * aec->sxd[i][0] + aec->sxd[i][1] * aec->sxd[i][1]) /
          (aec->sx[i] * aec->sd[i] + 1e-10f);
    }
  }
}
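
/* The quantity computed above is the magnitude-squared coherence, e.g.
 * cohde[i] = |sde[i]|^2 / (sd[i] * se[i] + 1e-10), which lies in [0, 1];
 * values near 1 mean the residual echo still tracks the near-end signal.
 * The 1e-10 terms only guard against division by zero. */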

void WebRtcAec_InitAec_SSE2(void) {
  WebRtcAec_FilterFar = FilterFarSSE2;
  WebRtcAec_ScaleErrorSignal = ScaleErrorSignalSSE2;
  WebRtcAec_FilterAdaptation = FilterAdaptationSSE2;
  WebRtcAec_OverdriveAndSuppress = OverdriveAndSuppressSSE2;
  WebRtcAec_SubbandCoherence = SubbandCoherenceSSE2;
  WebRtcAec_StoreAsComplex = StoreAsComplexSSE2;
  WebRtcAec_PartitionDelay = PartitionDelaySSE2;
  WebRtcAec_WindowData = WindowDataSSE2;
}
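
/* These assignments override the generic C implementations behind the
 * WebRtcAec_* function pointers. A sketch of the intended call site (an
 * assumption based on the generic aec_core init; WebRtc_GetCPUInfo comes
 * from system_wrappers):
 *
 *   #if defined(WEBRTC_ARCH_X86_FAMILY)
 *     if (WebRtc_GetCPUInfo(kSSE2)) {
 *       WebRtcAec_InitAec_SSE2();
 *     }
 *   #endif
 */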