// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Initial input buffer layout, dividing into regions r0_ to r4_ (note: r0_, r3_
// and r4_ will move after the first load):
//
// |----------------|-----------------------------------------|----------------|
//
//                                        request_frames_
//                   <--------------------------------------------------------->
//                                    r0_ (during first load)
//
//  kKernelSize / 2   kKernelSize / 2         kKernelSize / 2   kKernelSize / 2
// <---------------> <--------------->       <---------------> <--------------->
//        r1_               r2_                     r3_               r4_
//
//                             block_size_ == r4_ - r2_
//                   <--------------------------------------->
//
//                                                  request_frames_
//                                    <------------------ ... ----------------->
//                                               r0_ (during second load)
//
// On the second request r0_ slides to the right by kKernelSize / 2 and r3_, r4_
// and block_size_ are reinitialized via step (3) in the algorithm below.
//
// These new regions remain constant until a Flush() occurs.  While complicated,
// this allows us to reduce jitter by always requesting the same amount from the
// provided callback.
//
// The algorithm:
//
// 1) Allocate input_buffer of size: request_frames_ + kKernelSize; this ensures
//    there's enough room to read request_frames_ from the callback into region
//    r0_ (which will move between the first and subsequent passes).
//
// 2) Let r1_, r2_ each represent half the kernel centered around r0_:
//
//        r0_ = input_buffer_ + kKernelSize / 2
//        r1_ = input_buffer_
//        r2_ = r0_
//
//    r0_ is always request_frames_ in size.  r1_, r2_ are kKernelSize / 2 in
//    size.  r1_ must be zero initialized to avoid convolution with garbage (see
//    step (5) for why).
//
// 3) Let r3_, r4_ each represent half the kernel right aligned with the end of
//    r0_ and choose block_size_ as the distance in frames between r4_ and r2_:
//
//        r3_ = r0_ + request_frames_ - kKernelSize
//        r4_ = r0_ + request_frames_ - kKernelSize / 2
//        block_size_ = r4_ - r2_ = request_frames_ - kKernelSize / 2
//
// 4) Consume request_frames_ frames into r0_.
//
// 5) Position kernel centered at start of r2_ and generate output frames until
//    the kernel is centered at the start of r4_ or we've finished generating
//    all the output frames.
//
// 6) Wrap leftover data from r3_ to r1_ and from r4_ to r2_.
//
// 7) If we're on the second load, in order to avoid overwriting the frames we
//    just wrapped from r4_ we need to slide r0_ to the right by the size of
//    r4_, which is kKernelSize / 2:
//
//        r0_ = r0_ + kKernelSize / 2 = input_buffer_ + kKernelSize
//
//    r3_, r4_, and block_size_ then need to be reinitialized, so goto (3).
//
// 8) Else, if we're not on the second load, goto (4).
//
// Note: we're glossing over how the sub-sample handling works with
// |virtual_source_idx_|, etc.
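//
// As a concrete example (the values are illustrative only; the real constants
// are defined in sinc_resampler.h), take request_frames_ = 512 and
// kKernelSize = 32.  During the first load:
//
//     r0_ = input_buffer_ + 16     r3_ = input_buffer_ + 496
//     r1_ = input_buffer_          r4_ = input_buffer_ + 512
//     r2_ = input_buffer_ + 16     block_size_ = r4_ - r2_ = 496
//
// After step (7) slides r0_ right by 16 frames, the derived regions become:
//
//     r0_ = input_buffer_ + 32     r3_ = input_buffer_ + 512
//                                  r4_ = input_buffer_ + 528
//                                  block_size_ = r4_ - r2_ = 512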

// MSVC++ requires this to be set before any other includes to get M_PI.
#define _USE_MATH_DEFINES

#include "media/base/sinc_resampler.h"

#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>

#include "base/logging.h"

#if defined(ARCH_CPU_X86_FAMILY)
#include <xmmintrin.h>
#define CONVOLVE_FUNC Convolve_SSE
#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
#include <arm_neon.h>
#define CONVOLVE_FUNC Convolve_NEON
#else
#define CONVOLVE_FUNC Convolve_C
#endif
namespace media {

static double SincScaleFactor(double io_ratio) {
  // |sinc_scale_factor| is basically the normalized cutoff frequency of the
  // low-pass filter.
  double sinc_scale_factor = io_ratio > 1.0 ? 1.0 / io_ratio : 1.0;

  // The sinc function is an idealized brick-wall filter, but since we're
  // windowing it the transition from pass to stop does not happen right away.
  // So we should adjust the low pass filter cutoff slightly downward to avoid
  // some aliasing at the very high end.
  // TODO(crogers): this value is empirical and to be more exact should vary
  // depending on kKernelSize.
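  //
  // As an illustrative example (rates assumed for the sake of the numbers):
  // downsampling 48 kHz to 44.1 kHz gives io_ratio ~= 1.088, so the scaled
  // cutoff works out to (1.0 / 1.088) * 0.9 ~= 0.83 of the output Nyquist.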
  sinc_scale_factor *= 0.9;

  return sinc_scale_factor;
}

SincResampler::SincResampler(double io_sample_rate_ratio,
                             int request_frames,
                             const ReadCB& read_cb)
    : io_sample_rate_ratio_(io_sample_rate_ratio),
      read_cb_(read_cb),
      request_frames_(request_frames),
      input_buffer_size_(request_frames_ + kKernelSize),
      // Create input buffers with a 16-byte alignment for SSE optimizations.
      kernel_storage_(static_cast<float*>(
          base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))),
      kernel_pre_sinc_storage_(static_cast<float*>(
          base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))),
      kernel_window_storage_(static_cast<float*>(
          base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))),
      input_buffer_(static_cast<float*>(
          base::AlignedAlloc(sizeof(float) * input_buffer_size_, 16))),
      r1_(input_buffer_.get()),
      r2_(input_buffer_.get() + kKernelSize / 2) {
  CHECK_GT(request_frames_, 0);
  Flush();
  CHECK_GT(block_size_, kKernelSize)
      << "block_size must be greater than kKernelSize!";

  memset(kernel_storage_.get(), 0,
         sizeof(*kernel_storage_.get()) * kKernelStorageSize);
  memset(kernel_pre_sinc_storage_.get(), 0,
         sizeof(*kernel_pre_sinc_storage_.get()) * kKernelStorageSize);
  memset(kernel_window_storage_.get(), 0,
         sizeof(*kernel_window_storage_.get()) * kKernelStorageSize);

  InitializeKernel();
}
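
// Example usage (a minimal sketch, not taken from this file: ReadFromInput and
// the 48 kHz -> 44.1 kHz rates are assumed, and ReadCB is assumed to be a
// base::Callback<void(int frames, float* destination)>):
//
//   void ReadFromInput(int frames, float* destination);
//
//   SincResampler resampler(48000.0 / 44100.0,  // Input / output sample rate.
//                           512,                // request_frames
//                           base::Bind(&ReadFromInput));
//   resampler.Resample(output_frame_count, output_buffer);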

SincResampler::~SincResampler() {}

void SincResampler::UpdateRegions(bool second_load) {
  // Set up various region pointers in the buffer (see diagram above).  If
  // we're on the second load we need to slide r0_ to the right by
  // kKernelSize / 2.
  r0_ = input_buffer_.get() + (second_load ? kKernelSize : kKernelSize / 2);
  r3_ = r0_ + request_frames_ - kKernelSize;
  r4_ = r0_ + request_frames_ - kKernelSize / 2;
  block_size_ = r4_ - r2_;

  // r1_ at the beginning of the buffer.
  CHECK_EQ(r1_, input_buffer_.get());
  // r1_ left of r2_, r3_ left of r4_, and the two gaps are the same size.
  CHECK_EQ(r2_ - r1_, r4_ - r3_);
  // r2_ left of r3_.
  CHECK_LT(r2_, r3_);
}

void SincResampler::InitializeKernel() {
  // Blackman window parameters.
  static const double kAlpha = 0.16;
  static const double kA0 = 0.5 * (1.0 - kAlpha);
  static const double kA1 = 0.5;
  static const double kA2 = 0.5 * kAlpha;
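
  // With these coefficients the window computed below follows the standard
  // Blackman form w(x) = a0 - a1 * cos(2 * pi * x) + a2 * cos(4 * pi * x),
  // evaluated at x = (i - subsample_offset) / kKernelSize for each tap i.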

  // Generates a set of windowed sinc() kernels.
  // We generate a range of sub-sample offsets from 0.0 to 1.0.
  const double sinc_scale_factor = SincScaleFactor(io_sample_rate_ratio_);
  for (int offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) {
    const float subsample_offset =
        static_cast<float>(offset_idx) / kKernelOffsetCount;

    for (int i = 0; i < kKernelSize; ++i) {
      const int idx = i + offset_idx * kKernelSize;
      const float pre_sinc = M_PI * (i - kKernelSize / 2 - subsample_offset);
      kernel_pre_sinc_storage_[idx] = pre_sinc;

      // Compute Blackman window, matching the offset of the sinc().
      const float x = (i - subsample_offset) / kKernelSize;
      const float window =
          kA0 - kA1 * cos(2.0 * M_PI * x) + kA2 * cos(4.0 * M_PI * x);
      kernel_window_storage_[idx] = window;

      // Compute the sinc() with offset, then window it and store at the
      // correct offset.  At pre_sinc == 0, sin(sinc_scale_factor * pre_sinc) /
      // pre_sinc is replaced by its limit, sinc_scale_factor.
      if (pre_sinc == 0) {
        kernel_storage_[idx] = sinc_scale_factor * window;
      } else {
        kernel_storage_[idx] =
            window * sin(sinc_scale_factor * pre_sinc) / pre_sinc;
      }
    }
  }
}

void SincResampler::SetRatio(double io_sample_rate_ratio) {
  if (fabs(io_sample_rate_ratio_ - io_sample_rate_ratio) <
      std::numeric_limits<double>::epsilon()) {
    return;
  }

  io_sample_rate_ratio_ = io_sample_rate_ratio;

  // Optimize reinitialization by reusing values which are independent of
  // |sinc_scale_factor|.  Provides a 3x speedup.
  const double sinc_scale_factor = SincScaleFactor(io_sample_rate_ratio_);
  for (int offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) {
    for (int i = 0; i < kKernelSize; ++i) {
      const int idx = i + offset_idx * kKernelSize;
      const float window = kernel_window_storage_[idx];
      const float pre_sinc = kernel_pre_sinc_storage_[idx];

      if (pre_sinc == 0) {
        kernel_storage_[idx] = sinc_scale_factor * window;
      } else {
        kernel_storage_[idx] =
            window * sin(sinc_scale_factor * pre_sinc) / pre_sinc;
      }
    }
  }
}

void SincResampler::Resample(int frames, float* destination) {
  int remaining_frames = frames;

  // Step (1) -- Prime the input buffer at the start of the input stream.
  if (!buffer_primed_ && remaining_frames) {
    read_cb_.Run(request_frames_, r0_);
    buffer_primed_ = true;
  }

  // Step (2) -- Resample!  const what we can outside of the loop for speed.  It
  // actually has an impact on ARM performance.  See inner loop comment below.
  const double current_io_ratio = io_sample_rate_ratio_;
  const float* const kernel_ptr = kernel_storage_.get();
  while (remaining_frames) {
    // Note: The loop construct here can severely impact performance on ARM
    // or when built with clang.  See https://codereview.chromium.org/18566009/
    int source_idx = virtual_source_idx_;
    while (source_idx < block_size_) {
      // |virtual_source_idx_| lies in between two kernel offsets so figure out
      // what they are.
      const double subsample_remainder = virtual_source_idx_ - source_idx;

      const double virtual_offset_idx =
          subsample_remainder * kKernelOffsetCount;
      const int offset_idx = virtual_offset_idx;

      // We'll compute "convolutions" for the two kernels which straddle
      // |virtual_source_idx_|.
      const float* const k1 = kernel_ptr + offset_idx * kKernelSize;
      const float* const k2 = k1 + kKernelSize;

      // Ensure |k1|, |k2| are 16-byte aligned for SIMD usage.  Should always be
      // true so long as kKernelSize is a multiple of 16.
      DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(k1) & 0x0F);
      DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(k2) & 0x0F);

      // Initialize input pointer based on quantized |virtual_source_idx_|.
      const float* const input_ptr = r1_ + source_idx;

      // Figure out how much to weight each kernel's "convolution".
      const double kernel_interpolation_factor =
          virtual_offset_idx - offset_idx;
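      // e.g. (illustrative values): with kKernelOffsetCount == 32 and
      // subsample_remainder == 0.26, virtual_offset_idx == 8.32, so kernels 8
      // and 9 are blended with kernel_interpolation_factor == 0.32.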
      *destination++ = CONVOLVE_FUNC(
          input_ptr, k1, k2, kernel_interpolation_factor);

      // Advance the virtual index.
      virtual_source_idx_ += current_io_ratio;
      source_idx = virtual_source_idx_;

      if (!--remaining_frames)
        return;
    }

    // Wrap back around to the start.
    DCHECK_GE(virtual_source_idx_, block_size_);
    virtual_source_idx_ -= block_size_;

    // Step (3) -- Copy r3_, r4_ to r1_, r2_.
    // This wraps the last input frames back to the start of the buffer.
    memcpy(r1_, r3_, sizeof(*input_buffer_.get()) * kKernelSize);

    // Step (4) -- Reinitialize regions if necessary.
    if (r0_ == r2_)
      UpdateRegions(true);

    // Step (5) -- Refresh the buffer with more input.
    read_cb_.Run(request_frames_, r0_);
  }
}
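// ChunkSize() reports how many output frames one block_size_ span of buffered
// input can produce at the current I/O ratio (a descriptive note; the
// authoritative contract is documented in sinc_resampler.h).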
int SincResampler::ChunkSize() const {
  return block_size_ / io_sample_rate_ratio_;
}

void SincResampler::Flush() {
  virtual_source_idx_ = 0;
  buffer_primed_ = false;
  memset(input_buffer_.get(), 0,
         sizeof(*input_buffer_.get()) * input_buffer_size_);
  UpdateRegions(false);
}

float SincResampler::Convolve_C(const float* input_ptr, const float* k1,
                                const float* k2,
                                double kernel_interpolation_factor) {
  float sum1 = 0;
  float sum2 = 0;

  // Generate a single output sample.  Unrolling this loop hurt performance in
  // local testing.
  int n = kKernelSize;
  while (n--) {
    sum1 += *input_ptr * *k1++;
    sum2 += *input_ptr++ * *k2++;
  }

  // Linearly interpolate the two "convolutions".
  return (1.0 - kernel_interpolation_factor) * sum1
      + kernel_interpolation_factor * sum2;
}
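
// The SIMD variants below compute the same two dot products four lanes at a
// time; they are expected to match Convolve_C up to floating point rounding.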

#if defined(ARCH_CPU_X86_FAMILY)
float SincResampler::Convolve_SSE(const float* input_ptr, const float* k1,
                                  const float* k2,
                                  double kernel_interpolation_factor) {
  __m128 m_input;
  __m128 m_sums1 = _mm_setzero_ps();
  __m128 m_sums2 = _mm_setzero_ps();

  // Based on |input_ptr| alignment, we need to use loadu or load.  Unrolling
  // these loops hurt performance in local testing.
  if (reinterpret_cast<uintptr_t>(input_ptr) & 0x0F) {
    for (int i = 0; i < kKernelSize; i += 4) {
      m_input = _mm_loadu_ps(input_ptr + i);
      m_sums1 = _mm_add_ps(m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i)));
      m_sums2 = _mm_add_ps(m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i)));
    }
  } else {
    for (int i = 0; i < kKernelSize; i += 4) {
      m_input = _mm_load_ps(input_ptr + i);
      m_sums1 = _mm_add_ps(m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i)));
      m_sums2 = _mm_add_ps(m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i)));
    }
  }

  // Linearly interpolate the two "convolutions".
  m_sums1 = _mm_mul_ps(m_sums1, _mm_set_ps1(1.0 - kernel_interpolation_factor));
  m_sums2 = _mm_mul_ps(m_sums2, _mm_set_ps1(kernel_interpolation_factor));
  m_sums1 = _mm_add_ps(m_sums1, m_sums2);

  // Sum components together.
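  // _mm_movehl_ps() pairs lanes {0,2} and {1,3} in one add; the final
  // _mm_add_ss()/_mm_shuffle_ps() folds lane 1 into lane 0, leaving the full
  // horizontal sum in the low lane.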
  float result;
  m_sums2 = _mm_add_ps(_mm_movehl_ps(m_sums1, m_sums1), m_sums1);
  _mm_store_ss(&result, _mm_add_ss(m_sums2, _mm_shuffle_ps(
      m_sums2, m_sums2, 1)));

  return result;
}
#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
float SincResampler::Convolve_NEON(const float* input_ptr, const float* k1,
                                   const float* k2,
                                   double kernel_interpolation_factor) {
  float32x4_t m_input;
  float32x4_t m_sums1 = vmovq_n_f32(0);
  float32x4_t m_sums2 = vmovq_n_f32(0);

  const float* upper = input_ptr + kKernelSize;
  for (; input_ptr < upper; ) {
    m_input = vld1q_f32(input_ptr);
    input_ptr += 4;
    m_sums1 = vmlaq_f32(m_sums1, m_input, vld1q_f32(k1));
    k1 += 4;
    m_sums2 = vmlaq_f32(m_sums2, m_input, vld1q_f32(k2));
    k2 += 4;
  }

  // Linearly interpolate the two "convolutions".
  m_sums1 = vmlaq_f32(
      vmulq_f32(m_sums1, vmovq_n_f32(1.0 - kernel_interpolation_factor)),
      m_sums2, vmovq_n_f32(kernel_interpolation_factor));

  // Sum components together.
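  // vadd_f32() folds the high half of the vector into the low half; the
  // vpadd_f32() pairwise add then sums the remaining two lanes.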
  float32x2_t m_half = vadd_f32(vget_high_f32(m_sums1), vget_low_f32(m_sums1));
  return vget_lane_f32(vpadd_f32(m_half, m_half), 0);
}
#endif

}  // namespace media