/*
 * Copyright (C) 2010 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(WEB_AUDIO)

#include "platform/audio/HRTFKernel.h"

#include "platform/audio/AudioChannel.h"
#include "platform/FloatConversion.h"
#include "wtf/MathExtras.h"

using namespace std;

namespace WebCore {

// Takes the input AudioChannel as an input impulse response and calculates the average group delay.
// This represents the initial delay before the most energetic part of the impulse response.
// The sample-frame delay is removed from the impulse response (impulseP), and this value is returned.
// The length of the passed-in AudioChannel must be at least analysisFFTSize, which must be a power of two.
static float extractAverageGroupDelay(AudioChannel* channel, size_t analysisFFTSize)
{
    ASSERT(channel);

    float* impulseP = channel->mutableData();

    bool isSizeGood = channel->length() >= analysisFFTSize;
    ASSERT(isSizeGood);
    if (!isSizeGood)
        return 0;

    // Check for power-of-2.
    ASSERT(1UL << static_cast<unsigned>(log2(analysisFFTSize)) == analysisFFTSize);

    FFTFrame estimationFrame(analysisFFTSize);
    estimationFrame.doFFT(impulseP);

    float frameDelay = narrowPrecisionToFloat(estimationFrame.extractAverageGroupDelay());
    estimationFrame.doInverseFFT(impulseP);

    return frameDelay;
}
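
// Note: the delay returned above is expressed in sample-frames at the channel's sample rate.
// For example (illustrative numbers only), a frame delay of 64 at 44.1 kHz corresponds to
// roughly 64 / 44100 s, i.e. about 1.45 ms of leading delay removed from the response.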

HRTFKernel::HRTFKernel(AudioChannel* channel, size_t fftSize, float sampleRate)
    : m_frameDelay(0)
    , m_sampleRate(sampleRate)
{
    ASSERT(channel);

    // Determine the leading delay (average group delay) for the response.
    m_frameDelay = extractAverageGroupDelay(channel, fftSize / 2);

    float* impulseResponse = channel->mutableData();
    size_t responseLength = channel->length();

    // We need to truncate to fit into 1/2 the FFT size (with zero padding) in order to do proper convolution.
    size_t truncatedResponseLength = min(responseLength, fftSize / 2); // truncate if necessary to max impulse response length allowed by FFT

    // Quick fade-out (apply window) at truncation point
    unsigned numberOfFadeOutFrames = static_cast<unsigned>(sampleRate / 4410); // 10 sample-frames at a 44.1 kHz sample rate
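    // The loop below applies a short linear taper: at 44.1 kHz, numberOfFadeOutFrames is 10, so the
    // last 10 frames of the truncated response are scaled by 1.0, 0.9, ..., 0.1, avoiding an abrupt
    // discontinuity at the truncation point.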
    ASSERT(numberOfFadeOutFrames < truncatedResponseLength);
    if (numberOfFadeOutFrames < truncatedResponseLength) {
        for (unsigned i = truncatedResponseLength - numberOfFadeOutFrames; i < truncatedResponseLength; ++i) {
            float x = 1.0f - static_cast<float>(i - (truncatedResponseLength - numberOfFadeOutFrames)) / numberOfFadeOutFrames;
            impulseResponse[i] *= x;
        }
    }

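    // Store the kernel in the frequency domain: the truncated, windowed response is zero-padded out
    // to fftSize and transformed once here, so the kernel can later be applied with complex
    // multiplies rather than time-domain convolution.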
    m_fftFrame = adoptPtr(new FFTFrame(fftSize));
    m_fftFrame->doPaddedFFT(impulseResponse, truncatedResponseLength);
}

PassOwnPtr<AudioChannel> HRTFKernel::createImpulseResponse()
{
    OwnPtr<AudioChannel> channel = adoptPtr(new AudioChannel(fftSize()));
    FFTFrame fftFrame(*m_fftFrame);

    // Add leading delay back in.
    fftFrame.addConstantGroupDelay(m_frameDelay);
    fftFrame.doInverseFFT(channel->mutableData());

    return channel.release();
}
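
// Illustrative round trip (hypothetical caller code): given a RefPtr<HRTFKernel> kernel,
//   OwnPtr<AudioChannel> response = kernel->createImpulseResponse();
// yields a channel of fftSize() sample-frames with the leading group delay restored.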

// Interpolates two kernels with x: 0 -> 1 and returns the result.
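// Illustrative usage (kernelA and kernelB are hypothetical kernels for the same ear and sample rate,
// e.g. measured at neighboring azimuths):
//   RefPtr<HRTFKernel> blended = HRTFKernel::createInterpolatedKernel(kernelA.get(), kernelB.get(), 0.5f);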
PassRefPtr<HRTFKernel> HRTFKernel::createInterpolatedKernel(HRTFKernel* kernel1, HRTFKernel* kernel2, float x)
{
    ASSERT(kernel1 && kernel2);
    if (!kernel1 || !kernel2)
        return 0;

    ASSERT(x >= 0.0 && x < 1.0);
    x = min(1.0f, max(0.0f, x));

    float sampleRate1 = kernel1->sampleRate();
    float sampleRate2 = kernel2->sampleRate();
    ASSERT(sampleRate1 == sampleRate2);
    if (sampleRate1 != sampleRate2)
        return 0;

    float frameDelay = (1 - x) * kernel1->frameDelay() + x * kernel2->frameDelay();

    OwnPtr<FFTFrame> interpolatedFrame = FFTFrame::createInterpolatedFrame(*kernel1->fftFrame(), *kernel2->fftFrame(), x);
    return HRTFKernel::create(interpolatedFrame.release(), frameDelay, sampleRate1);
}

} // namespace WebCore

#endif // ENABLE(WEB_AUDIO)