/*
 * Copyright (C) 2010, Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1.  Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(WEB_AUDIO)

#include "modules/webaudio/PannerNode.h"

#include "core/dom/ExecutionContext.h"
#include "platform/audio/HRTFPanner.h"
#include "modules/webaudio/AudioBufferSourceNode.h"
#include "modules/webaudio/AudioContext.h"
#include "modules/webaudio/AudioNodeInput.h"
#include "modules/webaudio/AudioNodeOutput.h"
#include "wtf/MathExtras.h"

using namespace std;

namespace WebCore {

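// Replace NaN or infinite values with 0 so the azimuth, elevation, and doppler
// calculations below never feed illegal values into the panner.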
static void fixNANs(double &x)
{
    if (std::isnan(x) || std::isinf(x))
        x = 0.0;
}

PannerNode::PannerNode(AudioContext* context, float sampleRate)
    : AudioNode(context, sampleRate)
    , m_panningModel(Panner::PanningModelHRTF)
    , m_lastGain(-1.0)
    , m_connectionCount(0)
{
    ScriptWrappable::init(this);
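    // A single input; the output is always stereo because the panning algorithms render into two channels.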
    addInput(adoptPtr(new AudioNodeInput(this)));
    addOutput(adoptPtr(new AudioNodeOutput(this, 2)));

    // Node-specific default mixing rules.
    m_channelCount = 2;
    m_channelCountMode = ClampedMax;
    m_channelInterpretation = AudioBus::Speakers;

    m_distanceGain = AudioParam::create(context, "distanceGain", 1.0, 0.0, 1.0);
    m_coneGain = AudioParam::create(context, "coneGain", 1.0, 0.0, 1.0);

    m_position = FloatPoint3D(0, 0, 0);
    m_orientation = FloatPoint3D(1, 0, 0);
    m_velocity = FloatPoint3D(0, 0, 0);

    setNodeType(NodeTypePanner);

    initialize();
}

PannerNode::~PannerNode()
{
    uninitialize();
}

void PannerNode::pullInputs(size_t framesToProcess)
{
    // We override pullInputs() so that we can detect when new AudioSourceNodes have been connected to us.
    // These AudioSourceNodes need to be made aware of our existence in order to handle doppler shift pitch changes.
    if (m_connectionCount != context()->connectionCount()) {
        m_connectionCount = context()->connectionCount();

        // Recursively go through all nodes connected to us.
        notifyAudioSourcesConnectedToNode(this);
    }

    AudioNode::pullInputs(framesToProcess);
}

void PannerNode::process(size_t framesToProcess)
{
    AudioBus* destination = output(0)->bus();

    if (!isInitialized() || !input(0)->isConnected() || !m_panner.get()) {
        destination->zero();
        return;
    }

    AudioBus* source = input(0)->bus();

    if (!source) {
        destination->zero();
        return;
    }

    // The audio thread can't block on this lock, so we call tryLock() instead.
    MutexTryLocker tryLocker(m_pannerLock);
    if (tryLocker.locked()) {
        // Apply the panning effect.
        double azimuth;
        double elevation;
        getAzimuthElevation(&azimuth, &elevation);
        m_panner->pan(azimuth, elevation, source, destination, framesToProcess);

        // Get the distance and cone gain.
        double totalGain = distanceConeGain();

        // Snap to desired gain at the beginning.
        if (m_lastGain == -1.0)
            m_lastGain = totalGain;

        // Apply gain in-place with de-zippering.
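        // copyWithGainFrom() ramps from m_lastGain toward totalGain over the render quantum
        // (updating m_lastGain as it goes) instead of applying the new gain as a step, which
        // avoids audible "zipper" clicks when the gain changes between quanta.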
        destination->copyWithGainFrom(*destination, &m_lastGain, totalGain);
    } else {
        // The tryLock() failed, which means the panner is being changed on another thread; output silence for this render quantum.
        destination->zero();
    }
}

void PannerNode::reset()
{
    m_lastGain = -1.0; // force to snap to initial gain
    if (m_panner.get())
        m_panner->reset();
}

void PannerNode::initialize()
{
    if (isInitialized())
        return;

    m_panner = Panner::create(m_panningModel, sampleRate(), context()->hrtfDatabaseLoader());

    AudioNode::initialize();
}

void PannerNode::uninitialize()
{
    if (!isInitialized())
        return;

    m_panner.clear();
    AudioNode::uninitialize();
}

AudioListener* PannerNode::listener()
{
    return context()->listener();
}

String PannerNode::panningModel() const
{
    switch (m_panningModel) {
    case EQUALPOWER:
        return "equalpower";
    case HRTF:
        return "HRTF";
    case SOUNDFIELD:
        return "soundfield";
    default:
        ASSERT_NOT_REACHED();
        return "HRTF";
    }
}

void PannerNode::setPanningModel(const String& model)
{
    if (model == "equalpower")
        setPanningModel(EQUALPOWER);
    else if (model == "HRTF")
        setPanningModel(HRTF);
    else if (model == "soundfield")
        setPanningModel(SOUNDFIELD);
    else
        ASSERT_NOT_REACHED();
}

bool PannerNode::setPanningModel(unsigned model)
{
    switch (model) {
    case EQUALPOWER:
    case HRTF:
        if (!m_panner.get() || model != m_panningModel) {
            // This synchronizes with process().
            MutexLocker processLocker(m_pannerLock);

            OwnPtr<Panner> newPanner = Panner::create(model, sampleRate(), context()->hrtfDatabaseLoader());
            m_panner = newPanner.release();
            m_panningModel = model;
        }
        break;
    case SOUNDFIELD:
        // FIXME: Implement the sound field model. See https://bugs.webkit.org/show_bug.cgi?id=77367.
        context()->executionContext()->addConsoleMessage(JSMessageSource, WarningMessageLevel, "'soundfield' panning model not implemented.");
        break;
    default:
        return false;
    }

    return true;
}

String PannerNode::distanceModel() const
{
    switch (const_cast<PannerNode*>(this)->m_distanceEffect.model()) {
    case DistanceEffect::ModelLinear:
        return "linear";
    case DistanceEffect::ModelInverse:
        return "inverse";
    case DistanceEffect::ModelExponential:
        return "exponential";
    default:
        ASSERT_NOT_REACHED();
        return "inverse";
    }
}

void PannerNode::setDistanceModel(const String& model)
{
    if (model == "linear")
        setDistanceModel(DistanceEffect::ModelLinear);
    else if (model == "inverse")
        setDistanceModel(DistanceEffect::ModelInverse);
    else if (model == "exponential")
        setDistanceModel(DistanceEffect::ModelExponential);
    else
        ASSERT_NOT_REACHED();
}

bool PannerNode::setDistanceModel(unsigned model)
{
    switch (model) {
    case DistanceEffect::ModelLinear:
    case DistanceEffect::ModelInverse:
    case DistanceEffect::ModelExponential:
        m_distanceEffect.setModel(static_cast<DistanceEffect::ModelType>(model), true);
        break;
    default:
        return false;
    }

    return true;
}

void PannerNode::getAzimuthElevation(double* outAzimuth, double* outElevation)
{
    // FIXME: we should cache azimuth and elevation (if possible), so we only re-calculate if a change has been made.

    double azimuth = 0.0;

    // Calculate the vector from the listener to the source.
    FloatPoint3D listenerPosition = listener()->position();
    FloatPoint3D sourceListener = m_position - listenerPosition;

    if (sourceListener.isZero()) {
        // Degenerate case if the source and listener are at the same point.
        *outAzimuth = 0.0;
        *outElevation = 0.0;
        return;
    }

    sourceListener.normalize();

    // Align axes: build an orthonormal basis from the listener's front and up vectors.
    FloatPoint3D listenerFront = listener()->orientation();
    FloatPoint3D listenerUp = listener()->upVector();
    FloatPoint3D listenerRight = listenerFront.cross(listenerUp);
    listenerRight.normalize();

    FloatPoint3D listenerFrontNorm = listenerFront;
    listenerFrontNorm.normalize();

    FloatPoint3D up = listenerRight.cross(listenerFrontNorm);

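    // Project the source direction onto the listener's horizontal plane; azimuth is measured in that plane.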
    float upProjection = sourceListener.dot(up);

    FloatPoint3D projectedSource = sourceListener - upProjection * up;
    projectedSource.normalize();

    azimuth = 180.0 * acos(projectedSource.dot(listenerRight)) / piDouble;
    fixNANs(azimuth); // avoid illegal values

    // Source in front of or behind the listener.
    double frontBack = projectedSource.dot(listenerFrontNorm);
    if (frontBack < 0.0)
        azimuth = 360.0 - azimuth;

    // Make azimuth relative to the listener's "front" vector instead of its "right" vector.
    if ((azimuth >= 0.0) && (azimuth <= 270.0))
        azimuth = 90.0 - azimuth;
    else
        azimuth = 450.0 - azimuth;
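    // Azimuth is now 0 degrees straight ahead of the listener, +90 to the listener's right,
    // -90 to the left, and +/-180 directly behind.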

    // Elevation
    double elevation = 90.0 - 180.0 * acos(sourceListener.dot(up)) / piDouble;
    fixNANs(elevation); // avoid illegal values

    if (elevation > 90.0)
        elevation = 180.0 - elevation;
    else if (elevation < -90.0)
        elevation = -180.0 - elevation;

    if (outAzimuth)
        *outAzimuth = azimuth;
    if (outElevation)
        *outElevation = elevation;
}

float PannerNode::dopplerRate()
{
    double dopplerShift = 1.0;

    // FIXME: optimize for the case when neither the source nor the listener has changed...
    double dopplerFactor = listener()->dopplerFactor();

    if (dopplerFactor > 0.0) {
        double speedOfSound = listener()->speedOfSound();

        const FloatPoint3D &sourceVelocity = m_velocity;
        const FloatPoint3D &listenerVelocity = listener()->velocity();

        // Don't bother if neither the source nor the listener has any velocity.
        bool sourceHasVelocity = !sourceVelocity.isZero();
        bool listenerHasVelocity = !listenerVelocity.isZero();

        if (sourceHasVelocity || listenerHasVelocity) {
            // Calculate the source to listener vector.
            FloatPoint3D listenerPosition = listener()->position();
            FloatPoint3D sourceToListener = m_position - listenerPosition;

            double sourceListenerMagnitude = sourceToListener.length();

            double listenerProjection = sourceToListener.dot(listenerVelocity) / sourceListenerMagnitude;
            double sourceProjection = sourceToListener.dot(sourceVelocity) / sourceListenerMagnitude;

            listenerProjection = -listenerProjection;
            sourceProjection = -sourceProjection;

            double scaledSpeedOfSound = speedOfSound / dopplerFactor;
            listenerProjection = min(listenerProjection, scaledSpeedOfSound);
            sourceProjection = min(sourceProjection, scaledSpeedOfSound);

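            // Standard Doppler equation, scaled by the listener's dopplerFactor d:
            //   shift = (c - d * v_listener) / (c - d * v_source)
            // where both velocities are the components projected onto the source-listener axis,
            // clamped above to c / d so the terms cannot go negative.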
            dopplerShift = ((speedOfSound - dopplerFactor * listenerProjection) / (speedOfSound - dopplerFactor * sourceProjection));
            fixNANs(dopplerShift); // avoid illegal values

            // Limit the pitch shifting to 4 octaves up and 3 octaves down.
            if (dopplerShift > 16.0)
                dopplerShift = 16.0;
            else if (dopplerShift < 0.125)
                dopplerShift = 0.125;
        }
    }

    return static_cast<float>(dopplerShift);
}

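// The overall spatialization gain is the product of the distance attenuation and the
// directional (cone) attenuation; the computed values are also written to the
// distanceGain and coneGain AudioParams.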
float PannerNode::distanceConeGain()
{
    FloatPoint3D listenerPosition = listener()->position();

    double listenerDistance = m_position.distanceTo(listenerPosition);
    double distanceGain = m_distanceEffect.gain(listenerDistance);

    m_distanceGain->setValue(static_cast<float>(distanceGain));

    // FIXME: could optimize by caching coneGain.
    double coneGain = m_coneEffect.gain(m_position, m_orientation, listenerPosition);

    m_coneGain->setValue(static_cast<float>(coneGain));

    return static_cast<float>(distanceGain * coneGain);
}

void PannerNode::notifyAudioSourcesConnectedToNode(AudioNode* node)
{
    ASSERT(node);
    if (!node)
        return;

    // First check if this node is an AudioBufferSourceNode. If so, let it know about us so that doppler shift pitch can be taken into account.
    if (node->nodeType() == NodeTypeAudioBufferSource) {
        AudioBufferSourceNode* bufferSourceNode = static_cast<AudioBufferSourceNode*>(node);
        bufferSourceNode->setPannerNode(this);
    } else {
        // Go through all inputs to this node.
        for (unsigned i = 0; i < node->numberOfInputs(); ++i) {
            AudioNodeInput* input = node->input(i);

            // For each input, go through all of its connections, looking for AudioBufferSourceNodes.
            for (unsigned j = 0; j < input->numberOfRenderingConnections(); ++j) {
                AudioNodeOutput* connectedOutput = input->renderingOutput(j);
                AudioNode* connectedNode = connectedOutput->node();
                notifyAudioSourcesConnectedToNode(connectedNode); // recurse
            }
        }
    }
}

} // namespace WebCore

#endif // ENABLE(WEB_AUDIO)