/*
 * Copyright (C) 2010, Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1.  Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(WEB_AUDIO)

#include "modules/webaudio/AudioNode.h"

#include "bindings/v8/ExceptionState.h"
#include "core/dom/ExceptionCode.h"
#include "modules/webaudio/AudioContext.h"
#include "modules/webaudio/AudioNodeInput.h"
#include "modules/webaudio/AudioNodeOutput.h"
#include "modules/webaudio/AudioParam.h"
#include "wtf/Atomics.h"
#include "wtf/MainThread.h"

#if DEBUG_AUDIONODE_REFERENCES
#include <stdio.h>
#endif

namespace WebCore {

AudioNode::AudioNode(AudioContext* context, float sampleRate)
    : m_isInitialized(false)
    , m_nodeType(NodeTypeUnknown)
    , m_context(context)
    , m_sampleRate(sampleRate)
#if ENABLE(OILPAN)
    , m_keepAlive(adoptPtr(new Persistent<AudioNode>(this)))
#endif
    , m_lastProcessingTime(-1)
    , m_lastNonSilentTime(-1)
    , m_normalRefCount(1) // start out with normal refCount == 1 (like WTF::RefCounted class)
    , m_connectionRefCount(0)
    , m_isMarkedForDeletion(false)
    , m_isDisabled(false)
    , m_channelCount(2)
    , m_channelCountMode(Max)
    , m_channelInterpretation(AudioBus::Speakers)
{
    ScriptWrappable::init(this);
#if DEBUG_AUDIONODE_REFERENCES
    if (!s_isNodeCountInitialized) {
        s_isNodeCountInitialized = true;
        atexit(AudioNode::printNodeCounts);
    }
#endif
}

AudioNode::~AudioNode()
{
#if DEBUG_AUDIONODE_REFERENCES
    --s_nodeCount[nodeType()];
    fprintf(stderr, "%p: %d: AudioNode::~AudioNode() %d %d\n", this, nodeType(), m_normalRefCount, m_connectionRefCount);
#endif
}

void AudioNode::initialize()
{
    m_isInitialized = true;
}

void AudioNode::uninitialize()
{
    m_isInitialized = false;
}

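// Maps the internal NodeType enum to the name of the corresponding Web Audio interface.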
String AudioNode::nodeTypeName() const
{
    switch (m_nodeType) {
    case NodeTypeDestination:
        return "AudioDestinationNode";
    case NodeTypeOscillator:
        return "OscillatorNode";
    case NodeTypeAudioBufferSource:
        return "AudioBufferSourceNode";
    case NodeTypeMediaElementAudioSource:
        return "MediaElementAudioSourceNode";
    case NodeTypeMediaStreamAudioDestination:
        return "MediaStreamAudioDestinationNode";
    case NodeTypeMediaStreamAudioSource:
        return "MediaStreamAudioSourceNode";
    case NodeTypeJavaScript:
        return "ScriptProcessorNode";
    case NodeTypeBiquadFilter:
        return "BiquadFilterNode";
    case NodeTypePanner:
        return "PannerNode";
    case NodeTypeConvolver:
        return "ConvolverNode";
    case NodeTypeDelay:
        return "DelayNode";
    case NodeTypeGain:
        return "GainNode";
    case NodeTypeChannelSplitter:
        return "ChannelSplitterNode";
    case NodeTypeChannelMerger:
        return "ChannelMergerNode";
    case NodeTypeAnalyser:
        return "AnalyserNode";
    case NodeTypeDynamicsCompressor:
        return "DynamicsCompressorNode";
    case NodeTypeWaveShaper:
        return "WaveShaperNode";
    case NodeTypeUnknown:
    case NodeTypeEnd:
    default:
        ASSERT_NOT_REACHED();
        return "UnknownNode";
    }
}

void AudioNode::setNodeType(NodeType type)
{
    m_nodeType = type;

#if DEBUG_AUDIONODE_REFERENCES
    ++s_nodeCount[type];
#endif
}

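// Inputs and outputs are owned by the node. Subclasses declare their inputs and
// outputs (typically from their constructors) by calling these helpers.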
void AudioNode::addInput(PassOwnPtr<AudioNodeInput> input)
{
    m_inputs.append(input);
}

void AudioNode::addOutput(PassOwnPtr<AudioNodeOutput> output)
{
    m_outputs.append(output);
}

AudioNodeInput* AudioNode::input(unsigned i)
{
    if (i < m_inputs.size())
        return m_inputs[i].get();
    return 0;
}

AudioNodeOutput* AudioNode::output(unsigned i)
{
    if (i < m_outputs.size())
        return m_outputs[i].get();
    return 0;
}

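// Implements AudioNode.connect(destination, output, input) from the Web Audio API,
// e.g. (JS) source.connect(gainNode, 0, 0). Validates the indices and the context,
// then wires this node's output to the destination's input while holding the graph lock.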
void AudioNode::connect(AudioNode* destination, unsigned outputIndex, unsigned inputIndex, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    AudioContext::AutoLocker locker(context());

    if (!destination) {
        exceptionState.throwDOMException(
            SyntaxError,
            "invalid destination node.");
        return;
    }

    // Sanity check input and output indices.
    if (outputIndex >= numberOfOutputs()) {
        exceptionState.throwDOMException(
            IndexSizeError,
            "output index (" + String::number(outputIndex) + ") exceeds number of outputs (" + String::number(numberOfOutputs()) + ").");
        return;
    }

    if (destination && inputIndex >= destination->numberOfInputs()) {
        exceptionState.throwDOMException(
            IndexSizeError,
            "input index (" + String::number(inputIndex) + ") exceeds number of inputs (" + String::number(destination->numberOfInputs()) + ").");
        return;
    }

    if (context() != destination->context()) {
        exceptionState.throwDOMException(
            SyntaxError,
            "cannot connect to a destination belonging to a different audio context.");
        return;
    }

    AudioNodeInput* input = destination->input(inputIndex);
    AudioNodeOutput* output = this->output(outputIndex);
    input->connect(output);

    // Let context know that a connection has been made.
    context()->incrementConnectionCount();
}

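// Connects this node's output to an AudioParam so the rendered signal drives the
// parameter's value, e.g. (JS) lfo.connect(gainNode.gain).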
void AudioNode::connect(AudioParam* param, unsigned outputIndex, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    AudioContext::AutoLocker locker(context());

    if (!param) {
        exceptionState.throwDOMException(
            SyntaxError,
            "invalid AudioParam.");
        return;
    }

    if (outputIndex >= numberOfOutputs()) {
        exceptionState.throwDOMException(
            IndexSizeError,
            "output index (" + String::number(outputIndex) + ") exceeds number of outputs (" + String::number(numberOfOutputs()) + ").");
        return;
    }

    if (context() != param->context()) {
        exceptionState.throwDOMException(
            SyntaxError,
            "cannot connect to an AudioParam belonging to a different audio context.");
        return;
    }

    AudioNodeOutput* output = this->output(outputIndex);
    param->connect(output);
}

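// Disconnects all outgoing connections from the given output, matching
// AudioNode.disconnect(output) in the Web Audio API.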
void AudioNode::disconnect(unsigned outputIndex, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    AudioContext::AutoLocker locker(context());

    // Sanity check the output index.
    if (outputIndex >= numberOfOutputs()) {
        exceptionState.throwDOMException(
            IndexSizeError,
            "output index (" + String::number(outputIndex) + ") exceeds number of outputs (" + String::number(numberOfOutputs()) + ").");
        return;
    }

    AudioNodeOutput* output = this->output(outputIndex);
    output->disconnectAll();
}

unsigned long AudioNode::channelCount()
{
    return m_channelCount;
}

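// Sets channelCount, which must be between 1 and AudioContext::maxNumberOfChannels().
// In "max" mode the explicit count is ignored for mixing, so inputs only need updating
// in the other modes.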
void AudioNode::setChannelCount(unsigned long channelCount, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    AudioContext::AutoLocker locker(context());

    if (channelCount > 0 && channelCount <= AudioContext::maxNumberOfChannels()) {
        if (m_channelCount != channelCount) {
            m_channelCount = channelCount;
            if (m_channelCountMode != Max)
                updateChannelsForInputs();
        }
    } else {
        exceptionState.throwDOMException(
            NotSupportedError,
            "channel count (" + String::number(channelCount) + ") must be between 1 and " + String::number(AudioContext::maxNumberOfChannels()) + ".");
    }
}

String AudioNode::channelCountMode()
{
    switch (m_channelCountMode) {
    case Max:
        return "max";
    case ClampedMax:
        return "clamped-max";
    case Explicit:
        return "explicit";
    }
    ASSERT_NOT_REACHED();
    return "";
}

void AudioNode::setChannelCountMode(const String& mode, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    AudioContext::AutoLocker locker(context());

    ChannelCountMode oldMode = m_channelCountMode;

    if (mode == "max") {
        m_channelCountMode = Max;
    } else if (mode == "clamped-max") {
        m_channelCountMode = ClampedMax;
    } else if (mode == "explicit") {
        m_channelCountMode = Explicit;
    } else {
        ASSERT_NOT_REACHED();
    }

    if (m_channelCountMode != oldMode)
        updateChannelsForInputs();
}

String AudioNode::channelInterpretation()
{
    switch (m_channelInterpretation) {
    case AudioBus::Speakers:
        return "speakers";
    case AudioBus::Discrete:
        return "discrete";
    }
    ASSERT_NOT_REACHED();
    return "";
}

void AudioNode::setChannelInterpretation(const String& interpretation, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    AudioContext::AutoLocker locker(context());

    if (interpretation == "speakers") {
        m_channelInterpretation = AudioBus::Speakers;
    } else if (interpretation == "discrete") {
        m_channelInterpretation = AudioBus::Discrete;
    } else {
        ASSERT_NOT_REACHED();
    }
}

void AudioNode::updateChannelsForInputs()
{
    for (unsigned i = 0; i < m_inputs.size(); ++i)
        input(i)->changedOutputs();
}

const AtomicString& AudioNode::interfaceName() const
{
    return EventTargetNames::AudioNode;
}

ExecutionContext* AudioNode::executionContext() const
{
    return const_cast<AudioNode*>(this)->context()->executionContext();
}

void AudioNode::processIfNecessary(size_t framesToProcess)
{
    ASSERT(context()->isAudioThread());

    if (!isInitialized())
        return;

    // Ensure that we only process once per rendering quantum.
    // This handles the "fanout" problem where an output is connected to multiple inputs.
    // The first time we're called during this time slice we process; after that we don't want to
    // re-process, because our output(s) will already have the results cached in their bus.
    double currentTime = context()->currentTime();
    if (m_lastProcessingTime != currentTime) {
        m_lastProcessingTime = currentTime; // Important to update this time first because of feedback loops in the rendering graph.

        pullInputs(framesToProcess);

        bool silentInputs = inputsAreSilent();
        if (!silentInputs)
            m_lastNonSilentTime = (context()->currentSampleFrame() + framesToProcess) / static_cast<double>(m_sampleRate);

        if (silentInputs && propagatesSilence())
            silenceOutputs();
        else {
            process(framesToProcess);
            unsilenceOutputs();
        }
    }
}

void AudioNode::checkNumberOfChannelsForInput(AudioNodeInput* input)
{
    ASSERT(context()->isAudioThread() && context()->isGraphOwner());

    ASSERT(m_inputs.contains(input));
    if (!m_inputs.contains(input))
        return;

    input->updateInternalBus();
}

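// A node may skip processing (and output silence) once its inputs have been silent
// for longer than its tail time plus latency time; until then, silent input can still
// produce non-silent output (e.g. a convolver's reverb tail).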
bool AudioNode::propagatesSilence() const
{
    return m_lastNonSilentTime + latencyTime() + tailTime() < context()->currentTime();
}

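// Recursively renders the upstream graph: pulling an input causes the nodes connected
// to it to process for this quantum (see processIfNecessary()).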
void AudioNode::pullInputs(size_t framesToProcess)
{
    ASSERT(context()->isAudioThread());

    // Process all of the AudioNodes connected to our inputs.
    for (unsigned i = 0; i < m_inputs.size(); ++i)
        input(i)->pull(0, framesToProcess);
}

bool AudioNode::inputsAreSilent()
{
    for (unsigned i = 0; i < m_inputs.size(); ++i) {
        if (!input(i)->bus()->isSilent())
            return false;
    }
    return true;
}

void AudioNode::silenceOutputs()
{
    for (unsigned i = 0; i < m_outputs.size(); ++i)
        output(i)->bus()->zero();
}

void AudioNode::unsilenceOutputs()
{
    for (unsigned i = 0; i < m_outputs.size(); ++i)
        output(i)->bus()->clearSilentFlag();
}

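// Re-enables the outputs of a previously disabled node once it regains an active
// connection reference (see the disabling logic in disableOutputsIfNecessary()).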
void AudioNode::enableOutputsIfNecessary()
{
    if (m_isDisabled && m_connectionRefCount > 0) {
        ASSERT(isMainThread());
        AudioContext::AutoLocker locker(context());

        m_isDisabled = false;
        for (unsigned i = 0; i < m_outputs.size(); ++i)
            output(i)->enable();
    }
}

void AudioNode::disableOutputsIfNecessary()
{
    // Disable outputs if appropriate. We do this if the number of connections is 0 or 1. The case
    // of 0 is from finishDeref() where there are no connections left. The case of 1 is from
    // AudioNodeInput::disable() where we want to disable outputs when there's only one connection
    // left because we're ready to go away, but can't quite yet.
    if (m_connectionRefCount <= 1 && !m_isDisabled) {
        // Still may have JavaScript references, but no more "active" connection references, so put all of our outputs in a "dormant" disabled state.
        // Garbage collection may take a very long time after this point, so the "dormant" disabled nodes should not bog down the rendering...

        // As far as JavaScript is concerned, our outputs must still appear to be connected.
        // But internally our outputs should be disabled from the inputs they're connected to.
        // disable() can recursively deref connections (and call disable()) down a whole chain of connected nodes.

        // FIXME: we special-case the convolver and delay since they have a significant tail-time and shouldn't be disconnected simply
        // because they no longer have any input connections. This needs to be handled more generally where AudioNodes have
        // a tailTime attribute. Then the AudioNode only needs to remain "active" for tailTime seconds after there are no
        // longer any active connections.
        if (nodeType() != NodeTypeConvolver && nodeType() != NodeTypeDelay) {
            m_isDisabled = true;
            for (unsigned i = 0; i < m_outputs.size(); ++i)
                output(i)->disable();
        }
    }
}

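// Reference-count bookkeeping. RefTypeNormal tracks ordinary references (for example
// from the JavaScript wrapper), while RefTypeConnection tracks "active" references held
// by connections in the rendering graph; a node is only torn down when both reach zero.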
void AudioNode::ref(RefType refType)
{
#if ENABLE(OILPAN)
    ASSERT(m_keepAlive);
#endif
    switch (refType) {
    case RefTypeNormal:
        atomicIncrement(&m_normalRefCount);
        break;
    case RefTypeConnection:
        atomicIncrement(&m_connectionRefCount);
        break;
    default:
        ASSERT_NOT_REACHED();
    }

#if DEBUG_AUDIONODE_REFERENCES
    fprintf(stderr, "%p: %d: AudioNode::ref(%d) %d %d\n", this, nodeType(), refType, m_normalRefCount, m_connectionRefCount);
#endif

    // See the disabling code in finishDeref() below. This handles the case where a node
    // is being re-connected after being used at least once and disconnected.
    // In this case, we need to re-enable.
    if (refType == RefTypeConnection)
        enableOutputsIfNecessary();
}

void AudioNode::deref(RefType refType)
{
    // The actual work for deref happens entirely within the audio context's graph lock.
    // In the case of the audio thread, we must use a tryLock to avoid glitches.
    bool hasLock = false;
    bool mustReleaseLock = false;

    if (context()->isAudioThread()) {
        // The real-time audio thread must not contend for the lock (to avoid glitches).
        hasLock = context()->tryLock(mustReleaseLock);
    } else {
        context()->lock(mustReleaseLock);
        hasLock = true;
    }

    if (hasLock) {
        // This is where the real deref work happens.
        finishDeref(refType);

        if (mustReleaseLock)
            context()->unlock();
    } else {
        // We were unable to get the lock, so put this in a list to finish up later.
        ASSERT(context()->isAudioThread());
        ASSERT(refType == RefTypeConnection);
        context()->addDeferredFinishDeref(this);
    }

    // Once AudioContext::uninitialize() is called there's no more chance for deleteMarkedNodes() to get called, so we call it here.
    // We can't call it in AudioContext::~AudioContext() since it will never be called as long as any AudioNode is alive
    // because AudioNodes keep a reference to the context.
    if (!context()->isInitialized())
        context()->deleteMarkedNodes();
}

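// Performs the actual reference-count decrement. Must be called while holding the
// graph lock. When the last reference of both kinds is gone, the node disconnects its
// outputs and marks itself for deletion at the end of the render quantum.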
void AudioNode::finishDeref(RefType refType)
{
    ASSERT(context()->isGraphOwner());

    switch (refType) {
    case RefTypeNormal:
        ASSERT(m_normalRefCount > 0);
        atomicDecrement(&m_normalRefCount);
        break;
    case RefTypeConnection:
        ASSERT(m_connectionRefCount > 0);
        atomicDecrement(&m_connectionRefCount);
        break;
    default:
        ASSERT_NOT_REACHED();
    }

#if DEBUG_AUDIONODE_REFERENCES
    fprintf(stderr, "%p: %d: AudioNode::deref(%d) %d %d\n", this, nodeType(), refType, m_normalRefCount, m_connectionRefCount);
#endif

    if (!m_connectionRefCount) {
        if (!m_normalRefCount) {
            if (!m_isMarkedForDeletion) {
                // All references are gone - we need to go away.
                for (unsigned i = 0; i < m_outputs.size(); ++i)
                    output(i)->disconnectAll(); // This will deref() nodes we're connected to.

                // Mark for deletion at end of each render quantum or when context shuts down.
                context()->markForDeletion(this);
                m_isMarkedForDeletion = true;
            }
        } else if (refType == RefTypeConnection)
            disableOutputsIfNecessary();
    }
}

#if DEBUG_AUDIONODE_REFERENCES

bool AudioNode::s_isNodeCountInitialized = false;
int AudioNode::s_nodeCount[NodeTypeEnd];

void AudioNode::printNodeCounts()
{
    fprintf(stderr, "\n\n");
    fprintf(stderr, "===========================\n");
    fprintf(stderr, "AudioNode: reference counts\n");
    fprintf(stderr, "===========================\n");

    for (unsigned i = 0; i < NodeTypeEnd; ++i)
        fprintf(stderr, "%d: %d\n", i, s_nodeCount[i]);

    fprintf(stderr, "===========================\n\n\n");
}

#endif // DEBUG_AUDIONODE_REFERENCES

void AudioNode::trace(Visitor* visitor)
{
    visitor->trace(m_context);
    EventTargetWithInlineData::trace(visitor);
}

#if ENABLE(OILPAN)
void AudioNode::clearKeepAlive()
{
    // It is safe to drop the self-persistent when the ref count
    // of an AudioNode reaches zero. At that point, the
    // AudioNode is removed from the AudioContext and
    // it cannot be reattached. Therefore, the reference count
    // will not go above zero again.
    ASSERT(m_keepAlive);
    m_keepAlive = nullptr;
}
#endif

} // namespace WebCore

#endif // ENABLE(WEB_AUDIO)