// Home | History | Annotate | Download | only in webaudio
      1 /*
      2  * Copyright (C) 2010, Google Inc. All rights reserved.
      3  *
      4  * Redistribution and use in source and binary forms, with or without
      5  * modification, are permitted provided that the following conditions
      6  * are met:
      7  * 1.  Redistributions of source code must retain the above copyright
      8  *    notice, this list of conditions and the following disclaimer.
      9  * 2.  Redistributions in binary form must reproduce the above copyright
     10  *    notice, this list of conditions and the following disclaimer in the
     11  *    documentation and/or other materials provided with the distribution.
     12  *
     13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
     14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     15  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     16  * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
     17  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     18  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
     19  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
     20  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     21  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
     22  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     23  */
     24 
     25 #include "config.h"
     26 
     27 #if ENABLE(WEB_AUDIO)
     28 
     29 #include "modules/webaudio/AudioContext.h"
     30 
     31 #include "bindings/core/v8/ExceptionMessages.h"
     32 #include "bindings/core/v8/ExceptionState.h"
     33 #include "core/dom/Document.h"
     34 #include "core/dom/ExceptionCode.h"
     35 #include "core/html/HTMLMediaElement.h"
     36 #include "core/inspector/ScriptCallStack.h"
     37 #include "platform/audio/FFTFrame.h"
     38 #include "platform/audio/HRTFPanner.h"
     39 #include "modules/mediastream/MediaStream.h"
     40 #include "modules/webaudio/AnalyserNode.h"
     41 #include "modules/webaudio/AudioBuffer.h"
     42 #include "modules/webaudio/AudioBufferCallback.h"
     43 #include "modules/webaudio/AudioBufferSourceNode.h"
     44 #include "modules/webaudio/AudioListener.h"
     45 #include "modules/webaudio/AudioNodeInput.h"
     46 #include "modules/webaudio/AudioNodeOutput.h"
     47 #include "modules/webaudio/BiquadFilterNode.h"
     48 #include "modules/webaudio/ChannelMergerNode.h"
     49 #include "modules/webaudio/ChannelSplitterNode.h"
     50 #include "modules/webaudio/ConvolverNode.h"
     51 #include "modules/webaudio/DefaultAudioDestinationNode.h"
     52 #include "modules/webaudio/DelayNode.h"
     53 #include "modules/webaudio/DynamicsCompressorNode.h"
     54 #include "modules/webaudio/GainNode.h"
     55 #include "modules/webaudio/MediaElementAudioSourceNode.h"
     56 #include "modules/webaudio/MediaStreamAudioDestinationNode.h"
     57 #include "modules/webaudio/MediaStreamAudioSourceNode.h"
     58 #include "modules/webaudio/OfflineAudioCompletionEvent.h"
     59 #include "modules/webaudio/OfflineAudioContext.h"
     60 #include "modules/webaudio/OfflineAudioDestinationNode.h"
     61 #include "modules/webaudio/OscillatorNode.h"
     62 #include "modules/webaudio/PannerNode.h"
     63 #include "modules/webaudio/PeriodicWave.h"
     64 #include "modules/webaudio/ScriptProcessorNode.h"
     65 #include "modules/webaudio/WaveShaperNode.h"
     66 
     67 #if DEBUG_AUDIONODE_REFERENCES
     68 #include <stdio.h>
     69 #endif
     70 
     71 #include "wtf/ArrayBuffer.h"
     72 #include "wtf/Atomics.h"
     73 #include "wtf/PassOwnPtr.h"
     74 #include "wtf/text/WTFString.h"
     75 
     76 // FIXME: check the proper way to reference an undefined thread ID
     77 const WTF::ThreadIdentifier UndefinedThreadIdentifier = 0xffffffff;
     78 
     79 namespace blink {
     80 
     81 // Don't allow more than this number of simultaneous AudioContexts talking to hardware.
     82 const unsigned MaxHardwareContexts = 6;
     83 unsigned AudioContext::s_hardwareContextCount = 0;
     84 
     85 AudioContext* AudioContext::create(Document& document, ExceptionState& exceptionState)
     86 {
     87     ASSERT(isMainThread());
     88     if (s_hardwareContextCount >= MaxHardwareContexts) {
     89         exceptionState.throwDOMException(
     90             SyntaxError,
     91             "number of hardware contexts reached maximum (" + String::number(MaxHardwareContexts) + ").");
     92         return 0;
     93     }
     94 
     95     AudioContext* audioContext = adoptRefCountedGarbageCollectedWillBeNoop(new AudioContext(&document));
     96     audioContext->suspendIfNeeded();
     97     return audioContext;
     98 }
     99 
// Constructor for rendering to the audio hardware (realtime context).
AudioContext::AudioContext(Document* document)
    : ActiveDOMObject(document)
    , m_isStopScheduled(false)
    , m_isCleared(false)
    , m_isInitialized(false)
    , m_destinationNode(nullptr)
    , m_automaticPullNodesNeedUpdating(false)
    , m_connectionCount(0)
    , m_audioThread(0)
    , m_graphOwnerThread(UndefinedThreadIdentifier)
    , m_isOfflineContext(false)
{
    // The default destination renders to the audio hardware.
    m_destinationNode = DefaultAudioDestinationNode::create(this);

    // For realtime contexts, initialize() also starts the audio thread.
    initialize();
#if DEBUG_AUDIONODE_REFERENCES
    fprintf(stderr, "%p: AudioContext::AudioContext() #%u\n", this, AudioContext::s_hardwareContextCount);
#endif
}
    120 
// Constructor for offline (non-realtime) rendering into an AudioBuffer.
AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
    : ActiveDOMObject(document)
    , m_isStopScheduled(false)
    , m_isCleared(false)
    , m_isInitialized(false)
    , m_destinationNode(nullptr)
    , m_automaticPullNodesNeedUpdating(false)
    , m_connectionCount(0)
    , m_audioThread(0)
    , m_graphOwnerThread(UndefinedThreadIdentifier)
    , m_isOfflineContext(true)
{
    // Create a new destination for offline rendering.
    m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
    // If the render-target allocation failed, m_destinationNode stays null and
    // initialize() below leaves the context uninitialized.
    if (m_renderTarget.get())
        m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());

    initialize();
}
    141 
AudioContext::~AudioContext()
{
#if DEBUG_AUDIONODE_REFERENCES
    fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this);
#endif
    // AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around.
    ASSERT(!m_isInitialized);
    ASSERT(!m_referencedNodes.size());
    ASSERT(!m_finishedNodes.size());
    ASSERT(!m_automaticPullNodes.size());
    // Apply any pending pull-node resize so the rendering copy can be checked
    // for emptiness below as well.
    if (m_automaticPullNodesNeedUpdating)
        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
    ASSERT(!m_renderingAutomaticPullNodes.size());
}
    156 
// One-time setup: FFT tables, the listener, and the destination node. For
// realtime contexts this also starts audio rendering immediately.
void AudioContext::initialize()
{
    if (isInitialized())
        return;

    FFTFrame::initialize();
    m_listener = AudioListener::create();

    if (m_destinationNode.get()) {
        m_destinationNode->initialize();

        if (!isOfflineContext()) {
            // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
            // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
            // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
            // We may want to consider requiring it for symmetry with OfflineAudioContext.
            m_destinationNode->startRendering();
            ++s_hardwareContextCount;
        }

        // A context whose destination could not be created (e.g. offline
        // render-target allocation failure) never becomes initialized.
        m_isInitialized = true;
    }
}
    180 
// Drops the graph objects this context owns and marks the context cleared so
// hasPendingActivity() stops keeping the wrapper alive.
void AudioContext::clear()
{
    // We need to run disposers before destructing m_contextGraphMutex.
    m_liveAudioSummingJunctions.clear();
    m_liveNodes.clear();
    m_destinationNode.clear();
    m_isCleared = true;
}
    189 
// Tears down rendering: stops the audio thread, releases the hardware-context
// slot, drops remaining source references, then clears the graph.
void AudioContext::uninitialize()
{
    ASSERT(isMainThread());

    if (!isInitialized())
        return;

    // This stops the audio thread and all audio rendering.
    m_destinationNode->uninitialize();

    // Only realtime contexts count against the hardware-context budget.
    if (!isOfflineContext()) {
        ASSERT(s_hardwareContextCount);
        --s_hardwareContextCount;
    }

    // Get rid of the sources which may still be playing.
    derefUnfinishedSourceNodes();

    m_isInitialized = false;
    ASSERT(m_listener);
    // Block until the HRTF database loader thread is done before clearing.
    m_listener->waitForHRTFDatabaseLoaderThreadCompletion();

    clear();
}
    214 
// ActiveDOMObject override: schedules teardown when the execution context
// stops. Idempotent via m_isStopScheduled.
void AudioContext::stop()
{
    // Usually ExecutionContext calls stop twice.
    if (m_isStopScheduled)
        return;
    m_isStopScheduled = true;

    // Don't call uninitialize() immediately here because the ExecutionContext is in the middle
    // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other
    // ActiveDOMObjects so let's schedule uninitialize() to be called later.
    // FIXME: see if there's a more direct way to handle this issue.
    callOnMainThread(bind(&AudioContext::uninitialize, this));
}
    228 
    229 bool AudioContext::hasPendingActivity() const
    230 {
    231     // According to spec AudioContext must die only after page navigates.
    232     return !m_isCleared;
    233 }
    234 
    235 AudioBuffer* AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
    236 {
    237     return AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate, exceptionState);
    238 }
    239 
    240 void AudioContext::decodeAudioData(ArrayBuffer* audioData, AudioBufferCallback* successCallback, AudioBufferCallback* errorCallback, ExceptionState& exceptionState)
    241 {
    242     if (!audioData) {
    243         exceptionState.throwDOMException(
    244             SyntaxError,
    245             "invalid ArrayBuffer for audioData.");
    246         return;
    247     }
    248     m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback);
    249 }
    250 
    251 AudioBufferSourceNode* AudioContext::createBufferSource()
    252 {
    253     ASSERT(isMainThread());
    254     AudioBufferSourceNode* node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());
    255 
    256     // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
    257     // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
    258     refNode(node);
    259 
    260     return node;
    261 }
    262 
    263 MediaElementAudioSourceNode* AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState)
    264 {
    265     ASSERT(isMainThread());
    266     if (!mediaElement) {
    267         exceptionState.throwDOMException(
    268             InvalidStateError,
    269             "invalid HTMLMedialElement.");
    270         return 0;
    271     }
    272 
    273     // First check if this media element already has a source node.
    274     if (mediaElement->audioSourceNode()) {
    275         exceptionState.throwDOMException(
    276             InvalidStateError,
    277             "invalid HTMLMediaElement.");
    278         return 0;
    279     }
    280 
    281     MediaElementAudioSourceNode* node = MediaElementAudioSourceNode::create(this, mediaElement);
    282 
    283     mediaElement->setAudioSourceNode(node);
    284 
    285     refNode(node); // context keeps reference until node is disconnected
    286     return node;
    287 }
    288 
    289 MediaStreamAudioSourceNode* AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState)
    290 {
    291     ASSERT(isMainThread());
    292     if (!mediaStream) {
    293         exceptionState.throwDOMException(
    294             InvalidStateError,
    295             "invalid MediaStream source");
    296         return 0;
    297     }
    298 
    299     MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks();
    300     if (audioTracks.isEmpty()) {
    301         exceptionState.throwDOMException(
    302             InvalidStateError,
    303             "MediaStream has no audio track");
    304         return 0;
    305     }
    306 
    307     // Use the first audio track in the media stream.
    308     MediaStreamTrack* audioTrack = audioTracks[0];
    309     OwnPtr<AudioSourceProvider> provider = audioTrack->createWebAudioSource();
    310     MediaStreamAudioSourceNode* node = MediaStreamAudioSourceNode::create(this, mediaStream, audioTrack, provider.release());
    311 
    312     // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
    313     node->setFormat(2, sampleRate());
    314 
    315     refNode(node); // context keeps reference until node is disconnected
    316     return node;
    317 }
    318 
    319 MediaStreamAudioDestinationNode* AudioContext::createMediaStreamDestination()
    320 {
    321     // Set number of output channels to stereo by default.
    322     return MediaStreamAudioDestinationNode::create(this, 2);
    323 }
    324 
// Convenience overload: implementation-chosen buffer size (0), stereo in/out.
ScriptProcessorNode* AudioContext::createScriptProcessor(ExceptionState& exceptionState)
{
    // Set number of input/output channels to stereo by default.
    return createScriptProcessor(0, 2, 2, exceptionState);
}
    330 
// Convenience overload: caller-chosen buffer size, stereo in/out.
ScriptProcessorNode* AudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState)
{
    // Set number of input/output channels to stereo by default.
    return createScriptProcessor(bufferSize, 2, 2, exceptionState);
}
    336 
// Convenience overload: caller-chosen buffer size and input count, stereo out.
ScriptProcessorNode* AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState)
{
    // Set number of output channels to stereo by default.
    return createScriptProcessor(bufferSize, numberOfInputChannels, 2, exceptionState);
}
    342 
    343 ScriptProcessorNode* AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState)
    344 {
    345     ASSERT(isMainThread());
    346     ScriptProcessorNode* node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
    347 
    348     if (!node) {
    349         if (!numberOfInputChannels && !numberOfOutputChannels) {
    350             exceptionState.throwDOMException(
    351                 IndexSizeError,
    352                 "number of input channels and output channels cannot both be zero.");
    353         } else if (numberOfInputChannels > AudioContext::maxNumberOfChannels()) {
    354             exceptionState.throwDOMException(
    355                 IndexSizeError,
    356                 "number of input channels (" + String::number(numberOfInputChannels)
    357                 + ") exceeds maximum ("
    358                 + String::number(AudioContext::maxNumberOfChannels()) + ").");
    359         } else if (numberOfOutputChannels > AudioContext::maxNumberOfChannels()) {
    360             exceptionState.throwDOMException(
    361                 IndexSizeError,
    362                 "number of output channels (" + String::number(numberOfInputChannels)
    363                 + ") exceeds maximum ("
    364                 + String::number(AudioContext::maxNumberOfChannels()) + ").");
    365         } else {
    366             exceptionState.throwDOMException(
    367                 IndexSizeError,
    368                 "buffer size (" + String::number(bufferSize)
    369                 + ") must be a power of two between 256 and 16384.");
    370         }
    371         return 0;
    372     }
    373 
    374     refNode(node); // context keeps reference until we stop making javascript rendering callbacks
    375     return node;
    376 }
    377 
    378 BiquadFilterNode* AudioContext::createBiquadFilter()
    379 {
    380     ASSERT(isMainThread());
    381     return BiquadFilterNode::create(this, m_destinationNode->sampleRate());
    382 }
    383 
    384 WaveShaperNode* AudioContext::createWaveShaper()
    385 {
    386     ASSERT(isMainThread());
    387     return WaveShaperNode::create(this);
    388 }
    389 
    390 PannerNode* AudioContext::createPanner()
    391 {
    392     ASSERT(isMainThread());
    393     return PannerNode::create(this, m_destinationNode->sampleRate());
    394 }
    395 
    396 ConvolverNode* AudioContext::createConvolver()
    397 {
    398     ASSERT(isMainThread());
    399     return ConvolverNode::create(this, m_destinationNode->sampleRate());
    400 }
    401 
    402 DynamicsCompressorNode* AudioContext::createDynamicsCompressor()
    403 {
    404     ASSERT(isMainThread());
    405     return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate());
    406 }
    407 
    408 AnalyserNode* AudioContext::createAnalyser()
    409 {
    410     ASSERT(isMainThread());
    411     return AnalyserNode::create(this, m_destinationNode->sampleRate());
    412 }
    413 
    414 GainNode* AudioContext::createGain()
    415 {
    416     ASSERT(isMainThread());
    417     return GainNode::create(this, m_destinationNode->sampleRate());
    418 }
    419 
    420 DelayNode* AudioContext::createDelay(ExceptionState& exceptionState)
    421 {
    422     const double defaultMaxDelayTime = 1;
    423     return createDelay(defaultMaxDelayTime, exceptionState);
    424 }
    425 
    426 DelayNode* AudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState)
    427 {
    428     ASSERT(isMainThread());
    429     DelayNode* node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, exceptionState);
    430     if (exceptionState.hadException())
    431         return 0;
    432     return node;
    433 }
    434 
    435 ChannelSplitterNode* AudioContext::createChannelSplitter(ExceptionState& exceptionState)
    436 {
    437     const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
    438     return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptionState);
    439 }
    440 
    441 ChannelSplitterNode* AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState)
    442 {
    443     ASSERT(isMainThread());
    444 
    445     ChannelSplitterNode* node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);
    446 
    447     if (!node) {
    448         exceptionState.throwDOMException(
    449             IndexSizeError,
    450             "number of outputs (" + String::number(numberOfOutputs)
    451             + ") must be between 1 and "
    452             + String::number(AudioContext::maxNumberOfChannels()) + ".");
    453         return 0;
    454     }
    455 
    456     return node;
    457 }
    458 
    459 ChannelMergerNode* AudioContext::createChannelMerger(ExceptionState& exceptionState)
    460 {
    461     const unsigned ChannelMergerDefaultNumberOfInputs = 6;
    462     return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionState);
    463 }
    464 
    465 ChannelMergerNode* AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState)
    466 {
    467     ASSERT(isMainThread());
    468 
    469     ChannelMergerNode* node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);
    470 
    471     if (!node) {
    472         exceptionState.throwDOMException(
    473             IndexSizeError,
    474             "number of inputs (" + String::number(numberOfInputs)
    475             + ") must be between 1 and "
    476             + String::number(AudioContext::maxNumberOfChannels()) + ".");
    477         return 0;
    478     }
    479 
    480     return node;
    481 }
    482 
    483 OscillatorNode* AudioContext::createOscillator()
    484 {
    485     ASSERT(isMainThread());
    486 
    487     OscillatorNode* node = OscillatorNode::create(this, m_destinationNode->sampleRate());
    488 
    489     // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
    490     // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
    491     refNode(node);
    492 
    493     return node;
    494 }
    495 
    496 PeriodicWave* AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState& exceptionState)
    497 {
    498     ASSERT(isMainThread());
    499 
    500     if (!real) {
    501         exceptionState.throwDOMException(
    502             SyntaxError,
    503             "invalid real array");
    504         return 0;
    505     }
    506 
    507     if (!imag) {
    508         exceptionState.throwDOMException(
    509             SyntaxError,
    510             "invalid imaginary array");
    511         return 0;
    512     }
    513 
    514     if (real->length() != imag->length()) {
    515         exceptionState.throwDOMException(
    516             IndexSizeError,
    517             "length of real array (" + String::number(real->length())
    518             + ") and length of imaginary array (" +  String::number(imag->length())
    519             + ") must match.");
    520         return 0;
    521     }
    522 
    523     if (real->length() > 4096) {
    524         exceptionState.throwDOMException(
    525             IndexSizeError,
    526             "length of real array (" + String::number(real->length())
    527             + ") exceeds allowed maximum of 4096");
    528         return 0;
    529     }
    530 
    531     if (imag->length() > 4096) {
    532         exceptionState.throwDOMException(
    533             IndexSizeError,
    534             "length of imaginary array (" + String::number(imag->length())
    535             + ") exceeds allowed maximum of 4096");
    536         return 0;
    537     }
    538 
    539     return PeriodicWave::create(sampleRate(), real, imag);
    540 }
    541 
// Called on the audio thread when a scheduled source finishes playing; the
// queued node is deref'ed later in derefFinishedSourceNodes().
void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_finishedNodes.append(node);
}
    547 
    548 void AudioContext::derefFinishedSourceNodes()
    549 {
    550     ASSERT(isGraphOwner());
    551     ASSERT(isAudioThread());
    552     for (unsigned i = 0; i < m_finishedNodes.size(); i++)
    553         derefNode(m_finishedNodes[i]);
    554 
    555     m_finishedNodes.clear();
    556 }
    557 
// Takes a reference to |node| (under the graph lock) so the context keeps it
// alive; released later via derefNode()/derefUnfinishedSourceNodes().
void AudioContext::refNode(AudioNode* node)
{
    ASSERT(isMainThread());
    AutoLocker locker(this);

    m_referencedNodes.append(node);
    node->makeConnection();
}
    566 
    567 void AudioContext::derefNode(AudioNode* node)
    568 {
    569     ASSERT(isGraphOwner());
    570 
    571     for (unsigned i = 0; i < m_referencedNodes.size(); ++i) {
    572         if (node == m_referencedNodes[i].get()) {
    573             node->breakConnection();
    574             m_referencedNodes.remove(i);
    575             break;
    576         }
    577     }
    578 }
    579 
    580 void AudioContext::derefUnfinishedSourceNodes()
    581 {
    582     ASSERT(isMainThread());
    583     for (unsigned i = 0; i < m_referencedNodes.size(); ++i)
    584         m_referencedNodes[i]->breakConnection();
    585 
    586     m_referencedNodes.clear();
    587 }
    588 
// Acquires the graph mutex on the main thread. Re-entrant by convention:
// if this thread already owns the graph, nothing is locked and
// |mustReleaseLock| is false so only the outermost caller unlocks.
void AudioContext::lock(bool& mustReleaseLock)
{
    // Don't allow regular lock in real-time audio thread.
    ASSERT(isMainThread());

    ThreadIdentifier thisThread = currentThread();

    if (thisThread == m_graphOwnerThread) {
        // We already have the lock.
        mustReleaseLock = false;
    } else {
        // Acquire the lock.
        m_contextGraphMutex.lock();
        m_graphOwnerThread = thisThread;
        mustReleaseLock = true;
    }
}
    606 
// Non-blocking graph lock for the real-time audio thread. Returns true (with
// |mustReleaseLock| set appropriately) if the lock is held on return.
bool AudioContext::tryLock(bool& mustReleaseLock)
{
    ThreadIdentifier thisThread = currentThread();
    bool isAudioThread = thisThread == audioThread();

    // Try to catch cases of using try lock on main thread - it should use regular lock.
    ASSERT(isAudioThread);

    if (!isAudioThread) {
        // In release build treat tryLock() as lock() (since above ASSERT(isAudioThread) never fires) - this is the best we can do.
        lock(mustReleaseLock);
        return true;
    }

    bool hasLock;

    if (thisThread == m_graphOwnerThread) {
        // Thread already has the lock.
        hasLock = true;
        mustReleaseLock = false;
    } else {
        // Don't already have the lock - try to acquire it.
        hasLock = m_contextGraphMutex.tryLock();

        if (hasLock)
            m_graphOwnerThread = thisThread;

        mustReleaseLock = hasLock;
    }

    return hasLock;
}
    639 
// Releases the graph mutex; must be called by the thread that owns it.
// Ownership is cleared before unlocking so a racing locker sees a consistent
// owner field.
void AudioContext::unlock()
{
    ASSERT(currentThread() == m_graphOwnerThread);

    m_graphOwnerThread = UndefinedThreadIdentifier;
    m_contextGraphMutex.unlock();
}
    647 
// True when called from the audio rendering thread.
bool AudioContext::isAudioThread() const
{
    return currentThread() == m_audioThread;
}
    652 
// True when the calling thread currently holds the graph lock.
bool AudioContext::isGraphOwner() const
{
    return currentThread() == m_graphOwnerThread;
}
    657 
// Queues |node| so its connection break runs later under the graph lock (see
// handleDeferredAudioNodeTasks()). Audio thread only.
void AudioContext::addDeferredBreakConnection(AudioNode& node)
{
    ASSERT(isAudioThread());
    m_deferredBreakConnectionList.append(&node);
}
    663 
// Audio-thread hook run before each render quantum: best-effort sync of main
// thread graph changes into the rendering state.
void AudioContext::handlePreRenderTasks()
{
    ASSERT(isAudioThread());

    // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
    // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes.
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Update the channel count mode.
        updateChangedChannelCountMode();

        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        // Only release if tryLock() actually acquired (not re-entered) the lock.
        if (mustReleaseLock)
            unlock();
    }
}
    685 
// Audio-thread hook run after each render quantum: performs deferred node
// cleanup and re-syncs dirty rendering state, again without blocking.
void AudioContext::handlePostRenderTasks()
{
    ASSERT(isAudioThread());

    // Must use a tryLock() here too.  Don't worry, the lock will very rarely be contended and this method is called frequently.
    // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
    // from the render graph (in which case they'll render silence).
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Update the channel count mode.
        updateChangedChannelCountMode();

        // Take care of AudioNode tasks where the tryLock() failed previously.
        handleDeferredAudioNodeTasks();

        // Dynamically clean up nodes which are no longer needed.
        derefFinishedSourceNodes();

        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        // Only release if tryLock() actually acquired (not re-entered) the lock.
        if (mustReleaseLock)
            unlock();
    }
}
    714 
// Runs on the audio thread with the graph lock held: performs the connection
// breaks that were deferred via addDeferredBreakConnection().
void AudioContext::handleDeferredAudioNodeTasks()
{
    ASSERT(isAudioThread() && isGraphOwner());

    for (unsigned i = 0; i < m_deferredBreakConnectionList.size(); ++i)
        m_deferredBreakConnectionList[i]->breakConnectionWithLock();
    m_deferredBreakConnectionList.clear();
}
    723 
// Registers |node| with a disposer that runs when the node is collected
// (see AudioNodeDisposer::~AudioNodeDisposer below).
void AudioContext::registerLiveNode(AudioNode& node)
{
    ASSERT(isMainThread());
    m_liveNodes.add(&node, adoptPtr(new AudioNodeDisposer(node)));
}
    729 
AudioContext::AudioNodeDisposer::~AudioNodeDisposer()
{
    ASSERT(isMainThread());
    // Disposing a node mutates the graph, so take the graph lock first.
    AudioContext::AutoLocker locker(m_node.context());
    m_node.dispose();
}
    736 
// Registers |junction| with a disposer that runs when it is collected
// (see AudioSummingJunctionDisposer::~AudioSummingJunctionDisposer below).
void AudioContext::registerLiveAudioSummingJunction(AudioSummingJunction& junction)
{
    ASSERT(isMainThread());
    m_liveAudioSummingJunctions.add(&junction, adoptPtr(new AudioSummingJunctionDisposer(junction)));
}
    742 
AudioContext::AudioSummingJunctionDisposer::~AudioSummingJunctionDisposer()
{
    ASSERT(isMainThread());
    // NOTE(review): unlike AudioNodeDisposer, no graph lock is taken here —
    // presumably AudioSummingJunction::dispose() locks internally; confirm.
    m_junction.dispose();
}
    748 
    749 void AudioContext::disposeOutputs(AudioNode& node)
    750 {
    751     ASSERT(isGraphOwner());
    752     ASSERT(isMainThread());
    753     for (unsigned i = 0; i < node.numberOfOutputs(); ++i)
    754         node.output(i)->dispose();
    755 }
    756 
// Flags a junction whose rendering state must be refreshed in the next
// handleDirtyAudioSummingJunctions() pass. Graph lock held.
void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
{
    ASSERT(isGraphOwner());
    m_dirtySummingJunctions.add(summingJunction);
}
    762 
// Unflags a junction (e.g. when it is going away) so the audio thread never
// touches a stale pointer; acquires the graph lock itself.
void AudioContext::removeMarkedSummingJunction(AudioSummingJunction* summingJunction)
{
    ASSERT(isMainThread());
    AutoLocker locker(this);
    m_dirtySummingJunctions.remove(summingJunction);
}
    769 
// Flags an output whose rendering state must be refreshed in the next
// handleDirtyAudioNodeOutputs() pass. Main thread, graph lock held.
void AudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output)
{
    ASSERT(isGraphOwner());
    ASSERT(isMainThread());
    m_dirtyAudioNodeOutputs.add(output);
}
    776 
// Unflags an output (e.g. when it is being disposed) so the audio thread
// never touches a stale pointer. Main thread, graph lock held.
void AudioContext::removeMarkedAudioNodeOutput(AudioNodeOutput* output)
{
    ASSERT(isGraphOwner());
    ASSERT(isMainThread());
    m_dirtyAudioNodeOutputs.remove(output);
}
    783 
    784 void AudioContext::handleDirtyAudioSummingJunctions()
    785 {
    786     ASSERT(isGraphOwner());
    787 
    788     for (HashSet<AudioSummingJunction*>::iterator i = m_dirtySummingJunctions.begin(); i != m_dirtySummingJunctions.end(); ++i)
    789         (*i)->updateRenderingState();
    790 
    791     m_dirtySummingJunctions.clear();
    792 }
    793 
    794 void AudioContext::handleDirtyAudioNodeOutputs()
    795 {
    796     ASSERT(isGraphOwner());
    797 
    798     for (HashSet<AudioNodeOutput*>::iterator i = m_dirtyAudioNodeOutputs.begin(); i != m_dirtyAudioNodeOutputs.end(); ++i)
    799         (*i)->updateRenderingState();
    800 
    801     m_dirtyAudioNodeOutputs.clear();
    802 }
    803 
    804 void AudioContext::addAutomaticPullNode(AudioNode* node)
    805 {
    806     ASSERT(isGraphOwner());
    807 
    808     if (!m_automaticPullNodes.contains(node)) {
    809         m_automaticPullNodes.add(node);
    810         m_automaticPullNodesNeedUpdating = true;
    811     }
    812 }
    813 
    814 void AudioContext::removeAutomaticPullNode(AudioNode* node)
    815 {
    816     ASSERT(isGraphOwner());
    817 
    818     if (m_automaticPullNodes.contains(node)) {
    819         m_automaticPullNodes.remove(node);
    820         m_automaticPullNodesNeedUpdating = true;
    821     }
    822 }
    823 
    824 void AudioContext::updateAutomaticPullNodes()
    825 {
    826     ASSERT(isGraphOwner());
    827 
    828     if (m_automaticPullNodesNeedUpdating) {
    829         // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
    830         m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
    831 
    832         unsigned j = 0;
    833         for (HashSet<AudioNode*>::iterator i = m_automaticPullNodes.begin(); i != m_automaticPullNodes.end(); ++i, ++j) {
    834             AudioNode* output = *i;
    835             m_renderingAutomaticPullNodes[j] = output;
    836         }
    837 
    838         m_automaticPullNodesNeedUpdating = false;
    839     }
    840 }
    841 
    842 void AudioContext::processAutomaticPullNodes(size_t framesToProcess)
    843 {
    844     ASSERT(isAudioThread());
    845 
    846     for (unsigned i = 0; i < m_renderingAutomaticPullNodes.size(); ++i)
    847         m_renderingAutomaticPullNodes[i]->processIfNecessary(framesToProcess);
    848 }
    849 
// EventTarget override: identifies this object's interface name.
const AtomicString& AudioContext::interfaceName() const
{
    return EventTargetNames::AudioContext;
}
    854 
    855 ExecutionContext* AudioContext::executionContext() const
    856 {
    857     return m_isStopScheduled ? 0 : ActiveDOMObject::executionContext();
    858 }
    859 
// Starts audio rendering by delegating to the destination node.
void AudioContext::startRendering()
{
    destination()->startRendering();
}
    864 
    865 void AudioContext::fireCompletionEvent()
    866 {
    867     ASSERT(isMainThread());
    868     if (!isMainThread())
    869         return;
    870 
    871     AudioBuffer* renderedBuffer = m_renderTarget.get();
    872 
    873     ASSERT(renderedBuffer);
    874     if (!renderedBuffer)
    875         return;
    876 
    877     // Avoid firing the event if the document has already gone away.
    878     if (executionContext()) {
    879         // Call the offline rendering completion event listener.
    880         dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
    881     }
    882 }
    883 
// Oilpan GC: traces every garbage-collected member, then delegates to the
// base class. Keep in sync with the member list in the header.
void AudioContext::trace(Visitor* visitor)
{
    visitor->trace(m_renderTarget);
    visitor->trace(m_destinationNode);
    visitor->trace(m_listener);
    visitor->trace(m_referencedNodes);
    visitor->trace(m_liveNodes);
    visitor->trace(m_liveAudioSummingJunctions);
    EventTargetWithInlineData::trace(visitor);
}
    894 
// Queues |node| for a deferred updateChannelCountMode() call, performed later
// by updateChangedChannelCountMode().
void AudioContext::addChangedChannelCountMode(AudioNode* node)
{
    ASSERT(isGraphOwner());
    ASSERT(isMainThread());
    m_deferredCountModeChange.add(node);
}
    901 
// Un-queues |node| so a pending channel-count-mode update is not applied
// (e.g. if the node is going away first).
// NOTE(review): no isMainThread() assert here, unlike addChangedChannelCountMode()
// — possibly callable from the audio thread while holding the lock; confirm.
void AudioContext::removeChangedChannelCountMode(AudioNode* node)
{
    ASSERT(isGraphOwner());

    m_deferredCountModeChange.remove(node);
}
    908 
    909 void AudioContext::updateChangedChannelCountMode()
    910 {
    911     ASSERT(isGraphOwner());
    912 
    913     for (HashSet<AudioNode*>::iterator k = m_deferredCountModeChange.begin(); k != m_deferredCountModeChange.end(); ++k)
    914         (*k)->updateChannelCountMode();
    915 
    916     m_deferredCountModeChange.clear();
    917 }
    918 
    919 } // namespace blink
    920 
    921 #endif // ENABLE(WEB_AUDIO)
    922