Home | History | Annotate | Download | only in webaudio
      1 /*
      2  * Copyright (C) 2010, Google Inc. All rights reserved.
      3  *
      4  * Redistribution and use in source and binary forms, with or without
      5  * modification, are permitted provided that the following conditions
      6  * are met:
      7  * 1.  Redistributions of source code must retain the above copyright
      8  *    notice, this list of conditions and the following disclaimer.
      9  * 2.  Redistributions in binary form must reproduce the above copyright
     10  *    notice, this list of conditions and the following disclaimer in the
     11  *    documentation and/or other materials provided with the distribution.
     12  *
     13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
     14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     15  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     16  * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
     17  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     18  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
     19  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
     20  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     21  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
     22  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     23  */
     24 
     25 #include "config.h"
     26 
     27 #if ENABLE(WEB_AUDIO)
     28 
     29 #include "modules/webaudio/AudioContext.h"
     30 
     31 #include "bindings/v8/ExceptionState.h"
     32 #include "core/dom/Document.h"
     33 #include "core/dom/ExceptionCode.h"
     34 #include "core/html/HTMLMediaElement.h"
     35 #include "core/inspector/ScriptCallStack.h"
     36 #include "core/platform/audio/FFTFrame.h"
     37 #include "core/platform/audio/HRTFDatabaseLoader.h"
     38 #include "core/platform/audio/HRTFPanner.h"
     39 #include "modules/mediastream/MediaStream.h"
     40 #include "modules/webaudio/AnalyserNode.h"
     41 #include "modules/webaudio/AsyncAudioDecoder.h"
     42 #include "modules/webaudio/AudioBuffer.h"
     43 #include "modules/webaudio/AudioBufferCallback.h"
     44 #include "modules/webaudio/AudioBufferSourceNode.h"
     45 #include "modules/webaudio/AudioListener.h"
     46 #include "modules/webaudio/AudioNodeInput.h"
     47 #include "modules/webaudio/AudioNodeOutput.h"
     48 #include "modules/webaudio/BiquadFilterNode.h"
     49 #include "modules/webaudio/ChannelMergerNode.h"
     50 #include "modules/webaudio/ChannelSplitterNode.h"
     51 #include "modules/webaudio/ConvolverNode.h"
     52 #include "modules/webaudio/DefaultAudioDestinationNode.h"
     53 #include "modules/webaudio/DelayNode.h"
     54 #include "modules/webaudio/DynamicsCompressorNode.h"
     55 #include "modules/webaudio/GainNode.h"
     56 #include "modules/webaudio/MediaElementAudioSourceNode.h"
     57 #include "modules/webaudio/MediaStreamAudioDestinationNode.h"
     58 #include "modules/webaudio/MediaStreamAudioSourceNode.h"
     59 #include "modules/webaudio/OfflineAudioCompletionEvent.h"
     60 #include "modules/webaudio/OfflineAudioDestinationNode.h"
     61 #include "modules/webaudio/OscillatorNode.h"
     62 #include "modules/webaudio/PannerNode.h"
     63 #include "modules/webaudio/PeriodicWave.h"
     64 #include "modules/webaudio/ScriptProcessorNode.h"
     65 #include "modules/webaudio/WaveShaperNode.h"
     66 
     67 #if DEBUG_AUDIONODE_REFERENCES
     68 #include <stdio.h>
     69 #endif
     70 
     71 #include "wtf/ArrayBuffer.h"
     72 #include "wtf/Atomics.h"
     73 #include "wtf/MainThread.h"
     74 #include "wtf/OwnPtr.h"
     75 #include "wtf/PassOwnPtr.h"
     76 #include "wtf/RefCounted.h"
     77 #include "wtf/text/WTFString.h"
     78 
// FIXME: check the proper way to reference an undefined thread ID
// NOTE(review): 0xffffffff does not fit in a signed int, so this relies on
// the implementation-defined conversion (yielding -1 on common platforms);
// ThreadIdentifier values are assumed never to collide with it — confirm.
const int UndefinedThreadIdentifier = 0xffffffff;

// Upper bound on node tear-down work per pass; presumably consumed by
// deleteMarkedNodes() (not fully visible in this chunk) — verify.
const unsigned MaxNodesToDeletePerQuantum = 10;
     83 
     84 namespace WebCore {
     85 
     86 bool AudioContext::isSampleRateRangeGood(float sampleRate)
     87 {
     88     // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz,
     89     // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there.
     90     return sampleRate >= 44100 && sampleRate <= 96000;
     91 }
     92 
// Don't allow more than this number of simultaneous AudioContexts talking to hardware.
const unsigned MaxHardwareContexts = 4;
// Number of realtime contexts currently rendering to hardware; incremented in
// lazyInitialize(), decremented in uninitialize(), checked in create().
unsigned AudioContext::s_hardwareContextCount = 0;
     96 
     97 PassRefPtr<AudioContext> AudioContext::create(Document* document)
     98 {
     99     ASSERT(document);
    100     ASSERT(isMainThread());
    101     if (s_hardwareContextCount >= MaxHardwareContexts)
    102         return 0;
    103 
    104     RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(document)));
    105     audioContext->suspendIfNeeded();
    106     return audioContext.release();
    107 }
    108 
// Constructor for rendering to the audio hardware.
AudioContext::AudioContext(Document* document)
    : ActiveDOMObject(document)
    , m_isStopScheduled(false)
    , m_isInitialized(false)
    , m_isAudioThreadFinished(false)
    , m_destinationNode(0)
    , m_isDeletionScheduled(false)
    , m_automaticPullNodesNeedUpdating(false)
    , m_connectionCount(0)
    , m_audioThread(0)
    , m_graphOwnerThread(UndefinedThreadIdentifier)
    , m_isOfflineContext(false)
    , m_activeSourceCount(0)
{
    constructCommon();

    // Realtime contexts render to the default (hardware) destination node.
    m_destinationNode = DefaultAudioDestinationNode::create(this);

    // This sets in motion an asynchronous loading mechanism on another thread.
    // We can check m_hrtfDatabaseLoader->isLoaded() to find out whether or not it has been fully loaded.
    // It's not that useful to have a callback function for this since the audio thread automatically starts rendering on the graph
    // when this has finished (see AudioDestinationNode).
    m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate());
}
    134 
    135 // Constructor for offline (non-realtime) rendering.
    136 AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
    137     : ActiveDOMObject(document)
    138     , m_isStopScheduled(false)
    139     , m_isInitialized(false)
    140     , m_isAudioThreadFinished(false)
    141     , m_destinationNode(0)
    142     , m_automaticPullNodesNeedUpdating(false)
    143     , m_connectionCount(0)
    144     , m_audioThread(0)
    145     , m_graphOwnerThread(UndefinedThreadIdentifier)
    146     , m_isOfflineContext(true)
    147     , m_activeSourceCount(0)
    148 {
    149     constructCommon();
    150 
    151     // FIXME: the passed in sampleRate MUST match the hardware sample-rate since HRTFDatabaseLoader is a singleton.
    152     m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate);
    153 
    154     // Create a new destination for offline rendering.
    155     m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
    156     m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());
    157 }
    158 
// Initialization shared by the realtime and offline constructors.
void AudioContext::constructCommon()
{
    ScriptWrappable::init(this);
    // According to spec AudioContext must die only after page navigate.
    // Lets mark it as ActiveDOMObject with pending activity and unmark it in clear method.
    setPendingActivity(this);

    // Prepare the FFT machinery used by the rendering graph.
    FFTFrame::initialize();

    m_listener = AudioListener::create();
}
    170 
AudioContext::~AudioContext()
{
#if DEBUG_AUDIONODE_REFERENCES
    fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this);
#endif
    // AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around.
    ASSERT(!m_isInitialized);
    ASSERT(m_isStopScheduled);
    ASSERT(!m_nodesToDelete.size());
    ASSERT(!m_referencedNodes.size());
    ASSERT(!m_finishedNodes.size());
    ASSERT(!m_automaticPullNodes.size());
    // Flush a still-pending pull-node update so the final assert checks the
    // post-update state. NOTE(review): m_automaticPullNodes is asserted empty
    // above, so this resize shrinks the rendering list to zero — confirm this
    // mirrors what updateAutomaticPullNodes() would have done.
    if (m_automaticPullNodesNeedUpdating)
        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
    ASSERT(!m_renderingAutomaticPullNodes.size());
}
    187 
    188 void AudioContext::lazyInitialize()
    189 {
    190     if (!m_isInitialized) {
    191         // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
    192         ASSERT(!m_isAudioThreadFinished);
    193         if (!m_isAudioThreadFinished) {
    194             if (m_destinationNode.get()) {
    195                 m_destinationNode->initialize();
    196 
    197                 if (!isOfflineContext()) {
    198                     // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
    199                     // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
    200                     // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
    201                     // We may want to consider requiring it for symmetry with OfflineAudioContext.
    202                     m_destinationNode->startRendering();
    203                     ++s_hardwareContextCount;
    204                 }
    205 
    206             }
    207             m_isInitialized = true;
    208         }
    209     }
    210 }
    211 
// Final teardown step (after uninitialize()): drops the destination node,
// drains both deletion lists, and releases the pending-activity reference
// taken in constructCommon(), allowing the context to be destroyed.
void AudioContext::clear()
{
    // We have to release our reference to the destination node before the context will ever be deleted since the destination node holds a reference to the context.
    if (m_destinationNode)
        m_destinationNode.clear();

    // Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
    // Deleting nodes can mark further nodes for deletion, so keep looping
    // until both lists are empty.
    do {
        deleteMarkedNodes();
        m_nodesToDelete.append(m_nodesMarkedForDeletion);
        m_nodesMarkedForDeletion.clear();
    } while (m_nodesToDelete.size());

    // It was set in constructCommon.
    unsetPendingActivity(this);
}
    228 
// Stops rendering and releases resources tied to the audio thread. Safe to
// call when not initialized (no-op). Invoked from stopDispatch() on the main
// thread.
void AudioContext::uninitialize()
{
    ASSERT(isMainThread());

    if (!m_isInitialized)
        return;

    // This stops the audio thread and all audio rendering.
    m_destinationNode->uninitialize();

    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
    m_isAudioThreadFinished = true;

    if (!isOfflineContext()) {
        // Return our slot in the budget enforced by create().
        ASSERT(s_hardwareContextCount);
        --s_hardwareContextCount;
    }

    // Get rid of the sources which may still be playing.
    derefUnfinishedSourceNodes();

    m_isInitialized = false;
}
    252 
// True once lazyInitialize() has run and uninitialize() has not.
bool AudioContext::isInitialized() const
{
    return m_isInitialized;
}

// True when the context is initialized and ready to render, i.e. the
// asynchronously-loaded HRTF database is available.
bool AudioContext::isRunnable() const
{
    if (!isInitialized())
        return false;

    // Check with the HRTF spatialization system to see if it's finished loading.
    return m_hrtfDatabaseLoader->isLoaded();
}
    266 
    267 void AudioContext::stopDispatch(void* userData)
    268 {
    269     AudioContext* context = reinterpret_cast<AudioContext*>(userData);
    270     ASSERT(context);
    271     if (!context)
    272         return;
    273 
    274     context->uninitialize();
    275     context->clear();
    276 }
    277 
// Schedules asynchronous teardown of the context. Idempotent: repeated calls
// after the first are ignored via m_isStopScheduled.
void AudioContext::stop()
{
    // Usually ScriptExecutionContext calls stop twice.
    if (m_isStopScheduled)
        return;
    m_isStopScheduled = true;

    // Don't call uninitialize() immediately here because the ScriptExecutionContext is in the middle
    // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other
    // ActiveDOMObjects so let's schedule uninitialize() to be called later.
    // FIXME: see if there's a more direct way to handle this issue.
    callOnMainThread(stopDispatch, this);
}
    291 
    292 PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& es)
    293 {
    294     RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
    295     if (!audioBuffer.get()) {
    296         es.throwDOMException(SyntaxError);
    297         return 0;
    298     }
    299 
    300     return audioBuffer;
    301 }
    302 
    303 PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionState& es)
    304 {
    305     ASSERT(arrayBuffer);
    306     if (!arrayBuffer) {
    307         es.throwDOMException(SyntaxError);
    308         return 0;
    309     }
    310 
    311     RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate());
    312     if (!audioBuffer.get()) {
    313         es.throwDOMException(SyntaxError);
    314         return 0;
    315     }
    316 
    317     return audioBuffer;
    318 }
    319 
// Kicks off asynchronous decoding of |audioData| at the context's sample-rate;
// results are reported through the success/error callbacks. Throws
// SyntaxError when |audioData| is null.
void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback, ExceptionState& es)
{
    if (!audioData) {
        es.throwDOMException(SyntaxError);
        return;
    }
    m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback);
}
    328 
    329 PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
    330 {
    331     ASSERT(isMainThread());
    332     lazyInitialize();
    333     RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());
    334 
    335     // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
    336     // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
    337     refNode(node.get());
    338 
    339     return node;
    340 }
    341 
    342 PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& es)
    343 {
    344     ASSERT(mediaElement);
    345     if (!mediaElement) {
    346         es.throwDOMException(InvalidStateError);
    347         return 0;
    348     }
    349 
    350     ASSERT(isMainThread());
    351     lazyInitialize();
    352 
    353     // First check if this media element already has a source node.
    354     if (mediaElement->audioSourceNode()) {
    355         es.throwDOMException(InvalidStateError);
    356         return 0;
    357     }
    358 
    359     RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement);
    360 
    361     mediaElement->setAudioSourceNode(node.get());
    362 
    363     refNode(node.get()); // context keeps reference until node is disconnected
    364     return node;
    365 }
    366 
    367 PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& es)
    368 {
    369     ASSERT(mediaStream);
    370     if (!mediaStream) {
    371         es.throwDOMException(InvalidStateError);
    372         return 0;
    373     }
    374 
    375     ASSERT(isMainThread());
    376     lazyInitialize();
    377 
    378     AudioSourceProvider* provider = 0;
    379 
    380     MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks();
    381 
    382     // FIXME: get a provider for non-local MediaStreams (like from a remote peer).
    383     for (size_t i = 0; i < audioTracks.size(); ++i) {
    384         RefPtr<MediaStreamTrack> localAudio = audioTracks[i];
    385         MediaStreamSource* source = localAudio->component()->source();
    386         if (!source->deviceId().isEmpty()) {
    387             destination()->enableInput(source->deviceId());
    388             provider = destination()->localAudioInputProvider();
    389             break;
    390         }
    391     }
    392 
    393     RefPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, provider);
    394 
    395     // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
    396     node->setFormat(2, sampleRate());
    397 
    398     refNode(node.get()); // context keeps reference until node is disconnected
    399     return node;
    400 }
    401 
// Creates a destination node that exposes rendered audio as a MediaStream.
// Currently always mono (second argument is the channel count).
PassRefPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
{
    // FIXME: Add support for an optional argument which specifies the number of channels.
    // FIXME: The default should probably be stereo instead of mono.
    return MediaStreamAudioDestinationNode::create(this, 1);
}
    408 
// Overload: defaults both input and output channel counts to stereo.
PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& es)
{
    // Set number of input/output channels to stereo by default.
    return createScriptProcessor(bufferSize, 2, 2, es);
}

// Overload: defaults the output channel count to stereo.
PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& es)
{
    // Set number of output channels to stereo by default.
    return createScriptProcessor(bufferSize, numberOfInputChannels, 2, es);
}
    420 
    421 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& es)
    422 {
    423     ASSERT(isMainThread());
    424     lazyInitialize();
    425     RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
    426 
    427     if (!node.get()) {
    428         es.throwDOMException(SyntaxError);
    429         return 0;
    430     }
    431 
    432     refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks
    433     return node;
    434 }
    435 
// --- Simple node factories -------------------------------------------------
// Each factory runs on the main thread, lazily initializes the context, and
// creates its node at the destination's sample-rate. Unlike the scheduled
// sources above, none of these call refNode().

PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return BiquadFilterNode::create(this, m_destinationNode->sampleRate());
}

PassRefPtr<WaveShaperNode> AudioContext::createWaveShaper()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return WaveShaperNode::create(this);
}

PassRefPtr<PannerNode> AudioContext::createPanner()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return PannerNode::create(this, m_destinationNode->sampleRate());
}

PassRefPtr<ConvolverNode> AudioContext::createConvolver()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return ConvolverNode::create(this, m_destinationNode->sampleRate());
}

PassRefPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate());
}

PassRefPtr<AnalyserNode> AudioContext::createAnalyser()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return AnalyserNode::create(this, m_destinationNode->sampleRate());
}

PassRefPtr<GainNode> AudioContext::createGain()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return GainNode::create(this, m_destinationNode->sampleRate());
}
    484 
// Overload: creates a DelayNode with the default 1-second maximum delay time.
PassRefPtr<DelayNode> AudioContext::createDelay(ExceptionState& es)
{
    const double defaultMaxDelayTime = 1;
    return createDelay(defaultMaxDelayTime, es);
}

// Creates a DelayNode; DelayNode::create() may raise through |es| (e.g. for an
// out-of-range maxDelayTime), in which case 0 is returned.
PassRefPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionState& es)
{
    ASSERT(isMainThread());
    lazyInitialize();
    RefPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, es);
    if (es.hadException())
        return 0;
    return node;
}
    500 
// Overload: creates a splitter with the default of 6 outputs.
PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionState& es)
{
    const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
    return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, es);
}

// Creates a ChannelSplitterNode; a null result from create() (unsupported
// number of outputs) is surfaced as SyntaxError.
PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& es)
{
    ASSERT(isMainThread());
    lazyInitialize();

    RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);

    if (!node.get()) {
        es.throwDOMException(SyntaxError);
        return 0;
    }

    return node;
}
    521 
// Overload: creates a merger with the default of 6 inputs.
PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionState& es)
{
    const unsigned ChannelMergerDefaultNumberOfInputs = 6;
    return createChannelMerger(ChannelMergerDefaultNumberOfInputs, es);
}

// Creates a ChannelMergerNode; a null result from create() (unsupported
// number of inputs) is surfaced as SyntaxError.
PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& es)
{
    ASSERT(isMainThread());
    lazyInitialize();

    RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);

    if (!node.get()) {
        es.throwDOMException(SyntaxError);
        return 0;
    }

    return node;
}
    542 
    543 PassRefPtr<OscillatorNode> AudioContext::createOscillator()
    544 {
    545     ASSERT(isMainThread());
    546     lazyInitialize();
    547 
    548     RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate());
    549 
    550     // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
    551     // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
    552     refNode(node.get());
    553 
    554     return node;
    555 }
    556 
// Builds a PeriodicWave from real/imaginary coefficient arrays; throws
// SyntaxError when either array is missing or their lengths differ.
PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState& es)
{
    ASSERT(isMainThread());

    if (!real || !imag || (real->length() != imag->length())) {
        es.throwDOMException(SyntaxError);
        return 0;
    }

    lazyInitialize();
    return PeriodicWave::create(sampleRate(), real, imag);
}
    569 
// Audio-thread callback: records that |node| finished playing so that
// derefFinishedSourceNodes() can drop its connection reference later.
void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_finishedNodes.append(node);
}
    575 
    576 void AudioContext::derefFinishedSourceNodes()
    577 {
    578     ASSERT(isGraphOwner());
    579     ASSERT(isAudioThread() || isAudioThreadFinished());
    580     for (unsigned i = 0; i < m_finishedNodes.size(); i++)
    581         derefNode(m_finishedNodes[i]);
    582 
    583     m_finishedNodes.clear();
    584 }
    585 
// Takes a connection reference on |node| and tracks it so the context keeps
// the node alive until derefNode() is called. Main thread only; acquires the
// graph lock.
void AudioContext::refNode(AudioNode* node)
{
    ASSERT(isMainThread());
    AutoLocker locker(this);

    node->ref(AudioNode::RefTypeConnection);
    m_referencedNodes.append(node);
}
    594 
    595 void AudioContext::derefNode(AudioNode* node)
    596 {
    597     ASSERT(isGraphOwner());
    598 
    599     node->deref(AudioNode::RefTypeConnection);
    600 
    601     for (unsigned i = 0; i < m_referencedNodes.size(); ++i) {
    602         if (node == m_referencedNodes[i]) {
    603             m_referencedNodes.remove(i);
    604             break;
    605         }
    606     }
    607 }
    608 
// Called from uninitialize() after the audio thread has stopped: drops the
// connection reference of every still-tracked node, finished or not.
void AudioContext::derefUnfinishedSourceNodes()
{
    ASSERT(isMainThread() && isAudioThreadFinished());
    for (unsigned i = 0; i < m_referencedNodes.size(); ++i)
        m_referencedNodes[i]->deref(AudioNode::RefTypeConnection);

    m_referencedNodes.clear();
}
    617 
// Blocking acquisition of the graph lock (main thread only; the audio thread
// must use tryLock()). The lock is recursive-by-bookkeeping: if this thread
// already owns it, |mustReleaseLock| comes back false so the outermost caller
// performs the single unlock().
void AudioContext::lock(bool& mustReleaseLock)
{
    // Don't allow regular lock in real-time audio thread.
    ASSERT(isMainThread());

    ThreadIdentifier thisThread = currentThread();

    if (thisThread == m_graphOwnerThread) {
        // We already have the lock.
        mustReleaseLock = false;
    } else {
        // Acquire the lock.
        m_contextGraphMutex.lock();
        m_graphOwnerThread = thisThread;
        mustReleaseLock = true;
    }
}
    635 
// Non-blocking acquisition of the graph lock for the real-time audio thread.
// Returns true when the lock is held on return; |mustReleaseLock| tells the
// caller whether it is responsible for the matching unlock().
bool AudioContext::tryLock(bool& mustReleaseLock)
{
    ThreadIdentifier thisThread = currentThread();
    bool isAudioThread = thisThread == audioThread();

    // Try to catch cases of using try lock on main thread - it should use regular lock.
    ASSERT(isAudioThread || isAudioThreadFinished());

    if (!isAudioThread) {
        // In release build treat tryLock() as lock() (since above ASSERT(isAudioThread) never fires) - this is the best we can do.
        lock(mustReleaseLock);
        return true;
    }

    bool hasLock;

    if (thisThread == m_graphOwnerThread) {
        // Thread already has the lock.
        hasLock = true;
        mustReleaseLock = false;
    } else {
        // Don't already have the lock - try to acquire it.
        hasLock = m_contextGraphMutex.tryLock();

        if (hasLock)
            m_graphOwnerThread = thisThread;

        mustReleaseLock = hasLock;
    }

    return hasLock;
}
    668 
// Releases the graph lock; the caller must be the current owner.
void AudioContext::unlock()
{
    ASSERT(currentThread() == m_graphOwnerThread);

    m_graphOwnerThread = UndefinedThreadIdentifier;
    m_contextGraphMutex.unlock();
}

// True when called from the rendering (audio) thread.
bool AudioContext::isAudioThread() const
{
    return currentThread() == m_audioThread;
}

// True when the calling thread currently holds the graph lock.
bool AudioContext::isGraphOwner() const
{
    return currentThread() == m_graphOwnerThread;
}

// Queues a node whose finishDeref() must run later on the audio thread under
// the graph lock (see handleDeferredFinishDerefs()).
void AudioContext::addDeferredFinishDeref(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_deferredFinishDerefList.append(node);
}
    692 
// Runs on the audio thread at the start of each render quantum: opportunistically
// synchronizes graph state with main-thread changes.
void AudioContext::handlePreRenderTasks()
{
    ASSERT(isAudioThread());

    // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
    // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes.
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}
    711 
// Runs on the audio thread at the end of each render quantum: performs the
// deferred cleanup accumulated during rendering (derefs, deletion scheduling,
// dirty-state fixups). Ordering matters: deferred derefs first, then finished
// sources, then deletion scheduling.
void AudioContext::handlePostRenderTasks()
{
    ASSERT(isAudioThread());

    // Must use a tryLock() here too.  Don't worry, the lock will very rarely be contended and this method is called frequently.
    // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
    // from the render graph (in which case they'll render silence).
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Take care of finishing any derefs where the tryLock() failed previously.
        handleDeferredFinishDerefs();

        // Dynamically clean up nodes which are no longer needed.
        derefFinishedSourceNodes();

        // Don't delete in the real-time thread. Let the main thread do it.
        // Ref-counted objects held by certain AudioNodes may not be thread-safe.
        scheduleNodeDeletion();

        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}
    741 
    742 void AudioContext::handleDeferredFinishDerefs()
    743 {
    744     ASSERT(isAudioThread() && isGraphOwner());
    745     for (unsigned i = 0; i < m_deferredFinishDerefList.size(); ++i) {
    746         AudioNode* node = m_deferredFinishDerefList[i];
    747         node->finishDeref(AudioNode::RefTypeConnection);
    748     }
    749 
    750     m_deferredFinishDerefList.clear();
    751 }
    752 
// Queues |node| for deletion: directly onto m_nodesToDelete once the audio
// thread is gone, otherwise onto m_nodesMarkedForDeletion for
// scheduleNodeDeletion() to hand over. Caller must own the graph lock.
void AudioContext::markForDeletion(AudioNode* node)
{
    ASSERT(isGraphOwner());

    if (isAudioThreadFinished())
        m_nodesToDelete.append(node);
    else
        m_nodesMarkedForDeletion.append(node);

    // This is probably the best time for us to remove the node from automatic pull list,
    // since all connections are gone and we hold the graph lock. Then when handlePostRenderTasks()
    // gets a chance to schedule the deletion work, updateAutomaticPullNodes() also gets a chance to
    // modify m_renderingAutomaticPullNodes.
    removeAutomaticPullNode(node);
}
    768 
// Audio-thread side of node deletion: moves marked nodes onto m_nodesToDelete
// and schedules deleteMarkedNodes() on the main thread, at most one dispatch
// in flight (guarded by m_isDeletionScheduled).
void AudioContext::scheduleNodeDeletion()
{
    bool isGood = m_isInitialized && isGraphOwner();
    ASSERT(isGood);
    if (!isGood)
        return;

    // Make sure to call deleteMarkedNodes() on main thread.
    if (m_nodesMarkedForDeletion.size() && !m_isDeletionScheduled) {
        m_nodesToDelete.append(m_nodesMarkedForDeletion);
        m_nodesMarkedForDeletion.clear();

        m_isDeletionScheduled = true;

        // Don't let ourself get deleted before the callback.
        // See matching deref() in deleteMarkedNodesDispatch().
        ref();
        callOnMainThread(deleteMarkedNodesDispatch, this);
    }
}
    789 
    790 void AudioContext::deleteMarkedNodesDispatch(void* userData)
    791 {
    792     AudioContext* context = reinterpret_cast<AudioContext*>(userData);
    793     ASSERT(context);
    794     if (!context)
    795         return;
    796 
    797     context->deleteMarkedNodes();
    798     context->deref();
    799 }
    800 
    801 void AudioContext::deleteMarkedNodes()
    802 {
    803     ASSERT(isMainThread());
    804 
    805     // Protect this object from being deleted before we release the mutex locked by AutoLocker.
    806     RefPtr<AudioContext> protect(this);
    807     {
    808         AutoLocker locker(this);
    809 
    810         while (size_t n = m_nodesToDelete.size()) {
    811             AudioNode* node = m_nodesToDelete[n - 1];
    812             m_nodesToDelete.removeLast();
    813 
    814             // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
    815             unsigned numberOfInputs = node->numberOfInputs();
    816             for (unsigned i = 0; i < numberOfInputs; ++i)
    817                 m_dirtySummingJunctions.remove(node->input(i));
    818 
    819             // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
    820             unsigned numberOfOutputs = node->numberOfOutputs();
    821             for (unsigned i = 0; i < numberOfOutputs; ++i)
    822                 m_dirtyAudioNodeOutputs.remove(node->output(i));
    823 
    824             // Finally, delete it.
    825             delete node;
    826         }
    827         m_isDeletionScheduled = false;
    828     }
    829 }
    830 
// Records |summingJunction| so its rendering state is fixed up later by
// handleDirtyAudioSummingJunctions(). Caller must hold the graph lock.
void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
{
    ASSERT(isGraphOwner());
    m_dirtySummingJunctions.add(summingJunction);
}
    836 
// Withdraws |summingJunction| from the dirty set (e.g. when it is going
// away) so the audio thread never touches a stale pointer. Main thread
// only; takes the graph lock itself via AutoLocker.
void AudioContext::removeMarkedSummingJunction(AudioSummingJunction* summingJunction)
{
    ASSERT(isMainThread());
    AutoLocker locker(this);
    m_dirtySummingJunctions.remove(summingJunction);
}
    843 
// Records |output| so its rendering state is fixed up later by
// handleDirtyAudioNodeOutputs(). Caller must hold the graph lock.
void AudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output)
{
    ASSERT(isGraphOwner());
    m_dirtyAudioNodeOutputs.add(output);
}
    849 
    850 void AudioContext::handleDirtyAudioSummingJunctions()
    851 {
    852     ASSERT(isGraphOwner());
    853 
    854     for (HashSet<AudioSummingJunction*>::iterator i = m_dirtySummingJunctions.begin(); i != m_dirtySummingJunctions.end(); ++i)
    855         (*i)->updateRenderingState();
    856 
    857     m_dirtySummingJunctions.clear();
    858 }
    859 
    860 void AudioContext::handleDirtyAudioNodeOutputs()
    861 {
    862     ASSERT(isGraphOwner());
    863 
    864     for (HashSet<AudioNodeOutput*>::iterator i = m_dirtyAudioNodeOutputs.begin(); i != m_dirtyAudioNodeOutputs.end(); ++i)
    865         (*i)->updateRenderingState();
    866 
    867     m_dirtyAudioNodeOutputs.clear();
    868 }
    869 
    870 void AudioContext::addAutomaticPullNode(AudioNode* node)
    871 {
    872     ASSERT(isGraphOwner());
    873 
    874     if (!m_automaticPullNodes.contains(node)) {
    875         m_automaticPullNodes.add(node);
    876         m_automaticPullNodesNeedUpdating = true;
    877     }
    878 }
    879 
    880 void AudioContext::removeAutomaticPullNode(AudioNode* node)
    881 {
    882     ASSERT(isGraphOwner());
    883 
    884     if (m_automaticPullNodes.contains(node)) {
    885         m_automaticPullNodes.remove(node);
    886         m_automaticPullNodesNeedUpdating = true;
    887     }
    888 }
    889 
    890 void AudioContext::updateAutomaticPullNodes()
    891 {
    892     ASSERT(isGraphOwner());
    893 
    894     if (m_automaticPullNodesNeedUpdating) {
    895         // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
    896         m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
    897 
    898         unsigned j = 0;
    899         for (HashSet<AudioNode*>::iterator i = m_automaticPullNodes.begin(); i != m_automaticPullNodes.end(); ++i, ++j) {
    900             AudioNode* output = *i;
    901             m_renderingAutomaticPullNodes[j] = output;
    902         }
    903 
    904         m_automaticPullNodesNeedUpdating = false;
    905     }
    906 }
    907 
    908 void AudioContext::processAutomaticPullNodes(size_t framesToProcess)
    909 {
    910     ASSERT(isAudioThread());
    911 
    912     for (unsigned i = 0; i < m_renderingAutomaticPullNodes.size(); ++i)
    913         m_renderingAutomaticPullNodes[i]->processIfNecessary(framesToProcess);
    914 }
    915 
// EventTarget: returns the interface name used for event dispatch.
const AtomicString& AudioContext::interfaceName() const
{
    return eventNames().interfaceForAudioContext;
}
    920 
    921 ScriptExecutionContext* AudioContext::scriptExecutionContext() const
    922 {
    923     return m_isStopScheduled ? 0 : ActiveDOMObject::scriptExecutionContext();
    924 }
    925 
// Kicks off rendering by delegating to the destination node.
void AudioContext::startRendering()
{
    destination()->startRendering();
}
    930 
    931 void AudioContext::fireCompletionEvent()
    932 {
    933     ASSERT(isMainThread());
    934     if (!isMainThread())
    935         return;
    936 
    937     AudioBuffer* renderedBuffer = m_renderTarget.get();
    938 
    939     ASSERT(renderedBuffer);
    940     if (!renderedBuffer)
    941         return;
    942 
    943     // Avoid firing the event if the document has already gone away.
    944     if (scriptExecutionContext()) {
    945         // Call the offline rendering completion event listener.
    946         dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
    947     }
    948 }
    949 
// Atomically bumps the count of active source nodes (callable from any
// thread).
void AudioContext::incrementActiveSourceCount()
{
    atomicIncrement(&m_activeSourceCount);
}
    954 
// Atomically drops the count of active source nodes (callable from any
// thread).
void AudioContext::decrementActiveSourceCount()
{
    atomicDecrement(&m_activeSourceCount);
}
    959 
    960 } // namespace WebCore
    961 
    962 #endif // ENABLE(WEB_AUDIO)
    963