Home | History | Annotate | Download | only in webaudio
      1 /*
      2  * Copyright (C) 2010, Google Inc. All rights reserved.
      3  *
      4  * Redistribution and use in source and binary forms, with or without
      5  * modification, are permitted provided that the following conditions
      6  * are met:
      7  * 1.  Redistributions of source code must retain the above copyright
      8  *    notice, this list of conditions and the following disclaimer.
      9  * 2.  Redistributions in binary form must reproduce the above copyright
     10  *    notice, this list of conditions and the following disclaimer in the
     11  *    documentation and/or other materials provided with the distribution.
     12  *
     13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
     14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     15  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     16  * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
     17  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     18  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
     19  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
     20  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     21  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
     22  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     23  */
     24 
     25 #include "config.h"
     26 
     27 #if ENABLE(WEB_AUDIO)
     28 
     29 #include "modules/webaudio/AudioContext.h"
     30 
     31 #include "bindings/v8/ExceptionMessages.h"
     32 #include "bindings/v8/ExceptionState.h"
     33 #include "core/dom/Document.h"
     34 #include "core/dom/ExceptionCode.h"
     35 #include "core/html/HTMLMediaElement.h"
     36 #include "core/inspector/ScriptCallStack.h"
     37 #include "platform/audio/FFTFrame.h"
     38 #include "platform/audio/HRTFPanner.h"
     39 #include "modules/mediastream/MediaStream.h"
     40 #include "modules/webaudio/AnalyserNode.h"
     41 #include "modules/webaudio/AudioBuffer.h"
     42 #include "modules/webaudio/AudioBufferCallback.h"
     43 #include "modules/webaudio/AudioBufferSourceNode.h"
     44 #include "modules/webaudio/AudioListener.h"
     45 #include "modules/webaudio/AudioNodeInput.h"
     46 #include "modules/webaudio/AudioNodeOutput.h"
     47 #include "modules/webaudio/BiquadFilterNode.h"
     48 #include "modules/webaudio/ChannelMergerNode.h"
     49 #include "modules/webaudio/ChannelSplitterNode.h"
     50 #include "modules/webaudio/ConvolverNode.h"
     51 #include "modules/webaudio/DefaultAudioDestinationNode.h"
     52 #include "modules/webaudio/DelayNode.h"
     53 #include "modules/webaudio/DynamicsCompressorNode.h"
     54 #include "modules/webaudio/GainNode.h"
     55 #include "modules/webaudio/MediaElementAudioSourceNode.h"
     56 #include "modules/webaudio/MediaStreamAudioDestinationNode.h"
     57 #include "modules/webaudio/MediaStreamAudioSourceNode.h"
     58 #include "modules/webaudio/OfflineAudioCompletionEvent.h"
     59 #include "modules/webaudio/OfflineAudioContext.h"
     60 #include "modules/webaudio/OfflineAudioDestinationNode.h"
     61 #include "modules/webaudio/OscillatorNode.h"
     62 #include "modules/webaudio/PannerNode.h"
     63 #include "modules/webaudio/PeriodicWave.h"
     64 #include "modules/webaudio/ScriptProcessorNode.h"
     65 #include "modules/webaudio/WaveShaperNode.h"
     66 
     67 #if DEBUG_AUDIONODE_REFERENCES
     68 #include <stdio.h>
     69 #endif
     70 
     71 #include "wtf/ArrayBuffer.h"
     72 #include "wtf/Atomics.h"
     73 #include "wtf/PassOwnPtr.h"
     74 #include "wtf/text/WTFString.h"
     75 
// FIXME: check the proper way to reference an undefined thread ID
// Sentinel meaning "no thread currently owns the context graph lock"
// (see AudioContext::lock()/tryLock()/unlock() below).
const int UndefinedThreadIdentifier = 0xffffffff;
     78 
     79 namespace WebCore {
     80 
     81 bool AudioContext::isSampleRateRangeGood(float sampleRate)
     82 {
     83     // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz,
     84     // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there.
     85     return sampleRate >= 44100 && sampleRate <= 96000;
     86 }
     87 
// Don't allow more than this number of simultaneous AudioContexts talking to hardware.
const unsigned MaxHardwareContexts = 4;
// Number of realtime contexts currently holding a hardware slot; only read and
// written on the main thread (see create(), lazyInitialize(), uninitialize()).
unsigned AudioContext::s_hardwareContextCount = 0;
     91 
// Creates a realtime AudioContext rendering to the audio hardware.
// Throws a DOMException and returns 0 once MaxHardwareContexts live realtime
// contexts already exist.
PassRefPtr<AudioContext> AudioContext::create(Document& document, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    if (s_hardwareContextCount >= MaxHardwareContexts) {
        exceptionState.throwDOMException(
            SyntaxError,
            "number of hardware contexts reached maximum (" + String::number(MaxHardwareContexts) + ").");
        return 0;
    }

    RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(&document)));
    // ActiveDOMObject bookkeeping — presumably lets the context begin in the
    // suspended state if the page is already suspended; TODO confirm.
    audioContext->suspendIfNeeded();
    return audioContext.release();
}
    106 
// Deprecated entry point for offline rendering: warns on the console and
// forwards to OfflineAudioContext::create().
PassRefPtr<AudioContext> AudioContext::create(Document& document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
{
    document.addConsoleMessage(JSMessageSource, WarningMessageLevel, "Deprecated AudioContext constructor: use OfflineAudioContext instead");
    return OfflineAudioContext::create(&document, numberOfChannels, numberOfFrames, sampleRate, exceptionState);
}
    112 
// Constructor for rendering to the audio hardware.
AudioContext::AudioContext(Document* document)
    : ActiveDOMObject(document)
    , m_isStopScheduled(false)
    , m_isInitialized(false)
    , m_isAudioThreadFinished(false)
    , m_destinationNode(0)
    , m_isDeletionScheduled(false)
    , m_automaticPullNodesNeedUpdating(false)
    , m_connectionCount(0)
    , m_audioThread(0)
    , m_graphOwnerThread(UndefinedThreadIdentifier)
    , m_isOfflineContext(false)
    , m_activeSourceCount(0)
{
    constructCommon();

    // Realtime contexts render to the default (hardware) destination.
    m_destinationNode = DefaultAudioDestinationNode::create(this);

    // This sets in motion an asynchronous loading mechanism on another thread.
    // We can check m_hrtfDatabaseLoader->isLoaded() to find out whether or not it has been fully loaded.
    // It's not that useful to have a callback function for this since the audio thread automatically starts rendering on the graph
    // when this has finished (see AudioDestinationNode).
    m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate());
}
    138 
    139 // Constructor for offline (non-realtime) rendering.
    140 AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
    141     : ActiveDOMObject(document)
    142     , m_isStopScheduled(false)
    143     , m_isInitialized(false)
    144     , m_isAudioThreadFinished(false)
    145     , m_destinationNode(0)
    146     , m_automaticPullNodesNeedUpdating(false)
    147     , m_connectionCount(0)
    148     , m_audioThread(0)
    149     , m_graphOwnerThread(UndefinedThreadIdentifier)
    150     , m_isOfflineContext(true)
    151     , m_activeSourceCount(0)
    152 {
    153     constructCommon();
    154 
    155     m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate);
    156 
    157     // Create a new destination for offline rendering.
    158     m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
    159     ASSERT(m_renderTarget);
    160     m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());
    161     ASSERT(m_destinationNode);
    162 }
    163 
// Shared construction for both realtime and offline contexts: pins the object
// alive as an ActiveDOMObject, primes FFT tables, and creates the listener.
void AudioContext::constructCommon()
{
    ScriptWrappable::init(this);
    // According to spec AudioContext must die only after page navigate.
    // Lets mark it as ActiveDOMObject with pending activity and unmark it in clear method.
    setPendingActivity(this);

    // One-time global FFT initialization; must happen before any node renders.
    FFTFrame::initialize();

    m_listener = AudioListener::create();
}
    175 
// By the time the destructor runs, uninitialize() and clear() must already
// have executed (via stop()), so every node collection is expected empty.
AudioContext::~AudioContext()
{
#if DEBUG_AUDIONODE_REFERENCES
    fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this);
#endif
    // AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around.
    ASSERT(!m_isInitialized);
    ASSERT(m_isStopScheduled);
    ASSERT(!m_nodesToDelete.size());
    ASSERT(!m_referencedNodes.size());
    ASSERT(!m_finishedNodes.size());
    ASSERT(!m_automaticPullNodes.size());
    // Flush a pending (never-applied) pull-node update so the final ASSERT
    // below checks the up-to-date state rather than a stale vector.
    if (m_automaticPullNodesNeedUpdating)
        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
    ASSERT(!m_renderingAutomaticPullNodes.size());
}
    192 
    193 void AudioContext::lazyInitialize()
    194 {
    195     if (!m_isInitialized) {
    196         // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
    197         ASSERT(!m_isAudioThreadFinished);
    198         if (!m_isAudioThreadFinished) {
    199             if (m_destinationNode.get()) {
    200                 m_destinationNode->initialize();
    201 
    202                 if (!isOfflineContext()) {
    203                     // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
    204                     // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
    205                     // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
    206                     // We may want to consider requiring it for symmetry with OfflineAudioContext.
    207                     m_destinationNode->startRendering();
    208                     ++s_hardwareContextCount;
    209                 }
    210 
    211             }
    212             m_isInitialized = true;
    213         }
    214     }
    215 }
    216 
// Final teardown, run after uninitialize(): drops the destination node,
// drains all pending node deletions, and releases the pending-activity pin.
void AudioContext::clear()
{
    // We have to release our reference to the destination node before the context will ever be deleted since the destination node holds a reference to the context.
    if (m_destinationNode)
        m_destinationNode.clear();

    // Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
    // Deleting nodes can mark further nodes for deletion, hence the loop until
    // both collections drain.
    do {
        deleteMarkedNodes();
        m_nodesToDelete.append(m_nodesMarkedForDeletion);
        m_nodesMarkedForDeletion.clear();
    } while (m_nodesToDelete.size());

    // It was set in constructCommon.
    unsetPendingActivity(this);
}
    233 
// Stops rendering and permanently shuts the context down; the context can
// never be re-initialized afterwards (m_isAudioThreadFinished latches).
void AudioContext::uninitialize()
{
    ASSERT(isMainThread());

    if (!m_isInitialized)
        return;

    // This stops the audio thread and all audio rendering.
    m_destinationNode->uninitialize();

    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
    m_isAudioThreadFinished = true;

    // Give the hardware-context slot back (taken in lazyInitialize()).
    if (!isOfflineContext()) {
        ASSERT(s_hardwareContextCount);
        --s_hardwareContextCount;
    }

    // Get rid of the sources which may still be playing.
    derefUnfinishedSourceNodes();

    m_isInitialized = false;
}
    257 
// True between lazyInitialize() and uninitialize().
bool AudioContext::isInitialized() const
{
    return m_isInitialized;
}
    262 
    263 bool AudioContext::isRunnable() const
    264 {
    265     if (!isInitialized())
    266         return false;
    267 
    268     // Check with the HRTF spatialization system to see if it's finished loading.
    269     return m_hrtfDatabaseLoader->isLoaded();
    270 }
    271 
    272 void AudioContext::stopDispatch(void* userData)
    273 {
    274     AudioContext* context = reinterpret_cast<AudioContext*>(userData);
    275     ASSERT(context);
    276     if (!context)
    277         return;
    278 
    279     context->uninitialize();
    280     context->clear();
    281 }
    282 
// ActiveDOMObject override: schedules (idempotently) the asynchronous
// teardown of the context on the main thread.
void AudioContext::stop()
{
    // Usually ExecutionContext calls stop twice.
    if (m_isStopScheduled)
        return;
    m_isStopScheduled = true;

    // Don't call uninitialize() immediately here because the ExecutionContext is in the middle
    // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other
    // ActiveDOMObjects so let's schedule uninitialize() to be called later.
    // FIXME: see if there's a more direct way to handle this issue.
    callOnMainThread(stopDispatch, this);
}
    296 
// Creates an empty AudioBuffer. On failure, diagnoses which argument was out
// of range, throws NotSupportedError with a specific message, and returns 0.
PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
{
    RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
    if (!audioBuffer.get()) {
        // AudioBuffer::create() only signals failure by returning 0, so the
        // checks below re-derive the reason purely for the error message.
        if (numberOfChannels > AudioContext::maxNumberOfChannels()) {
            exceptionState.throwDOMException(
                NotSupportedError,
                "requested number of channels (" + String::number(numberOfChannels) + ") exceeds maximum (" + String::number(AudioContext::maxNumberOfChannels()) + ")");
        } else if (sampleRate < AudioBuffer::minAllowedSampleRate() || sampleRate > AudioBuffer::maxAllowedSampleRate()) {
            exceptionState.throwDOMException(
                NotSupportedError,
                "requested sample rate (" + String::number(sampleRate)
                + ") does not lie in the allowed range of "
                + String::number(AudioBuffer::minAllowedSampleRate())
                + "-" + String::number(AudioBuffer::maxAllowedSampleRate()) + " Hz");
        } else if (!numberOfFrames) {
            exceptionState.throwDOMException(
                NotSupportedError,
                "number of frames must be greater than 0.");
        } else {
            exceptionState.throwDOMException(
                NotSupportedError,
                "unable to create buffer of " + String::number(numberOfChannels)
                + " channel(s) of " + String::number(numberOfFrames)
                + " frames each.");
        }
        return 0;
    }

    return audioBuffer;
}
    328 
// Synchronously decodes encoded audio file data in |arrayBuffer| into an
// AudioBuffer at the context's sample-rate, optionally mixed down to mono.
// Throws SyntaxError and returns 0 on a null buffer or undecodable data.
PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionState& exceptionState)
{
    ASSERT(arrayBuffer);
    if (!arrayBuffer) {
        exceptionState.throwDOMException(
            SyntaxError,
            "invalid ArrayBuffer.");
        return 0;
    }

    RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate());
    if (!audioBuffer.get()) {
        exceptionState.throwDOMException(
            SyntaxError,
            "invalid audio data in ArrayBuffer.");
        return 0;
    }

    return audioBuffer;
}
    349 
// Asynchronously decodes |audioData|; one of the callbacks fires later with
// the result. Throws SyntaxError for a null buffer. Note the callbacks
// themselves are not null-checked here — presumably the decoder tolerates
// that; TODO confirm in AsyncAudioDecoder.
void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassOwnPtr<AudioBufferCallback> successCallback, PassOwnPtr<AudioBufferCallback> errorCallback, ExceptionState& exceptionState)
{
    if (!audioData) {
        exceptionState.throwDOMException(
            SyntaxError,
            "invalid ArrayBuffer for audioData.");
        return;
    }
    m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback);
}
    360 
// Creates an AudioBufferSourceNode at the destination's sample-rate.
PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
{
    ASSERT(isMainThread());
    lazyInitialize();
    RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());

    // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
    // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
    refNode(node.get());

    return node;
}
    373 
    374 PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState)
    375 {
    376     if (!mediaElement) {
    377         exceptionState.throwDOMException(
    378             InvalidStateError,
    379             "invalid HTMLMedialElement.");
    380         return 0;
    381     }
    382 
    383     ASSERT(isMainThread());
    384     lazyInitialize();
    385 
    386     // First check if this media element already has a source node.
    387     if (mediaElement->audioSourceNode()) {
    388         exceptionState.throwDOMException(
    389             InvalidStateError,
    390             "invalid HTMLMediaElement.");
    391         return 0;
    392     }
    393 
    394     RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement);
    395 
    396     mediaElement->setAudioSourceNode(node.get());
    397 
    398     refNode(node.get()); // context keeps reference until node is disconnected
    399     return node;
    400 }
    401 
// Creates a MediaStreamAudioSourceNode for |mediaStream|, using the first
// audio track that exposes an AudioSourceProvider (local tracks only for
// now). Throws InvalidStateError and returns 0 for a null stream.
PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState)
{
    if (!mediaStream) {
        exceptionState.throwDOMException(
            InvalidStateError,
            "invalid MediaStream source");
        return 0;
    }

    ASSERT(isMainThread());
    lazyInitialize();

    AudioSourceProvider* provider = 0;

    MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks();
    RefPtr<MediaStreamTrack> audioTrack;

    // FIXME: get a provider for non-local MediaStreams (like from a remote peer).
    for (size_t i = 0; i < audioTracks.size(); ++i) {
        audioTrack = audioTracks[i];
        if (audioTrack->component()->audioSourceProvider()) {
            provider = audioTrack->component()->audioSourceProvider();
            break;
        }
    }

    // Note: provider (and audioTrack) may still be null here; presumably the
    // node tolerates that — TODO confirm in MediaStreamAudioSourceNode.
    RefPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, audioTrack.get(), provider);

    // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
    node->setFormat(2, sampleRate());

    refNode(node.get()); // context keeps reference until node is disconnected
    return node;
}
    436 
// Creates a mono MediaStreamAudioDestinationNode.
PassRefPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
{
    // FIXME: Add support for an optional argument which specifies the number of channels.
    // FIXME: The default should probably be stereo instead of mono.
    return MediaStreamAudioDestinationNode::create(this, 1);
}
    443 
// Overload with defaults: implementation-chosen buffer size (0), stereo in/out.
PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(ExceptionState& exceptionState)
{
    // Set number of input/output channels to stereo by default.
    return createScriptProcessor(0, 2, 2, exceptionState);
}
    449 
// Overload: explicit buffer size, stereo in/out.
PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState)
{
    // Set number of input/output channels to stereo by default.
    return createScriptProcessor(bufferSize, 2, 2, exceptionState);
}
    455 
// Overload: explicit buffer size and input channel count, stereo output.
PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState)
{
    // Set number of output channels to stereo by default.
    return createScriptProcessor(bufferSize, numberOfInputChannels, 2, exceptionState);
}
    461 
    462 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState)
    463 {
    464     ASSERT(isMainThread());
    465     lazyInitialize();
    466     RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
    467 
    468     if (!node.get()) {
    469         if (!numberOfInputChannels && !numberOfOutputChannels) {
    470             exceptionState.throwDOMException(
    471                 IndexSizeError,
    472                 "number of input channels and output channels cannot both be zero.");
    473         } else if (numberOfInputChannels > AudioContext::maxNumberOfChannels()) {
    474             exceptionState.throwDOMException(
    475                 IndexSizeError,
    476                 "number of input channels (" + String::number(numberOfInputChannels)
    477                 + ") exceeds maximum ("
    478                 + String::number(AudioContext::maxNumberOfChannels()) + ").");
    479         } else if (numberOfOutputChannels > AudioContext::maxNumberOfChannels()) {
    480             exceptionState.throwDOMException(
    481                 IndexSizeError,
    482                 "number of output channels (" + String::number(numberOfInputChannels)
    483                 + ") exceeds maximum ("
    484                 + String::number(AudioContext::maxNumberOfChannels()) + ").");
    485         } else {
    486             exceptionState.throwDOMException(
    487                 IndexSizeError,
    488                 "buffer size (" + String::number(bufferSize)
    489                 + ") must be a power of two between 256 and 16384.");
    490         }
    491         return 0;
    492     }
    493 
    494     refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks
    495     return node;
    496 }
    497 
    498 PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
    499 {
    500     ASSERT(isMainThread());
    501     lazyInitialize();
    502     return BiquadFilterNode::create(this, m_destinationNode->sampleRate());
    503 }
    504 
// Creates a WaveShaperNode (sample-rate independent).
PassRefPtr<WaveShaperNode> AudioContext::createWaveShaper()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return WaveShaperNode::create(this);
}
    511 
// Creates a PannerNode running at the destination's sample-rate.
PassRefPtr<PannerNode> AudioContext::createPanner()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return PannerNode::create(this, m_destinationNode->sampleRate());
}
    518 
// Creates a ConvolverNode running at the destination's sample-rate.
PassRefPtr<ConvolverNode> AudioContext::createConvolver()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return ConvolverNode::create(this, m_destinationNode->sampleRate());
}
    525 
// Creates a DynamicsCompressorNode running at the destination's sample-rate.
PassRefPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate());
}
    532 
// Creates an AnalyserNode running at the destination's sample-rate.
PassRefPtr<AnalyserNode> AudioContext::createAnalyser()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return AnalyserNode::create(this, m_destinationNode->sampleRate());
}
    539 
    540 PassRefPtr<GainNode> AudioContext::createGain()
    541 {
    542     ASSERT(isMainThread());
    543     lazyInitialize();
    544     return GainNode::create(this, m_destinationNode->sampleRate());
    545 }
    546 
// Overload with the spec default of 1 second maximum delay.
PassRefPtr<DelayNode> AudioContext::createDelay(ExceptionState& exceptionState)
{
    const double defaultMaxDelayTime = 1;
    return createDelay(defaultMaxDelayTime, exceptionState);
}
    552 
// Creates a DelayNode with the given maximum delay. DelayNode::create()
// validates maxDelayTime and reports through |exceptionState|; on failure we
// return 0 rather than a node.
PassRefPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    lazyInitialize();
    RefPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, exceptionState);
    if (exceptionState.hadException())
        return 0;
    return node;
}
    562 
// Overload with the spec default of 6 outputs.
PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionState& exceptionState)
{
    const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
    return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptionState);
}
    568 
// Creates a ChannelSplitterNode with |numberOfOutputs| outputs. Throws
// IndexSizeError and returns 0 when create() rejects the count.
PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    lazyInitialize();

    RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);

    if (!node.get()) {
        exceptionState.throwDOMException(
            IndexSizeError,
            "number of outputs (" + String::number(numberOfOutputs)
            + ") must be between 1 and "
            + String::number(AudioContext::maxNumberOfChannels()) + ".");
        return 0;
    }

    return node;
}
    587 
// Overload with the spec default of 6 inputs.
PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionState& exceptionState)
{
    const unsigned ChannelMergerDefaultNumberOfInputs = 6;
    return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionState);
}
    593 
// Creates a ChannelMergerNode with |numberOfInputs| inputs. Throws
// IndexSizeError and returns 0 when create() rejects the count.
PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    lazyInitialize();

    RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);

    if (!node.get()) {
        exceptionState.throwDOMException(
            IndexSizeError,
            "number of inputs (" + String::number(numberOfInputs)
            + ") must be between 1 and "
            + String::number(AudioContext::maxNumberOfChannels()) + ".");
        return 0;
    }

    return node;
}
    612 
// Creates an OscillatorNode at the destination's sample-rate.
PassRefPtr<OscillatorNode> AudioContext::createOscillator()
{
    ASSERT(isMainThread());
    lazyInitialize();

    RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate());

    // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
    // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
    refNode(node.get());

    return node;
}
    626 
    627 PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState& exceptionState)
    628 {
    629     ASSERT(isMainThread());
    630 
    631     if (!real) {
    632         exceptionState.throwDOMException(
    633             SyntaxError,
    634             "invalid real array");
    635         return 0;
    636     }
    637 
    638     if (!imag) {
    639         exceptionState.throwDOMException(
    640             SyntaxError,
    641             "invalid imaginary array");
    642         return 0;
    643     }
    644 
    645     if (real->length() != imag->length()) {
    646         exceptionState.throwDOMException(
    647             IndexSizeError,
    648             "length of real array (" + String::number(real->length())
    649             + ") and length of imaginary array (" +  String::number(imag->length())
    650             + ") must match.");
    651         return 0;
    652     }
    653 
    654     if (real->length() > 4096) {
    655         exceptionState.throwDOMException(
    656             IndexSizeError,
    657             "length of real array (" + String::number(real->length())
    658             + ") exceeds allowed maximum of 4096");
    659         return 0;
    660     }
    661 
    662     if (imag->length() > 4096) {
    663         exceptionState.throwDOMException(
    664             IndexSizeError,
    665             "length of imaginary array (" + String::number(imag->length())
    666             + ") exceeds allowed maximum of 4096");
    667         return 0;
    668     }
    669 
    670     lazyInitialize();
    671     return PeriodicWave::create(sampleRate(), real, imag);
    672 }
    673 
// Called from the audio thread when a scheduled source node finishes playing;
// the node is queued for de-referencing (see derefFinishedSourceNodes()).
void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_finishedNodes.append(node);
}
    679 
// Drops the context's connection reference on every node that reported
// finishing via notifyNodeFinishedProcessing(). Must hold the graph lock.
void AudioContext::derefFinishedSourceNodes()
{
    ASSERT(isGraphOwner());
    ASSERT(isAudioThread() || isAudioThreadFinished());
    for (unsigned i = 0; i < m_finishedNodes.size(); i++)
        derefNode(m_finishedNodes[i]);

    m_finishedNodes.clear();
}
    689 
// Takes a connection reference on |node| so the context keeps it alive (e.g.
// while a source node plays or a script node renders).
void AudioContext::refNode(AudioNode* node)
{
    ASSERT(isMainThread());
    // Locks the graph for the duration of this call (AutoLocker is RAII).
    AutoLocker locker(this);

    node->ref(AudioNode::RefTypeConnection);
    m_referencedNodes.append(node);
}
    698 
// Releases the connection reference taken in refNode() and removes the first
// matching entry from m_referencedNodes. Caller must hold the graph lock.
void AudioContext::derefNode(AudioNode* node)
{
    ASSERT(isGraphOwner());

    node->deref(AudioNode::RefTypeConnection);

    // Linear scan is fine: the referenced-node list stays small in practice.
    for (unsigned i = 0; i < m_referencedNodes.size(); ++i) {
        if (node == m_referencedNodes[i]) {
            m_referencedNodes.remove(i);
            break;
        }
    }
}
    712 
// Teardown helper (see uninitialize()): releases the connection reference on
// every still-referenced node. Only safe once the audio thread has finished.
void AudioContext::derefUnfinishedSourceNodes()
{
    ASSERT(isMainThread() && isAudioThreadFinished());
    for (unsigned i = 0; i < m_referencedNodes.size(); ++i)
        m_referencedNodes[i]->deref(AudioNode::RefTypeConnection);

    m_referencedNodes.clear();
}
    721 
// Acquires the graph mutex (blocking), recording this thread as the graph
// owner. Recursive calls from the owning thread are no-ops; |mustReleaseLock|
// tells the caller whether it must later call unlock().
void AudioContext::lock(bool& mustReleaseLock)
{
    // Don't allow regular lock in real-time audio thread.
    ASSERT(isMainThread());

    ThreadIdentifier thisThread = currentThread();

    if (thisThread == m_graphOwnerThread) {
        // We already have the lock.
        mustReleaseLock = false;
    } else {
        // Acquire the lock.
        m_contextGraphMutex.lock();
        m_graphOwnerThread = thisThread;
        mustReleaseLock = true;
    }
}
    739 
// Non-blocking graph lock intended for the real-time audio thread. Returns
// true if the lock is held on return (either newly acquired or already owned
// by this thread); |mustReleaseLock| tells the caller whether a matching
// unlock() is required.
bool AudioContext::tryLock(bool& mustReleaseLock)
{
    ThreadIdentifier thisThread = currentThread();
    bool isAudioThread = thisThread == audioThread();

    // Try to catch cases of using try lock on main thread - it should use regular lock.
    ASSERT(isAudioThread || isAudioThreadFinished());

    if (!isAudioThread) {
        // In release build treat tryLock() as lock() (since above ASSERT(isAudioThread) never fires) - this is the best we can do.
        lock(mustReleaseLock);
        return true;
    }

    bool hasLock;

    if (thisThread == m_graphOwnerThread) {
        // Thread already has the lock.
        hasLock = true;
        mustReleaseLock = false;
    } else {
        // Don't already have the lock - try to acquire it.
        hasLock = m_contextGraphMutex.tryLock();

        if (hasLock)
            m_graphOwnerThread = thisThread;

        // Only release later if we actually acquired the mutex here.
        mustReleaseLock = hasLock;
    }

    return hasLock;
}
    772 
// Releases the graph lock taken by lock()/tryLock(). The owner thread id is
// cleared *before* unlocking the mutex so no other thread can observe itself
// as owner while the mutex is still held by us.
void AudioContext::unlock()
{
    ASSERT(currentThread() == m_graphOwnerThread);

    m_graphOwnerThread = UndefinedThreadIdentifier;
    m_contextGraphMutex.unlock();
}
    780 
// True when the calling thread is the context's audio rendering thread.
bool AudioContext::isAudioThread() const
{
    return currentThread() == m_audioThread;
}
    785 
// True when the calling thread currently holds the graph lock
// (i.e. it was recorded as owner by lock()/tryLock()).
bool AudioContext::isGraphOwner() const
{
    return currentThread() == m_graphOwnerThread;
}
    790 
// Queues a node whose finishDeref() must be completed later; the queue is
// drained by handleDeferredFinishDerefs() under the graph lock. Audio thread
// only.
void AudioContext::addDeferredFinishDeref(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_deferredFinishDerefList.append(node);
}
    796 
// Graph-maintenance work done at the start of each render quantum on the
// audio thread: sync dirty summing junctions/outputs and the automatic pull
// node list into their rendering state. Uses tryLock() so the real-time
// thread never blocks; a failed lock just defers the updates to a later
// quantum.
void AudioContext::handlePreRenderTasks()
{
    ASSERT(isAudioThread());

    // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
    // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes.
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}
    815 
// Cleanup work done at the end of each render quantum on the audio thread:
// finish deferred derefs, drop references to finished source nodes, hand node
// deletion off to the main thread, and re-sync dirty rendering state. Like
// handlePreRenderTasks(), uses a non-blocking tryLock().
void AudioContext::handlePostRenderTasks()
{
    ASSERT(isAudioThread());

    // Must use a tryLock() here too.  Don't worry, the lock will very rarely be contended and this method is called frequently.
    // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
    // from the render graph (in which case they'll render silence).
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Take care of finishing any derefs where the tryLock() failed previously.
        handleDeferredFinishDerefs();

        // Dynamically clean up nodes which are no longer needed.
        derefFinishedSourceNodes();

        // Don't delete in the real-time thread. Let the main thread do it.
        // Ref-counted objects held by certain AudioNodes may not be thread-safe.
        scheduleNodeDeletion();

        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}
    845 
    846 void AudioContext::handleDeferredFinishDerefs()
    847 {
    848     ASSERT(isAudioThread() && isGraphOwner());
    849     for (unsigned i = 0; i < m_deferredFinishDerefList.size(); ++i) {
    850         AudioNode* node = m_deferredFinishDerefList[i];
    851         node->finishDeref(AudioNode::RefTypeConnection);
    852     }
    853 
    854     m_deferredFinishDerefList.clear();
    855 }
    856 
    857 void AudioContext::markForDeletion(AudioNode* node)
    858 {
    859     ASSERT(isGraphOwner());
    860 
    861     if (isAudioThreadFinished())
    862         m_nodesToDelete.append(node);
    863     else
    864         m_nodesMarkedForDeletion.append(node);
    865 
    866     // This is probably the best time for us to remove the node from automatic pull list,
    867     // since all connections are gone and we hold the graph lock. Then when handlePostRenderTasks()
    868     // gets a chance to schedule the deletion work, updateAutomaticPullNodes() also gets a chance to
    869     // modify m_renderingAutomaticPullNodes.
    870     removeAutomaticPullNode(node);
    871 }
    872 
// Moves nodes marked for deletion onto m_nodesToDelete and posts
// deleteMarkedNodes() to the main thread (at most one outstanding request,
// guarded by m_isDeletionScheduled). Must hold the graph lock on an
// initialized context.
void AudioContext::scheduleNodeDeletion()
{
    bool isGood = m_isInitialized && isGraphOwner();
    ASSERT(isGood);
    if (!isGood)
        return;

    // Make sure to call deleteMarkedNodes() on main thread.
    if (m_nodesMarkedForDeletion.size() && !m_isDeletionScheduled) {
        m_nodesToDelete.append(m_nodesMarkedForDeletion);
        m_nodesMarkedForDeletion.clear();

        m_isDeletionScheduled = true;

        // Don't let ourself get deleted before the callback.
        // See matching deref() in deleteMarkedNodesDispatch().
        ref();
        callOnMainThread(deleteMarkedNodesDispatch, this);
    }
}
    893 
    894 void AudioContext::deleteMarkedNodesDispatch(void* userData)
    895 {
    896     AudioContext* context = reinterpret_cast<AudioContext*>(userData);
    897     ASSERT(context);
    898     if (!context)
    899         return;
    900 
    901     context->deleteMarkedNodes();
    902     context->deref();
    903 }
    904 
// Deletes, on the main thread, every node queued in m_nodesToDelete. Before
// each delete, the node's inputs/outputs are scrubbed from the dirty sets so
// handleDirty*() never touches freed memory. The context is kept alive with a
// local RefPtr until the AutoLocker's mutex is released.
void AudioContext::deleteMarkedNodes()
{
    ASSERT(isMainThread());

    // Protect this object from being deleted before we release the mutex locked by AutoLocker.
    RefPtr<AudioContext> protect(this);
    {
        AutoLocker locker(this);

        // Drain from the back so removeLast() is O(1).
        while (size_t n = m_nodesToDelete.size()) {
            AudioNode* node = m_nodesToDelete[n - 1];
            m_nodesToDelete.removeLast();

            // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
            unsigned numberOfInputs = node->numberOfInputs();
            for (unsigned i = 0; i < numberOfInputs; ++i)
                m_dirtySummingJunctions.remove(node->input(i));

            // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
            unsigned numberOfOutputs = node->numberOfOutputs();
            for (unsigned i = 0; i < numberOfOutputs; ++i)
                m_dirtyAudioNodeOutputs.remove(node->output(i));

            // Finally, delete it.
            delete node;
        }
        // Allow scheduleNodeDeletion() to post another request.
        m_isDeletionScheduled = false;
    }
}
    934 
// Flags a summing junction whose rendering state must be refreshed by
// handleDirtyAudioSummingJunctions(). Must hold the graph lock.
void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
{
    ASSERT(isGraphOwner());
    m_dirtySummingJunctions.add(summingJunction);
}
    940 
// Un-flags a summing junction (e.g. one about to go away) so the dirty set
// never holds a stale pointer. Main thread only; takes the graph lock.
void AudioContext::removeMarkedSummingJunction(AudioSummingJunction* summingJunction)
{
    ASSERT(isMainThread());
    AutoLocker locker(this);
    m_dirtySummingJunctions.remove(summingJunction);
}
    947 
// Flags an output whose rendering state must be refreshed by
// handleDirtyAudioNodeOutputs(). Must hold the graph lock.
void AudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output)
{
    ASSERT(isGraphOwner());
    m_dirtyAudioNodeOutputs.add(output);
}
    953 
    954 void AudioContext::handleDirtyAudioSummingJunctions()
    955 {
    956     ASSERT(isGraphOwner());
    957 
    958     for (HashSet<AudioSummingJunction*>::iterator i = m_dirtySummingJunctions.begin(); i != m_dirtySummingJunctions.end(); ++i)
    959         (*i)->updateRenderingState();
    960 
    961     m_dirtySummingJunctions.clear();
    962 }
    963 
    964 void AudioContext::handleDirtyAudioNodeOutputs()
    965 {
    966     ASSERT(isGraphOwner());
    967 
    968     for (HashSet<AudioNodeOutput*>::iterator i = m_dirtyAudioNodeOutputs.begin(); i != m_dirtyAudioNodeOutputs.end(); ++i)
    969         (*i)->updateRenderingState();
    970 
    971     m_dirtyAudioNodeOutputs.clear();
    972 }
    973 
    974 void AudioContext::addAutomaticPullNode(AudioNode* node)
    975 {
    976     ASSERT(isGraphOwner());
    977 
    978     if (!m_automaticPullNodes.contains(node)) {
    979         m_automaticPullNodes.add(node);
    980         m_automaticPullNodesNeedUpdating = true;
    981     }
    982 }
    983 
    984 void AudioContext::removeAutomaticPullNode(AudioNode* node)
    985 {
    986     ASSERT(isGraphOwner());
    987 
    988     if (m_automaticPullNodes.contains(node)) {
    989         m_automaticPullNodes.remove(node);
    990         m_automaticPullNodesNeedUpdating = true;
    991     }
    992 }
    993 
    994 void AudioContext::updateAutomaticPullNodes()
    995 {
    996     ASSERT(isGraphOwner());
    997 
    998     if (m_automaticPullNodesNeedUpdating) {
    999         // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
   1000         m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
   1001 
   1002         unsigned j = 0;
   1003         for (HashSet<AudioNode*>::iterator i = m_automaticPullNodes.begin(); i != m_automaticPullNodes.end(); ++i, ++j) {
   1004             AudioNode* output = *i;
   1005             m_renderingAutomaticPullNodes[j] = output;
   1006         }
   1007 
   1008         m_automaticPullNodesNeedUpdating = false;
   1009     }
   1010 }
   1011 
   1012 void AudioContext::processAutomaticPullNodes(size_t framesToProcess)
   1013 {
   1014     ASSERT(isAudioThread());
   1015 
   1016     for (unsigned i = 0; i < m_renderingAutomaticPullNodes.size(); ++i)
   1017         m_renderingAutomaticPullNodes[i]->processIfNecessary(framesToProcess);
   1018 }
   1019 
// EventTarget override: identifies this target as an AudioContext.
const AtomicString& AudioContext::interfaceName() const
{
    return EventTargetNames::AudioContext;
}
   1024 
// ActiveDOMObject override. Returns 0 once a stop has been scheduled so
// callers (e.g. fireCompletionEvent) treat the document as already gone.
ExecutionContext* AudioContext::executionContext() const
{
    return m_isStopScheduled ? 0 : ActiveDOMObject::executionContext();
}
   1029 
// Kicks off rendering by delegating to the destination node.
void AudioContext::startRendering()
{
    destination()->startRendering();
}
   1034 
   1035 void AudioContext::fireCompletionEvent()
   1036 {
   1037     ASSERT(isMainThread());
   1038     if (!isMainThread())
   1039         return;
   1040 
   1041     AudioBuffer* renderedBuffer = m_renderTarget.get();
   1042 
   1043     ASSERT(renderedBuffer);
   1044     if (!renderedBuffer)
   1045         return;
   1046 
   1047     // Avoid firing the event if the document has already gone away.
   1048     if (executionContext()) {
   1049         // Call the offline rendering completion event listener.
   1050         dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
   1051     }
   1052 }
   1053 
// Atomically bumps the active source-node counter (safe from any thread).
void AudioContext::incrementActiveSourceCount()
{
    atomicIncrement(&m_activeSourceCount);
}
   1058 
// Atomically drops the active source-node counter (safe from any thread).
void AudioContext::decrementActiveSourceCount()
{
    atomicDecrement(&m_activeSourceCount);
}
   1063 
   1064 } // namespace WebCore
   1065 
   1066 #endif // ENABLE(WEB_AUDIO)
   1067