/*
 * Copyright (C) 2010, Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1.  Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(WEB_AUDIO)

#include "JavaScriptAudioNode.h"

#include "AudioBuffer.h"
#include "AudioBus.h"
#include "AudioContext.h"
#include "AudioNodeInput.h"
#include "AudioNodeOutput.h"
#include "AudioProcessingEvent.h"
#include "Document.h"
#include "Float32Array.h"
#include <wtf/MainThread.h>

namespace WebCore {

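// Fallback buffer size (in sample-frames) used when the constructor is given an unsupported size.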
const size_t DefaultBufferSize = 4096;

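// Factory function: constructs the node and transfers ownership of the initial reference to the caller.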
PassRefPtr<JavaScriptAudioNode> JavaScriptAudioNode::create(AudioContext* context, double sampleRate, size_t bufferSize, unsigned numberOfInputs, unsigned numberOfOutputs)
{
    return adoptRef(new JavaScriptAudioNode(context, sampleRate, bufferSize, numberOfInputs, numberOfOutputs));
}

JavaScriptAudioNode::JavaScriptAudioNode(AudioContext* context, double sampleRate, size_t bufferSize, unsigned numberOfInputs, unsigned numberOfOutputs)
    : AudioNode(context, sampleRate)
    , m_doubleBufferIndex(0)
    , m_doubleBufferIndexForEvent(0)
    , m_bufferSize(bufferSize)
    , m_bufferReadWriteIndex(0)
    , m_isRequestOutstanding(false)
{
    // Check for valid buffer size.
    switch (bufferSize) {
    case 256:
    case 512:
    case 1024:
    case 2048:
    case 4096:
    case 8192:
    case 16384:
        m_bufferSize = bufferSize;
        break;
    default:
        m_bufferSize = DefaultBufferSize;
    }

    // Regardless of the allowed buffer sizes above, we still need to process at the granularity of the AudioNode.
    if (m_bufferSize < AudioNode::ProcessingSizeInFrames)
        m_bufferSize = AudioNode::ProcessingSizeInFrames;

    // FIXME: Right now we're hardcoded to single input and single output.
    // Although the specification says this is OK for a simple implementation, multiple inputs and outputs would be good.
    ASSERT_UNUSED(numberOfInputs, numberOfInputs == 1);
    ASSERT_UNUSED(numberOfOutputs, numberOfOutputs == 1);
    addInput(adoptPtr(new AudioNodeInput(this)));
    addOutput(adoptPtr(new AudioNodeOutput(this, 2)));

    setType(NodeTypeJavaScript);

    initialize();
}

JavaScriptAudioNode::~JavaScriptAudioNode()
{
    uninitialize();
}

void JavaScriptAudioNode::initialize()
{
    if (isInitialized())
        return;

    double sampleRate = context()->sampleRate();

    // Create double buffers on both the input and output sides.
    // These AudioBuffers will be directly accessed in the main thread by JavaScript.
    for (unsigned i = 0; i < 2; ++i) {
        m_inputBuffers.append(AudioBuffer::create(2, bufferSize(), sampleRate));
        m_outputBuffers.append(AudioBuffer::create(2, bufferSize(), sampleRate));
    }

    AudioNode::initialize();
}

void JavaScriptAudioNode::uninitialize()
{
    if (!isInitialized())
        return;

    m_inputBuffers.clear();
    m_outputBuffers.clear();

    AudioNode::uninitialize();
}

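// Overrides the AudioNode virtual so callers holding an AudioNode* can safely identify and downcast to a JavaScriptAudioNode.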
JavaScriptAudioNode* JavaScriptAudioNode::toJavaScriptAudioNode()
{
    return this;
}

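// Called on the realtime audio thread once per rendering quantum of framesToProcess sample-frames.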
void JavaScriptAudioNode::process(size_t framesToProcess)
{
    // Discussion about inputs and outputs:
    // As in other AudioNodes, JavaScriptAudioNode uses an AudioBus for its input and output (see inputBus and outputBus below).
    // Additionally, there is a double-buffering for input and output which is exposed directly to JavaScript (see inputBuffer and outputBuffer below).
    // This node is the producer for inputBuffer and the consumer for outputBuffer.
    // The JavaScript code is the consumer of inputBuffer and the producer for outputBuffer.

    // Get input and output busses.
    AudioBus* inputBus = this->input(0)->bus();
    AudioBus* outputBus = this->output(0)->bus();

    // Get input and output buffers.  We double-buffer both the input and output sides.
    unsigned doubleBufferIndex = this->doubleBufferIndex();
    bool isDoubleBufferIndexGood = doubleBufferIndex < 2 && doubleBufferIndex < m_inputBuffers.size() && doubleBufferIndex < m_outputBuffers.size();
    ASSERT(isDoubleBufferIndexGood);
    if (!isDoubleBufferIndexGood)
        return;

    AudioBuffer* inputBuffer = m_inputBuffers[doubleBufferIndex].get();
    AudioBuffer* outputBuffer = m_outputBuffers[doubleBufferIndex].get();

    // Check the consistency of input and output buffers.
    bool buffersAreGood = inputBuffer && outputBuffer && bufferSize() == inputBuffer->length() && bufferSize() == outputBuffer->length()
        && m_bufferReadWriteIndex + framesToProcess <= bufferSize();
    ASSERT(buffersAreGood);
    if (!buffersAreGood)
        return;

    // We assume that bufferSize() is evenly divisible by framesToProcess - should always be true, but we should still check.
    bool isFramesToProcessGood = framesToProcess && bufferSize() >= framesToProcess && !(bufferSize() % framesToProcess);
    ASSERT(isFramesToProcessGood);
    if (!isFramesToProcessGood)
        return;

    unsigned numberOfInputChannels = inputBus->numberOfChannels();

    bool channelsAreGood = (numberOfInputChannels == 1 || numberOfInputChannels == 2) && outputBus->numberOfChannels() == 2;
    ASSERT(channelsAreGood);
    if (!channelsAreGood)
        return;

    float* sourceL = inputBus->channel(0)->data();
    float* sourceR = numberOfInputChannels > 1 ? inputBus->channel(1)->data() : 0;
    float* destinationL = outputBus->channel(0)->data();
    float* destinationR = outputBus->channel(1)->data();

    // Copy from the input to the input buffer.  See "buffersAreGood" check above for safety.
    size_t bytesToCopy = sizeof(float) * framesToProcess;
    memcpy(inputBuffer->getChannelData(0)->data() + m_bufferReadWriteIndex, sourceL, bytesToCopy);

    if (numberOfInputChannels == 2)
        memcpy(inputBuffer->getChannelData(1)->data() + m_bufferReadWriteIndex, sourceR, bytesToCopy);
    else if (numberOfInputChannels == 1) {
        // If the input is mono, then also copy the mono input to the right channel of the AudioBuffer which the AudioProcessingEvent uses.
        // FIXME: it is likely the audio API will evolve to present an AudioBuffer with the same number of channels as our input.
        memcpy(inputBuffer->getChannelData(1)->data() + m_bufferReadWriteIndex, sourceL, bytesToCopy);
    }

    // Copy from the output buffer to the output.  See "buffersAreGood" check above for safety.
    memcpy(destinationL, outputBuffer->getChannelData(0)->data() + m_bufferReadWriteIndex, bytesToCopy);
    memcpy(destinationR, outputBuffer->getChannelData(1)->data() + m_bufferReadWriteIndex, bytesToCopy);

    // Update the buffering index.
    m_bufferReadWriteIndex = (m_bufferReadWriteIndex + framesToProcess) % bufferSize();

    // m_bufferReadWriteIndex will wrap back around to 0 when the current input and output buffers are full.
    // When this happens, fire an event and swap buffers.
    if (!m_bufferReadWriteIndex) {
        // Avoid building up requests on the main thread to fire process events when they're not being handled.
        // This could be a problem if the main thread is very busy doing other things and is being held up handling previous requests.
        if (m_isRequestOutstanding) {
            // We're late in handling the previous request.  The main thread must be very busy.
            // The best we can do is clear out the buffer ourselves here.
            outputBuffer->zero();
        } else {
            // Reference ourselves so we don't accidentally get deleted before fireProcessEvent() gets called.
            ref();

            // Fire the event on the main thread, not this one (which is the realtime audio thread).
            m_doubleBufferIndexForEvent = m_doubleBufferIndex;
            m_isRequestOutstanding = true;
            callOnMainThread(fireProcessEventDispatch, this);
        }

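        // Flip the double-buffer index so the next rendering quantum uses the other input/output buffer pair.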
        swapBuffers();
    }
}

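// Trampoline for callOnMainThread(): runs on the main thread with the node passed through as userData.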
void JavaScriptAudioNode::fireProcessEventDispatch(void* userData)
{
    JavaScriptAudioNode* jsAudioNode = static_cast<JavaScriptAudioNode*>(userData);
    ASSERT(jsAudioNode);
    if (!jsAudioNode)
        return;

    jsAudioNode->fireProcessEvent();

    // De-reference to match the ref() call in process().
    jsAudioNode->deref();
}

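// Runs on the main thread: dispatches the AudioProcessingEvent so JavaScript can read the captured input and fill the output buffer.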
void JavaScriptAudioNode::fireProcessEvent()
{
    ASSERT(isMainThread() && m_isRequestOutstanding);

    bool isIndexGood = m_doubleBufferIndexForEvent < 2;
    ASSERT(isIndexGood);
    if (!isIndexGood)
        return;

    AudioBuffer* inputBuffer = m_inputBuffers[m_doubleBufferIndexForEvent].get();
    AudioBuffer* outputBuffer = m_outputBuffers[m_doubleBufferIndexForEvent].get();
    ASSERT(inputBuffer && outputBuffer);
    if (!inputBuffer || !outputBuffer)
        return;

    // Avoid firing the event if the document has already gone away.
    if (context()->hasDocument()) {
        // Let the audio thread know we've gotten to the point where it's OK for it to make another request.
        m_isRequestOutstanding = false;

        // Call the JavaScript event handler which will do the audio processing.
        dispatchEvent(AudioProcessingEvent::create(inputBuffer, outputBuffer));
    }
}

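// Resets the buffering state and zeroes both halves of the input and output double buffers.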
void JavaScriptAudioNode::reset()
{
    m_bufferReadWriteIndex = 0;
    m_doubleBufferIndex = 0;

    for (unsigned i = 0; i < 2; ++i) {
        m_inputBuffers[i]->zero();
        m_outputBuffers[i]->zero();
    }
}

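// EventTarget plumbing: events dispatched from this node are associated with the AudioContext's Document.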
ScriptExecutionContext* JavaScriptAudioNode::scriptExecutionContext() const
{
    return const_cast<JavaScriptAudioNode*>(this)->context()->document();
}

} // namespace WebCore

#endif // ENABLE(WEB_AUDIO)