/*
 * Copyright (C) 2010, Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1.  Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef AudioNode_h
#define AudioNode_h

#include "bindings/v8/ScriptWrappable.h"
#include "core/dom/EventTarget.h"
#include "core/platform/audio/AudioBus.h"
#include "wtf/Forward.h"
#include "wtf/OwnPtr.h"
#include "wtf/PassOwnPtr.h"
#include "wtf/RefPtr.h"
#include "wtf/Vector.h"

#define DEBUG_AUDIONODE_REFERENCES 0

namespace WebCore {

class AudioContext;
class AudioNodeInput;
class AudioNodeOutput;
class AudioParam;
class ExceptionState;
// An AudioNode is the basic building block for handling audio within an AudioContext.
// It may be an audio source, an intermediate processing module, or an audio destination.
// Each AudioNode can have inputs and/or outputs. An AudioSourceNode has no inputs and a single output.
// An AudioDestinationNode has one input and no outputs and represents the final destination to the audio hardware.
// Most processing nodes such as filters will have one input and one output, although multiple inputs and outputs are possible.
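//
// Illustrative sketch (not part of this header): a rendering graph is built by connecting
// nodes with connect(). The node names and the exception-state object below are
// hypothetical; AudioContext::destination() is assumed from AudioContext.h.
//
//     source->connect(gain, 0, 0, es);                   // source output 0 -> gain input 0
//     gain->connect(context->destination(), 0, 0, es);   // gain output 0 -> destination input 0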

class AudioNode : public ScriptWrappable, public EventTarget {
public:
    enum { ProcessingSizeInFrames = 128 };

    AudioNode(AudioContext*, float sampleRate);
    virtual ~AudioNode();

    AudioContext* context() { return m_context.get(); }
    const AudioContext* context() const { return m_context.get(); }

    enum NodeType {
        NodeTypeUnknown,
        NodeTypeDestination,
        NodeTypeOscillator,
        NodeTypeAudioBufferSource,
        NodeTypeMediaElementAudioSource,
        NodeTypeMediaStreamAudioDestination,
        NodeTypeMediaStreamAudioSource,
        NodeTypeJavaScript,
        NodeTypeBiquadFilter,
        NodeTypePanner,
        NodeTypeConvolver,
        NodeTypeDelay,
        NodeTypeGain,
        NodeTypeChannelSplitter,
        NodeTypeChannelMerger,
        NodeTypeAnalyser,
        NodeTypeDynamicsCompressor,
        NodeTypeWaveShaper,
        NodeTypeEnd
    };

    enum ChannelCountMode {
        Max,
        ClampedMax,
        Explicit
    };

    NodeType nodeType() const { return m_nodeType; }
    void setNodeType(NodeType);

    // We handle our own ref-counting because of threading issues and the subtle way
    // AudioNodes can continue processing (e.g., playing a one-shot sound) after there
    // are no more JavaScript references to the object.
    enum RefType { RefTypeNormal, RefTypeConnection };
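    //
    // Illustrative sketch: a connection holds a RefTypeConnection reference on the
    // downstream node so it stays alive while it can still produce sound, even after
    // all JavaScript references are gone. The call sites below are hypothetical:
    //
    //     node->ref(AudioNode::RefTypeConnection);    // when a connection is made
    //     ...
    //     node->deref(AudioNode::RefTypeConnection);  // when the connection is torn down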

    // Can be called from main thread or context's audio thread.
    void ref(RefType refType = RefTypeNormal);
    void deref(RefType refType = RefTypeNormal);

    // Can be called from main thread or context's audio thread. It must be called while the context's graph lock is held.
    void finishDeref(RefType refType);

    // The AudioNodeInput(s) (if any) will already have their input data available when process() is called.
    // Subclasses will take this input data and put the results in the AudioBus(es) of their AudioNodeOutput(s) (if any).
    // Called from context's audio thread.
    virtual void process(size_t framesToProcess) = 0;
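    //
    // Illustrative sketch of a typical one-input/one-output override (the subclass name is
    // hypothetical, and AudioNodeInput::bus() / AudioNodeOutput::bus() / AudioBus::zero()
    // are assumed from their respective headers):
    //
    //     void MyEffectNode::process(size_t framesToProcess)
    //     {
    //         AudioBus* destinationBus = output(0)->bus();
    //         if (!isInitialized())
    //             destinationBus->zero();
    //         else {
    //             AudioBus* sourceBus = input(0)->bus();
    //             // ... render framesToProcess frames from sourceBus into destinationBus ...
    //         }
    //     }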

    // Resets DSP processing state (clears delay lines, filter memory, etc.).
    // Called from context's audio thread.
    virtual void reset() = 0;

    // No significant resources should be allocated until initialize() is called.
    // Processing may not occur until a node is initialized.
    virtual void initialize();
    virtual void uninitialize();

    bool isInitialized() const { return m_isInitialized; }
    void lazyInitialize();

    unsigned numberOfInputs() const { return m_inputs.size(); }
    unsigned numberOfOutputs() const { return m_outputs.size(); }

    AudioNodeInput* input(unsigned);
    AudioNodeOutput* output(unsigned);

    // Called from main thread by corresponding JavaScript methods.
    virtual void connect(AudioNode*, unsigned outputIndex, unsigned inputIndex, ExceptionState&);
    void connect(AudioParam*, unsigned outputIndex, ExceptionState&);
    virtual void disconnect(unsigned outputIndex, ExceptionState&);

    virtual float sampleRate() const { return m_sampleRate; }

    // processIfNecessary() is called by our output(s) when the rendering graph needs this AudioNode to process.
    // This method ensures that the AudioNode will only process once per rendering time quantum even if it's called repeatedly.
    // This handles the case of "fanout" where an output is connected to multiple AudioNode inputs.
    // Called from context's audio thread.
    void processIfNecessary(size_t framesToProcess);
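    //
    // Conceptually, the once-per-quantum guard can be sketched like this (illustrative
    // only; AudioContext::currentTime() is assumed from AudioContext.h):
    //
    //     double currentTime = context()->currentTime();
    //     if (m_lastProcessingTime != currentTime) {
    //         m_lastProcessingTime = currentTime;   // the first caller in this quantum does the work
    //         pullInputs(framesToProcess);
    //         process(framesToProcess);
    //     }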

    // Called when a new connection has been made to one of our inputs or when a connection's number of channels has changed.
    // This potentially gives us enough information to perform a lazy initialization or, if necessary, a re-initialization.
    // Called from main thread.
    virtual void checkNumberOfChannelsForInput(AudioNodeInput*);

#if DEBUG_AUDIONODE_REFERENCES
    static void printNodeCounts();
#endif

    bool isMarkedForDeletion() const { return m_isMarkedForDeletion; }

    // tailTime() is the length of time (not counting latency time) where non-zero output may occur after continuous silent input.
    virtual double tailTime() const = 0;
    // latencyTime() is the length of time it takes for non-zero output to appear after non-zero input is provided. This only applies to
    // processing delay which is an artifact of the processing algorithm chosen and is *not* part of the intrinsic desired effect. For
    // example, a "delay" effect is expected to delay the signal, and thus would not be considered latency.
    virtual double latencyTime() const = 0;
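    //
    // Worked example: a delay effect with a maximum delay of 1 second can keep producing
    // non-silent output for up to 1 second after its input goes silent, so its tailTime()
    // would be 1.0. Its latencyTime() would be 0, because the delay is the desired effect
    // rather than a processing artifact. Conversely, a hypothetical lookahead compressor
    // that buffers a few milliseconds of signal before reacting would report that
    // buffering time as latencyTime().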

    // propagatesSilence() should return true if the node will generate silent output when given silent input. By default, AudioNode
    // will take tailTime() and latencyTime() into account when determining whether the node will propagate silence.
    virtual bool propagatesSilence() const;
    bool inputsAreSilent();
    void silenceOutputs();
    void unsilenceOutputs();

    void enableOutputsIfNecessary();
    void disableOutputsIfNecessary();

    unsigned long channelCount();
    virtual void setChannelCount(unsigned long, ExceptionState&);

    String channelCountMode();
    void setChannelCountMode(const String&, ExceptionState&);

    String channelInterpretation();
    void setChannelInterpretation(const String&, ExceptionState&);
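    //
    // Illustrative sketch: these setters back the JS-visible channelCount,
    // channelCountMode, and channelInterpretation attributes. The string values come
    // from the Web Audio spec ("max", "clamped-max", "explicit" and "speakers",
    // "discrete"); the exception-state object below is hypothetical:
    //
    //     node->setChannelCount(2, es);                    // fixed stereo
    //     node->setChannelCountMode("explicit", es);
    //     node->setChannelInterpretation("speakers", es);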

    ChannelCountMode internalChannelCountMode() const { return m_channelCountMode; }
    AudioBus::ChannelInterpretation internalChannelInterpretation() const { return m_channelInterpretation; }

    // EventTarget
    virtual const AtomicString& interfaceName() const OVERRIDE;
    virtual ScriptExecutionContext* scriptExecutionContext() const OVERRIDE;
    virtual EventTargetData* eventTargetData() OVERRIDE { return &m_eventTargetData; }
    virtual EventTargetData* ensureEventTargetData() OVERRIDE { return &m_eventTargetData; }

protected:
    // Inputs and outputs must be created before the AudioNode is initialized.
    void addInput(PassOwnPtr<AudioNodeInput>);
    void addOutput(PassOwnPtr<AudioNodeOutput>);
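    //
    // Illustrative sketch of a subclass constructor (the subclass is hypothetical, and
    // the AudioNodeInput/AudioNodeOutput constructor signatures are assumed from their
    // respective headers):
    //
    //     MyEffectNode::MyEffectNode(AudioContext* context, float sampleRate)
    //         : AudioNode(context, sampleRate)
    //     {
    //         addInput(adoptPtr(new AudioNodeInput(this)));
    //         addOutput(adoptPtr(new AudioNodeOutput(this, 1)));  // one mono output
    //         setNodeType(NodeTypeGain);  // pick the appropriate NodeType tag
    //         initialize();
    //     }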

    // Called by processIfNecessary() to cause all parts of the rendering graph connected to us to process.
    // Each rendering quantum, the audio data for each of the AudioNode's inputs will be available after this method is called.
    // Called from context's audio thread.
    virtual void pullInputs(size_t framesToProcess);

    // Force all inputs to take any channel interpretation changes into account.
    void updateChannelsForInputs();

private:
    volatile bool m_isInitialized;
    NodeType m_nodeType;
    RefPtr<AudioContext> m_context;
    float m_sampleRate;
    Vector<OwnPtr<AudioNodeInput> > m_inputs;
    Vector<OwnPtr<AudioNodeOutput> > m_outputs;

    EventTargetData m_eventTargetData;

    double m_lastProcessingTime;
    double m_lastNonSilentTime;

    // Ref-counting
    volatile int m_normalRefCount;
    volatile int m_connectionRefCount;

    bool m_isMarkedForDeletion;
    bool m_isDisabled;

#if DEBUG_AUDIONODE_REFERENCES
    static bool s_isNodeCountInitialized;
    static int s_nodeCount[NodeTypeEnd];
#endif

    virtual void refEventTarget() OVERRIDE { ref(); }
    virtual void derefEventTarget() OVERRIDE { deref(); }

protected:
    unsigned m_channelCount;
    ChannelCountMode m_channelCountMode;
    AudioBus::ChannelInterpretation m_channelInterpretation;
};

} // namespace WebCore

#endif // AudioNode_h