1 /* 2 * Copyright (C) 2010, Google Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY 14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 15 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 16 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY 17 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 18 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 19 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 20 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 22 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 */ 24 25 #ifndef AudioNode_h 26 #define AudioNode_h 27 28 #include "bindings/v8/ScriptWrappable.h" 29 #include "core/events/EventTarget.h" 30 #include "platform/audio/AudioBus.h" 31 #include "wtf/Forward.h" 32 #include "wtf/OwnPtr.h" 33 #include "wtf/PassOwnPtr.h" 34 #include "wtf/RefPtr.h" 35 #include "wtf/Vector.h" 36 37 #define DEBUG_AUDIONODE_REFERENCES 0 38 39 namespace WebCore { 40 41 class AudioContext; 42 class AudioNodeInput; 43 class AudioNodeOutput; 44 class AudioParam; 45 class ExceptionState; 46 47 // An AudioNode is the basic building block for handling audio within an AudioContext. 
// It may be an audio source, an intermediate processing module, or an audio destination.
// Each AudioNode can have inputs and/or outputs. An AudioSourceNode has no inputs and a single output.
// An AudioDestinationNode has one input and no outputs and represents the final destination to the audio hardware.
// Most processing nodes such as filters will have one input and one output, although multiple inputs and outputs are possible.

class AudioNode : public ScriptWrappable, public EventTargetWithInlineData {
public:
    // Audio is rendered in fixed-size quanta of this many sample-frames;
    // process() is always asked for chunks of this size (or less).
    enum { ProcessingSizeInFrames = 128 };

    AudioNode(AudioContext*, float sampleRate);
    virtual ~AudioNode();

    // The AudioContext this node belongs to (held alive via the RefPtr member below).
    AudioContext* context() { return m_context.get(); }
    const AudioContext* context() const { return m_context.get(); }

    // Concrete node kinds. NodeTypeEnd is a sentinel (not a real type) used to
    // size the per-type debug counter array s_nodeCount[] below.
    enum NodeType {
        NodeTypeUnknown,
        NodeTypeDestination,
        NodeTypeOscillator,
        NodeTypeAudioBufferSource,
        NodeTypeMediaElementAudioSource,
        NodeTypeMediaStreamAudioDestination,
        NodeTypeMediaStreamAudioSource,
        NodeTypeJavaScript,
        NodeTypeBiquadFilter,
        NodeTypePanner,
        NodeTypeConvolver,
        NodeTypeDelay,
        NodeTypeGain,
        NodeTypeChannelSplitter,
        NodeTypeChannelMerger,
        NodeTypeAnalyser,
        NodeTypeDynamicsCompressor,
        NodeTypeWaveShaper,
        NodeTypeEnd
    };

    // Internal representation of the channel-count mode; presumably mirrors the
    // string values exposed via channelCountMode()/setChannelCountMode() below
    // ("max" / "clamped-max" / "explicit" in the Web Audio spec) — the mapping
    // lives in the .cpp, so confirm there.
    enum ChannelCountMode {
        Max,
        ClampedMax,
        Explicit
    };

    NodeType nodeType() const { return m_nodeType; }
    String nodeTypeName() const;
    void setNodeType(NodeType);

    // We handle our own ref-counting because of the threading issues and subtle nature of
    // how AudioNodes can continue processing (playing one-shot sound) after there are no more
    // JavaScript references to the object.
    // RefTypeNormal tracks ordinary (e.g. JavaScript-visible) references;
    // RefTypeConnection tracks references held by graph connections.
    enum RefType { RefTypeNormal, RefTypeConnection };

    // Can be called from main thread or context's audio thread.
    void ref(RefType refType = RefTypeNormal);
    void deref(RefType refType = RefTypeNormal);

    // Can be called from main thread or context's audio thread. It must be called while the context's graph lock is held.
    void finishDeref(RefType refType);

    // The AudioNodeInput(s) (if any) will already have their input data available when process() is called.
    // Subclasses will take this input data and put the results in the AudioBus(s) of its AudioNodeOutput(s) (if any).
    // Called from context's audio thread.
    virtual void process(size_t framesToProcess) = 0;

    // Resets DSP processing state (clears delay lines, filter memory, etc.)
    // Called from context's audio thread.
    virtual void reset() = 0;

    // No significant resources should be allocated until initialize() is called.
    // Processing may not occur until a node is initialized.
    virtual void initialize();
    virtual void uninitialize();

    bool isInitialized() const { return m_isInitialized; }
    void lazyInitialize();

    unsigned numberOfInputs() const { return m_inputs.size(); }
    unsigned numberOfOutputs() const { return m_outputs.size(); }

    // Accessors for the i'th input/output; behavior for an out-of-range index
    // is defined in the .cpp (likely returns 0 — verify there).
    AudioNodeInput* input(unsigned);
    AudioNodeOutput* output(unsigned);

    // Called from main thread by corresponding JavaScript methods.
    virtual void connect(AudioNode*, unsigned outputIndex, unsigned inputIndex, ExceptionState&);
    void connect(AudioParam*, unsigned outputIndex, ExceptionState&);
    virtual void disconnect(unsigned outputIndex, ExceptionState&);

    virtual float sampleRate() const { return m_sampleRate; }

    // processIfNecessary() is called by our output(s) when the rendering graph needs this AudioNode to process.
    // This method ensures that the AudioNode will only process once per rendering time quantum even if it's called repeatedly.
    // This handles the case of "fanout" where an output is connected to multiple AudioNode inputs.
    // Called from context's audio thread.
    void processIfNecessary(size_t framesToProcess);

    // Called when a new connection has been made to one of our inputs or the connection number of channels has changed.
    // This potentially gives us enough information to perform a lazy initialization or, if necessary, a re-initialization.
    // Called from main thread.
    virtual void checkNumberOfChannelsForInput(AudioNodeInput*);

#if DEBUG_AUDIONODE_REFERENCES
    static void printNodeCounts();
#endif

    bool isMarkedForDeletion() const { return m_isMarkedForDeletion; }

    // tailTime() is the length of time (not counting latency time) where non-zero output may occur after continuous silent input.
    virtual double tailTime() const = 0;
    // latencyTime() is the length of time it takes for non-zero output to appear after non-zero input is provided. This only applies to
    // processing delay which is an artifact of the processing algorithm chosen and is *not* part of the intrinsic desired effect. For
    // example, a "delay" effect is expected to delay the signal, and thus would not be considered latency.
    virtual double latencyTime() const = 0;

    // propagatesSilence() should return true if the node will generate silent output when given silent input. By default, AudioNode
    // will take tailTime() and latencyTime() into account when determining whether the node will propagate silence.
    virtual bool propagatesSilence() const;
    bool inputsAreSilent();
    void silenceOutputs();
    void unsilenceOutputs();

    // Enable/disable this node's outputs depending on its connection ref-count;
    // exact policy is implemented in the .cpp.
    void enableOutputsIfNecessary();
    void disableOutputsIfNecessary();

    // Web Audio "channelCount"/"channelCountMode"/"channelInterpretation"
    // attribute plumbing; the string<->enum conversions live in the .cpp.
    unsigned long channelCount();
    virtual void setChannelCount(unsigned long, ExceptionState&);

    String channelCountMode();
    void setChannelCountMode(const String&, ExceptionState&);

    String channelInterpretation();
    void setChannelInterpretation(const String&, ExceptionState&);

    // Enum-typed accessors for internal (non-bindings) use, avoiding string conversion.
    ChannelCountMode internalChannelCountMode() const { return m_channelCountMode; }
    AudioBus::ChannelInterpretation internalChannelInterpretation() const { return m_channelInterpretation; }

    // EventTarget
    virtual const AtomicString& interfaceName() const OVERRIDE;
    virtual ExecutionContext* executionContext() const OVERRIDE;

protected:
    // Inputs and outputs must be created before the AudioNode is initialized.
    void addInput(PassOwnPtr<AudioNodeInput>);
    void addOutput(PassOwnPtr<AudioNodeOutput>);

    // Called by processIfNecessary() to cause all parts of the rendering graph connected to us to process.
    // Each rendering quantum, the audio data for each of the AudioNode's inputs will be available after this method is called.
    // Called from context's audio thread.
    virtual void pullInputs(size_t framesToProcess);

    // Force all inputs to take any channel interpretation changes into account.
    void updateChannelsForInputs();

private:
    volatile bool m_isInitialized;
    NodeType m_nodeType;
    RefPtr<AudioContext> m_context;
    float m_sampleRate;
    Vector<OwnPtr<AudioNodeInput> > m_inputs;
    Vector<OwnPtr<AudioNodeOutput> > m_outputs;

    // Timestamps used by processIfNecessary()'s once-per-quantum / silence
    // bookkeeping — presumably context times; confirm against the .cpp.
    double m_lastProcessingTime;
    double m_lastNonSilentTime;

    // Ref-counting, split per RefType (see enum above).
    // NOTE(review): volatile is not a synchronization primitive; since ref()/deref()
    // are documented above as callable from both the main and audio threads, these
    // counters are presumably updated with atomic operations in the .cpp — verify.
    volatile int m_normalRefCount;
    volatile int m_connectionRefCount;

    bool m_isMarkedForDeletion;
    bool m_isDisabled;

#if DEBUG_AUDIONODE_REFERENCES
    // Per-NodeType live-instance counters, sized by the NodeTypeEnd sentinel.
    static bool s_isNodeCountInitialized;
    static int s_nodeCount[NodeTypeEnd];
#endif

    // EventTarget ref-counting is forwarded to our custom ref-counting scheme
    // (RefTypeNormal, via the default arguments of ref()/deref()).
    virtual void refEventTarget() OVERRIDE { ref(); }
    virtual void derefEventTarget() OVERRIDE { deref(); }

protected:
    // Channel configuration state, writable by subclasses (hence the second
    // protected section) and exposed read-only via the internal*() accessors above.
    unsigned m_channelCount;
    ChannelCountMode m_channelCountMode;
    AudioBus::ChannelInterpretation m_channelInterpretation;
};

} // namespace WebCore

#endif // AudioNode_h