/*
 * Copyright (C) 2010, Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef AudioNode_h
#define AudioNode_h

#include "bindings/v8/ScriptWrappable.h"
#include "modules/EventTargetModules.h"
#include "platform/audio/AudioBus.h"
#include "wtf/Forward.h"
#include "wtf/OwnPtr.h"
#include "wtf/PassOwnPtr.h"
#include "wtf/RefPtr.h"
#include "wtf/Vector.h"

// Set to 1 to enable per-node-type instance accounting (s_nodeCount /
// printNodeCounts() below) for debugging ref-count leaks.
#define DEBUG_AUDIONODE_REFERENCES 0

namespace WebCore {

class AudioContext;
class AudioNodeInput;
class AudioNodeOutput;
class AudioParam;
class ExceptionState;

// An AudioNode is the basic building block for handling audio within an AudioContext.
// It may be an audio source, an intermediate processing module, or an audio destination.
// Each AudioNode can have inputs and/or outputs. An AudioSourceNode has no inputs and a single output.
// An AudioDestinationNode has one input and no outputs and represents the final destination to the audio hardware.
// Most processing nodes such as filters will have one input and one output, although multiple inputs and outputs are possible.

// AudioNode has its own ref-counting mechanism that use RefTypes so we cannot use RefCountedGarbageCollected.
class AudioNode : public NoBaseWillBeGarbageCollectedFinalized<AudioNode>, public ScriptWrappable, public EventTargetWithInlineData {
    WILL_BE_USING_GARBAGE_COLLECTED_MIXIN(AudioNode);
public:
    // Number of sample-frames handled per rendering quantum (the framesToProcess
    // granularity used throughout the audio graph).
    enum { ProcessingSizeInFrames = 128 };

    AudioNode(AudioContext*, float sampleRate);
    virtual ~AudioNode();

    // The AudioContext that owns this node's place in the rendering graph.
    AudioContext* context() { return m_context.get(); }
    const AudioContext* context() const { return m_context.get(); }

    // Identifies the concrete subclass; primarily used for nodeTypeName() and
    // the DEBUG_AUDIONODE_REFERENCES accounting.
    enum NodeType {
        NodeTypeUnknown,
        NodeTypeDestination,
        NodeTypeOscillator,
        NodeTypeAudioBufferSource,
        NodeTypeMediaElementAudioSource,
        NodeTypeMediaStreamAudioDestination,
        NodeTypeMediaStreamAudioSource,
        NodeTypeJavaScript,
        NodeTypeBiquadFilter,
        NodeTypePanner,
        NodeTypeConvolver,
        NodeTypeDelay,
        NodeTypeGain,
        NodeTypeChannelSplitter,
        NodeTypeChannelMerger,
        NodeTypeAnalyser,
        NodeTypeDynamicsCompressor,
        NodeTypeWaveShaper,
        NodeTypeEnd // Sentinel: count of node types, used to size s_nodeCount.
    };

    // Internal representation of the Web Audio "channelCountMode" attribute
    // ("max" / "clamped-max" / "explicit" — see channelCountMode() /
    // setChannelCountMode(), which convert to/from the string form).
    enum ChannelCountMode {
        Max,
        ClampedMax,
        Explicit
    };

    NodeType nodeType() const { return m_nodeType; }
    String nodeTypeName() const;
    void setNodeType(NodeType);

    // We handle our own ref-counting because of the threading issues and subtle nature of
    // how AudioNodes can continue processing (playing one-shot sound) after there are no more
    // JavaScript references to the object.
    // RefTypeNormal tracks ordinary references; RefTypeConnection tracks
    // references held through graph connections (see m_normalRefCount /
    // m_connectionRefCount below).
    enum RefType { RefTypeNormal, RefTypeConnection };

    // Can be called from main thread or context's audio thread.
    void ref(RefType refType = RefTypeNormal);
    void deref(RefType refType = RefTypeNormal);

    // Can be called from main thread or context's audio thread. It must be called while the context's graph lock is held.
    void finishDeref(RefType refType);

    // The AudioNodeInput(s) (if any) will already have their input data available when process() is called.
    // Subclasses will take this input data and put the results in the AudioBus(s) of its AudioNodeOutput(s) (if any).
    // Called from context's audio thread.
    virtual void process(size_t framesToProcess) = 0;

    // No significant resources should be allocated until initialize() is called.
    // Processing may not occur until a node is initialized.
    virtual void initialize();
    virtual void uninitialize();

    bool isInitialized() const { return m_isInitialized; }

    unsigned numberOfInputs() const { return m_inputs.size(); }
    unsigned numberOfOutputs() const { return m_outputs.size(); }

    // Accessors by index; see addInput()/addOutput() for how the lists are built.
    AudioNodeInput* input(unsigned);
    AudioNodeOutput* output(unsigned);

    // Called from main thread by corresponding JavaScript methods.
    virtual void connect(AudioNode*, unsigned outputIndex, unsigned inputIndex, ExceptionState&);
    void connect(AudioParam*, unsigned outputIndex, ExceptionState&);
    virtual void disconnect(unsigned outputIndex, ExceptionState&);

    virtual float sampleRate() const { return m_sampleRate; }

    // processIfNecessary() is called by our output(s) when the rendering graph needs this AudioNode to process.
    // This method ensures that the AudioNode will only process once per rendering time quantum even if it's called repeatedly.
    // This handles the case of "fanout" where an output is connected to multiple AudioNode inputs.
    // Called from context's audio thread.
    void processIfNecessary(size_t framesToProcess);

    // Called when a new connection has been made to one of our inputs or the connection number of channels has changed.
    // This potentially gives us enough information to perform a lazy initialization or, if necessary, a re-initialization.
    // Called from main thread.
    virtual void checkNumberOfChannelsForInput(AudioNodeInput*);

#if DEBUG_AUDIONODE_REFERENCES
    static void printNodeCounts();
#endif

    bool isMarkedForDeletion() const { return m_isMarkedForDeletion; }

    // tailTime() is the length of time (not counting latency time) where non-zero output may occur after continuous silent input.
    virtual double tailTime() const = 0;
    // latencyTime() is the length of time it takes for non-zero output to appear after non-zero input is provided. This only applies to
    // processing delay which is an artifact of the processing algorithm chosen and is *not* part of the intrinsic desired effect. For
    // example, a "delay" effect is expected to delay the signal, and thus would not be considered latency.
    virtual double latencyTime() const = 0;

    // propagatesSilence() should return true if the node will generate silent output when given silent input. By default, AudioNode
    // will take tailTime() and latencyTime() into account when determining whether the node will propagate silence.
    virtual bool propagatesSilence() const;
    bool inputsAreSilent();
    void silenceOutputs();
    void unsilenceOutputs();

    // Enable/disable this node's outputs depending on its connection ref state.
    void enableOutputsIfNecessary();
    void disableOutputsIfNecessary();

    // Accessors/mutators for the Web Audio channelCount / channelCountMode /
    // channelInterpretation attributes; the setters validate via ExceptionState.
    unsigned long channelCount();
    virtual void setChannelCount(unsigned long, ExceptionState&);

    String channelCountMode();
    void setChannelCountMode(const String&, ExceptionState&);

    String channelInterpretation();
    void setChannelInterpretation(const String&, ExceptionState&);

    // Enum-typed views of the attributes above, for internal (non-bindings) use.
    ChannelCountMode internalChannelCountMode() const { return m_channelCountMode; }
    AudioBus::ChannelInterpretation internalChannelInterpretation() const { return m_channelInterpretation; }

    // EventTarget
    virtual const AtomicString& interfaceName() const OVERRIDE FINAL;
    virtual ExecutionContext* executionContext() const OVERRIDE FINAL;

    virtual void trace(Visitor*) OVERRIDE;

#if ENABLE(OILPAN)
    // Drops m_keepAlive's persistent self-reference (see comment at m_keepAlive).
    void clearKeepAlive();
#endif

protected:
    // Inputs and outputs must be created before the AudioNode is initialized.
    void addInput(PassOwnPtr<AudioNodeInput>);
    void addOutput(PassOwnPtr<AudioNodeOutput>);

    // Called by processIfNecessary() to cause all parts of the rendering graph connected to us to process.
    // Each rendering quantum, the audio data for each of the AudioNode's inputs will be available after this method is called.
    // Called from context's audio thread.
    virtual void pullInputs(size_t framesToProcess);

    // Force all inputs to take any channel interpretation changes into account.
    void updateChannelsForInputs();

private:
    // volatile: read/written across the main and audio threads (see ref()/deref()
    // comments above); NOTE(review): not a substitute for real synchronization.
    volatile bool m_isInitialized;
    NodeType m_nodeType;
    RefPtrWillBeMember<AudioContext> m_context;
    float m_sampleRate;
    Vector<OwnPtr<AudioNodeInput> > m_inputs;
    Vector<OwnPtr<AudioNodeOutput> > m_outputs;

#if ENABLE(OILPAN)
    // AudioNodes are in the oilpan heap but they are still reference counted at
    // the same time. This is because we are not allowed to stop the audio
    // thread and thus the audio thread cannot allocate objects in the oilpan
    // heap.
    // The m_keepAlive handle is used to keep a persistent reference to this
    // AudioNode while someone has a reference to this AudioNode through a
    // RefPtr.
    GC_PLUGIN_IGNORE("http://crbug.com/353083")
    OwnPtr<Persistent<AudioNode> > m_keepAlive;
#endif

    // Bookkeeping for processIfNecessary()/propagatesSilence(): presumably the
    // context time of the last processed quantum and of the last non-silent
    // output — confirm against AudioNode.cpp.
    double m_lastProcessingTime;
    double m_lastNonSilentTime;

    // Ref-counting (one counter per RefType; see enum RefType above).
    volatile int m_normalRefCount;
    volatile int m_connectionRefCount;

    bool m_isMarkedForDeletion;
    bool m_isDisabled;

#if DEBUG_AUDIONODE_REFERENCES
    static bool s_isNodeCountInitialized;
    static int s_nodeCount[NodeTypeEnd];
#endif

#if !ENABLE(OILPAN)
    // Without Oilpan, EventTarget lifetime is tied to this node's own ref-counting.
    virtual void refEventTarget() OVERRIDE FINAL { ref(); }
    virtual void derefEventTarget() OVERRIDE FINAL { deref(); }
#endif

protected:
    unsigned m_channelCount;
    ChannelCountMode m_channelCountMode;
    AudioBus::ChannelInterpretation m_channelInterpretation;
};

} // namespace WebCore

#endif // AudioNode_h