/*
 * Copyright (C) 2010, Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23 */ 24 25 #ifndef AudioContext_h 26 #define AudioContext_h 27 28 #include "bindings/v8/ScriptWrappable.h" 29 #include "core/dom/ActiveDOMObject.h" 30 #include "core/dom/EventListener.h" 31 #include "core/dom/EventTarget.h" 32 #include "core/platform/audio/AudioBus.h" 33 #include "core/platform/audio/HRTFDatabaseLoader.h" 34 #include "modules/webaudio/AsyncAudioDecoder.h" 35 #include "modules/webaudio/AudioDestinationNode.h" 36 #include "wtf/HashSet.h" 37 #include "wtf/MainThread.h" 38 #include "wtf/OwnPtr.h" 39 #include "wtf/PassRefPtr.h" 40 #include "wtf/RefCounted.h" 41 #include "wtf/RefPtr.h" 42 #include "wtf/ThreadSafeRefCounted.h" 43 #include "wtf/Threading.h" 44 #include "wtf/Vector.h" 45 #include "wtf/text/AtomicStringHash.h" 46 47 namespace WebCore { 48 49 class AnalyserNode; 50 class AudioBuffer; 51 class AudioBufferCallback; 52 class AudioBufferSourceNode; 53 class AudioListener; 54 class AudioSummingJunction; 55 class BiquadFilterNode; 56 class ChannelMergerNode; 57 class ChannelSplitterNode; 58 class ConvolverNode; 59 class DelayNode; 60 class Document; 61 class DynamicsCompressorNode; 62 class ExceptionState; 63 class GainNode; 64 class HTMLMediaElement; 65 class MediaElementAudioSourceNode; 66 class MediaStreamAudioDestinationNode; 67 class MediaStreamAudioSourceNode; 68 class OscillatorNode; 69 class PannerNode; 70 class PeriodicWave; 71 class ScriptProcessorNode; 72 class WaveShaperNode; 73 74 // AudioContext is the cornerstone of the web audio API and all AudioNodes are created from it. 75 // For thread safety between the audio thread and the main thread, it has a rendering graph locking mechanism. 76 77 class AudioContext : public ActiveDOMObject, public ScriptWrappable, public ThreadSafeRefCounted<AudioContext>, public EventTarget { 78 public: 79 // Create an AudioContext for rendering to the audio hardware. 80 static PassRefPtr<AudioContext> create(Document*); 81 82 // Create an AudioContext for offline (non-realtime) rendering. 
83 static PassRefPtr<AudioContext> createOfflineContext(Document*, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState&); 84 85 virtual ~AudioContext(); 86 87 bool isInitialized() const; 88 89 bool isOfflineContext() { return m_isOfflineContext; } 90 91 // Returns true when initialize() was called AND all asynchronous initialization has completed. 92 bool isRunnable() const; 93 94 HRTFDatabaseLoader* hrtfDatabaseLoader() const { return m_hrtfDatabaseLoader.get(); } 95 96 // Document notification 97 virtual void stop(); 98 99 Document* document() const; // ASSERTs if document no longer exists. 100 bool hasDocument(); 101 102 AudioDestinationNode* destination() { return m_destinationNode.get(); } 103 size_t currentSampleFrame() const { return m_destinationNode->currentSampleFrame(); } 104 double currentTime() const { return m_destinationNode->currentTime(); } 105 float sampleRate() const { return m_destinationNode->sampleRate(); } 106 unsigned long activeSourceCount() const { return static_cast<unsigned long>(m_activeSourceCount); } 107 108 void incrementActiveSourceCount(); 109 void decrementActiveSourceCount(); 110 111 PassRefPtr<AudioBuffer> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState&); 112 PassRefPtr<AudioBuffer> createBuffer(ArrayBuffer*, bool mixToMono, ExceptionState&); 113 114 // Asynchronous audio file data decoding. 115 void decodeAudioData(ArrayBuffer*, PassRefPtr<AudioBufferCallback>, PassRefPtr<AudioBufferCallback>, ExceptionState&); 116 117 AudioListener* listener() { return m_listener.get(); } 118 119 // The AudioNode create methods are called on the main thread (from JavaScript). 
120 PassRefPtr<AudioBufferSourceNode> createBufferSource(); 121 PassRefPtr<MediaElementAudioSourceNode> createMediaElementSource(HTMLMediaElement*, ExceptionState&); 122 PassRefPtr<MediaStreamAudioSourceNode> createMediaStreamSource(MediaStream*, ExceptionState&); 123 PassRefPtr<MediaStreamAudioDestinationNode> createMediaStreamDestination(); 124 PassRefPtr<GainNode> createGain(); 125 PassRefPtr<BiquadFilterNode> createBiquadFilter(); 126 PassRefPtr<WaveShaperNode> createWaveShaper(); 127 PassRefPtr<DelayNode> createDelay(ExceptionState&); 128 PassRefPtr<DelayNode> createDelay(double maxDelayTime, ExceptionState&); 129 PassRefPtr<PannerNode> createPanner(); 130 PassRefPtr<ConvolverNode> createConvolver(); 131 PassRefPtr<DynamicsCompressorNode> createDynamicsCompressor(); 132 PassRefPtr<AnalyserNode> createAnalyser(); 133 PassRefPtr<ScriptProcessorNode> createScriptProcessor(size_t bufferSize, ExceptionState&); 134 PassRefPtr<ScriptProcessorNode> createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState&); 135 PassRefPtr<ScriptProcessorNode> createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState&); 136 PassRefPtr<ChannelSplitterNode> createChannelSplitter(ExceptionState&); 137 PassRefPtr<ChannelSplitterNode> createChannelSplitter(size_t numberOfOutputs, ExceptionState&); 138 PassRefPtr<ChannelMergerNode> createChannelMerger(ExceptionState&); 139 PassRefPtr<ChannelMergerNode> createChannelMerger(size_t numberOfInputs, ExceptionState&); 140 PassRefPtr<OscillatorNode> createOscillator(); 141 PassRefPtr<PeriodicWave> createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState&); 142 143 // When a source node has no more processing to do (has finished playing), then it tells the context to dereference it. 144 void notifyNodeFinishedProcessing(AudioNode*); 145 146 // Called at the start of each render quantum. 
147 void handlePreRenderTasks(); 148 149 // Called at the end of each render quantum. 150 void handlePostRenderTasks(); 151 152 // Called periodically at the end of each render quantum to dereference finished source nodes. 153 void derefFinishedSourceNodes(); 154 155 // We schedule deletion of all marked nodes at the end of each realtime render quantum. 156 void markForDeletion(AudioNode*); 157 void deleteMarkedNodes(); 158 159 // AudioContext can pull node(s) at the end of each render quantum even when they are not connected to any downstream nodes. 160 // These two methods are called by the nodes who want to add/remove themselves into/from the automatic pull lists. 161 void addAutomaticPullNode(AudioNode*); 162 void removeAutomaticPullNode(AudioNode*); 163 164 // Called right before handlePostRenderTasks() to handle nodes which need to be pulled even when they are not connected to anything. 165 void processAutomaticPullNodes(size_t framesToProcess); 166 167 // Keeps track of the number of connections made. 168 void incrementConnectionCount() 169 { 170 ASSERT(isMainThread()); 171 m_connectionCount++; 172 } 173 174 unsigned connectionCount() const { return m_connectionCount; } 175 176 // 177 // Thread Safety and Graph Locking: 178 // 179 180 void setAudioThread(ThreadIdentifier thread) { m_audioThread = thread; } // FIXME: check either not initialized or the same 181 ThreadIdentifier audioThread() const { return m_audioThread; } 182 bool isAudioThread() const; 183 184 // Returns true only after the audio thread has been started and then shutdown. 185 bool isAudioThreadFinished() { return m_isAudioThreadFinished; } 186 187 // mustReleaseLock is set to true if we acquired the lock in this method call and caller must unlock(), false if it was previously acquired. 188 void lock(bool& mustReleaseLock); 189 190 // Returns true if we own the lock. 
191 // mustReleaseLock is set to true if we acquired the lock in this method call and caller must unlock(), false if it was previously acquired. 192 bool tryLock(bool& mustReleaseLock); 193 194 void unlock(); 195 196 // Returns true if this thread owns the context's lock. 197 bool isGraphOwner() const; 198 199 // Returns the maximum numuber of channels we can support. 200 static unsigned maxNumberOfChannels() { return MaxNumberOfChannels;} 201 202 class AutoLocker { 203 public: 204 AutoLocker(AudioContext* context) 205 : m_context(context) 206 { 207 ASSERT(context); 208 context->lock(m_mustReleaseLock); 209 } 210 211 ~AutoLocker() 212 { 213 if (m_mustReleaseLock) 214 m_context->unlock(); 215 } 216 private: 217 AudioContext* m_context; 218 bool m_mustReleaseLock; 219 }; 220 221 // In AudioNode::deref() a tryLock() is used for calling finishDeref(), but if it fails keep track here. 222 void addDeferredFinishDeref(AudioNode*); 223 224 // In the audio thread at the start of each render cycle, we'll call handleDeferredFinishDerefs(). 225 void handleDeferredFinishDerefs(); 226 227 // Only accessed when the graph lock is held. 228 void markSummingJunctionDirty(AudioSummingJunction*); 229 void markAudioNodeOutputDirty(AudioNodeOutput*); 230 231 // Must be called on main thread. 232 void removeMarkedSummingJunction(AudioSummingJunction*); 233 234 // EventTarget 235 virtual const AtomicString& interfaceName() const; 236 virtual ScriptExecutionContext* scriptExecutionContext() const; 237 virtual EventTargetData* eventTargetData() { return &m_eventTargetData; } 238 virtual EventTargetData* ensureEventTargetData() { return &m_eventTargetData; } 239 240 DEFINE_ATTRIBUTE_EVENT_LISTENER(complete); 241 242 // Reconcile ref/deref which are defined both in ThreadSafeRefCounted and EventTarget. 
243 using ThreadSafeRefCounted<AudioContext>::ref; 244 using ThreadSafeRefCounted<AudioContext>::deref; 245 246 void startRendering(); 247 void fireCompletionEvent(); 248 249 static unsigned s_hardwareContextCount; 250 251 protected: 252 explicit AudioContext(Document*); 253 AudioContext(Document*, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate); 254 255 static bool isSampleRateRangeGood(float sampleRate); 256 257 private: 258 void constructCommon(); 259 260 void lazyInitialize(); 261 void uninitialize(); 262 263 // ScriptExecutionContext calls stop twice. 264 // We'd like to schedule only one stop action for them. 265 bool m_isStopScheduled; 266 static void stopDispatch(void* userData); 267 void clear(); 268 269 void scheduleNodeDeletion(); 270 static void deleteMarkedNodesDispatch(void* userData); 271 272 bool m_isInitialized; 273 bool m_isAudioThreadFinished; 274 275 // The context itself keeps a reference to all source nodes. The source nodes, then reference all nodes they're connected to. 276 // In turn, these nodes reference all nodes they're connected to. All nodes are ultimately connected to the AudioDestinationNode. 277 // When the context dereferences a source node, it will be deactivated from the rendering graph along with all other nodes it is 278 // uniquely connected to. See the AudioNode::ref() and AudioNode::deref() methods for more details. 279 void refNode(AudioNode*); 280 void derefNode(AudioNode*); 281 282 // When the context goes away, there might still be some sources which haven't finished playing. 283 // Make sure to dereference them here. 284 void derefUnfinishedSourceNodes(); 285 286 RefPtr<AudioDestinationNode> m_destinationNode; 287 RefPtr<AudioListener> m_listener; 288 289 // Only accessed in the audio thread. 290 Vector<AudioNode*> m_finishedNodes; 291 292 // We don't use RefPtr<AudioNode> here because AudioNode has a more complex ref() / deref() implementation 293 // with an optional argument for refType. 
We need to use the special refType: RefTypeConnection 294 // Either accessed when the graph lock is held, or on the main thread when the audio thread has finished. 295 Vector<AudioNode*> m_referencedNodes; 296 297 // Accumulate nodes which need to be deleted here. 298 // This is copied to m_nodesToDelete at the end of a render cycle in handlePostRenderTasks(), where we're assured of a stable graph 299 // state which will have no references to any of the nodes in m_nodesToDelete once the context lock is released 300 // (when handlePostRenderTasks() has completed). 301 Vector<AudioNode*> m_nodesMarkedForDeletion; 302 303 // They will be scheduled for deletion (on the main thread) at the end of a render cycle (in realtime thread). 304 Vector<AudioNode*> m_nodesToDelete; 305 bool m_isDeletionScheduled; 306 307 // Only accessed when the graph lock is held. 308 HashSet<AudioSummingJunction*> m_dirtySummingJunctions; 309 HashSet<AudioNodeOutput*> m_dirtyAudioNodeOutputs; 310 void handleDirtyAudioSummingJunctions(); 311 void handleDirtyAudioNodeOutputs(); 312 313 // For the sake of thread safety, we maintain a seperate Vector of automatic pull nodes for rendering in m_renderingAutomaticPullNodes. 314 // It will be copied from m_automaticPullNodes by updateAutomaticPullNodes() at the very start or end of the rendering quantum. 315 HashSet<AudioNode*> m_automaticPullNodes; 316 Vector<AudioNode*> m_renderingAutomaticPullNodes; 317 // m_automaticPullNodesNeedUpdating keeps track if m_automaticPullNodes is modified. 318 bool m_automaticPullNodesNeedUpdating; 319 void updateAutomaticPullNodes(); 320 321 unsigned m_connectionCount; 322 323 // Graph locking. 324 Mutex m_contextGraphMutex; 325 volatile ThreadIdentifier m_audioThread; 326 volatile ThreadIdentifier m_graphOwnerThread; // if the lock is held then this is the thread which owns it, otherwise == UndefinedThreadIdentifier 327 328 // Only accessed in the audio thread. 
329 Vector<AudioNode*> m_deferredFinishDerefList; 330 331 // HRTF Database loader 332 RefPtr<HRTFDatabaseLoader> m_hrtfDatabaseLoader; 333 334 // EventTarget 335 virtual void refEventTarget() { ref(); } 336 virtual void derefEventTarget() { deref(); } 337 EventTargetData m_eventTargetData; 338 339 RefPtr<AudioBuffer> m_renderTarget; 340 341 bool m_isOfflineContext; 342 343 AsyncAudioDecoder m_audioDecoder; 344 345 // This is considering 32 is large enough for multiple channels audio. 346 // It is somewhat arbitrary and could be increased if necessary. 347 enum { MaxNumberOfChannels = 32 }; 348 349 // Number of AudioBufferSourceNodes that are active (playing). 350 int m_activeSourceCount; 351 }; 352 353 } // WebCore 354 355 #endif // AudioContext_h 356