// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/media/webrtc_local_audio_track.h"

#include "content/public/renderer/media_stream_audio_sink.h"
#include "content/renderer/media/media_stream_audio_sink_owner.h"
#include "content/renderer/media/media_stream_audio_track_sink.h"
#include "content/renderer/media/peer_connection_audio_sink_owner.h"
#include "content/renderer/media/webaudio_capturer_source.h"
#include "content/renderer/media/webrtc_audio_capturer.h"
#include "content/renderer/media/webrtc_local_audio_source_provider.h"
#include "media/base/audio_fifo.h"
#include "third_party/libjingle/source/talk/media/base/audiorenderer.h"

namespace content {

static const size_t kMaxNumberOfBuffersInFifo = 2;
static const char kAudioTrackKind[] = "audio";

namespace {

using webrtc::MediaConstraintsInterface;

// This helper function checks whether any audio constraint is set that
// requires audio processing to be applied. Right now this is a single check
// covering all of the properties, but in the future they will be handled one
// by one.
bool NeedsAudioProcessing(
    const webrtc::MediaConstraintsInterface* constraints) {
  if (!constraints)
    return false;

  static const char* kAudioProcessingProperties[] = {
    MediaConstraintsInterface::kEchoCancellation,
    MediaConstraintsInterface::kExperimentalEchoCancellation,
    MediaConstraintsInterface::kAutoGainControl,
    MediaConstraintsInterface::kExperimentalAutoGainControl,
    MediaConstraintsInterface::kNoiseSuppression,
    MediaConstraintsInterface::kHighpassFilter,
    MediaConstraintsInterface::kTypingNoiseDetection,
  };

  for (size_t i = 0; i < arraysize(kAudioProcessingProperties); ++i) {
    bool value = false;
    if (webrtc::FindConstraint(constraints, kAudioProcessingProperties[i],
                               &value, NULL) &&
        value) {
      return true;
    }
  }

  return false;
}

}  // namespace

// This is a temporary audio buffer with parameters used to send data to
// callbacks.
class WebRtcLocalAudioTrack::ConfiguredBuffer {
 public:
  ConfiguredBuffer() {}
  virtual ~ConfiguredBuffer() {}

  void Configure(const media::AudioParameters& params) {
    DCHECK(params.IsValid());

    // PeerConnection uses 10 ms as its native packet size. Use that as the
    // sink buffer size to achieve the best performance when a PeerConnection
    // is connected to the track.
    int sink_buffer_size = params.sample_rate() / 100;
    if (params.frames_per_buffer() < sink_buffer_size) {
      // When the source is running with a buffer size smaller than the
      // PeerConnection buffer size, no PeerConnection is connected to the
      // track. Use the same buffer size as the incoming format to avoid an
      // extra FIFO for WebAudio.
      sink_buffer_size = params.frames_per_buffer();
    }
    params_.Reset(params.format(), params.channel_layout(), params.channels(),
                  params.input_channels(), params.sample_rate(),
                  params.bits_per_sample(), sink_buffer_size);

    audio_wrapper_ = media::AudioBus::Create(params_.channels(),
                                             params_.frames_per_buffer());
    buffer_.reset(new int16[params_.frames_per_buffer() * params_.channels()]);

    // The size of the FIFO should be at least twice the source buffer size
    // or twice the sink buffer size, whichever is larger.
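    // For example (illustrative numbers only, not taken from this file): a
    // 48 kHz source delivering 960-frame (20 ms) buffers with a 480-frame
    // (10 ms) sink buffer yields max(2 * 960, 2 * 480) = 1920 frames of FIFO
    // capacity.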
    int buffer_size = std::max(
        kMaxNumberOfBuffersInFifo * params.frames_per_buffer(),
        kMaxNumberOfBuffersInFifo * params_.frames_per_buffer());
    fifo_.reset(new media::AudioFifo(params_.channels(), buffer_size));
  }

  void Push(media::AudioBus* audio_source) {
    DCHECK(fifo_->frames() + audio_source->frames() <= fifo_->max_frames());
    fifo_->Push(audio_source);
  }

  bool Consume() {
    if (fifo_->frames() < audio_wrapper_->frames())
      return false;

    fifo_->Consume(audio_wrapper_.get(), 0, audio_wrapper_->frames());
    audio_wrapper_->ToInterleaved(audio_wrapper_->frames(),
                                  params_.bits_per_sample() / 8,
                                  buffer());
    return true;
  }

  int16* buffer() const { return buffer_.get(); }

  // Format of the output audio buffer.
  const media::AudioParameters& params() const { return params_; }

 private:
  media::AudioParameters params_;
  scoped_ptr<media::AudioBus> audio_wrapper_;
  scoped_ptr<media::AudioFifo> fifo_;
  scoped_ptr<int16[]> buffer_;
};

scoped_refptr<WebRtcLocalAudioTrack> WebRtcLocalAudioTrack::Create(
    const std::string& id,
    const scoped_refptr<WebRtcAudioCapturer>& capturer,
    WebAudioCapturerSource* webaudio_source,
    webrtc::AudioSourceInterface* track_source,
    const webrtc::MediaConstraintsInterface* constraints) {
  talk_base::RefCountedObject<WebRtcLocalAudioTrack>* track =
      new talk_base::RefCountedObject<WebRtcLocalAudioTrack>(
          id, capturer, webaudio_source, track_source, constraints);
  return track;
}

WebRtcLocalAudioTrack::WebRtcLocalAudioTrack(
    const std::string& label,
    const scoped_refptr<WebRtcAudioCapturer>& capturer,
    WebAudioCapturerSource* webaudio_source,
    webrtc::AudioSourceInterface* track_source,
    const webrtc::MediaConstraintsInterface* constraints)
    : webrtc::MediaStreamTrack<webrtc::AudioTrackInterface>(label),
      capturer_(capturer),
      webaudio_source_(webaudio_source),
      track_source_(track_source),
      need_audio_processing_(NeedsAudioProcessing(constraints)),
      buffer_(new ConfiguredBuffer()) {
  DCHECK(capturer.get() || webaudio_source);
  if (!webaudio_source_) {
    source_provider_.reset(new WebRtcLocalAudioSourceProvider());
    AddSink(source_provider_.get());
  }
  DVLOG(1) << "WebRtcLocalAudioTrack::WebRtcLocalAudioTrack()";
}

WebRtcLocalAudioTrack::~WebRtcLocalAudioTrack() {
  DCHECK(main_render_thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcLocalAudioTrack::~WebRtcLocalAudioTrack()";
  // Users might not call Stop() on the track.
  Stop();
}

void WebRtcLocalAudioTrack::Capture(media::AudioBus* audio_source,
                                    int audio_delay_milliseconds,
                                    int volume,
                                    bool key_pressed) {
  DCHECK(capture_thread_checker_.CalledOnValidThread());
  scoped_refptr<WebRtcAudioCapturer> capturer;
  std::vector<int> voe_channels;
  SinkList::ItemList sinks;
  SinkList::ItemList sinks_to_notify_format;
  bool is_webaudio_source = false;
  {
    base::AutoLock auto_lock(lock_);
    capturer = capturer_;
    voe_channels = voe_channels_;
    sinks = sinks_.Items();
    sinks_.RetrieveAndClearTags(&sinks_to_notify_format);
    is_webaudio_source = (webaudio_source_.get() != NULL);
  }

  // Notify the sinks when the format changes. This will do nothing if
  // |sinks_to_notify_format| is empty.
  for (SinkList::ItemList::const_iterator it = sinks_to_notify_format.begin();
       it != sinks_to_notify_format.end(); ++it) {
    (*it)->OnSetFormat(buffer_->params());
  }

  // Push the data to the FIFO.
  buffer_->Push(audio_source);

  // When the source is WebAudio, turn off the audio processing if the delay
  // value is 0, even though the constraint is set to true. In that case the
  // data is not coming from a microphone.
  // TODO(xians): Remove the flag when supporting one APM per audio track.
  // See crbug/264611 for details.
  bool need_audio_processing = need_audio_processing_;
  if (is_webaudio_source && need_audio_processing)
    need_audio_processing = (audio_delay_milliseconds != 0);

  int current_volume = volume;
  while (buffer_->Consume()) {
    // Feed the data to the sinks.
    // TODO(jiayl): We should not pass the real audio data down if the track
    // is disabled. This is currently done to feed input to WebRTC typing
    // detection and should be changed when audio processing is moved from
    // WebRTC to the track.
    for (SinkList::ItemList::const_iterator it = sinks.begin();
         it != sinks.end();
         ++it) {
      int new_volume = (*it)->OnData(buffer_->buffer(),
                                     buffer_->params().sample_rate(),
                                     buffer_->params().channels(),
                                     buffer_->params().frames_per_buffer(),
                                     voe_channels,
                                     audio_delay_milliseconds,
                                     current_volume,
                                     need_audio_processing,
                                     key_pressed);
      if (new_volume != 0 && capturer.get()) {
        // Feed the new volume to WebRTC while changing the volume on the
        // browser.
        capturer->SetVolume(new_volume);
        current_volume = new_volume;
      }
    }
  }
}

void WebRtcLocalAudioTrack::OnSetFormat(
    const media::AudioParameters& params) {
  DVLOG(1) << "WebRtcLocalAudioTrack::OnSetFormat()";
  // If the source is restarted, we might have changed to another capture
  // thread.
  capture_thread_checker_.DetachFromThread();
  DCHECK(capture_thread_checker_.CalledOnValidThread());

  DCHECK(params.IsValid());
  buffer_->Configure(params);

  base::AutoLock auto_lock(lock_);
  // Remember to notify all sinks of the new format.
  sinks_.TagAll();
}

void WebRtcLocalAudioTrack::AddChannel(int channel_id) {
  DVLOG(1) << "WebRtcLocalAudioTrack::AddChannel(channel_id="
           << channel_id << ")";
  base::AutoLock auto_lock(lock_);
  if (std::find(voe_channels_.begin(), voe_channels_.end(), channel_id) !=
      voe_channels_.end()) {
    // We need to handle the case when the same channel is connected to the
    // track more than once.
    return;
  }

  voe_channels_.push_back(channel_id);
}

void WebRtcLocalAudioTrack::RemoveChannel(int channel_id) {
  DVLOG(1) << "WebRtcLocalAudioTrack::RemoveChannel(channel_id="
           << channel_id << ")";
  base::AutoLock auto_lock(lock_);
  std::vector<int>::iterator iter =
      std::find(voe_channels_.begin(), voe_channels_.end(), channel_id);
  DCHECK(iter != voe_channels_.end());
  voe_channels_.erase(iter);
}

// webrtc::AudioTrackInterface implementation.
webrtc::AudioSourceInterface* WebRtcLocalAudioTrack::GetSource() const {
  return track_source_;
}

cricket::AudioRenderer* WebRtcLocalAudioTrack::GetRenderer() {
  return this;
}

std::string WebRtcLocalAudioTrack::kind() const {
  return kAudioTrackKind;
}

void WebRtcLocalAudioTrack::AddSink(MediaStreamAudioSink* sink) {
  DCHECK(main_render_thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcLocalAudioTrack::AddSink()";
  base::AutoLock auto_lock(lock_);

  // Verify that |sink| is not already added to the list.
  DCHECK(!sinks_.Contains(
      MediaStreamAudioTrackSink::WrapsMediaStreamSink(sink)));

  // Create (and add to the list) a new MediaStreamAudioTrackSink
  // which owns the |sink| and delegates all calls to the
  // MediaStreamAudioSink interface. It will be tagged in the list, so
  // we remember to call OnSetFormat() on the new sink.
  scoped_refptr<MediaStreamAudioTrackSink> sink_owner(
      new MediaStreamAudioSinkOwner(sink));
  sinks_.AddAndTag(sink_owner);
}

void WebRtcLocalAudioTrack::RemoveSink(MediaStreamAudioSink* sink) {
  DCHECK(main_render_thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcLocalAudioTrack::RemoveSink()";

  base::AutoLock auto_lock(lock_);

  scoped_refptr<MediaStreamAudioTrackSink> removed_item = sinks_.Remove(
      MediaStreamAudioTrackSink::WrapsMediaStreamSink(sink));

  // Clear the delegate to ensure that no more capture callbacks will
  // be sent to this sink. This also avoids a possible crash which can happen
  // if this method is called while capturing is active.
  if (removed_item.get())
    removed_item->Reset();
}

void WebRtcLocalAudioTrack::AddSink(PeerConnectionAudioSink* sink) {
  DCHECK(main_render_thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcLocalAudioTrack::AddSink()";
  base::AutoLock auto_lock(lock_);

  // Verify that |sink| is not already added to the list.
  DCHECK(!sinks_.Contains(
      MediaStreamAudioTrackSink::WrapsPeerConnectionSink(sink)));

  // Create (and add to the list) a new MediaStreamAudioTrackSink
  // which owns the |sink| and delegates all calls to the
  // PeerConnectionAudioSink interface. It will be tagged in the list, so
  // we remember to call OnSetFormat() on the new sink.
  scoped_refptr<MediaStreamAudioTrackSink> sink_owner(
      new PeerConnectionAudioSinkOwner(sink));
  sinks_.AddAndTag(sink_owner);
}

void WebRtcLocalAudioTrack::RemoveSink(PeerConnectionAudioSink* sink) {
  DCHECK(main_render_thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcLocalAudioTrack::RemoveSink()";

  base::AutoLock auto_lock(lock_);

  scoped_refptr<MediaStreamAudioTrackSink> removed_item = sinks_.Remove(
      MediaStreamAudioTrackSink::WrapsPeerConnectionSink(sink));
  // Clear the delegate to ensure that no more capture callbacks will
  // be sent to this sink. This also avoids a possible crash which can happen
  // if this method is called while capturing is active.
  if (removed_item.get())
    removed_item->Reset();
}

void WebRtcLocalAudioTrack::Start() {
  DCHECK(main_render_thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcLocalAudioTrack::Start()";
  if (webaudio_source_.get()) {
    // If the track is hooking up with WebAudio, do NOT add the track to the
    // capturer as its sink; otherwise two streams with different clocks would
    // be pushed through the same track.
    webaudio_source_->Start(this, capturer_.get());
    return;
  }

  if (capturer_.get())
    capturer_->AddTrack(this);

  SinkList::ItemList sinks;
  {
    base::AutoLock auto_lock(lock_);
    sinks = sinks_.Items();
  }
  for (SinkList::ItemList::const_iterator it = sinks.begin();
       it != sinks.end();
       ++it) {
    (*it)->OnReadyStateChanged(blink::WebMediaStreamSource::ReadyStateLive);
  }
}

void WebRtcLocalAudioTrack::Stop() {
  DCHECK(main_render_thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcLocalAudioTrack::Stop()";
  if (!capturer_.get() && !webaudio_source_.get())
    return;

  if (webaudio_source_.get()) {
    // Call Stop() on the |webaudio_source_| explicitly so that
    // |webaudio_source_| won't push more data to the track anymore.
    // Also note that the track is not registered as a sink to the |capturer_|
    // in this case, so there is no need to call RemoveTrack().
    webaudio_source_->Stop();
  } else {
    // It is necessary to call RemoveTrack() on the |capturer_| to avoid
    // getting audio callbacks after Stop().
    capturer_->RemoveTrack(this);
  }

  // Protect the pointers using the lock when accessing |sinks_| and
  // setting the |capturer_| to NULL.
  SinkList::ItemList sinks;
  {
    base::AutoLock auto_lock(lock_);
    sinks = sinks_.Items();
    sinks_.Clear();
    webaudio_source_ = NULL;
    capturer_ = NULL;
  }

  for (SinkList::ItemList::const_iterator it = sinks.begin();
       it != sinks.end();
       ++it) {
    (*it)->OnReadyStateChanged(blink::WebMediaStreamSource::ReadyStateEnded);
    (*it)->Reset();
  }
}

}  // namespace content