// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/media/webrtc_audio_capturer.h"

#include "base/bind.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "content/child/child_process.h"
#include "content/renderer/media/audio_device_factory.h"
#include "content/renderer/media/media_stream_audio_processor.h"
#include "content/renderer/media/media_stream_audio_processor_options.h"
#include "content/renderer/media/media_stream_audio_source.h"
#include "content/renderer/media/webrtc_audio_device_impl.h"
#include "content/renderer/media/webrtc_local_audio_track.h"
#include "content/renderer/media/webrtc_logging.h"
#include "media/audio/sample_rates.h"

namespace content {

namespace {

// Time constant for AudioPowerMonitor.  See AudioPowerMonitor ctor comments
// for semantics.  This value was arbitrarily chosen, but seems to work well.
const int kPowerMonitorTimeConstantMs = 10;

// The time between two audio power level log entries.
const int kPowerMonitorLogIntervalSeconds = 10;

// Returns true if any of the data in |audio_source| has energy.
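// The scan exits as soon as a non-zero sample is found, so the full
// channels() * frames() pass is only paid for completely silent buffers.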
bool HasDataEnergy(const media::AudioBus& audio_source) {
  for (int ch = 0; ch < audio_source.channels(); ++ch) {
    const float* channel_ptr = audio_source.channel(ch);
    for (int frame = 0; frame < audio_source.frames(); ++frame) {
      if (channel_ptr[frame] != 0)
        return true;
    }
  }

  // All the data is zero.
  return false;
}

}  // namespace

// Reference counted container of a WebRtcLocalAudioTrack delegate.
// TODO(xians): Switch to MediaStreamAudioSinkOwner.
class WebRtcAudioCapturer::TrackOwner
    : public base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner> {
 public:
  explicit TrackOwner(WebRtcLocalAudioTrack* track)
      : delegate_(track) {}

  void Capture(const int16* audio_data,
               base::TimeDelta delay,
               double volume,
               bool key_pressed,
               bool need_audio_processing,
               bool force_report_nonzero_energy) {
    base::AutoLock lock(lock_);
    if (delegate_) {
      delegate_->Capture(audio_data,
                         delay,
                         volume,
                         key_pressed,
                         need_audio_processing,
                         force_report_nonzero_energy);
    }
  }

  void OnSetFormat(const media::AudioParameters& params) {
    base::AutoLock lock(lock_);
    if (delegate_)
      delegate_->OnSetFormat(params);
  }

  void SetAudioProcessor(
      const scoped_refptr<MediaStreamAudioProcessor>& processor) {
    base::AutoLock lock(lock_);
    if (delegate_)
      delegate_->SetAudioProcessor(processor);
  }

  void Reset() {
    base::AutoLock lock(lock_);
    delegate_ = NULL;
  }

  void Stop() {
    base::AutoLock lock(lock_);
    DCHECK(delegate_);

    // This can be reentrant so reset |delegate_| before calling out.
    WebRtcLocalAudioTrack* temp = delegate_;
    delegate_ = NULL;
    temp->Stop();
  }

  // Wrapper which allows the use of std::find_if() when adding and removing
  // tracks to/from the list.
  struct TrackWrapper {
    TrackWrapper(WebRtcLocalAudioTrack* track) : track_(track) {}
    bool operator()(
        const scoped_refptr<WebRtcAudioCapturer::TrackOwner>& owner) const {
      return owner->IsEqual(track_);
    }
    WebRtcLocalAudioTrack* track_;
  };
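  // For example, a plain list of owners could be searched with
  //   std::find_if(owners.begin(), owners.end(), TrackWrapper(track));
  // in this file the predicate is instead handed to the tagged track list,
  // e.g. tracks_.Contains(TrackOwner::TrackWrapper(track)).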

 protected:
  virtual ~TrackOwner() {}

 private:
  friend class base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner>;

  bool IsEqual(const WebRtcLocalAudioTrack* other) const {
    base::AutoLock lock(lock_);
    return (other == delegate_);
  }

  // Do NOT reference count the |delegate_| to avoid cyclic reference counting.
  WebRtcLocalAudioTrack* delegate_;
  mutable base::Lock lock_;

  DISALLOW_COPY_AND_ASSIGN(TrackOwner);
};

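// A call site would create a capturer roughly as follows (sketch only; the
// surrounding variables are hypothetical):
//   scoped_refptr<WebRtcAudioCapturer> capturer =
//       WebRtcAudioCapturer::CreateCapturer(render_view_id, device_info,
//                                           constraints, audio_device,
//                                           audio_source);
//   if (!capturer.get()) {
//     // Initialization failed, e.g. invalid constraints or channel layout.
//   }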
// static
scoped_refptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer(
    int render_view_id, const StreamDeviceInfo& device_info,
    const blink::WebMediaConstraints& constraints,
    WebRtcAudioDeviceImpl* audio_device,
    MediaStreamAudioSource* audio_source) {
  scoped_refptr<WebRtcAudioCapturer> capturer = new WebRtcAudioCapturer(
      render_view_id, device_info, constraints, audio_device, audio_source);
  if (capturer->Initialize())
    return capturer;

  return NULL;
}

bool WebRtcAudioCapturer::Initialize() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcAudioCapturer::Initialize()";
  WebRtcLogMessage(base::StringPrintf(
      "WAC::Initialize. render_view_id=%d"
      ", channel_layout=%d, sample_rate=%d, buffer_size=%d"
      ", session_id=%d, paired_output_sample_rate=%d"
      ", paired_output_frames_per_buffer=%d, effects=%d. ",
      render_view_id_,
      device_info_.device.input.channel_layout,
      device_info_.device.input.sample_rate,
      device_info_.device.input.frames_per_buffer,
      device_info_.session_id,
      device_info_.device.matched_output.sample_rate,
      device_info_.device.matched_output.frames_per_buffer,
      device_info_.device.input.effects));

  if (render_view_id_ == -1) {
    // Return true here to allow injecting a new source via
    // SetCapturerSourceForTesting() at a later stage.
    return true;
  }
  MediaAudioConstraints audio_constraints(constraints_,
                                          device_info_.device.input.effects);
  if (!audio_constraints.IsValid())
    return false;

  media::ChannelLayout channel_layout = static_cast<media::ChannelLayout>(
      device_info_.device.input.channel_layout);

  // If KEYBOARD_MIC effect is set, change the layout to the corresponding
  // layout that includes the keyboard mic.
  if ((device_info_.device.input.effects &
          media::AudioParameters::KEYBOARD_MIC) &&
      MediaStreamAudioProcessor::IsAudioTrackProcessingEnabled() &&
      audio_constraints.GetProperty(
          MediaAudioConstraints::kGoogExperimentalNoiseSuppression)) {
    if (channel_layout == media::CHANNEL_LAYOUT_STEREO) {
      channel_layout = media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC;
      DVLOG(1) << "Changed stereo layout to stereo + keyboard mic layout due "
               << "to KEYBOARD_MIC effect.";
    } else {
      DVLOG(1) << "KEYBOARD_MIC effect ignored, not compatible with layout "
               << channel_layout;
    }
  }

  DVLOG(1) << "Audio input hardware channel layout: " << channel_layout;
  UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputChannelLayout",
                            channel_layout, media::CHANNEL_LAYOUT_MAX + 1);

  // Verify that the reported input channel configuration is supported.
  if (channel_layout != media::CHANNEL_LAYOUT_MONO &&
      channel_layout != media::CHANNEL_LAYOUT_STEREO &&
      channel_layout != media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC) {
    DLOG(ERROR) << channel_layout
                << " is not a supported input channel configuration.";
    return false;
  }

  DVLOG(1) << "Audio input hardware sample rate: "
           << device_info_.device.input.sample_rate;
  media::AudioSampleRate asr;
  if (media::ToAudioSampleRate(device_info_.device.input.sample_rate, &asr)) {
    UMA_HISTOGRAM_ENUMERATION(
        "WebRTC.AudioInputSampleRate", asr, media::kAudioSampleRateMax + 1);
  } else {
    UMA_HISTOGRAM_COUNTS("WebRTC.AudioInputSampleRateUnexpected",
                         device_info_.device.input.sample_rate);
  }

  // Create and configure the default audio capturing source.
  SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id_),
                    channel_layout,
                    static_cast<float>(device_info_.device.input.sample_rate));

  // Add the capturer to the WebRtcAudioDeviceImpl since it needs some hardware
  // information from the capturer.
  if (audio_device_)
    audio_device_->AddAudioCapturer(this);

  return true;
}

WebRtcAudioCapturer::WebRtcAudioCapturer(
    int render_view_id,
    const StreamDeviceInfo& device_info,
    const blink::WebMediaConstraints& constraints,
    WebRtcAudioDeviceImpl* audio_device,
    MediaStreamAudioSource* audio_source)
    : constraints_(constraints),
      audio_processor_(
          new rtc::RefCountedObject<MediaStreamAudioProcessor>(
              constraints, device_info.device.input.effects, audio_device)),
      running_(false),
      render_view_id_(render_view_id),
      device_info_(device_info),
      volume_(0),
      peer_connection_mode_(false),
      key_pressed_(false),
      need_audio_processing_(false),
      audio_device_(audio_device),
      audio_source_(audio_source),
      audio_power_monitor_(
          device_info_.device.input.sample_rate,
          base::TimeDelta::FromMilliseconds(kPowerMonitorTimeConstantMs)) {
  DVLOG(1) << "WebRtcAudioCapturer::WebRtcAudioCapturer()";
}

WebRtcAudioCapturer::~WebRtcAudioCapturer() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(tracks_.IsEmpty());
  DVLOG(1) << "WebRtcAudioCapturer::~WebRtcAudioCapturer()";
  Stop();
}

void WebRtcAudioCapturer::AddTrack(WebRtcLocalAudioTrack* track) {
  DCHECK(track);
  DVLOG(1) << "WebRtcAudioCapturer::AddTrack()";

  {
    base::AutoLock auto_lock(lock_);
    // Verify that |track| is not already added to the list.
    DCHECK(!tracks_.Contains(TrackOwner::TrackWrapper(track)));

    // Add with a tag, so we remember to call OnSetFormat() on the new
    // track.
    scoped_refptr<TrackOwner> track_owner(new TrackOwner(track));
    tracks_.AddAndTag(track_owner.get());
  }
}

void WebRtcAudioCapturer::RemoveTrack(WebRtcLocalAudioTrack* track) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcAudioCapturer::RemoveTrack()";
  bool stop_source = false;
  {
    base::AutoLock auto_lock(lock_);

    scoped_refptr<TrackOwner> removed_item =
        tracks_.Remove(TrackOwner::TrackWrapper(track));

    // Clear the delegate to ensure that no more capture callbacks will
    // be sent to this sink. Also avoids a possible crash which can happen
    // if this method is called while capturing is active.
    if (removed_item.get()) {
      removed_item->Reset();
      stop_source = tracks_.IsEmpty();
    }
  }
  if (stop_source) {
    // Since WebRtcAudioCapturer does not inherit from MediaStreamAudioSource,
    // and instead MediaStreamAudioSource is composed of a WebRtcAudioCapturer,
    // we have to call StopSource() on the MediaStreamSource. This will call
    // MediaStreamAudioSource::DoStopSource(), which in turn calls
    // WebRtcAudioCapturer::Stop().
    audio_source_->StopSource();
  }
}

void WebRtcAudioCapturer::SetCapturerSource(
    const scoped_refptr<media::AudioCapturerSource>& source,
    media::ChannelLayout channel_layout,
    float sample_rate) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << ","
           << "sample_rate=" << sample_rate << ")";
  scoped_refptr<media::AudioCapturerSource> old_source;
  {
    base::AutoLock auto_lock(lock_);
    if (source_.get() == source.get())
      return;

    source_.swap(old_source);
    source_ = source;

    // Reset the flag to allow starting the new source.
    running_ = false;
  }

  DVLOG(1) << "Switching to a new capture source.";
  if (old_source.get())
    old_source->Stop();

  // Dispatch the new parameters both to the sink(s) and to the new source,
  // and also apply the new |constraints|.
  // The idea is to get rid of any dependency on the microphone parameters
  // which would normally be used by default.
  // bits_per_sample is always 16 for now.
  int buffer_size = GetBufferSize(sample_rate);
  media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
                                channel_layout, sample_rate,
                                16, buffer_size,
                                device_info_.device.input.effects);

  {
    base::AutoLock auto_lock(lock_);
    // Notify the |audio_processor_| of the new format.
    audio_processor_->OnCaptureFormatChanged(params);

    MediaAudioConstraints audio_constraints(constraints_,
                                            device_info_.device.input.effects);
    need_audio_processing_ = audio_constraints.NeedsAudioProcessing();
    // Notify all tracks about the new format.
    tracks_.TagAll();
  }

  if (source.get())
    source->Initialize(params, this, session_id());

  Start();
}

void WebRtcAudioCapturer::EnablePeerConnectionMode() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DVLOG(1) << "EnablePeerConnectionMode";
  // Do nothing if the peer connection mode has been enabled.
  if (peer_connection_mode_)
    return;

  peer_connection_mode_ = true;
  int render_view_id = -1;
  media::AudioParameters input_params;
  {
    base::AutoLock auto_lock(lock_);
    // Simply return if there is no existing source or the |render_view_id_| is
    // not valid.
    if (!source_.get() || render_view_id_ == -1)
      return;

    render_view_id = render_view_id_;
    input_params = audio_processor_->InputFormat();
  }

  // Do nothing if the current buffer size is the WebRtc native buffer size.
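  // For example, a 48000 Hz source that is already running with the 480-frame
  // (10 ms) buffer computed by GetBufferSize() needs no reopening.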
  if (GetBufferSize(input_params.sample_rate()) ==
          input_params.frames_per_buffer()) {
    return;
  }

  // Create a new audio stream as source which will open the hardware using
  // the WebRtc native buffer size.
  SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id),
                    input_params.channel_layout(),
                    static_cast<float>(input_params.sample_rate()));
}

void WebRtcAudioCapturer::Start() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcAudioCapturer::Start()";
  base::AutoLock auto_lock(lock_);
  if (running_ || !source_.get())
    return;

  // Start the data source, i.e., start capturing data from the current source.
  // We need to set the AGC control before starting the stream.
  source_->SetAutomaticGainControl(true);
  source_->Start();
  running_ = true;
}

void WebRtcAudioCapturer::Stop() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcAudioCapturer::Stop()";
  scoped_refptr<media::AudioCapturerSource> source;
  TrackList::ItemList tracks;
  {
    base::AutoLock auto_lock(lock_);
    if (!running_)
      return;

    source = source_;
    tracks = tracks_.Items();
    tracks_.Clear();
    running_ = false;
  }

  // Remove the capturer object from the WebRtcAudioDeviceImpl.
  if (audio_device_)
    audio_device_->RemoveAudioCapturer(this);

  for (TrackList::ItemList::const_iterator it = tracks.begin();
       it != tracks.end();
       ++it) {
    (*it)->Stop();
  }

  if (source.get())
    source->Stop();

  // Stop the audio processor to avoid feeding render data into the processor.
  audio_processor_->Stop();
}

void WebRtcAudioCapturer::SetVolume(int volume) {
  DVLOG(1) << "WebRtcAudioCapturer::SetVolume()";
  DCHECK_LE(volume, MaxVolume());
  double normalized_volume = static_cast<double>(volume) / MaxVolume();
  base::AutoLock auto_lock(lock_);
  if (source_.get())
    source_->SetVolume(normalized_volume);
}

int WebRtcAudioCapturer::Volume() const {
  base::AutoLock auto_lock(lock_);
  return volume_;
}

int WebRtcAudioCapturer::MaxVolume() const {
  return WebRtcAudioDeviceImpl::kMaxVolumeLevel;
}

void WebRtcAudioCapturer::Capture(const media::AudioBus* audio_source,
                                  int audio_delay_milliseconds,
                                  double volume,
                                  bool key_pressed) {
// This callback is driven by AudioInputDevice::AudioThreadCallback if
// |source_| is an AudioInputDevice, otherwise it is driven by the client's
// CaptureCallback.
#if defined(OS_WIN) || defined(OS_MACOSX)
  DCHECK_LE(volume, 1.0);
#elif (defined(OS_LINUX) && !defined(OS_CHROMEOS)) || defined(OS_OPENBSD)
  // We have a special situation on Linux where the microphone volume can be
  // "higher than maximum". The input volume slider in the sound preferences
  // allows the user to set a scaling that is higher than 100%. It means that
  // even if the reported maximum level is N, the actual microphone level can
  // go up to 1.5 * N, which corresponds to a normalized |volume| of 1.5.
  DCHECK_LE(volume, 1.6);
#endif

  TrackList::ItemList tracks;
  TrackList::ItemList tracks_to_notify_format;
  int current_volume = 0;
  base::TimeDelta audio_delay;
  bool need_audio_processing = true;
  {
    base::AutoLock auto_lock(lock_);
    if (!running_)
      return;

    // Map the internal volume range of [0.0, 1.0] into [0, 255] used by AGC.
    // The volume can be higher than 255 on Linux, and it will be clamped to
    // 255 since AGC does not allow values out of range.
    volume_ = static_cast<int>((volume * MaxVolume()) + 0.5);
    current_volume = volume_ > MaxVolume() ? MaxVolume() : volume_;
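    // For example, a normalized |volume| of 0.5 maps to
    // static_cast<int>(0.5 * 255 + 0.5) = 128, while a Linux value of 1.2
    // maps to 306 and is clamped to 255 by the line above.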
    audio_delay = base::TimeDelta::FromMilliseconds(audio_delay_milliseconds);
    audio_delay_ = audio_delay;
    key_pressed_ = key_pressed;
    tracks = tracks_.Items();
    tracks_.RetrieveAndClearTags(&tracks_to_notify_format);

    // Set the flag to turn on the audio processing at the PeerConnection
    // level. Note that we turn off the audio processing in PeerConnection if
    // the processor has already processed the data.
    need_audio_processing = need_audio_processing_ ?
        !MediaStreamAudioProcessor::IsAudioTrackProcessingEnabled() : false;
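    // I.e. PeerConnection-level processing is only requested when the
    // constraints ask for processing and the track-level processor is not
    // already doing it.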
  }

  DCHECK(audio_processor_->InputFormat().IsValid());
  DCHECK_EQ(audio_source->channels(),
            audio_processor_->InputFormat().channels());
  DCHECK_EQ(audio_source->frames(),
            audio_processor_->InputFormat().frames_per_buffer());

  // Notify the tracks when the format changes. This will do nothing if
  // |tracks_to_notify_format| is empty.
  media::AudioParameters output_params = audio_processor_->OutputFormat();
  for (TrackList::ItemList::const_iterator it = tracks_to_notify_format.begin();
       it != tracks_to_notify_format.end(); ++it) {
    (*it)->OnSetFormat(output_params);
    (*it)->SetAudioProcessor(audio_processor_);
  }

  if ((base::TimeTicks::Now() - last_audio_level_log_time_).InSeconds() >
          kPowerMonitorLogIntervalSeconds) {
    audio_power_monitor_.Scan(*audio_source, audio_source->frames());

    last_audio_level_log_time_ = base::TimeTicks::Now();

    std::pair<float, bool> result =
        audio_power_monitor_.ReadCurrentPowerAndClip();
    WebRtcLogMessage(base::StringPrintf(
        "WAC::Capture: current_audio_power=%.2fdBFS.", result.first));

    audio_power_monitor_.Reset();
  }

  // Figure out if the pre-processed data has any energy or not. This
  // information will be passed to the track to force the calculator to report
  // energy in case the post-processed data is zeroed by the audio processing.
  const bool force_report_nonzero_energy = HasDataEnergy(*audio_source);

  // Push the data to the processor for processing.
  audio_processor_->PushCaptureData(audio_source);

  // Process and consume the data in the processor until there is no longer
  // enough data in the processor.
  int16* output = NULL;
  int new_volume = 0;
  while (audio_processor_->ProcessAndConsumeData(
      audio_delay, current_volume, key_pressed, &new_volume, &output)) {
    // Feed the post-processed data to the tracks.
    for (TrackList::ItemList::const_iterator it = tracks.begin();
         it != tracks.end(); ++it) {
      (*it)->Capture(output, audio_delay, current_volume, key_pressed,
                     need_audio_processing, force_report_nonzero_energy);
    }

    if (new_volume) {
      SetVolume(new_volume);

      // Update the |current_volume| to avoid passing the old volume to AGC.
      current_volume = new_volume;
    }
  }
}

void WebRtcAudioCapturer::OnCaptureError() {
  NOTIMPLEMENTED();
}

media::AudioParameters WebRtcAudioCapturer::source_audio_parameters() const {
  base::AutoLock auto_lock(lock_);
  return audio_processor_.get() ? audio_processor_->InputFormat()
                                : media::AudioParameters();
}

bool WebRtcAudioCapturer::GetPairedOutputParameters(
    int* session_id,
    int* output_sample_rate,
    int* output_frames_per_buffer) const {
  // Don't set output parameters unless all of them are valid.
  if (device_info_.session_id <= 0 ||
      !device_info_.device.matched_output.sample_rate ||
      !device_info_.device.matched_output.frames_per_buffer)
    return false;

  *session_id = device_info_.session_id;
  *output_sample_rate = device_info_.device.matched_output.sample_rate;
  *output_frames_per_buffer =
      device_info_.device.matched_output.frames_per_buffer;

  return true;
}

int WebRtcAudioCapturer::GetBufferSize(int sample_rate) const {
  DCHECK(thread_checker_.CalledOnValidThread());
#if defined(OS_ANDROID)
  // TODO(henrika): Tune and adjust buffer size on Android.
  return (2 * sample_rate / 100);
#endif

  // PeerConnection runs on 10 ms chunks of data, so a buffer size that is a
  // multiple of 10 ms gives the best performance to PeerConnection.
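  // For example, at 48000 Hz |peer_connection_buffer_size| below is 480
  // frames (10 ms); a native buffer of 256 frames would then be preferred in
  // non-peer-connection mode, while a native buffer of 512 frames would not.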
  int peer_connection_buffer_size = sample_rate / 100;

  // Use the native hardware buffer size in non-peer-connection mode when the
  // platform is using a native buffer size smaller than the PeerConnection
  // buffer size.
  int hardware_buffer_size = device_info_.device.input.frames_per_buffer;
  if (!peer_connection_mode_ && hardware_buffer_size &&
      hardware_buffer_size <= peer_connection_buffer_size) {
    return hardware_buffer_size;
  }

  return (sample_rate / 100);
}

void WebRtcAudioCapturer::GetAudioProcessingParams(
    base::TimeDelta* delay, int* volume, bool* key_pressed) {
  base::AutoLock auto_lock(lock_);
  *delay = audio_delay_;
  *volume = volume_;
  *key_pressed = key_pressed_;
}

void WebRtcAudioCapturer::SetCapturerSourceForTesting(
    const scoped_refptr<media::AudioCapturerSource>& source,
    media::AudioParameters params) {
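  // Intended for tests only: a test would typically pass a fake or mock
  // media::AudioCapturerSource together with the parameters it expects
  // Initialize() to be called with (the exact fake class is test-specific).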
  // Switch the capturer over to the given source and parameters.
  SetCapturerSource(source, params.channel_layout(),
                    static_cast<float>(params.sample_rate()));
}

}  // namespace content