Home | History | Annotate | Download | only in media
      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
#include <algorithm>
#include <vector>

#include "base/environment.h"
#include "base/file_util.h"
#include "base/files/file_path.h"
#include "base/path_service.h"
#include "base/strings/stringprintf.h"
#include "base/test/test_timeouts.h"
#include "content/renderer/media/webrtc_audio_capturer.h"
#include "content/renderer/media/webrtc_audio_device_impl.h"
#include "content/renderer/media/webrtc_audio_renderer.h"
#include "content/renderer/media/webrtc_local_audio_track.h"
#include "content/renderer/render_thread_impl.h"
#include "content/test/webrtc_audio_device_test.h"
#include "media/audio/audio_manager_base.h"
#include "media/base/audio_hardware_config.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "third_party/webrtc/voice_engine/include/voe_audio_processing.h"
#include "third_party/webrtc/voice_engine/include/voe_base.h"
#include "third_party/webrtc/voice_engine/include/voe_codec.h"
#include "third_party/webrtc/voice_engine/include/voe_external_media.h"
#include "third_party/webrtc/voice_engine/include/voe_file.h"
#include "third_party/webrtc/voice_engine/include/voe_network.h"
     28 
     29 #if defined(OS_WIN)
     30 #include "base/win/windows_version.h"
     31 #endif
     32 
     33 using media::AudioParameters;
     34 using media::CHANNEL_LAYOUT_STEREO;
     35 using testing::_;
     36 using testing::AnyNumber;
     37 using testing::InvokeWithoutArgs;
     38 using testing::Return;
     39 using testing::StrEq;
     40 
     41 namespace content {
     42 
     43 namespace {
     44 
// Render view id passed to the capturer and renderer in these tests.
const int kRenderViewId = 1;

// The number of packets that RunWebRtcLoopbackTimeTest() uses for measurement.
const int kNumberOfPacketsForLoopbackTest = 100;

// The hardware latency we feed to WebRtc.
const int kHardwareLatencyInMs = 50;
     52 
     53 scoped_ptr<media::AudioHardwareConfig> CreateRealHardwareConfig(
     54     media::AudioManager* manager) {
     55   const AudioParameters output_parameters =
     56       manager->GetDefaultOutputStreamParameters();
     57   const AudioParameters input_parameters =
     58       manager->GetInputStreamParameters(
     59           media::AudioManagerBase::kDefaultDeviceId);
     60 
     61   return make_scoped_ptr(new media::AudioHardwareConfig(
     62       input_parameters, output_parameters));
     63 }
     64 
     65 // Return true if at least one element in the array matches |value|.
     66 bool FindElementInArray(const int* array, int size, int value) {
     67   return (std::find(&array[0], &array[0] + size, value) != &array[size]);
     68 }
     69 
     70 // This method returns false if a non-supported rate is detected on the
     71 // input or output side.
     72 // TODO(henrika): add support for automatic fallback to Windows Wave audio
     73 // if a non-supported rate is detected. It is probably better to detect
     74 // invalid audio settings by actually trying to open the audio streams instead
     75 // of relying on hard coded conditions.
     76 bool HardwareSampleRatesAreValid() {
     77   // These are the currently supported hardware sample rates in both directions.
     78   // The actual WebRTC client can limit these ranges further depending on
     79   // platform but this is the maximum range we support today.
     80   int valid_input_rates[] = {16000, 32000, 44100, 48000, 96000};
     81   int valid_output_rates[] = {16000, 32000, 44100, 48000, 96000};
     82 
     83   media::AudioHardwareConfig* hardware_config =
     84       RenderThreadImpl::current()->GetAudioHardwareConfig();
     85 
     86   // Verify the input sample rate.
     87   int input_sample_rate = hardware_config->GetInputSampleRate();
     88 
     89   if (!FindElementInArray(valid_input_rates, arraysize(valid_input_rates),
     90                           input_sample_rate)) {
     91     LOG(WARNING) << "Non-supported input sample rate detected.";
     92     return false;
     93   }
     94 
     95   // Given that the input rate was OK, verify the output rate as well.
     96   int output_sample_rate = hardware_config->GetOutputSampleRate();
     97   if (!FindElementInArray(valid_output_rates, arraysize(valid_output_rates),
     98                           output_sample_rate)) {
     99     LOG(WARNING) << "Non-supported output sample rate detected.";
    100     return false;
    101   }
    102 
    103   return true;
    104 }
    105 
    106 // Utility method which creates and initializes the audio capturer and adds it
    107 // to WebRTC audio device. This method should be used in tests where
    108 // HardwareSampleRatesAreValid() has been called and returned true.
    109 bool CreateAndInitializeCapturer(WebRtcAudioDeviceImpl* webrtc_audio_device) {
    110   DCHECK(webrtc_audio_device);
    111   scoped_refptr<WebRtcAudioCapturer> capturer(
    112       WebRtcAudioCapturer::CreateCapturer());
    113 
    114   media::AudioHardwareConfig* hardware_config =
    115       RenderThreadImpl::current()->GetAudioHardwareConfig();
    116 
    117   // Use native capture sample rate and channel configuration to get some
    118   // action in this test.
    119   int sample_rate = hardware_config->GetInputSampleRate();
    120   media::ChannelLayout channel_layout =
    121       hardware_config->GetInputChannelLayout();
    122   if (!capturer->Initialize(kRenderViewId, channel_layout, sample_rate, 0, 1,
    123                             media::AudioManagerBase::kDefaultDeviceId, 0, 0,
    124                             media::AudioParameters::NO_EFFECTS)) {
    125     return false;
    126   }
    127 
    128   // Add the capturer to the WebRtcAudioDeviceImpl.
    129   webrtc_audio_device->AddAudioCapturer(capturer);
    130 
    131   return true;
    132 }
    133 
    134 // Create and start a local audio track. Starting the audio track will connect
    135 // the audio track to the capturer and also start the source of the capturer.
    136 // Also, connect the sink to the audio track.
    137 scoped_refptr<WebRtcLocalAudioTrack>
    138 CreateAndStartLocalAudioTrack(WebRtcAudioCapturer* capturer,
    139                               PeerConnectionAudioSink* sink) {
    140   scoped_refptr<WebRtcLocalAudioTrack> local_audio_track(
    141       WebRtcLocalAudioTrack::Create(std::string(), capturer, NULL, NULL, NULL));
    142   local_audio_track->AddSink(sink);
    143   local_audio_track->Start();
    144   return local_audio_track;
    145 }
    146 
    147 class WebRTCMediaProcessImpl : public webrtc::VoEMediaProcess {
    148  public:
    149   explicit WebRTCMediaProcessImpl(base::WaitableEvent* event)
    150       : event_(event),
    151         channel_id_(-1),
    152         type_(webrtc::kPlaybackPerChannel),
    153         packet_size_(0),
    154         sample_rate_(0),
    155         channels_(0) {
    156   }
    157   virtual ~WebRTCMediaProcessImpl() {}
    158 
    159   // TODO(henrika): Refactor in WebRTC and convert to Chrome coding style.
    160   virtual void Process(int channel,
    161                        webrtc::ProcessingTypes type,
    162                        int16_t audio_10ms[],
    163                        int length,
    164                        int sampling_freq,
    165                        bool is_stereo) OVERRIDE {
    166     base::AutoLock auto_lock(lock_);
    167     channel_id_ = channel;
    168     type_ = type;
    169     packet_size_ = length;
    170     sample_rate_ = sampling_freq;
    171     channels_ = (is_stereo ? 2 : 1);
    172     if (event_) {
    173       // Signal that a new callback has been received.
    174       event_->Signal();
    175     }
    176   }
    177 
    178   int channel_id() const {
    179     base::AutoLock auto_lock(lock_);
    180     return channel_id_;
    181   }
    182 
    183   int type() const {
    184     base::AutoLock auto_lock(lock_);
    185     return type_;
    186   }
    187 
    188   int packet_size() const {
    189     base::AutoLock auto_lock(lock_);
    190     return packet_size_;
    191   }
    192 
    193   int sample_rate() const {
    194     base::AutoLock auto_lock(lock_);
    195     return sample_rate_;
    196   }
    197 
    198  private:
    199   base::WaitableEvent* event_;
    200   int channel_id_;
    201   webrtc::ProcessingTypes type_;
    202   int packet_size_;
    203   int sample_rate_;
    204   int channels_;
    205   mutable base::Lock lock_;
    206   DISALLOW_COPY_AND_ASSIGN(WebRTCMediaProcessImpl);
    207 };
    208 
// TODO(xians): Use MediaStreamAudioSink.
// Capture-side sink that ignores the audio payload and only signals |event|
// on every OnData() call, letting tests wait until capture data is flowing.
class MockMediaStreamAudioSink : public PeerConnectionAudioSink {
 public:
  // |event| must be non-NULL and must outlive this sink.
  explicit MockMediaStreamAudioSink(base::WaitableEvent* event)
      : event_(event) {
    DCHECK(event_);
  }
  virtual ~MockMediaStreamAudioSink() {}

  // PeerConnectionAudioSink implementation.
  // The audio data and parameters are discarded; returns 0 unconditionally.
  virtual int OnData(const int16* audio_data,
                     int sample_rate,
                     int number_of_channels,
                     int number_of_frames,
                     const std::vector<int>& channels,
                     int audio_delay_milliseconds,
                     int current_volume,
                     bool need_audio_processing,
                     bool key_pressed) OVERRIDE {
    // Signal that a callback has been received.
    event_->Signal();
    return 0;
  }

  // Set the format for the capture audio parameters. Intentionally a no-op.
  virtual void OnSetFormat(
      const media::AudioParameters& params) OVERRIDE {}

 private:
   base::WaitableEvent* event_;

   DISALLOW_COPY_AND_ASSIGN(MockMediaStreamAudioSink);
};
    242 
    243 class MockWebRtcAudioRendererSource : public WebRtcAudioRendererSource {
    244  public:
    245   explicit MockWebRtcAudioRendererSource(base::WaitableEvent* event)
    246       : event_(event) {
    247     DCHECK(event_);
    248   }
    249   virtual ~MockWebRtcAudioRendererSource() {}
    250 
    251   // WebRtcAudioRendererSource implementation.
    252   virtual void RenderData(uint8* audio_data,
    253                           int number_of_channels,
    254                           int number_of_frames,
    255                           int audio_delay_milliseconds) OVERRIDE {
    256     // Signal that a callback has been received.
    257     // Initialize the memory to zero to avoid uninitialized warning from
    258     // Valgrind.
    259     memset(audio_data, 0,
    260            sizeof(int16) * number_of_channels * number_of_frames);
    261     event_->Signal();
    262   }
    263 
    264   virtual void SetRenderFormat(const media::AudioParameters& params) OVERRIDE {
    265   }
    266 
    267   virtual void RemoveAudioRenderer(WebRtcAudioRenderer* renderer) OVERRIDE {};
    268 
    269  private:
    270    base::WaitableEvent* event_;
    271 
    272    DISALLOW_COPY_AND_ASSIGN(MockWebRtcAudioRendererSource);
    273 };
    274 
    275 // Prints numerical information to stdout in a controlled format so we can plot
    276 // the result.
    277 void PrintPerfResultMs(const char* graph, const char* trace, float time_ms) {
    278   std::string times;
    279   base::StringAppendF(&times, "%.2f,", time_ms);
    280   std::string result = base::StringPrintf(
    281       "%sRESULT %s%s: %s= %s%s%s %s\n", "*", graph, "",
    282       trace,  "[", times.c_str(), "]", "ms");
    283 
    284   fflush(stdout);
    285   printf("%s", result.c_str());
    286   fflush(stdout);
    287 }
    288 
    289 void ReadDataFromSpeechFile(char* data, int length) {
    290   base::FilePath data_file;
    291   CHECK(PathService::Get(base::DIR_SOURCE_ROOT, &data_file));
    292   data_file =
    293       data_file.Append(FILE_PATH_LITERAL("media"))
    294                .Append(FILE_PATH_LITERAL("test"))
    295                .Append(FILE_PATH_LITERAL("data"))
    296                .Append(FILE_PATH_LITERAL("speech_16b_stereo_48kHz.raw"));
    297   DCHECK(base::PathExists(data_file));
    298   int64 data_file_size64 = 0;
    299   DCHECK(base::GetFileSize(data_file, &data_file_size64));
    300   EXPECT_EQ(length, base::ReadFile(data_file, data, length));
    301   DCHECK(data_file_size64 > length);
    302 }
    303 
// Configures iSAC as both the receive payload type and the send codec for
// |channel| on |engine|. No-op on Android and iOS.
void SetChannelCodec(webrtc::VoiceEngine* engine, int channel) {
  // TODO(xians): move the codec as an input param to this function, and add
  // tests for different codecs, also add support to Android and IOS.
#if !defined(OS_ANDROID) && !defined(OS_IOS)
  webrtc::CodecInst isac;
  strcpy(isac.plname, "ISAC");
  isac.pltype = 104;   // RTP payload type (dynamic range).
  isac.pacsize = 960;  // 960 samples = 30 ms at 32 kHz.
  isac.plfreq = 32000;  // Sampling frequency in Hz.
  isac.channels = 1;   // Mono.
  isac.rate = -1;      // Presumably lets the codec choose its rate
                       // adaptively -- confirm against VoECodec docs.
  ScopedWebRTCPtr<webrtc::VoECodec> codec(engine);
  EXPECT_EQ(0, codec->SetRecPayloadType(channel, isac));
  EXPECT_EQ(0, codec->SetSendCodec(channel, isac));
#endif
}
    320 
// Returns the time in millisecond for sending packets to WebRtc for encoding,
// signal processing, decoding and receiving them back.
// |enable_apm| toggles the signal processing components (AGC, NS, AEC) and
// is also forwarded to OnData() as |need_audio_processing|.
// NOTE(review): |manager| is currently unused inside this function.
int RunWebRtcLoopbackTimeTest(media::AudioManager* manager,
                              bool enable_apm) {
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  EXPECT_TRUE(engine.valid());
  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  EXPECT_TRUE(base.valid());
  int err = base->Init(webrtc_audio_device.get());
  EXPECT_EQ(0, err);

  // We use OnSetFormat() and SetRenderFormat() to configure the audio
  // parameters so that this test can run on machine without hardware device.
  const media::AudioParameters params = media::AudioParameters(
      media::AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
      48000, 2, 480);
  PeerConnectionAudioSink* capturer_sink =
      static_cast<PeerConnectionAudioSink*>(webrtc_audio_device.get());
  WebRtcAudioRendererSource* renderer_source =
      static_cast<WebRtcAudioRendererSource*>(webrtc_audio_device.get());
  renderer_source->SetRenderFormat(params);

  // Turn on/off all the signal processing components like AGC, AEC and NS.
  ScopedWebRTCPtr<webrtc::VoEAudioProcessing> audio_processing(engine.get());
  EXPECT_TRUE(audio_processing.valid());
  audio_processing->SetAgcStatus(enable_apm);
  audio_processing->SetNsStatus(enable_apm);
  audio_processing->SetEcStatus(enable_apm);

  // Create a voice channel for the WebRtc.
  int channel = base->CreateChannel();
  EXPECT_NE(-1, channel);
  SetChannelCodec(engine.get(), channel);

  // Use our fake network transmission and start playout and recording.
  ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get());
  EXPECT_TRUE(network.valid());
  scoped_ptr<WebRTCTransportImpl> transport(
      new WebRTCTransportImpl(network.get()));
  EXPECT_EQ(0, network->RegisterExternalTransport(channel, *transport.get()));
  EXPECT_EQ(0, base->StartPlayout(channel));
  EXPECT_EQ(0, base->StartSend(channel));

  // Read speech data from a speech test file.
  // The factor 2 below is the number of bytes per 16-bit sample.
  const int input_packet_size =
      params.frames_per_buffer() * 2 * params.channels();
  const int num_output_channels = webrtc_audio_device->output_channels();
  const int output_packet_size = webrtc_audio_device->output_buffer_size() * 2 *
      num_output_channels;
  const size_t length = input_packet_size * kNumberOfPacketsForLoopbackTest;
  scoped_ptr<char[]> capture_data(new char[length]);
  ReadDataFromSpeechFile(capture_data.get(), length);

  // Start the timer.
  scoped_ptr<uint8[]> buffer(new uint8[output_packet_size]);
  base::Time start_time = base::Time::Now();
  int delay = 0;
  std::vector<int> voe_channels;
  voe_channels.push_back(channel);
  for (int j = 0; j < kNumberOfPacketsForLoopbackTest; ++j) {
    // Sending fake capture data to WebRtc.
    capturer_sink->OnData(
        reinterpret_cast<int16*>(capture_data.get() + input_packet_size * j),
        params.sample_rate(),
        params.channels(),
        params.frames_per_buffer(),
        voe_channels,
        kHardwareLatencyInMs,
        1.0,  // Current volume (implicitly converted to int by the interface).
        enable_apm,
        false);  // |key_pressed|.

    // Receiving data from WebRtc.
    renderer_source->RenderData(
        reinterpret_cast<uint8*>(buffer.get()),
        num_output_channels, webrtc_audio_device->output_buffer_size(),
        kHardwareLatencyInMs + delay);
    // Feed the elapsed time so far back in as extra render delay on the
    // next iteration.
    delay = (base::Time::Now() - start_time).InMilliseconds();
  }

  // Total wall-clock time spent pushing all packets through the loopback.
  int latency = (base::Time::Now() - start_time).InMilliseconds();

  EXPECT_EQ(0, base->StopSend(channel));
  EXPECT_EQ(0, base->StopPlayout(channel));
  EXPECT_EQ(0, base->DeleteChannel(channel));
  EXPECT_EQ(0, base->Terminate());

  return latency;
}
    412 
    413 }  // namespace
    414 
    415 // Trivial test which verifies that one part of the test harness
    416 // (HardwareSampleRatesAreValid()) works as intended for all supported
    417 // hardware input sample rates.
    418 TEST_F(MAYBE_WebRTCAudioDeviceTest, TestValidInputRates) {
    419   int valid_rates[] = {16000, 32000, 44100, 48000, 96000};
    420 
    421   // Verify that we will approve all rates listed in |valid_rates|.
    422   for (size_t i = 0; i < arraysize(valid_rates); ++i) {
    423     EXPECT_TRUE(FindElementInArray(valid_rates, arraysize(valid_rates),
    424         valid_rates[i]));
    425   }
    426 
    427   // Verify that any value outside the valid range results in negative
    428   // find results.
    429   int invalid_rates[] = {-1, 0, 8000, 11025, 22050, 192000};
    430   for (size_t i = 0; i < arraysize(invalid_rates); ++i) {
    431     EXPECT_FALSE(FindElementInArray(valid_rates, arraysize(valid_rates),
    432         invalid_rates[i]));
    433   }
    434 }
    435 
    436 // Trivial test which verifies that one part of the test harness
    437 // (HardwareSampleRatesAreValid()) works as intended for all supported
    438 // hardware output sample rates.
    439 TEST_F(MAYBE_WebRTCAudioDeviceTest, TestValidOutputRates) {
    440   int valid_rates[] = {44100, 48000, 96000};
    441 
    442   // Verify that we will approve all rates listed in |valid_rates|.
    443   for (size_t i = 0; i < arraysize(valid_rates); ++i) {
    444     EXPECT_TRUE(FindElementInArray(valid_rates, arraysize(valid_rates),
    445         valid_rates[i]));
    446   }
    447 
    448   // Verify that any value outside the valid range results in negative
    449   // find results.
    450   int invalid_rates[] = {-1, 0, 8000, 11025, 22050, 32000, 192000};
    451   for (size_t i = 0; i < arraysize(invalid_rates); ++i) {
    452     EXPECT_FALSE(FindElementInArray(valid_rates, arraysize(valid_rates),
    453         invalid_rates[i]));
    454   }
    455 }
    456 
// Basic test that instantiates and initializes an instance of
// WebRtcAudioDeviceImpl.
TEST_F(MAYBE_WebRTCAudioDeviceTest, Construct) {
#if defined(OS_WIN)
  // This test crashes on Win XP bots.
  if (base::win::GetVersion() <= base::win::VERSION_XP)
    return;
#endif

  // Fixed, hardware-independent parameters: mono input / stereo output at
  // 48 kHz, 16 bits per sample, 480 frames (10 ms) per buffer.
  AudioParameters input_params(
      AudioParameters::AUDIO_PCM_LOW_LATENCY,
      media::CHANNEL_LAYOUT_MONO,
      48000,
      16,
      480);

  AudioParameters output_params(
      AudioParameters::AUDIO_PCM_LOW_LATENCY,
      media::CHANNEL_LAYOUT_STEREO,
      48000,
      16,
      480);

  media::AudioHardwareConfig audio_config(input_params, output_params);
  SetAudioHardwareConfig(&audio_config);

  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());

  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  int err = base->Init(webrtc_audio_device.get());
  // NOTE(review): the capturer is created before |err| is verified; consider
  // checking |err| first so a failed Init() is reported before further setup.
  EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get()));
  EXPECT_EQ(0, err);
  EXPECT_EQ(0, base->Terminate());
}
    495 
    496 // Verify that a call to webrtc::VoEBase::StartPlayout() starts audio output
    497 // with the correct set of parameters. A WebRtcAudioDeviceImpl instance will
    498 // be utilized to implement the actual audio path. The test registers a
    499 // webrtc::VoEExternalMedia implementation to hijack the output audio and
    500 // verify that streaming starts correctly.
     501 // TODO(henrika): include on Android as well, as soon as all race conditions
     502 // in OpenSLES are resolved.
    503 #if defined(OS_ANDROID)
    504 #define MAYBE_StartPlayout DISABLED_StartPlayout
    505 #else
    506 #define MAYBE_StartPlayout StartPlayout
    507 #endif
TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_StartPlayout) {
  // Skip when no real output device is available on the bot.
  if (!has_output_devices_) {
    LOG(WARNING) << "No output device detected.";
    return;
  }

  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())
    return;

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());
  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());

  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  int err = base->Init(webrtc_audio_device.get());
  ASSERT_EQ(0, err);

  // Hijack the playout path with an external media processor so the test
  // can observe when audio starts flowing and with which format.
  ScopedWebRTCPtr<webrtc::VoEExternalMedia> external_media(engine.get());
  ASSERT_TRUE(external_media.valid());
  base::WaitableEvent event(false, false);
  scoped_ptr<WebRTCMediaProcessImpl> media_process(
      new WebRTCMediaProcessImpl(&event));
  int ch = base->CreateChannel();
  EXPECT_NE(-1, ch);
  EXPECT_EQ(0, external_media->RegisterExternalMediaProcessing(
      ch, webrtc::kPlaybackPerChannel, *media_process.get()));

  EXPECT_EQ(0, base->StartPlayout(ch));
  scoped_refptr<WebRtcAudioRenderer> renderer(
      CreateDefaultWebRtcAudioRenderer(kRenderViewId));
  scoped_refptr<MediaStreamAudioRenderer> proxy(
      renderer->CreateSharedAudioRendererProxy());
  EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));
  proxy->Start();
  proxy->Play();

  // Wait for the first Process() callback, i.e. for playout to start.
  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
  WaitForIOThreadCompletion();

  EXPECT_TRUE(webrtc_audio_device->Playing());
  EXPECT_FALSE(webrtc_audio_device->Recording());
  EXPECT_EQ(ch, media_process->channel_id());
  EXPECT_EQ(webrtc::kPlaybackPerChannel, media_process->type());
  // 80 samples at 8000 Hz correspond to a 10 ms mono callback.
  EXPECT_EQ(80, media_process->packet_size());
  EXPECT_EQ(8000, media_process->sample_rate());

  // Tear down in reverse order of setup.
  EXPECT_EQ(0, external_media->DeRegisterExternalMediaProcessing(
      ch, webrtc::kPlaybackPerChannel));
  EXPECT_EQ(0, base->StopPlayout(ch));
  proxy->Stop();
  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
}
    567 
    568 // Verify that a call to webrtc::VoEBase::StartRecording() starts audio input
    569 // with the correct set of parameters. A WebRtcAudioDeviceImpl instance will
    570 // be utilized to implement the actual audio path. The test registers a
    571 // webrtc::VoEExternalMedia implementation to hijack the input audio and
    572 // verify that streaming starts correctly. An external transport implementation
    573 // is also required to ensure that "sending" can start without actually trying
    574 // to send encoded packets to the network. Our main interest here is to ensure
    575 // that the audio capturing starts as it should.
    576 // Disabled when running headless since the bots don't have the required config.
    577 
    578 // TODO(leozwang): Because ExternalMediaProcessing is disabled in webrtc,
    579 // disable this unit test on Android for now.
    580 #if defined(OS_ANDROID)
    581 #define MAYBE_StartRecording DISABLED_StartRecording
    582 #elif defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
    583 // This test is failing on ARM linux: http://crbug.com/238490
    584 #define MAYBE_StartRecording DISABLED_StartRecording
    585 #else
    586 // Flakily hangs on all other platforms as well: crbug.com/268376.
    587 // When the flakiness has been fixed, you probably want to leave it disabled
    588 // on the above platforms.
    589 #define MAYBE_StartRecording DISABLED_StartRecording
    590 #endif
    591 
TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_StartRecording) {
  // Requires both capture and render devices on the bot.
  if (!has_input_devices_ || !has_output_devices_) {
    LOG(WARNING) << "Missing audio devices.";
    return;
  }

  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())
    return;

  // TODO(tommi): extend MediaObserver and MockMediaObserver with support
  // for new interfaces, like OnSetAudioStreamRecording(). When done, add
  // EXPECT_CALL() macros here.
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());

  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());
  int err = base->Init(webrtc_audio_device.get());
  ASSERT_EQ(0, err);

  int ch = base->CreateChannel();
  EXPECT_NE(-1, ch);

  // Hijack the recording path with an external media processor so the test
  // can observe when captured audio starts flowing and with which format.
  ScopedWebRTCPtr<webrtc::VoEExternalMedia> external_media(engine.get());
  ASSERT_TRUE(external_media.valid());

  base::WaitableEvent event(false, false);
  scoped_ptr<WebRTCMediaProcessImpl> media_process(
      new WebRTCMediaProcessImpl(&event));
  EXPECT_EQ(0, external_media->RegisterExternalMediaProcessing(
      ch, webrtc::kRecordingPerChannel, *media_process.get()));

  // We must add an external transport implementation to be able to start
  // recording without actually sending encoded packets to the network. All
  // we want to do here is to verify that audio capturing starts as it should.
  ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get());
  scoped_ptr<WebRTCTransportImpl> transport(
      new WebRTCTransportImpl(network.get()));
  EXPECT_EQ(0, network->RegisterExternalTransport(ch, *transport.get()));
  EXPECT_EQ(0, base->StartSend(ch));

  // Create and initialize the capturer which starts the source of the data
  // flow.
  EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get()));

  // Create and start a local audio track which is bridging the data flow
  // between the capturer and WebRtcAudioDeviceImpl.
  scoped_refptr<WebRtcLocalAudioTrack> local_audio_track(
      CreateAndStartLocalAudioTrack(webrtc_audio_device->GetDefaultCapturer(),
                                    webrtc_audio_device));
  // Connect the VoE voice channel to the audio track.
  static_cast<webrtc::AudioTrackInterface*>(local_audio_track.get())->
      GetRenderer()->AddChannel(ch);

  // Verify we get the data flow.
  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
  WaitForIOThreadCompletion();

  EXPECT_FALSE(webrtc_audio_device->Playing());
  EXPECT_TRUE(webrtc_audio_device->Recording());
  EXPECT_EQ(ch, media_process->channel_id());
  EXPECT_EQ(webrtc::kRecordingPerChannel, media_process->type());
  // 80 samples at 8000 Hz correspond to a 10 ms mono callback.
  EXPECT_EQ(80, media_process->packet_size());
  EXPECT_EQ(8000, media_process->sample_rate());

  // Tear down in reverse order of setup.
  EXPECT_EQ(0, external_media->DeRegisterExternalMediaProcessing(
      ch, webrtc::kRecordingPerChannel));
  EXPECT_EQ(0, base->StopSend(ch));

  webrtc_audio_device->GetDefaultCapturer()->Stop();
  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
}
    672 
    673 // Uses WebRtcAudioDeviceImpl to play a local wave file.
     674 // TODO(henrika): include on Android as well, as soon as all race conditions
     675 // in OpenSLES are resolved.
    676 #if defined(OS_ANDROID)
    677 #define MAYBE_PlayLocalFile DISABLED_PlayLocalFile
    678 #else
    679 #define MAYBE_PlayLocalFile PlayLocalFile
    680 #endif
TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_PlayLocalFile) {
  // Skip when no real output device is available on the bot.
  if (!has_output_devices_) {
    LOG(WARNING) << "No output device detected.";
    return;
  }

  std::string file_path(
      GetTestDataPath(FILE_PATH_LITERAL("speechmusic_mono_16kHz.pcm")));

  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())
    return;

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());
  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());

  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  int err = base->Init(webrtc_audio_device.get());
  ASSERT_EQ(0, err);
  int ch = base->CreateChannel();
  EXPECT_NE(-1, ch);
  EXPECT_EQ(0, base->StartPlayout(ch));
  scoped_refptr<WebRtcAudioRenderer> renderer(
      CreateDefaultWebRtcAudioRenderer(kRenderViewId));
  scoped_refptr<MediaStreamAudioRenderer> proxy(
      renderer->CreateSharedAudioRendererProxy());
  EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));
  proxy->Start();
  proxy->Play();

  // Sanity-check that the test file exists and has a non-zero duration
  // before attempting to play it.
  ScopedWebRTCPtr<webrtc::VoEFile> file(engine.get());
  ASSERT_TRUE(file.valid());
  int duration = 0;
  EXPECT_EQ(0, file->GetFileDuration(file_path.c_str(), duration,
                                     webrtc::kFileFormatPcm16kHzFile));
  EXPECT_NE(0, duration);

  EXPECT_EQ(0, file->StartPlayingFileLocally(ch, file_path.c_str(), false,
                                             webrtc::kFileFormatPcm16kHzFile));

  // Play 2 seconds worth of audio and then quit.
  message_loop_.PostDelayedTask(FROM_HERE,
                                base::MessageLoop::QuitClosure(),
                                base::TimeDelta::FromSeconds(2));
  message_loop_.Run();

  proxy->Stop();
  // NOTE(review): StartSend() is never called in this test, so this relies
  // on StopSend() returning 0 for a non-sending channel -- confirm against
  // the VoEBase documentation.
  EXPECT_EQ(0, base->StopSend(ch));
  EXPECT_EQ(0, base->StopPlayout(ch));
  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
}
    739 
// Uses WebRtcAudioDeviceImpl to play out recorded audio in loopback.
// An external transport implementation is utilized to feed back RTP packets
// which are recorded, encoded, packetized into RTP packets and finally
// "transmitted". The RTP packets are then fed back into the VoiceEngine
// where they are decoded and played out on the default audio output device.
// Disabled when running headless since the bots don't have the required config.
// TODO(henrika): improve quality by using a wideband codec, enabling noise-
// suppressions etc.
// FullDuplexAudioWithAGC is flaky on Android, disable it for now.
// Also flakily hangs on Windows: crbug.com/269348.
#if defined(OS_ANDROID) || defined(OS_WIN)
#define MAYBE_FullDuplexAudioWithAGC DISABLED_FullDuplexAudioWithAGC
#else
#define MAYBE_FullDuplexAudioWithAGC FullDuplexAudioWithAGC
#endif
TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_FullDuplexAudioWithAGC) {
  // Full duplex requires both capture and render devices.
  if (!has_output_devices_ || !has_input_devices_) {
    LOG(WARNING) << "Missing audio devices.";
    return;
  }

  // Use real hardware parameters so the loopback runs at native rates.
  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())
    return;

  // Create a VoiceEngine and register WebRtcAudioDeviceImpl as its audio
  // device module via VoEBase::Init().
  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());
  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());

  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  int err = base->Init(webrtc_audio_device.get());
  ASSERT_EQ(0, err);

  // Verify the platform-default AGC state before starting the call.
  ScopedWebRTCPtr<webrtc::VoEAudioProcessing> audio_processing(engine.get());
  ASSERT_TRUE(audio_processing.valid());
#if defined(OS_ANDROID)
  // On Android, by default AGC is off.
  bool enabled = true;
  webrtc::AgcModes agc_mode = webrtc::kAgcDefault;
  EXPECT_EQ(0, audio_processing->GetAgcStatus(enabled, agc_mode));
  EXPECT_FALSE(enabled);
#else
  // On desktop platforms AGC is expected to default to on, in
  // adaptive-analog mode.
  bool enabled = false;
  webrtc::AgcModes agc_mode = webrtc::kAgcDefault;
  EXPECT_EQ(0, audio_processing->GetAgcStatus(enabled, agc_mode));
  EXPECT_TRUE(enabled);
  EXPECT_EQ(agc_mode, webrtc::kAgcAdaptiveAnalog);
#endif

  int ch = base->CreateChannel();
  EXPECT_NE(-1, ch);

  // Capture side: create a capturer and a local audio track, then attach
  // the track's renderer to the VoE channel.
  EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get()));
  scoped_refptr<WebRtcLocalAudioTrack> local_audio_track(
      CreateAndStartLocalAudioTrack(webrtc_audio_device->GetDefaultCapturer(),
                                    webrtc_audio_device));
  // Connect the VoE voice channel to the audio track.
  static_cast<webrtc::AudioTrackInterface*>(local_audio_track.get())->
      GetRenderer()->AddChannel(ch);

  // Loop outgoing RTP packets straight back into the engine through an
  // external transport implementation.
  ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get());
  ASSERT_TRUE(network.valid());
  scoped_ptr<WebRTCTransportImpl> transport(
      new WebRTCTransportImpl(network.get()));
  EXPECT_EQ(0, network->RegisterExternalTransport(ch, *transport.get()));
  EXPECT_EQ(0, base->StartPlayout(ch));
  EXPECT_EQ(0, base->StartSend(ch));
  // Render side: hook up the default renderer via the shared proxy.
  scoped_refptr<WebRtcAudioRenderer> renderer(
      CreateDefaultWebRtcAudioRenderer(kRenderViewId));
  scoped_refptr<MediaStreamAudioRenderer> proxy(
      renderer->CreateSharedAudioRendererProxy());
  EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));
  proxy->Start();
  proxy->Play();

  // Run the full-duplex loopback for two seconds, then quit the loop.
  VLOG(0) << ">> You should now be able to hear yourself in loopback...";
  message_loop_.PostDelayedTask(FROM_HERE,
                                base::MessageLoop::QuitClosure(),
                                base::TimeDelta::FromSeconds(2));
  message_loop_.Run();

  // Tear down: stop the capturer and renderer first, then stop and delete
  // the channel before terminating the engine.
  webrtc_audio_device->GetDefaultCapturer()->Stop();
  proxy->Stop();
  EXPECT_EQ(0, base->StopSend(ch));
  EXPECT_EQ(0, base->StopPlayout(ch));

  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
}
    834 
// Measures the time from StartSend() until captured audio is first delivered
// to a sink, and reports it as the "webrtc_recording_setup_c" perf result.
// Test times out on bots, see http://crbug.com/247447
TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_WebRtcRecordingSetupTime) {
  if (!has_input_devices_) {
    LOG(WARNING) << "Missing audio capture devices.";
    return;
  }

  // Use real hardware parameters so capture runs at native rates.
  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())
    return;

  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());

  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());
  int err = base->Init(webrtc_audio_device.get());
  ASSERT_EQ(0, err);

  int ch = base->CreateChannel();
  EXPECT_NE(-1, ch);

  EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get()));
  // The mock sink signals |event| when audio data arrives — presumably on
  // the first delivered capture buffer (TODO confirm against the mock).
  base::WaitableEvent event(false, false);
  scoped_ptr<MockMediaStreamAudioSink> sink(
      new MockMediaStreamAudioSink(&event));

  // Create and start a local audio track. Starting the audio track will connect
  // the audio track to the capturer and also start the source of the capturer.
  scoped_refptr<WebRtcLocalAudioTrack> local_audio_track(
      CreateAndStartLocalAudioTrack(
          webrtc_audio_device->GetDefaultCapturer().get(), sink.get()));

  // Connect the VoE voice channel to the audio track.
  static_cast<webrtc::AudioTrackInterface*>(local_audio_track.get())->
      GetRenderer()->AddChannel(ch);

  // Start the clock just before sending begins; the elapsed time until the
  // sink fires is the recording setup latency.
  base::Time start_time = base::Time::Now();
  EXPECT_EQ(0, base->StartSend(ch));

  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
  int delay = (base::Time::Now() - start_time).InMilliseconds();
  PrintPerfResultMs("webrtc_recording_setup_c", "t", delay);

  // Tear down: stop the capturer before stopping and deleting the channel.
  webrtc_audio_device->GetDefaultCapturer()->Stop();
  EXPECT_EQ(0, base->StopSend(ch));
  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
}
    890 
    891 
    892 // TODO(henrika): include on Android as well as soon as alla race conditions
    893 // in OpenSLES are resolved.
    894 #if defined(OS_ANDROID)
    895 #define MAYBE_WebRtcPlayoutSetupTime DISABLED_WebRtcPlayoutSetupTime
    896 #else
    897 #define MAYBE_WebRtcPlayoutSetupTime WebRtcPlayoutSetupTime
    898 #endif
    899 TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_WebRtcPlayoutSetupTime) {
    900   if (!has_output_devices_) {
    901     LOG(WARNING) << "No output device detected.";
    902     return;
    903   }
    904 
    905   scoped_ptr<media::AudioHardwareConfig> config =
    906       CreateRealHardwareConfig(audio_manager_.get());
    907   SetAudioHardwareConfig(config.get());
    908 
    909   if (!HardwareSampleRatesAreValid())
    910     return;
    911 
    912   base::WaitableEvent event(false, false);
    913   scoped_ptr<MockWebRtcAudioRendererSource> renderer_source(
    914       new MockWebRtcAudioRendererSource(&event));
    915 
    916   scoped_refptr<WebRtcAudioRenderer> renderer(
    917       CreateDefaultWebRtcAudioRenderer(kRenderViewId));
    918   renderer->Initialize(renderer_source.get());
    919   scoped_refptr<MediaStreamAudioRenderer> proxy(
    920       renderer->CreateSharedAudioRendererProxy());
    921   proxy->Start();
    922 
    923   // Start the timer and playout.
    924   base::Time start_time = base::Time::Now();
    925   proxy->Play();
    926   EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
    927   int delay = (base::Time::Now() - start_time).InMilliseconds();
    928   PrintPerfResultMs("webrtc_playout_setup_c", "t", delay);
    929 
    930   proxy->Stop();
    931 }
    932 
    933 #if defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
    934 // Timing out on ARM linux bot: http://crbug.com/238490
    935 #define MAYBE_WebRtcLoopbackTimeWithoutSignalProcessing \
    936         DISABLED_WebRtcLoopbackTimeWithoutSignalProcessing
    937 #else
    938 #define MAYBE_WebRtcLoopbackTimeWithoutSignalProcessing \
    939         WebRtcLoopbackTimeWithoutSignalProcessing
    940 #endif
    941 
    942 TEST_F(MAYBE_WebRTCAudioDeviceTest,
    943        MAYBE_WebRtcLoopbackTimeWithoutSignalProcessing) {
    944 #if defined(OS_WIN)
    945   // This test hangs on WinXP: see http://crbug.com/318189.
    946   if (base::win::GetVersion() <= base::win::VERSION_XP) {
    947     LOG(WARNING) << "Test disabled due to the test hangs on WinXP.";
    948     return;
    949   }
    950 #endif
    951   int latency = RunWebRtcLoopbackTimeTest(audio_manager_.get(), false);
    952   PrintPerfResultMs("webrtc_loopback_without_sigal_processing (100 packets)",
    953                     "t", latency);
    954 }
    955 
    956 #if defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
    957 // Timing out on ARM linux bot: http://crbug.com/238490
    958 #define MAYBE_WebRtcLoopbackTimeWithSignalProcessing \
    959         DISABLED_WebRtcLoopbackTimeWithSignalProcessing
    960 #else
    961 #define MAYBE_WebRtcLoopbackTimeWithSignalProcessing \
    962         WebRtcLoopbackTimeWithSignalProcessing
    963 #endif
    964 
    965 TEST_F(MAYBE_WebRTCAudioDeviceTest,
    966        MAYBE_WebRtcLoopbackTimeWithSignalProcessing) {
    967 #if defined(OS_WIN)
    968   // This test hangs on WinXP: see http://crbug.com/318189.
    969   if (base::win::GetVersion() <= base::win::VERSION_XP) {
    970     LOG(WARNING) << "Test disabled due to the test hangs on WinXP.";
    971     return;
    972   }
    973 #endif
    974   int latency = RunWebRtcLoopbackTimeTest(audio_manager_.get(), true);
    975   PrintPerfResultMs("webrtc_loopback_with_signal_processing (100 packets)",
    976                     "t", latency);
    977 }
    978 
    979 }  // namespace content
    980