// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/audio/win/audio_low_latency_output_win.h"

#include <Functiondiscoverykeys_devpkey.h>

#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/metrics/histogram.h"
#include "base/strings/utf_string_conversions.h"
#include "base/win/scoped_propvariant.h"
#include "media/audio/win/audio_manager_win.h"
#include "media/audio/win/avrt_wrapper_win.h"
#include "media/audio/win/core_audio_util_win.h"
#include "media/base/limits.h"
#include "media/base/media_switches.h"

using base::win::ScopedComPtr;
using base::win::ScopedCOMInitializer;
using base::win::ScopedCoMem;

namespace media {

// static
AUDCLNT_SHAREMODE WASAPIAudioOutputStream::GetShareMode() {
  const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
  if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio))
    return AUDCLNT_SHAREMODE_EXCLUSIVE;
  return AUDCLNT_SHAREMODE_SHARED;
}

// static
int WASAPIAudioOutputStream::HardwareSampleRate(const std::string& device_id) {
  WAVEFORMATPCMEX format;
  ScopedComPtr<IAudioClient> client;
  if (device_id.empty()) {
    client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
  } else {
    ScopedComPtr<IMMDevice> device(CoreAudioUtil::CreateDevice(device_id));
    if (!device)
      return 0;
    client = CoreAudioUtil::CreateClient(device);
  }

  if (!client || FAILED(CoreAudioUtil::GetSharedModeMixFormat(client, &format)))
    return 0;

  return static_cast<int>(format.Format.nSamplesPerSec);
}

WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
                                                 const std::string& device_id,
                                                 const AudioParameters& params,
                                                 ERole device_role)
    : creating_thread_id_(base::PlatformThread::CurrentId()),
      manager_(manager),
      format_(),
      opened_(false),
      volume_(1.0),
      packet_size_frames_(0),
      packet_size_bytes_(0),
      endpoint_buffer_size_frames_(0),
      device_id_(device_id),
      device_role_(device_role),
      share_mode_(GetShareMode()),
      num_written_frames_(0),
      source_(NULL),
      audio_bus_(AudioBus::Create(params)) {
  DCHECK(manager_);

  VLOG(1) << "WASAPIAudioOutputStream::WASAPIAudioOutputStream()";
  VLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE)
      << "Core Audio (WASAPI) EXCLUSIVE MODE is enabled.";

  // Load the Avrt DLL if not already loaded. Required to support MMCSS.
  bool avrt_init = avrt::Initialize();
  DCHECK(avrt_init) << "Failed to load the avrt.dll";

  // Set up the desired render format specified by the client. We use the
  // WAVE_FORMAT_EXTENSIBLE structure to ensure that multiple channel ordering
  // and high precision data can be supported.

  // Begin with the WAVEFORMATEX structure that specifies the basic format.
  WAVEFORMATEX* format = &format_.Format;
  format->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
  format->nChannels = params.channels();
  format->nSamplesPerSec = params.sample_rate();
  format->wBitsPerSample = params.bits_per_sample();
  format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels;
  format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign;
  format->cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);

  // Add the parts which are unique to WAVE_FORMAT_EXTENSIBLE.
  format_.Samples.wValidBitsPerSample = params.bits_per_sample();
  format_.dwChannelMask = CoreAudioUtil::GetChannelConfig(device_id, eRender);
  format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;

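
  // Example of the arithmetic above, with hypothetical values: stereo
  // (2 channels) 16-bit PCM at 48 kHz gives nBlockAlign = (16 / 8) * 2 = 4
  // bytes per audio frame and nAvgBytesPerSec = 48000 * 4 = 192000 bytes
  // per second.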
  // Store the size (in different units) of the audio packets which we are
  // expected to deliver to the audio endpoint device in each render event.
  packet_size_frames_ = params.frames_per_buffer();
  packet_size_bytes_ = params.GetBytesPerBuffer();
  VLOG(1) << "Number of bytes per audio frame  : " << format->nBlockAlign;
  VLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;
  VLOG(1) << "Number of bytes per packet       : " << packet_size_bytes_;
  VLOG(1) << "Number of milliseconds per packet: "
          << params.GetBufferDuration().InMillisecondsF();

  // All events are auto-reset events and non-signaled initially.

  // Create the event which the audio engine will signal each time
  // a buffer becomes ready to be processed by the client.
  audio_samples_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
  DCHECK(audio_samples_render_event_.IsValid());

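
  // Example with hypothetical values: a 10 ms packet at 48 kHz corresponds
  // to packet_size_frames_ = 480 frames and, at 4 bytes per audio frame,
  // packet_size_bytes_ = 1920 bytes per packet.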
  // Create the event which will be set in Stop() when rendering shall stop.
  stop_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
  DCHECK(stop_render_event_.IsValid());
}

WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {
  DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
}

bool WASAPIAudioOutputStream::Open() {
  VLOG(1) << "WASAPIAudioOutputStream::Open()";
  DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
  if (opened_)
    return true;

  DCHECK(!audio_client_);
  DCHECK(!audio_render_client_);

  // Will be set to true if we ended up opening the default communications
  // device.
  bool communications_device = false;

  // Create an IAudioClient interface for the default rendering IMMDevice.
  ScopedComPtr<IAudioClient> audio_client;
  if (device_id_.empty() ||
      CoreAudioUtil::DeviceIsDefault(eRender, device_role_, device_id_)) {
    audio_client = CoreAudioUtil::CreateDefaultClient(eRender, device_role_);
    communications_device = (device_role_ == eCommunications);
  } else {
    ScopedComPtr<IMMDevice> device(CoreAudioUtil::CreateDevice(device_id_));
    DLOG_IF(ERROR, !device) << "Failed to open device: " << device_id_;
    if (device)
      audio_client = CoreAudioUtil::CreateClient(device);
  }

  if (!audio_client)
    return false;

  // Extra sanity check to ensure that the provided device format is still
  // valid.
  if (!CoreAudioUtil::IsFormatSupported(audio_client,
                                        share_mode_,
                                        &format_)) {
    LOG(ERROR) << "Audio parameters are not supported.";
    return false;
  }

  HRESULT hr = S_FALSE;
  if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
    // Initialize the audio stream between the client and the device in shared
    // mode and using event-driven buffer handling.
    hr = CoreAudioUtil::SharedModeInitialize(
        audio_client, &format_, audio_samples_render_event_.Get(),
        &endpoint_buffer_size_frames_,
        communications_device ? &kCommunicationsSessionId : NULL);
    if (FAILED(hr))
      return false;

    // We know from experience that the best possible callback sequence is
    // achieved when the packet size (given by the native device period)
    // is an even divisor of the endpoint buffer size.
    // Examples: 48kHz => 960 % 480, 44.1kHz => 896 % 448 or 882 % 441.
    if (endpoint_buffer_size_frames_ % packet_size_frames_ != 0) {
      LOG(ERROR)
          << "Bailing out due to non-perfect timing. Packet size of "
          << packet_size_frames_ << " frames is not an even divisor of "
          << "the endpoint buffer size of " << endpoint_buffer_size_frames_;
      return false;
    }
  } else {
    // TODO(henrika): break out to CoreAudioUtil::ExclusiveModeInitialize()
    // when removing the enable-exclusive-audio flag.
    hr = ExclusiveModeInitialization(audio_client,
                                     audio_samples_render_event_.Get(),
                                     &endpoint_buffer_size_frames_);
    if (FAILED(hr))
      return false;

    // The buffer scheme for exclusive mode streams is not designed for max
    // flexibility. We only allow a "perfect match" between the packet size set
    // by the user and the actual endpoint buffer size.
    if (endpoint_buffer_size_frames_ != packet_size_frames_) {
      LOG(ERROR) << "Bailing out due to non-perfect timing.";
      return false;
    }
  }

  // Create an IAudioRenderClient client for an initialized IAudioClient.
  // The IAudioRenderClient interface enables us to write output data to
  // a rendering endpoint buffer.
  ScopedComPtr<IAudioRenderClient> audio_render_client =
      CoreAudioUtil::CreateRenderClient(audio_client);
  if (!audio_render_client)
    return false;

  // Store valid COM interfaces.
  audio_client_ = audio_client;
  audio_render_client_ = audio_render_client;

  hr = audio_client_->GetService(__uuidof(IAudioClock),
                                 audio_clock_.ReceiveVoid());
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to get IAudioClock service.";
    return false;
  }

  opened_ = true;
  return true;
}

void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) {
  VLOG(1) << "WASAPIAudioOutputStream::Start()";
  DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
  CHECK(callback);
  CHECK(opened_);

  if (render_thread_) {
    CHECK_EQ(callback, source_);
    return;
  }

  source_ = callback;

  // Ensure that the endpoint buffer is prepared with silence.
  if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
    if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence(
             audio_client_, audio_render_client_)) {
      LOG(ERROR) << "Failed to prepare endpoint buffers with silence.";
      callback->OnError(this);
      return;
    }
  }
  num_written_frames_ = endpoint_buffer_size_frames_;

  // Create and start the thread that will drive the rendering by waiting for
  // render events.
  render_thread_.reset(
      new base::DelegateSimpleThread(this, "wasapi_render_thread"));
  render_thread_->Start();
  if (!render_thread_->HasBeenStarted()) {
    LOG(ERROR) << "Failed to start WASAPI render thread.";
    StopThread();
    callback->OnError(this);
    return;
  }

  // Start streaming data between the endpoint buffer and the audio engine.
  HRESULT hr = audio_client_->Start();
  if (FAILED(hr)) {
    PLOG(ERROR) << "Failed to start output streaming: " << std::hex << hr;
    StopThread();
    callback->OnError(this);
  }
}

void WASAPIAudioOutputStream::Stop() {
  VLOG(1) << "WASAPIAudioOutputStream::Stop()";
  DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
  if (!render_thread_)
    return;

  // Stop output audio streaming.
  HRESULT hr = audio_client_->Stop();
  if (FAILED(hr)) {
    PLOG(ERROR) << "Failed to stop output streaming: " << std::hex << hr;
    source_->OnError(this);
  }

  // Make a local copy of |source_| since StopThread() will clear it.
  AudioSourceCallback* callback = source_;
  StopThread();

  // Flush all pending data and reset the audio clock stream position to 0.
  hr = audio_client_->Reset();
  if (FAILED(hr)) {
    PLOG(ERROR) << "Failed to reset streaming: " << std::hex << hr;
    callback->OnError(this);
  }

  // Extra safety check to ensure that the buffers are cleared.
  // If the buffers are not cleared correctly, the next call to Start()
  // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer().
  // This check is only needed for shared-mode streams.
  if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
    UINT32 num_queued_frames = 0;
    audio_client_->GetCurrentPadding(&num_queued_frames);
    DCHECK_EQ(0u, num_queued_frames);
  }
}

void WASAPIAudioOutputStream::Close() {
  VLOG(1) << "WASAPIAudioOutputStream::Close()";
  DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);

  // It is valid to call Close() before calling Open() or Start().
  // It is also valid to call Close() after Start() has been called.
  Stop();

  // Inform the audio manager that we have been closed. This will cause our
  // destruction.
  manager_->ReleaseOutputStream(this);
}

void WASAPIAudioOutputStream::SetVolume(double volume) {
  VLOG(1) << "SetVolume(volume=" << volume << ")";
  float volume_float = static_cast<float>(volume);
  if (volume_float < 0.0f || volume_float > 1.0f) {
    return;
  }
  volume_ = volume_float;
}

void WASAPIAudioOutputStream::GetVolume(double* volume) {
  VLOG(1) << "GetVolume()";
  *volume = static_cast<double>(volume_);
}

void WASAPIAudioOutputStream::Run() {
  ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);

  // Increase the thread priority.
  render_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);

  // Enable MMCSS to ensure that this thread receives prioritized access to
  // CPU resources.
  DWORD task_index = 0;
  HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio",
                                                      &task_index);
  bool mmcss_is_ok =
      (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL));
  if (!mmcss_is_ok) {
    // Failed to enable MMCSS on this thread. It is not fatal but can lead
    // to reduced QoS at high load.
    DWORD err = GetLastError();
    LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ").";
  }

  HRESULT hr = S_FALSE;

  bool playing = true;
  bool error = false;
  HANDLE wait_array[] = { stop_render_event_.Get(),
                          audio_samples_render_event_.Get() };
  UINT64 device_frequency = 0;

  // The device frequency is the frequency generated by the hardware clock in
  // the audio device. The GetFrequency() method reports a constant frequency.
  hr = audio_clock_->GetFrequency(&device_frequency);
  error = FAILED(hr);
  PLOG_IF(ERROR, error) << "Failed to query the audio device frequency: "
                        << std::hex << hr;

  // Keep rendering audio until the stop event or the stream-switch event
  // is signaled. An error event can also break the main thread loop.
  while (playing && !error) {
    // Wait for a close-down event, stream-switch event or a new render event.
    DWORD wait_result = WaitForMultipleObjects(arraysize(wait_array),
                                               wait_array,
                                               FALSE,
                                               INFINITE);

    switch (wait_result) {
      case WAIT_OBJECT_0 + 0:
        // |stop_render_event_| has been set.
        playing = false;
        break;
      case WAIT_OBJECT_0 + 1:
        // |audio_samples_render_event_| has been set.
        error = !RenderAudioFromSource(device_frequency);
        break;
      default:
        error = true;
        break;
    }
  }

  if (playing && error) {
    // Stop audio rendering since something has gone wrong in our main thread
    // loop. Note that we are still in a "started" state; hence a Stop() call
    // is required to join the thread properly.
    audio_client_->Stop();
    PLOG(ERROR) << "WASAPI rendering failed.";
  }

  // Disable MMCSS.
  if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) {
    PLOG(WARNING) << "Failed to disable MMCSS";
  }
}

bool WASAPIAudioOutputStream::RenderAudioFromSource(UINT64 device_frequency) {
  TRACE_EVENT0("audio", "RenderAudioFromSource");

  HRESULT hr = S_FALSE;
  UINT32 num_queued_frames = 0;
  uint8* audio_data = NULL;

  // Contains how much new data we can write to the buffer without
  // the risk of overwriting previously written data that the audio
  // engine has not yet read from the buffer.
  size_t num_available_frames = 0;

  if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
    // Get the padding value which represents the amount of rendering
    // data that is queued up to play in the endpoint buffer.
    hr = audio_client_->GetCurrentPadding(&num_queued_frames);
    if (FAILED(hr)) {
      DLOG(ERROR) << "Failed to retrieve amount of available space: "
                  << std::hex << hr;
      return false;
    }
    num_available_frames =
        endpoint_buffer_size_frames_ - num_queued_frames;
  } else {
    // While the stream is running, the system alternately sends one
    // buffer or the other to the client. This form of double buffering
    // is referred to as "ping-ponging". Each time the client receives
    // a buffer from the system (triggers this event) the client must
    // process the entire buffer. Calls to the GetCurrentPadding method
    // are unnecessary because the packet size must always equal the
    // buffer size. In contrast to the shared mode buffering scheme,
    // the latency for an event-driven, exclusive-mode stream depends
    // directly on the buffer size.
    num_available_frames = endpoint_buffer_size_frames_;
  }

  // Check if there is enough available space to fit the packet size
  // specified by the client.
  if (num_available_frames < packet_size_frames_)
    return true;

  DLOG_IF(ERROR, num_available_frames % packet_size_frames_ != 0)
      << "Non-perfect timing detected (num_available_frames="
      << num_available_frames << ", packet_size_frames="
      << packet_size_frames_ << ")";

  // Derive the number of packets we need to get from the client to
  // fill up the available area in the endpoint buffer.
  // |num_packets| will always be one for exclusive-mode streams and
  // will be one in most cases for shared mode streams as well.
  // However, we have found that two packets can sometimes be
  // required.
  size_t num_packets = (num_available_frames / packet_size_frames_);

  for (size_t n = 0; n < num_packets; ++n) {
    // Grab all available space in the rendering endpoint buffer
    // into which the client can write a data packet.
    hr = audio_render_client_->GetBuffer(packet_size_frames_,
                                         &audio_data);
    if (FAILED(hr)) {
      DLOG(ERROR) << "Failed to use rendering audio buffer: "
                  << std::hex << hr;
      return false;
    }
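  // Example with hypothetical values: with num_available_frames = 960 and
  // packet_size_frames_ = 480, two packets are fetched from the source
  // during this render event.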

    // Derive the audio delay which corresponds to the delay between
    // a render event and the time when the first audio sample in a
    // packet is played out through the speaker. This delay value
    // can typically be utilized by an acoustic echo-control (AEC)
    // unit at the render side.
    UINT64 position = 0;
    int audio_delay_bytes = 0;
    hr = audio_clock_->GetPosition(&position, NULL);
    if (SUCCEEDED(hr)) {
      // Stream position of the sample that is currently playing
      // through the speaker.
      double pos_sample_playing_frames = format_.Format.nSamplesPerSec *
          (static_cast<double>(position) / device_frequency);

      // Stream position of the last sample written to the endpoint
      // buffer. Note that the packet we are about to receive in
      // the upcoming callback is also included.
      size_t pos_last_sample_written_frames =
          num_written_frames_ + packet_size_frames_;

      // Derive the actual delay value which will be fed to the
      // render client using the OnMoreData() callback.
      audio_delay_bytes = (pos_last_sample_written_frames -
          pos_sample_playing_frames) * format_.Format.nBlockAlign;
    }
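
    // Worked example of the delay math above, with hypothetical values: if
    // device_frequency = 10000000 ticks/s and position = 2500000 ticks, then
    // 0.25 seconds of a 48 kHz stream has been played out, i.e.
    // pos_sample_playing_frames = 12000. With num_written_frames_ = 12480
    // and packet_size_frames_ = 480, pos_last_sample_written_frames = 12960,
    // so the delay corresponds to 960 frames, or 960 * nBlockAlign bytes.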

    // Read a data packet from the registered client source and
    // deliver a delay estimate in the same callback to the client.
    // A time stamp is also stored in the AudioBuffersState. This
    // time stamp can be used at the client side to compensate for
    // the delay between the usage of the delay value and the time
    // of generation.

    int frames_filled = source_->OnMoreData(
        audio_bus_.get(), AudioBuffersState(0, audio_delay_bytes));
    uint32 num_filled_bytes = frames_filled * format_.Format.nBlockAlign;
    DCHECK_LE(num_filled_bytes, packet_size_bytes_);

    // Note: If this ever changes to output raw float the data must be
    // clipped and sanitized since it may come from an untrusted
    // source such as NaCl.
    const int bytes_per_sample = format_.Format.wBitsPerSample >> 3;
    audio_bus_->Scale(volume_);
    audio_bus_->ToInterleaved(
        frames_filled, bytes_per_sample, audio_data);

    // Release the buffer space acquired in the GetBuffer() call.
    // Render silence if we were not able to fill up the buffer totally.
    DWORD flags = (num_filled_bytes < packet_size_bytes_) ?
        AUDCLNT_BUFFERFLAGS_SILENT : 0;
    audio_render_client_->ReleaseBuffer(packet_size_frames_, flags);

    num_written_frames_ += packet_size_frames_;
  }

  return true;
}

HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization(
    IAudioClient* client, HANDLE event_handle, uint32* endpoint_buffer_size) {
  DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_EXCLUSIVE);

  float f = (1000.0 * packet_size_frames_) / format_.Format.nSamplesPerSec;
  REFERENCE_TIME requested_buffer_duration =
      static_cast<REFERENCE_TIME>(f * 10000.0 + 0.5);

  DWORD stream_flags = AUDCLNT_STREAMFLAGS_NOPERSIST;
  bool use_event = (event_handle != NULL &&
                    event_handle != INVALID_HANDLE_VALUE);
  if (use_event)
    stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
  VLOG(2) << "stream_flags: 0x" << std::hex << stream_flags;

  // Initialize the audio stream between the client and the device.
  // For an exclusive-mode stream that uses event-driven buffering, the
  // caller must specify nonzero values for hnsPeriodicity and
  // hnsBufferDuration, and the values of these two parameters must be equal.
  // The Initialize method allocates two buffers for the stream. Each buffer
  // is equal in duration to the value of the hnsBufferDuration parameter.
  // Following the Initialize call for a rendering stream, the caller should
  // fill the first of the two buffers before starting the stream.
  HRESULT hr = S_FALSE;
  hr = client->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE,
                          stream_flags,
                          requested_buffer_duration,
                          requested_buffer_duration,
                          reinterpret_cast<WAVEFORMATEX*>(&format_),
                          NULL);
  if (FAILED(hr)) {
    if (hr == AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) {
      LOG(ERROR) << "AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED";

      UINT32 aligned_buffer_size = 0;
      client->GetBufferSize(&aligned_buffer_size);
      VLOG(1) << "Use aligned buffer size instead: " << aligned_buffer_size;

      // Calculate new aligned periodicity. Each unit of reference time
      // is 100 nanoseconds.
      REFERENCE_TIME aligned_buffer_duration = static_cast<REFERENCE_TIME>(
          (10000000.0 * aligned_buffer_size / format_.Format.nSamplesPerSec)
          + 0.5);

      // It is possible to re-activate and re-initialize the audio client
      // at this stage but we bail out with an error code instead and
      // combine it with a log message which informs about the suggested
      // aligned buffer size which should be used instead.
      VLOG(1) << "aligned_buffer_duration: "
              << aligned_buffer_duration / 10000.0
              << " [ms]";
    } else if (hr == AUDCLNT_E_INVALID_DEVICE_PERIOD) {
      // We will get this error if we try to use a smaller buffer size than
      // the minimum supported size (usually ~3ms on Windows 7).
      LOG(ERROR) << "AUDCLNT_E_INVALID_DEVICE_PERIOD";
    }
    return hr;
  }

  if (use_event) {
    hr = client->SetEventHandle(event_handle);
    if (FAILED(hr)) {
      VLOG(1) << "IAudioClient::SetEventHandle: " << std::hex << hr;
      return hr;
    }
  }

  UINT32 buffer_size_in_frames = 0;
  hr = client->GetBufferSize(&buffer_size_in_frames);
  if (FAILED(hr)) {
    VLOG(1) << "IAudioClient::GetBufferSize: " << std::hex << hr;
    return hr;
  }

  *endpoint_buffer_size = buffer_size_in_frames;
  VLOG(2) << "endpoint buffer size: " << buffer_size_in_frames;
  return hr;
}

void WASAPIAudioOutputStream::StopThread() {
  if (render_thread_) {
    if (render_thread_->HasBeenStarted()) {
      // Wait until the thread completes and perform cleanup.
      SetEvent(stop_render_event_.Get());
      render_thread_->Join();
    }

    render_thread_.reset();

    // Ensure that we don't quit the main thread loop immediately next
    // time Start() is called.
    ResetEvent(stop_render_event_.Get());
  }

  source_ = NULL;
}

}  // namespace media
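  // Example with hypothetical values: 440 frames at 44.1 kHz gives
  // f = 1000.0 * 440 / 44100 ~= 9.977 ms, which converts to
  // requested_buffer_duration ~= 99773 units of 100 nanoseconds
  // (REFERENCE_TIME).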
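      // Example with hypothetical values: an aligned buffer size of 448
      // frames at 44.1 kHz gives 10000000.0 * 448 / 44100 ~= 101587 units
      // of 100 nanoseconds, i.e. roughly 10.16 ms.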