// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/audio/win/core_audio_util_win.h"

#include <Audioclient.h>
#include <Functiondiscoverykeys_devpkey.h>

#include "base/command_line.h"
#include "base/logging.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/win/scoped_co_mem.h"
#include "base/win/scoped_handle.h"
#include "base/win/scoped_propvariant.h"
#include "base/win/windows_version.h"
#include "media/base/media_switches.h"

using base::win::ScopedCoMem;
using base::win::ScopedHandle;

namespace media {

enum { KSAUDIO_SPEAKER_UNSUPPORTED = 0 };

typedef uint32 ChannelConfig;

// Converts Microsoft's channel configuration to ChannelLayout.
// This mapping is not perfect but the best we can do given the current
// ChannelLayout enumerator and the Windows-specific speaker configurations
// defined in ksmedia.h. Don't assume that the channel ordering in
// ChannelLayout is exactly the same as the Windows specific configuration.
// As an example: KSAUDIO_SPEAKER_7POINT1_SURROUND is mapped to
// CHANNEL_LAYOUT_7_1 but the positions of Back L, Back R and Side L, Side R
// speakers are different in these two definitions.
static ChannelLayout ChannelConfigToChannelLayout(ChannelConfig config) {
  switch (config) {
    case KSAUDIO_SPEAKER_DIRECTOUT:
      DVLOG(2) << "KSAUDIO_SPEAKER_DIRECTOUT=>CHANNEL_LAYOUT_NONE";
      return CHANNEL_LAYOUT_NONE;
    case KSAUDIO_SPEAKER_MONO:
      DVLOG(2) << "KSAUDIO_SPEAKER_MONO=>CHANNEL_LAYOUT_MONO";
      return CHANNEL_LAYOUT_MONO;
    case KSAUDIO_SPEAKER_STEREO:
      DVLOG(2) << "KSAUDIO_SPEAKER_STEREO=>CHANNEL_LAYOUT_STEREO";
      return CHANNEL_LAYOUT_STEREO;
    case KSAUDIO_SPEAKER_QUAD:
      DVLOG(2) << "KSAUDIO_SPEAKER_QUAD=>CHANNEL_LAYOUT_QUAD";
      return CHANNEL_LAYOUT_QUAD;
    case KSAUDIO_SPEAKER_SURROUND:
      DVLOG(2) << "KSAUDIO_SPEAKER_SURROUND=>CHANNEL_LAYOUT_4_0";
      return CHANNEL_LAYOUT_4_0;
    case KSAUDIO_SPEAKER_5POINT1:
      DVLOG(2) << "KSAUDIO_SPEAKER_5POINT1=>CHANNEL_LAYOUT_5_1_BACK";
      return CHANNEL_LAYOUT_5_1_BACK;
    case KSAUDIO_SPEAKER_5POINT1_SURROUND:
      DVLOG(2) << "KSAUDIO_SPEAKER_5POINT1_SURROUND=>CHANNEL_LAYOUT_5_1";
      return CHANNEL_LAYOUT_5_1;
    case KSAUDIO_SPEAKER_7POINT1:
      DVLOG(2) << "KSAUDIO_SPEAKER_7POINT1=>CHANNEL_LAYOUT_7_1_WIDE";
      return CHANNEL_LAYOUT_7_1_WIDE;
    case KSAUDIO_SPEAKER_7POINT1_SURROUND:
      DVLOG(2) << "KSAUDIO_SPEAKER_7POINT1_SURROUND=>CHANNEL_LAYOUT_7_1";
      return CHANNEL_LAYOUT_7_1;
    default:
      DVLOG(2) << "Unsupported channel configuration: " << config;
      return CHANNEL_LAYOUT_UNSUPPORTED;
  }
}

// TODO(henrika): add mapping for all types in the ChannelLayout enumerator.
static ChannelConfig ChannelLayoutToChannelConfig(ChannelLayout layout) {
  switch (layout) {
    case CHANNEL_LAYOUT_NONE:
      DVLOG(2) << "CHANNEL_LAYOUT_NONE=>KSAUDIO_SPEAKER_UNSUPPORTED";
      return KSAUDIO_SPEAKER_UNSUPPORTED;
    case CHANNEL_LAYOUT_UNSUPPORTED:
      DVLOG(2) << "CHANNEL_LAYOUT_UNSUPPORTED=>KSAUDIO_SPEAKER_UNSUPPORTED";
      return KSAUDIO_SPEAKER_UNSUPPORTED;
    case CHANNEL_LAYOUT_MONO:
      DVLOG(2) << "CHANNEL_LAYOUT_MONO=>KSAUDIO_SPEAKER_MONO";
      return KSAUDIO_SPEAKER_MONO;
    case CHANNEL_LAYOUT_STEREO:
      DVLOG(2) << "CHANNEL_LAYOUT_STEREO=>KSAUDIO_SPEAKER_STEREO";
      return KSAUDIO_SPEAKER_STEREO;
    case CHANNEL_LAYOUT_QUAD:
      DVLOG(2) << "CHANNEL_LAYOUT_QUAD=>KSAUDIO_SPEAKER_QUAD";
      return KSAUDIO_SPEAKER_QUAD;
    case CHANNEL_LAYOUT_4_0:
      DVLOG(2) << "CHANNEL_LAYOUT_4_0=>KSAUDIO_SPEAKER_SURROUND";
      return KSAUDIO_SPEAKER_SURROUND;
    case CHANNEL_LAYOUT_5_1_BACK:
      DVLOG(2) << "CHANNEL_LAYOUT_5_1_BACK=>KSAUDIO_SPEAKER_5POINT1";
      return KSAUDIO_SPEAKER_5POINT1;
    case CHANNEL_LAYOUT_5_1:
      DVLOG(2) << "CHANNEL_LAYOUT_5_1=>KSAUDIO_SPEAKER_5POINT1_SURROUND";
      return KSAUDIO_SPEAKER_5POINT1_SURROUND;
    case CHANNEL_LAYOUT_7_1_WIDE:
      DVLOG(2) << "CHANNEL_LAYOUT_7_1_WIDE=>KSAUDIO_SPEAKER_7POINT1";
      return KSAUDIO_SPEAKER_7POINT1;
    case CHANNEL_LAYOUT_7_1:
      DVLOG(2) << "CHANNEL_LAYOUT_7_1=>KSAUDIO_SPEAKER_7POINT1_SURROUND";
      return KSAUDIO_SPEAKER_7POINT1_SURROUND;
    default:
      DVLOG(2) << "Unsupported channel layout: " << layout;
      return KSAUDIO_SPEAKER_UNSUPPORTED;
  }
}

static std::ostream& operator<<(std::ostream& os,
                                const WAVEFORMATPCMEX& format) {
  os << "wFormatTag: 0x" << std::hex << format.Format.wFormatTag
     << ", nChannels: " << std::dec << format.Format.nChannels
     << ", nSamplesPerSec: " << format.Format.nSamplesPerSec
     << ", nAvgBytesPerSec: " << format.Format.nAvgBytesPerSec
     << ", nBlockAlign: " << format.Format.nBlockAlign
     << ", wBitsPerSample: " << format.Format.wBitsPerSample
     << ", cbSize: " << format.Format.cbSize
     << ", wValidBitsPerSample: " << format.Samples.wValidBitsPerSample
     << ", dwChannelMask: 0x" << std::hex << format.dwChannelMask;
  return os;
}

bool LoadAudiosesDll() {
  static const wchar_t* const kAudiosesDLL =
      L"%WINDIR%\\system32\\audioses.dll";

  wchar_t path[MAX_PATH] = {0};
  ExpandEnvironmentStringsW(kAudiosesDLL, path, arraysize(path));
  return (LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH) != NULL);
}

bool CanCreateDeviceEnumerator() {
  ScopedComPtr<IMMDeviceEnumerator> device_enumerator;
  HRESULT hr = device_enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
                                                NULL, CLSCTX_INPROC_SERVER);

  // If we hit CO_E_NOTINITIALIZED, CoInitialize has not been called and it
  // must be called at least once for each thread that uses the COM library.
  CHECK_NE(hr, CO_E_NOTINITIALIZED);

  return SUCCEEDED(hr);
}

bool CoreAudioUtil::IsSupported() {
  // It is possible to force usage of the WaveXxx APIs with a command-line
  // flag.
  const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
  if (cmd_line->HasSwitch(switches::kForceWaveAudio)) {
    LOG(WARNING) << "Forcing usage of Windows WaveXxx APIs";
    return false;
  }

  // Microsoft does not plan to make the Core Audio APIs available for use
  // with earlier versions of Windows, including Microsoft Windows Server 2003,
  // Windows XP, Windows Millennium Edition, Windows 2000, and Windows 98.
  if (base::win::GetVersion() < base::win::VERSION_VISTA)
    return false;

  // The Core Audio APIs are implemented in the Mmdevapi.dll and Audioses.dll
  // system components.
  // Dependency Walker shows that it is enough to verify that the Audioses DLL
  // can be loaded since it depends on Mmdevapi.dll.
  // See http://crbug.com/166397 for why this extra step is required to
  // guarantee Core Audio support.
  static bool g_audioses_dll_available = LoadAudiosesDll();
  if (!g_audioses_dll_available)
    return false;

  // Being able to load Audioses.dll does not seem to be sufficient for all
  // devices to guarantee Core Audio support. To be certain, we also verify
  // that it is possible to create the IMMDeviceEnumerator interface. If that
  // succeeds as well, Core Audio can safely be used.
  static bool g_can_create_device_enumerator = CanCreateDeviceEnumerator();
  LOG_IF(ERROR, !g_can_create_device_enumerator)
      << "Failed to create Core Audio device enumerator on thread with ID "
      << GetCurrentThreadId();
  return g_can_create_device_enumerator;
}

base::TimeDelta CoreAudioUtil::RefererenceTimeToTimeDelta(REFERENCE_TIME time) {
  // Each unit of reference time is 100 nanoseconds <=> 0.1 microsecond.
  return base::TimeDelta::FromMicroseconds(0.1 * time + 0.5);
}
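
// Worked example (illustrative only): a REFERENCE_TIME value of 100000
// corresponds to 100000 * 100 ns = 10 ms, so the returned TimeDelta is
// 10000 microseconds; the + 0.5 term rounds to the nearest microsecond.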

AUDCLNT_SHAREMODE CoreAudioUtil::GetShareMode() {
  const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
  if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio))
    return AUDCLNT_SHAREMODE_EXCLUSIVE;
  return AUDCLNT_SHAREMODE_SHARED;
}

int CoreAudioUtil::NumberOfActiveDevices(EDataFlow data_flow) {
  DCHECK(IsSupported());
  // Create the IMMDeviceEnumerator interface.
  ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
      CreateDeviceEnumerator();
  if (!device_enumerator)
    return 0;

  // Generate a collection of active (present and not disabled) audio endpoint
  // devices for the specified data-flow direction.
  // This method will succeed even if all devices are disabled.
  ScopedComPtr<IMMDeviceCollection> collection;
  HRESULT hr = device_enumerator->EnumAudioEndpoints(data_flow,
                                                     DEVICE_STATE_ACTIVE,
                                                     collection.Receive());
  if (FAILED(hr)) {
    LOG(ERROR) << "IMMDeviceEnumerator::EnumAudioEndpoints: "
               << std::hex << hr;
    return 0;
  }

  // Retrieve the number of active audio devices for the specified direction.
  UINT number_of_active_devices = 0;
  collection->GetCount(&number_of_active_devices);
  DVLOG(2) << ((data_flow == eCapture) ? "[in ] " : "[out] ")
           << "number of devices: " << number_of_active_devices;
  return static_cast<int>(number_of_active_devices);
}

ScopedComPtr<IMMDeviceEnumerator> CoreAudioUtil::CreateDeviceEnumerator() {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDeviceEnumerator> device_enumerator;
  HRESULT hr = device_enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
                                                NULL, CLSCTX_INPROC_SERVER);
  CHECK(SUCCEEDED(hr));
  return device_enumerator;
}

ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDefaultDevice(EDataFlow data_flow,
                                                           ERole role) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> endpoint_device;

  // Create the IMMDeviceEnumerator interface.
  ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
      CreateDeviceEnumerator();
  if (!device_enumerator)
    return endpoint_device;

  // Retrieve the default audio endpoint for the specified data-flow
  // direction and role.
  HRESULT hr = device_enumerator->GetDefaultAudioEndpoint(
      data_flow, role, endpoint_device.Receive());

  if (FAILED(hr)) {
    DVLOG(1) << "IMMDeviceEnumerator::GetDefaultAudioEndpoint: "
             << std::hex << hr;
    return endpoint_device;
  }

  // Verify that the audio endpoint device is active, i.e., that the audio
  // adapter that connects to the endpoint device is present and enabled.
  DWORD state = DEVICE_STATE_DISABLED;
  hr = endpoint_device->GetState(&state);
  if (SUCCEEDED(hr)) {
    if (!(state & DEVICE_STATE_ACTIVE)) {
      DVLOG(1) << "Selected endpoint device is not active";
      endpoint_device.Release();
    }
  }
  return endpoint_device;
}

ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDevice(
    const std::string& device_id) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> endpoint_device;

  // Create the IMMDeviceEnumerator interface.
  ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
      CreateDeviceEnumerator();
  if (!device_enumerator)
    return endpoint_device;

  // Retrieve an audio device specified by an endpoint device-identification
  // string.
  HRESULT hr = device_enumerator->GetDevice(UTF8ToUTF16(device_id).c_str(),
                                            endpoint_device.Receive());
  DVLOG_IF(1, FAILED(hr)) << "IMMDeviceEnumerator::GetDevice: "
                          << std::hex << hr;
  return endpoint_device;
}

HRESULT CoreAudioUtil::GetDeviceName(IMMDevice* device, AudioDeviceName* name) {
  DCHECK(IsSupported());

  // Retrieve unique name of endpoint device.
  // Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}".
  AudioDeviceName device_name;
  ScopedCoMem<WCHAR> endpoint_device_id;
  HRESULT hr = device->GetId(&endpoint_device_id);
  if (FAILED(hr))
    return hr;
  WideToUTF8(endpoint_device_id, wcslen(endpoint_device_id),
             &device_name.unique_id);

  // Retrieve user-friendly name of endpoint device.
  // Example: "Microphone (Realtek High Definition Audio)".
  ScopedComPtr<IPropertyStore> properties;
  hr = device->OpenPropertyStore(STGM_READ, properties.Receive());
  if (FAILED(hr))
    return hr;
  base::win::ScopedPropVariant friendly_name;
  hr = properties->GetValue(PKEY_Device_FriendlyName, friendly_name.Receive());
  if (FAILED(hr))
    return hr;
  if (friendly_name.get().vt == VT_LPWSTR && friendly_name.get().pwszVal) {
    WideToUTF8(friendly_name.get().pwszVal,
               wcslen(friendly_name.get().pwszVal),
               &device_name.device_name);
  }

  *name = device_name;
  DVLOG(2) << "friendly name: " << device_name.device_name;
  DVLOG(2) << "unique id    : " << device_name.unique_id;
  return hr;
}

std::string CoreAudioUtil::GetFriendlyName(const std::string& device_id) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> audio_device = CreateDevice(device_id);
  if (!audio_device)
    return std::string();

  AudioDeviceName device_name;
  HRESULT hr = GetDeviceName(audio_device, &device_name);
  if (FAILED(hr))
    return std::string();

  return device_name.device_name;
}

bool CoreAudioUtil::DeviceIsDefault(EDataFlow flow,
                                    ERole role,
                                    const std::string& device_id) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> device = CreateDefaultDevice(flow, role);
  if (!device)
    return false;

  ScopedCoMem<WCHAR> default_device_id;
  HRESULT hr = device->GetId(&default_device_id);
  if (FAILED(hr))
    return false;

  std::string str_default;
  WideToUTF8(default_device_id, wcslen(default_device_id), &str_default);
  if (device_id.compare(str_default) != 0)
    return false;
  return true;
}

EDataFlow CoreAudioUtil::GetDataFlow(IMMDevice* device) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMEndpoint> endpoint;
  HRESULT hr = device->QueryInterface(endpoint.Receive());
  if (FAILED(hr)) {
    DVLOG(1) << "IMMDevice::QueryInterface: " << std::hex << hr;
    return eAll;
  }

  EDataFlow data_flow;
  hr = endpoint->GetDataFlow(&data_flow);
  if (FAILED(hr)) {
    DVLOG(1) << "IMMEndpoint::GetDataFlow: " << std::hex << hr;
    return eAll;
  }
  return data_flow;
}

ScopedComPtr<IAudioClient> CoreAudioUtil::CreateClient(
    IMMDevice* audio_device) {
  DCHECK(IsSupported());

  // Creates and activates an IAudioClient COM object given the selected
  // endpoint device.
  ScopedComPtr<IAudioClient> audio_client;
  HRESULT hr = audio_device->Activate(__uuidof(IAudioClient),
                                      CLSCTX_INPROC_SERVER,
                                      NULL,
                                      audio_client.ReceiveVoid());
  DVLOG_IF(1, FAILED(hr)) << "IMMDevice::Activate: " << std::hex << hr;
  return audio_client;
}

ScopedComPtr<IAudioClient> CoreAudioUtil::CreateDefaultClient(
    EDataFlow data_flow, ERole role) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> default_device(CreateDefaultDevice(data_flow, role));
  return (default_device ? CreateClient(default_device) :
      ScopedComPtr<IAudioClient>());
}

HRESULT CoreAudioUtil::GetSharedModeMixFormat(
    IAudioClient* client, WAVEFORMATPCMEX* format) {
  DCHECK(IsSupported());
  ScopedCoMem<WAVEFORMATPCMEX> format_pcmex;
  HRESULT hr = client->GetMixFormat(
      reinterpret_cast<WAVEFORMATEX**>(&format_pcmex));
  if (FAILED(hr))
    return hr;

  size_t bytes = sizeof(WAVEFORMATEX) + format_pcmex->Format.cbSize;
  DCHECK_EQ(bytes, sizeof(WAVEFORMATPCMEX));

  memcpy(format, format_pcmex, bytes);
  DVLOG(2) << *format;

  return hr;
}

HRESULT CoreAudioUtil::GetDefaultSharedModeMixFormat(
    EDataFlow data_flow, ERole role, WAVEFORMATPCMEX* format) {
  DCHECK(IsSupported());
  ScopedComPtr<IAudioClient> client(CreateDefaultClient(data_flow, role));
  if (!client) {
    // Map a NULL pointer to a generic error code; the exact value is not
    // important here, it only needs to signal failure to the caller.
    return AUDCLNT_E_ENDPOINT_CREATE_FAILED;
  }
  return CoreAudioUtil::GetSharedModeMixFormat(client, format);
}

bool CoreAudioUtil::IsFormatSupported(IAudioClient* client,
                                      AUDCLNT_SHAREMODE share_mode,
                                      const WAVEFORMATPCMEX* format) {
  DCHECK(IsSupported());
  ScopedCoMem<WAVEFORMATEXTENSIBLE> closest_match;
  HRESULT hr = client->IsFormatSupported(
      share_mode, reinterpret_cast<const WAVEFORMATEX*>(format),
      reinterpret_cast<WAVEFORMATEX**>(&closest_match));

  // This log can only be triggered for shared mode.
  DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported "
                                << "but a closest match exists.";
  // This log can be triggered both for shared and exclusive modes.
  DLOG_IF(ERROR, hr == AUDCLNT_E_UNSUPPORTED_FORMAT) << "Unsupported format.";
  if (hr == S_FALSE) {
    DVLOG(2) << *closest_match;
  }

  return (hr == S_OK);
}

bool CoreAudioUtil::IsChannelLayoutSupported(EDataFlow data_flow, ERole role,
                                             ChannelLayout channel_layout) {
  DCHECK(IsSupported());

  // First, get the preferred mixing format for shared mode streams.

  ScopedComPtr<IAudioClient> client(CreateDefaultClient(data_flow, role));
  if (!client)
    return false;

  WAVEFORMATPCMEX format;
  HRESULT hr = CoreAudioUtil::GetSharedModeMixFormat(client, &format);
  if (FAILED(hr))
    return false;

  // Next, check if it is possible to use an alternative format where the
  // channel layout (and possibly number of channels) is modified.

  // Convert generic channel layout into Windows-specific channel configuration.
  ChannelConfig new_config = ChannelLayoutToChannelConfig(channel_layout);
  if (new_config == KSAUDIO_SPEAKER_UNSUPPORTED) {
    return false;
  }
  format.dwChannelMask = new_config;

  // Modify the format if the new channel layout has changed the number of
  // utilized channels.
  const int channels = ChannelLayoutToChannelCount(channel_layout);
  if (channels != format.Format.nChannels) {
    format.Format.nChannels = channels;
    format.Format.nBlockAlign = (format.Format.wBitsPerSample / 8) * channels;
    format.Format.nAvgBytesPerSec = format.Format.nSamplesPerSec *
                                    format.Format.nBlockAlign;
  }
  DVLOG(2) << format;

  // Some devices can initialize a shared-mode stream with a format that is
  // not identical to the mix format obtained from the GetMixFormat() method.
  // However, the chances of success increase if we use the same number of
  // channels and the same sample rate as the mix format. That is, this call
  // will return true only in those cases where the audio engine is able to
  // support an even wider range of shared-mode formats, e.g. when the
  // installation package for the audio device includes a local effects (LFX)
  // audio processing object (APO) that can handle format conversions.
  return CoreAudioUtil::IsFormatSupported(client, AUDCLNT_SHAREMODE_SHARED,
                                          &format);
}

HRESULT CoreAudioUtil::GetDevicePeriod(IAudioClient* client,
                                       AUDCLNT_SHAREMODE share_mode,
                                       REFERENCE_TIME* device_period) {
  DCHECK(IsSupported());

  // Get the period of the engine thread.
  REFERENCE_TIME default_period = 0;
  REFERENCE_TIME minimum_period = 0;
  HRESULT hr = client->GetDevicePeriod(&default_period, &minimum_period);
  if (FAILED(hr))
    return hr;

  *device_period = (share_mode == AUDCLNT_SHAREMODE_SHARED) ? default_period :
      minimum_period;
  DVLOG(2) << "device_period: "
           << RefererenceTimeToTimeDelta(*device_period).InMillisecondsF()
           << " [ms]";
  return hr;
}
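
// Note (informational, not guaranteed by the API): on most systems the
// shared-mode (default) period returned here is roughly 10 ms, while the
// minimum period used for exclusive mode is typically a few milliseconds.
// Both values are device and driver dependent, so callers should not assume
// specific numbers.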

HRESULT CoreAudioUtil::GetPreferredAudioParameters(
    IAudioClient* client, AudioParameters* params) {
  DCHECK(IsSupported());
  WAVEFORMATPCMEX mix_format;
  HRESULT hr = GetSharedModeMixFormat(client, &mix_format);
  if (FAILED(hr))
    return hr;

  REFERENCE_TIME default_period = 0;
  hr = GetDevicePeriod(client, AUDCLNT_SHAREMODE_SHARED, &default_period);
  if (FAILED(hr))
    return hr;

  // Get the integer mask which corresponds to the channel layout the
  // audio engine uses for its internal processing/mixing of shared-mode
  // streams. This mask indicates which channels are present in the multi-
  // channel stream. The least significant bit corresponds with the Front Left
  // speaker, the next least significant bit corresponds to the Front Right
  // speaker, and so on, continuing in the order defined in KsMedia.h.
  // See http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083.aspx
  // for more details.
  ChannelConfig channel_config = mix_format.dwChannelMask;

  // Convert Microsoft's channel configuration to a generic ChannelLayout.
  ChannelLayout channel_layout = ChannelConfigToChannelLayout(channel_config);

  // Preferred sample rate.
  int sample_rate = mix_format.Format.nSamplesPerSec;

  // TODO(henrika): possibly use format.Format.wBitsPerSample here instead.
  // We use a hard-coded value of 16 bits per sample today even though most
  // audio engines do the actual mixing in 32 bits per sample.
  int bits_per_sample = 16;

  // We are using the native device period to derive the smallest possible
  // buffer size in shared mode. Note that the actual endpoint buffer will be
  // larger than this size, but it will be possible to fill it up in two calls.
  // TODO(henrika): ensure that this scheme works for capturing as well.
  int frames_per_buffer = static_cast<int>(sample_rate *
      RefererenceTimeToTimeDelta(default_period).InSecondsF() + 0.5);

  DVLOG(1) << "channel_layout   : " << channel_layout;
  DVLOG(1) << "sample_rate      : " << sample_rate;
  DVLOG(1) << "bits_per_sample  : " << bits_per_sample;
  DVLOG(1) << "frames_per_buffer: " << frames_per_buffer;

  AudioParameters audio_params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
                               channel_layout,
                               sample_rate,
                               bits_per_sample,
                               frames_per_buffer);

  *params = audio_params;
  return hr;
}
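
// Worked example (illustrative, assuming a 48000 Hz mix format and a 10 ms
// default device period): frames_per_buffer = 48000 * 0.010 + 0.5, truncated
// to 480 frames, i.e. one buffer of audio every 10 ms.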

HRESULT CoreAudioUtil::GetPreferredAudioParameters(
    EDataFlow data_flow, ERole role, AudioParameters* params) {
  DCHECK(IsSupported());
  ScopedComPtr<IAudioClient> client(CreateDefaultClient(data_flow, role));
  if (!client) {
    // Map a NULL pointer to a generic error code; the exact value is not
    // important here, it only needs to signal failure to the caller.
    return AUDCLNT_E_ENDPOINT_CREATE_FAILED;
  }
  return GetPreferredAudioParameters(client, params);
}

HRESULT CoreAudioUtil::GetPreferredAudioParameters(
    const std::string& device_id, AudioParameters* params) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> device(CreateDevice(device_id));
  if (!device) {
    // Map a NULL pointer to a generic error code; the exact value is not
    // important here, it only needs to signal failure to the caller.
    return AUDCLNT_E_DEVICE_INVALIDATED;
  }

  ScopedComPtr<IAudioClient> client(CreateClient(device));
  if (!client) {
    // Map a NULL pointer to a generic error code; the exact value is not
    // important here, it only needs to signal failure to the caller.
    return AUDCLNT_E_ENDPOINT_CREATE_FAILED;
  }
  return GetPreferredAudioParameters(client, params);
}

HRESULT CoreAudioUtil::SharedModeInitialize(IAudioClient* client,
                                            const WAVEFORMATPCMEX* format,
                                            HANDLE event_handle,
                                            uint32* endpoint_buffer_size) {
  DCHECK(IsSupported());

  // Use default flags (i.e., don't set AUDCLNT_STREAMFLAGS_NOPERSIST) to
  // ensure that the volume level and muting state for a rendering session
  // are persistent across system restarts. The volume level and muting
  // state for a capture session are never persistent.
  DWORD stream_flags = 0;

  // Enable event-driven streaming if a valid event handle is provided.
  // After the stream starts, the audio engine will signal the event handle
  // to notify the client each time a buffer becomes ready to process.
  // Event-driven buffering is supported for both rendering and capturing.
  // Both shared-mode and exclusive-mode streams can use event-driven buffering.
  bool use_event = (event_handle != NULL &&
                    event_handle != INVALID_HANDLE_VALUE);
  if (use_event)
    stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
  DVLOG(2) << "stream_flags: 0x" << std::hex << stream_flags;

  // Initialize the shared mode client for minimal delay.
  HRESULT hr = client->Initialize(AUDCLNT_SHAREMODE_SHARED,
                                  stream_flags,
                                  0,
                                  0,
                                  reinterpret_cast<const WAVEFORMATEX*>(format),
                                  NULL);
  if (FAILED(hr)) {
    DVLOG(1) << "IAudioClient::Initialize: " << std::hex << hr;
    return hr;
  }

  if (use_event) {
    hr = client->SetEventHandle(event_handle);
    if (FAILED(hr)) {
      DVLOG(1) << "IAudioClient::SetEventHandle: " << std::hex << hr;
      return hr;
    }
  }

  UINT32 buffer_size_in_frames = 0;
  hr = client->GetBufferSize(&buffer_size_in_frames);
  if (FAILED(hr)) {
    DVLOG(1) << "IAudioClient::GetBufferSize: " << std::hex << hr;
    return hr;
  }

  *endpoint_buffer_size = buffer_size_in_frames;
  DVLOG(2) << "endpoint buffer size: " << buffer_size_in_frames;

  // TODO(henrika): utilize when delay measurements are added.
  REFERENCE_TIME latency = 0;
  hr = client->GetStreamLatency(&latency);
  DVLOG(2) << "stream latency: "
           << RefererenceTimeToTimeDelta(latency).InMillisecondsF() << " [ms]";
  return hr;
}
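
// Typical usage (sketch only, not part of this file's API contract): a render
// stream would normally combine the helpers in this file roughly as follows,
// with error handling omitted and |event_handle| assumed to be a valid event
// HANDLE created by the caller (or NULL for polling mode):
//
//   ScopedComPtr<IAudioClient> client(
//       CoreAudioUtil::CreateDefaultClient(eRender, eConsole));
//   WAVEFORMATPCMEX format;
//   CoreAudioUtil::GetSharedModeMixFormat(client, &format);
//   uint32 buffer_size_in_frames = 0;
//   CoreAudioUtil::SharedModeInitialize(client, &format, event_handle,
//                                       &buffer_size_in_frames);
//   ScopedComPtr<IAudioRenderClient> render_client(
//       CoreAudioUtil::CreateRenderClient(client));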

ScopedComPtr<IAudioRenderClient> CoreAudioUtil::CreateRenderClient(
    IAudioClient* client) {
  DCHECK(IsSupported());

  // Get access to the IAudioRenderClient interface. This interface
  // enables us to write output data to a rendering endpoint buffer.
  ScopedComPtr<IAudioRenderClient> audio_render_client;
  HRESULT hr = client->GetService(__uuidof(IAudioRenderClient),
                                  audio_render_client.ReceiveVoid());
  if (FAILED(hr)) {
    DVLOG(1) << "IAudioClient::GetService: " << std::hex << hr;
    return ScopedComPtr<IAudioRenderClient>();
  }
  return audio_render_client;
}

ScopedComPtr<IAudioCaptureClient> CoreAudioUtil::CreateCaptureClient(
    IAudioClient* client) {
  DCHECK(IsSupported());

  // Get access to the IAudioCaptureClient interface. This interface
  // enables us to read input data from a capturing endpoint buffer.
  ScopedComPtr<IAudioCaptureClient> audio_capture_client;
  HRESULT hr = client->GetService(__uuidof(IAudioCaptureClient),
                                  audio_capture_client.ReceiveVoid());
  if (FAILED(hr)) {
    DVLOG(1) << "IAudioClient::GetService: " << std::hex << hr;
    return ScopedComPtr<IAudioCaptureClient>();
  }
  return audio_capture_client;
}

bool CoreAudioUtil::FillRenderEndpointBufferWithSilence(
    IAudioClient* client, IAudioRenderClient* render_client) {
  DCHECK(IsSupported());

  UINT32 endpoint_buffer_size = 0;
  if (FAILED(client->GetBufferSize(&endpoint_buffer_size)))
    return false;

  UINT32 num_queued_frames = 0;
  if (FAILED(client->GetCurrentPadding(&num_queued_frames)))
    return false;

  BYTE* data = NULL;
  int num_frames_to_fill = endpoint_buffer_size - num_queued_frames;
  if (FAILED(render_client->GetBuffer(num_frames_to_fill, &data)))
    return false;

  // Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to
  // explicitly write silence data to the rendering buffer.
  DVLOG(2) << "filling up " << num_frames_to_fill << " frames with silence";
  return SUCCEEDED(render_client->ReleaseBuffer(num_frames_to_fill,
                                                AUDCLNT_BUFFERFLAGS_SILENT));
}
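
// Note (informational): render streams typically call the helper above once,
// after SharedModeInitialize() but before IAudioClient::Start(), so that the
// first buffer the audio engine pulls already contains valid (silent) data.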

}  // namespace media