Home | History | Annotate | Download | only in mac
      1 /*
      2  *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
      3  *
      4  *  Use of this source code is governed by a BSD-style license
      5  *  that can be found in the LICENSE file in the root of the source
      6  *  tree. An additional intellectual property rights grant can be found
      7  *  in the file PATENTS.  All contributing project authors may
      8  *  be found in the AUTHORS file in the root of the source tree.
      9  */
     10 
     11 #include "webrtc/base/arraysize.h"
     12 #include "webrtc/base/checks.h"
     13 #include "webrtc/base/platform_thread.h"
     14 #include "webrtc/modules/audio_device/audio_device_config.h"
     15 #include "webrtc/modules/audio_device/mac/audio_device_mac.h"
     16 #include "webrtc/modules/audio_device/mac/portaudio/pa_ringbuffer.h"
     17 #include "webrtc/system_wrappers/include/event_wrapper.h"
     18 #include "webrtc/system_wrappers/include/trace.h"
     19 
     20 #include <ApplicationServices/ApplicationServices.h>
     21 #include <libkern/OSAtomic.h>  // OSAtomicCompareAndSwap()
     22 #include <mach/mach.h>         // mach_task_self()
     23 #include <sys/sysctl.h>        // sysctlbyname()
     24 
     25 namespace webrtc {
     26 
// Evaluates |expr| (a CoreAudio call returning OSStatus); on failure logs
// the status as a four-character code via logCAMsg() and returns -1 from
// the enclosing function. Requires an OSStatus |err| and member |_id| in
// scope at the expansion site.
#define WEBRTC_CA_RETURN_ON_ERR(expr)                                  \
  do {                                                                 \
    err = expr;                                                        \
    if (err != noErr) {                                                \
      logCAMsg(kTraceError, kTraceAudioDevice, _id, "Error in " #expr, \
               (const char*) & err);                                   \
      return -1;                                                       \
    }                                                                  \
  } while (0)
     36 
// Same as WEBRTC_CA_RETURN_ON_ERR but only logs (at error level) and
// continues; the enclosing function does not return early.
#define WEBRTC_CA_LOG_ERR(expr)                                        \
  do {                                                                 \
    err = expr;                                                        \
    if (err != noErr) {                                                \
      logCAMsg(kTraceError, kTraceAudioDevice, _id, "Error in " #expr, \
               (const char*) & err);                                   \
    }                                                                  \
  } while (0)
     45 
// Same as WEBRTC_CA_LOG_ERR but logs at warning level for non-fatal
// CoreAudio failures.
#define WEBRTC_CA_LOG_WARN(expr)                                         \
  do {                                                                   \
    err = expr;                                                          \
    if (err != noErr) {                                                  \
      logCAMsg(kTraceWarning, kTraceAudioDevice, _id, "Error in " #expr, \
               (const char*) & err);                                     \
    }                                                                    \
  } while (0)
     54 
     55 enum { MaxNumberDevices = 64 };
     56 
     57 void AudioDeviceMac::AtomicSet32(int32_t* theValue, int32_t newValue) {
     58   while (1) {
     59     int32_t oldValue = *theValue;
     60     if (OSAtomicCompareAndSwap32Barrier(oldValue, newValue, theValue) == true) {
     61       return;
     62     }
     63   }
     64 }
     65 
     66 int32_t AudioDeviceMac::AtomicGet32(int32_t* theValue) {
     67   while (1) {
     68     int32_t value = *theValue;
     69     if (OSAtomicCompareAndSwap32Barrier(value, value, theValue) == true) {
     70       return value;
     71     }
     72   }
     73 }
     74 
// CoreAudio errors are best interpreted as four character strings.
// Logs "|msg|: CCCC" where CCCC is the four-character code stored in the
// OSStatus that |err| points at. On little-endian machines the four bytes
// are emitted in reverse order so the code reads in the natural order.
void AudioDeviceMac::logCAMsg(const TraceLevel level,
                              const TraceModule module,
                              const int32_t id,
                              const char* msg,
                              const char* err) {
  RTC_DCHECK(msg != NULL);
  RTC_DCHECK(err != NULL);

#ifdef WEBRTC_ARCH_BIG_ENDIAN
  WEBRTC_TRACE(level, module, id, "%s: %.4s", msg, err);
#else
  // We need to flip the characters in this case.
  WEBRTC_TRACE(level, module, id, "%s: %.1s%.1s%.1s%.1s", msg, err + 3, err + 2,
               err + 1, err);
#endif
}
     92 
// Constructs the ADM in an uninitialized state. All real setup (ring
// buffers, semaphores, HAL listeners) happens later in Init().
AudioDeviceMac::AudioDeviceMac(const int32_t id)
    : _ptrAudioBuffer(NULL),
      _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
      _stopEventRec(*EventWrapper::Create()),
      _stopEvent(*EventWrapper::Create()),
      _id(id),
      _mixerManager(id),
      _inputDeviceIndex(0),
      _outputDeviceIndex(0),
      _inputDeviceID(kAudioObjectUnknown),
      _outputDeviceID(kAudioObjectUnknown),
      _inputDeviceIsSpecified(false),
      _outputDeviceIsSpecified(false),
      _recChannels(N_REC_CHANNELS),
      _playChannels(N_PLAY_CHANNELS),
      _captureBufData(NULL),
      _renderBufData(NULL),
      _playBufType(AudioDeviceModule::kFixedBufferSize),
      _initialized(false),
      _isShutDown(false),
      _recording(false),
      _playing(false),
      _recIsInitialized(false),
      _playIsInitialized(false),
      _AGC(false),
      _renderDeviceIsAlive(1),
      _captureDeviceIsAlive(1),
      _twoDevices(true),
      _doStop(false),
      _doStopRec(false),
      _macBookPro(false),
      _macBookProPanRight(false),
      _captureLatencyUs(0),
      _renderLatencyUs(0),
      _captureDelayUs(0),
      _renderDelayUs(0),
      _renderDelayOffsetSamples(0),
      _playBufDelayFixed(20),
      _playWarning(0),
      _playError(0),
      _recWarning(0),
      _recError(0),
      _paCaptureBuffer(NULL),
      _paRenderBuffer(NULL),
      _captureBufSizeSamples(0),
      _renderBufSizeSamples(0),
      prev_key_state_(),
      get_mic_volume_counter_ms_(0) {
  WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, "%s created", __FUNCTION__);

  // NOTE(review): these checks are tautological -- the address of a
  // reference is never NULL -- so they cannot detect a failed Create().
  RTC_DCHECK(&_stopEvent != NULL);
  RTC_DCHECK(&_stopEventRec != NULL);

  // Zero the converter scratch state and all four stream-format
  // descriptors so later setup starts from a known-clean state.
  memset(_renderConvertData, 0, sizeof(_renderConvertData));
  memset(&_outStreamFormat, 0, sizeof(AudioStreamBasicDescription));
  memset(&_outDesiredFormat, 0, sizeof(AudioStreamBasicDescription));
  memset(&_inStreamFormat, 0, sizeof(AudioStreamBasicDescription));
  memset(&_inDesiredFormat, 0, sizeof(AudioStreamBasicDescription));
}
    152 
// Tears down everything Init() created: terminates if still initialized,
// frees the PortAudio ring buffers and sample buffers, destroys the mach
// semaphores and finally the heap-allocated sync primitives from the
// constructor's initializer list.
AudioDeviceMac::~AudioDeviceMac() {
  WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s destroyed",
               __FUNCTION__);

  if (!_isShutDown) {
    Terminate();
  }

  // Worker threads must already have been stopped and released.
  RTC_DCHECK(!capture_worker_thread_.get());
  RTC_DCHECK(!render_worker_thread_.get());

  if (_paRenderBuffer) {
    delete _paRenderBuffer;
    _paRenderBuffer = NULL;
  }

  if (_paCaptureBuffer) {
    delete _paCaptureBuffer;
    _paCaptureBuffer = NULL;
  }

  if (_renderBufData) {
    delete[] _renderBufData;
    _renderBufData = NULL;
  }

  if (_captureBufData) {
    delete[] _captureBufData;
    _captureBufData = NULL;
  }

  // NOTE(review): the semaphores are created in Init(); if Init() was never
  // called, these destroy calls operate on uninitialized members --
  // confirm the ADM contract guarantees Init() before destruction.
  kern_return_t kernErr = KERN_SUCCESS;
  kernErr = semaphore_destroy(mach_task_self(), _renderSemaphore);
  if (kernErr != KERN_SUCCESS) {
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                 " semaphore_destroy() error: %d", kernErr);
  }

  kernErr = semaphore_destroy(mach_task_self(), _captureSemaphore);
  if (kernErr != KERN_SUCCESS) {
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                 " semaphore_destroy() error: %d", kernErr);
  }

  // These were allocated via Create() in the constructor and are held by
  // reference, hence the delete-of-address form.
  delete &_stopEvent;
  delete &_stopEventRec;
  delete &_critSect;
}
    201 
    202 // ============================================================================
    203 //                                     API
    204 // ============================================================================
    205 
// Registers the shared audio buffer used to exchange samples with the
// voice engine, and seeds it with this implementation's default sample
// rates and channel counts.
void AudioDeviceMac::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
  CriticalSectionScoped lock(&_critSect);

  _ptrAudioBuffer = audioBuffer;

  // inform the AudioBuffer about default settings for this implementation
  _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
  _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
  _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS);
  _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS);
}
    217 
// Reports the active audio layer; this implementation always uses the
// platform default (CoreAudio).
int32_t AudioDeviceMac::ActiveAudioLayer(
    AudioDeviceModule::AudioLayer& audioLayer) const {
  audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
  return 0;
}
    223 
    224 int32_t AudioDeviceMac::Init() {
    225   CriticalSectionScoped lock(&_critSect);
    226 
    227   if (_initialized) {
    228     return 0;
    229   }
    230 
    231   OSStatus err = noErr;
    232 
    233   _isShutDown = false;
    234 
    235   // PortAudio ring buffers require an elementCount which is a power of two.
    236   if (_renderBufData == NULL) {
    237     UInt32 powerOfTwo = 1;
    238     while (powerOfTwo < PLAY_BUF_SIZE_IN_SAMPLES) {
    239       powerOfTwo <<= 1;
    240     }
    241     _renderBufSizeSamples = powerOfTwo;
    242     _renderBufData = new SInt16[_renderBufSizeSamples];
    243   }
    244 
    245   if (_paRenderBuffer == NULL) {
    246     _paRenderBuffer = new PaUtilRingBuffer;
    247     PaRingBufferSize bufSize = -1;
    248     bufSize = PaUtil_InitializeRingBuffer(
    249         _paRenderBuffer, sizeof(SInt16), _renderBufSizeSamples, _renderBufData);
    250     if (bufSize == -1) {
    251       WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
    252                    " PaUtil_InitializeRingBuffer() error");
    253       return -1;
    254     }
    255   }
    256 
    257   if (_captureBufData == NULL) {
    258     UInt32 powerOfTwo = 1;
    259     while (powerOfTwo < REC_BUF_SIZE_IN_SAMPLES) {
    260       powerOfTwo <<= 1;
    261     }
    262     _captureBufSizeSamples = powerOfTwo;
    263     _captureBufData = new Float32[_captureBufSizeSamples];
    264   }
    265 
    266   if (_paCaptureBuffer == NULL) {
    267     _paCaptureBuffer = new PaUtilRingBuffer;
    268     PaRingBufferSize bufSize = -1;
    269     bufSize =
    270         PaUtil_InitializeRingBuffer(_paCaptureBuffer, sizeof(Float32),
    271                                     _captureBufSizeSamples, _captureBufData);
    272     if (bufSize == -1) {
    273       WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
    274                    " PaUtil_InitializeRingBuffer() error");
    275       return -1;
    276     }
    277   }
    278 
    279   kern_return_t kernErr = KERN_SUCCESS;
    280   kernErr = semaphore_create(mach_task_self(), &_renderSemaphore,
    281                              SYNC_POLICY_FIFO, 0);
    282   if (kernErr != KERN_SUCCESS) {
    283     WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
    284                  " semaphore_create() error: %d", kernErr);
    285     return -1;
    286   }
    287 
    288   kernErr = semaphore_create(mach_task_self(), &_captureSemaphore,
    289                              SYNC_POLICY_FIFO, 0);
    290   if (kernErr != KERN_SUCCESS) {
    291     WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
    292                  " semaphore_create() error: %d", kernErr);
    293     return -1;
    294   }
    295 
    296   // Setting RunLoop to NULL here instructs HAL to manage its own thread for
    297   // notifications. This was the default behaviour on OS X 10.5 and earlier,
    298   // but now must be explicitly specified. HAL would otherwise try to use the
    299   // main thread to issue notifications.
    300   AudioObjectPropertyAddress propertyAddress = {
    301       kAudioHardwarePropertyRunLoop, kAudioObjectPropertyScopeGlobal,
    302       kAudioObjectPropertyElementMaster};
    303   CFRunLoopRef runLoop = NULL;
    304   UInt32 size = sizeof(CFRunLoopRef);
    305   WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
    306       kAudioObjectSystemObject, &propertyAddress, 0, NULL, size, &runLoop));
    307 
    308   // Listen for any device changes.
    309   propertyAddress.mSelector = kAudioHardwarePropertyDevices;
    310   WEBRTC_CA_LOG_ERR(AudioObjectAddPropertyListener(
    311       kAudioObjectSystemObject, &propertyAddress, &objectListenerProc, this));
    312 
    313   // Determine if this is a MacBook Pro
    314   _macBookPro = false;
    315   _macBookProPanRight = false;
    316   char buf[128];
    317   size_t length = sizeof(buf);
    318   memset(buf, 0, length);
    319 
    320   int intErr = sysctlbyname("hw.model", buf, &length, NULL, 0);
    321   if (intErr != 0) {
    322     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
    323                  " Error in sysctlbyname(): %d", err);
    324   } else {
    325     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " Hardware model: %s",
    326                  buf);
    327     if (strncmp(buf, "MacBookPro", 10) == 0) {
    328       _macBookPro = true;
    329     }
    330   }
    331 
    332   _playWarning = 0;
    333   _playError = 0;
    334   _recWarning = 0;
    335   _recError = 0;
    336 
    337   get_mic_volume_counter_ms_ = 0;
    338 
    339   _initialized = true;
    340 
    341   return 0;
    342 }
    343 
// Shuts the module down: closes the mixer manager, removes the
// device-change listener registered in Init() and unloads the HAL.
// Refuses to run while recording or playout is active. Returns 0 on
// success, -1 if AudioHardwareUnload() failed.
int32_t AudioDeviceMac::Terminate() {
  if (!_initialized) {
    return 0;
  }

  if (_recording) {
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                 " Recording must be stopped");
    return -1;
  }

  if (_playing) {
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                 " Playback must be stopped");
    return -1;
  }

  _critSect.Enter();

  _mixerManager.Close();

  OSStatus err = noErr;  // written by the WEBRTC_CA_LOG_WARN macro
  int retVal = 0;

  // Stop listening for device changes (listener added in Init()).
  AudioObjectPropertyAddress propertyAddress = {
      kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal,
      kAudioObjectPropertyElementMaster};
  WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
      kAudioObjectSystemObject, &propertyAddress, &objectListenerProc, this));

  err = AudioHardwareUnload();
  if (err != noErr) {
    logCAMsg(kTraceError, kTraceAudioDevice, _id,
             "Error in AudioHardwareUnload()", (const char*)&err);
    retVal = -1;
  }

  _isShutDown = true;
  _initialized = false;
  _outputDeviceIsSpecified = false;
  _inputDeviceIsSpecified = false;

  _critSect.Leave();

  return retVal;
}
    390 
    391 bool AudioDeviceMac::Initialized() const {
    392   return (_initialized);
    393 }
    394 
    395 int32_t AudioDeviceMac::SpeakerIsAvailable(bool& available) {
    396   bool wasInitialized = _mixerManager.SpeakerIsInitialized();
    397 
    398   // Make an attempt to open up the
    399   // output mixer corresponding to the currently selected output device.
    400   //
    401   if (!wasInitialized && InitSpeaker() == -1) {
    402     available = false;
    403     return 0;
    404   }
    405 
    406   // Given that InitSpeaker was successful, we know that a valid speaker
    407   // exists.
    408   available = true;
    409 
    410   // Close the initialized output mixer
    411   //
    412   if (!wasInitialized) {
    413     _mixerManager.CloseSpeaker();
    414   }
    415 
    416   return 0;
    417 }
    418 
    419 int32_t AudioDeviceMac::InitSpeaker() {
    420   CriticalSectionScoped lock(&_critSect);
    421 
    422   if (_playing) {
    423     return -1;
    424   }
    425 
    426   if (InitDevice(_outputDeviceIndex, _outputDeviceID, false) == -1) {
    427     return -1;
    428   }
    429 
    430   if (_inputDeviceID == _outputDeviceID) {
    431     _twoDevices = false;
    432   } else {
    433     _twoDevices = true;
    434   }
    435 
    436   if (_mixerManager.OpenSpeaker(_outputDeviceID) == -1) {
    437     return -1;
    438   }
    439 
    440   return 0;
    441 }
    442 
    443 int32_t AudioDeviceMac::MicrophoneIsAvailable(bool& available) {
    444   bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
    445 
    446   // Make an attempt to open up the
    447   // input mixer corresponding to the currently selected output device.
    448   //
    449   if (!wasInitialized && InitMicrophone() == -1) {
    450     available = false;
    451     return 0;
    452   }
    453 
    454   // Given that InitMicrophone was successful, we know that a valid microphone
    455   // exists.
    456   available = true;
    457 
    458   // Close the initialized input mixer
    459   //
    460   if (!wasInitialized) {
    461     _mixerManager.CloseMicrophone();
    462   }
    463 
    464   return 0;
    465 }
    466 
    467 int32_t AudioDeviceMac::InitMicrophone() {
    468   CriticalSectionScoped lock(&_critSect);
    469 
    470   if (_recording) {
    471     return -1;
    472   }
    473 
    474   if (InitDevice(_inputDeviceIndex, _inputDeviceID, true) == -1) {
    475     return -1;
    476   }
    477 
    478   if (_inputDeviceID == _outputDeviceID) {
    479     _twoDevices = false;
    480   } else {
    481     _twoDevices = true;
    482   }
    483 
    484   if (_mixerManager.OpenMicrophone(_inputDeviceID) == -1) {
    485     return -1;
    486   }
    487 
    488   return 0;
    489 }
    490 
    491 bool AudioDeviceMac::SpeakerIsInitialized() const {
    492   return (_mixerManager.SpeakerIsInitialized());
    493 }
    494 
    495 bool AudioDeviceMac::MicrophoneIsInitialized() const {
    496   return (_mixerManager.MicrophoneIsInitialized());
    497 }
    498 
    499 int32_t AudioDeviceMac::SpeakerVolumeIsAvailable(bool& available) {
    500   bool wasInitialized = _mixerManager.SpeakerIsInitialized();
    501 
    502   // Make an attempt to open up the
    503   // output mixer corresponding to the currently selected output device.
    504   //
    505   if (!wasInitialized && InitSpeaker() == -1) {
    506     // If we end up here it means that the selected speaker has no volume
    507     // control.
    508     available = false;
    509     return 0;
    510   }
    511 
    512   // Given that InitSpeaker was successful, we know that a volume control exists
    513   //
    514   available = true;
    515 
    516   // Close the initialized output mixer
    517   //
    518   if (!wasInitialized) {
    519     _mixerManager.CloseSpeaker();
    520   }
    521 
    522   return 0;
    523 }
    524 
    525 int32_t AudioDeviceMac::SetSpeakerVolume(uint32_t volume) {
    526   return (_mixerManager.SetSpeakerVolume(volume));
    527 }
    528 
    529 int32_t AudioDeviceMac::SpeakerVolume(uint32_t& volume) const {
    530   uint32_t level(0);
    531 
    532   if (_mixerManager.SpeakerVolume(level) == -1) {
    533     return -1;
    534   }
    535 
    536   volume = level;
    537   return 0;
    538 }
    539 
// Waveform-out volume is a Windows-only concept; always fails on Mac.
int32_t AudioDeviceMac::SetWaveOutVolume(uint16_t volumeLeft,
                                         uint16_t volumeRight) {
  WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
               "  API call not supported on this platform");
  return -1;
}
    546 
// Waveform-out volume is a Windows-only concept; always fails on Mac.
int32_t AudioDeviceMac::WaveOutVolume(uint16_t& /*volumeLeft*/,
                                      uint16_t& /*volumeRight*/) const {
  WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
               "  API call not supported on this platform");
  return -1;
}
    553 
    554 int32_t AudioDeviceMac::MaxSpeakerVolume(uint32_t& maxVolume) const {
    555   uint32_t maxVol(0);
    556 
    557   if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) {
    558     return -1;
    559   }
    560 
    561   maxVolume = maxVol;
    562   return 0;
    563 }
    564 
    565 int32_t AudioDeviceMac::MinSpeakerVolume(uint32_t& minVolume) const {
    566   uint32_t minVol(0);
    567 
    568   if (_mixerManager.MinSpeakerVolume(minVol) == -1) {
    569     return -1;
    570   }
    571 
    572   minVolume = minVol;
    573   return 0;
    574 }
    575 
    576 int32_t AudioDeviceMac::SpeakerVolumeStepSize(uint16_t& stepSize) const {
    577   uint16_t delta(0);
    578 
    579   if (_mixerManager.SpeakerVolumeStepSize(delta) == -1) {
    580     return -1;
    581   }
    582 
    583   stepSize = delta;
    584   return 0;
    585 }
    586 
    587 int32_t AudioDeviceMac::SpeakerMuteIsAvailable(bool& available) {
    588   bool isAvailable(false);
    589   bool wasInitialized = _mixerManager.SpeakerIsInitialized();
    590 
    591   // Make an attempt to open up the
    592   // output mixer corresponding to the currently selected output device.
    593   //
    594   if (!wasInitialized && InitSpeaker() == -1) {
    595     // If we end up here it means that the selected speaker has no volume
    596     // control, hence it is safe to state that there is no mute control
    597     // already at this stage.
    598     available = false;
    599     return 0;
    600   }
    601 
    602   // Check if the selected speaker has a mute control
    603   //
    604   _mixerManager.SpeakerMuteIsAvailable(isAvailable);
    605 
    606   available = isAvailable;
    607 
    608   // Close the initialized output mixer
    609   //
    610   if (!wasInitialized) {
    611     _mixerManager.CloseSpeaker();
    612   }
    613 
    614   return 0;
    615 }
    616 
    617 int32_t AudioDeviceMac::SetSpeakerMute(bool enable) {
    618   return (_mixerManager.SetSpeakerMute(enable));
    619 }
    620 
    621 int32_t AudioDeviceMac::SpeakerMute(bool& enabled) const {
    622   bool muted(0);
    623 
    624   if (_mixerManager.SpeakerMute(muted) == -1) {
    625     return -1;
    626   }
    627 
    628   enabled = muted;
    629   return 0;
    630 }
    631 
    632 int32_t AudioDeviceMac::MicrophoneMuteIsAvailable(bool& available) {
    633   bool isAvailable(false);
    634   bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
    635 
    636   // Make an attempt to open up the
    637   // input mixer corresponding to the currently selected input device.
    638   //
    639   if (!wasInitialized && InitMicrophone() == -1) {
    640     // If we end up here it means that the selected microphone has no volume
    641     // control, hence it is safe to state that there is no boost control
    642     // already at this stage.
    643     available = false;
    644     return 0;
    645   }
    646 
    647   // Check if the selected microphone has a mute control
    648   //
    649   _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
    650   available = isAvailable;
    651 
    652   // Close the initialized input mixer
    653   //
    654   if (!wasInitialized) {
    655     _mixerManager.CloseMicrophone();
    656   }
    657 
    658   return 0;
    659 }
    660 
    661 int32_t AudioDeviceMac::SetMicrophoneMute(bool enable) {
    662   return (_mixerManager.SetMicrophoneMute(enable));
    663 }
    664 
    665 int32_t AudioDeviceMac::MicrophoneMute(bool& enabled) const {
    666   bool muted(0);
    667 
    668   if (_mixerManager.MicrophoneMute(muted) == -1) {
    669     return -1;
    670   }
    671 
    672   enabled = muted;
    673   return 0;
    674 }
    675 
    676 int32_t AudioDeviceMac::MicrophoneBoostIsAvailable(bool& available) {
    677   bool isAvailable(false);
    678   bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
    679 
    680   // Enumerate all avaliable microphone and make an attempt to open up the
    681   // input mixer corresponding to the currently selected input device.
    682   //
    683   if (!wasInitialized && InitMicrophone() == -1) {
    684     // If we end up here it means that the selected microphone has no volume
    685     // control, hence it is safe to state that there is no boost control
    686     // already at this stage.
    687     available = false;
    688     return 0;
    689   }
    690 
    691   // Check if the selected microphone has a boost control
    692   //
    693   _mixerManager.MicrophoneBoostIsAvailable(isAvailable);
    694   available = isAvailable;
    695 
    696   // Close the initialized input mixer
    697   //
    698   if (!wasInitialized) {
    699     _mixerManager.CloseMicrophone();
    700   }
    701 
    702   return 0;
    703 }
    704 
    705 int32_t AudioDeviceMac::SetMicrophoneBoost(bool enable) {
    706   return (_mixerManager.SetMicrophoneBoost(enable));
    707 }
    708 
    709 int32_t AudioDeviceMac::MicrophoneBoost(bool& enabled) const {
    710   bool onOff(0);
    711 
    712   if (_mixerManager.MicrophoneBoost(onOff) == -1) {
    713     return -1;
    714   }
    715 
    716   enabled = onOff;
    717   return 0;
    718 }
    719 
    720 int32_t AudioDeviceMac::StereoRecordingIsAvailable(bool& available) {
    721   bool isAvailable(false);
    722   bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
    723 
    724   if (!wasInitialized && InitMicrophone() == -1) {
    725     // Cannot open the specified device
    726     available = false;
    727     return 0;
    728   }
    729 
    730   // Check if the selected microphone can record stereo
    731   //
    732   _mixerManager.StereoRecordingIsAvailable(isAvailable);
    733   available = isAvailable;
    734 
    735   // Close the initialized input mixer
    736   //
    737   if (!wasInitialized) {
    738     _mixerManager.CloseMicrophone();
    739   }
    740 
    741   return 0;
    742 }
    743 
    744 int32_t AudioDeviceMac::SetStereoRecording(bool enable) {
    745   if (enable)
    746     _recChannels = 2;
    747   else
    748     _recChannels = 1;
    749 
    750   return 0;
    751 }
    752 
    753 int32_t AudioDeviceMac::StereoRecording(bool& enabled) const {
    754   if (_recChannels == 2)
    755     enabled = true;
    756   else
    757     enabled = false;
    758 
    759   return 0;
    760 }
    761 
    762 int32_t AudioDeviceMac::StereoPlayoutIsAvailable(bool& available) {
    763   bool isAvailable(false);
    764   bool wasInitialized = _mixerManager.SpeakerIsInitialized();
    765 
    766   if (!wasInitialized && InitSpeaker() == -1) {
    767     // Cannot open the specified device
    768     available = false;
    769     return 0;
    770   }
    771 
    772   // Check if the selected microphone can record stereo
    773   //
    774   _mixerManager.StereoPlayoutIsAvailable(isAvailable);
    775   available = isAvailable;
    776 
    777   // Close the initialized input mixer
    778   //
    779   if (!wasInitialized) {
    780     _mixerManager.CloseSpeaker();
    781   }
    782 
    783   return 0;
    784 }
    785 
    786 int32_t AudioDeviceMac::SetStereoPlayout(bool enable) {
    787   if (enable)
    788     _playChannels = 2;
    789   else
    790     _playChannels = 1;
    791 
    792   return 0;
    793 }
    794 
    795 int32_t AudioDeviceMac::StereoPlayout(bool& enabled) const {
    796   if (_playChannels == 2)
    797     enabled = true;
    798   else
    799     enabled = false;
    800 
    801   return 0;
    802 }
    803 
// Stores the automatic-gain-control flag; always succeeds.
int32_t AudioDeviceMac::SetAGC(bool enable) {
  _AGC = enable;

  return 0;
}
    809 
// Returns the stored automatic-gain-control flag.
bool AudioDeviceMac::AGC() const {
  return _AGC;
}
    813 
    814 int32_t AudioDeviceMac::MicrophoneVolumeIsAvailable(bool& available) {
    815   bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
    816 
    817   // Make an attempt to open up the
    818   // input mixer corresponding to the currently selected output device.
    819   //
    820   if (!wasInitialized && InitMicrophone() == -1) {
    821     // If we end up here it means that the selected microphone has no volume
    822     // control.
    823     available = false;
    824     return 0;
    825   }
    826 
    827   // Given that InitMicrophone was successful, we know that a volume control
    828   // exists
    829   //
    830   available = true;
    831 
    832   // Close the initialized input mixer
    833   //
    834   if (!wasInitialized) {
    835     _mixerManager.CloseMicrophone();
    836   }
    837 
    838   return 0;
    839 }
    840 
    841 int32_t AudioDeviceMac::SetMicrophoneVolume(uint32_t volume) {
    842   return (_mixerManager.SetMicrophoneVolume(volume));
    843 }
    844 
    845 int32_t AudioDeviceMac::MicrophoneVolume(uint32_t& volume) const {
    846   uint32_t level(0);
    847 
    848   if (_mixerManager.MicrophoneVolume(level) == -1) {
    849     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
    850                  "  failed to retrive current microphone level");
    851     return -1;
    852   }
    853 
    854   volume = level;
    855   return 0;
    856 }
    857 
    858 int32_t AudioDeviceMac::MaxMicrophoneVolume(uint32_t& maxVolume) const {
    859   uint32_t maxVol(0);
    860 
    861   if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1) {
    862     return -1;
    863   }
    864 
    865   maxVolume = maxVol;
    866   return 0;
    867 }
    868 
    869 int32_t AudioDeviceMac::MinMicrophoneVolume(uint32_t& minVolume) const {
    870   uint32_t minVol(0);
    871 
    872   if (_mixerManager.MinMicrophoneVolume(minVol) == -1) {
    873     return -1;
    874   }
    875 
    876   minVolume = minVol;
    877   return 0;
    878 }
    879 
    880 int32_t AudioDeviceMac::MicrophoneVolumeStepSize(uint16_t& stepSize) const {
    881   uint16_t delta(0);
    882 
    883   if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1) {
    884     return -1;
    885   }
    886 
    887   stepSize = delta;
    888   return 0;
    889 }
    890 
    891 int16_t AudioDeviceMac::PlayoutDevices() {
    892   AudioDeviceID playDevices[MaxNumberDevices];
    893   return GetNumberDevices(kAudioDevicePropertyScopeOutput, playDevices,
    894                           MaxNumberDevices);
    895 }
    896 
    897 int32_t AudioDeviceMac::SetPlayoutDevice(uint16_t index) {
    898   CriticalSectionScoped lock(&_critSect);
    899 
    900   if (_playIsInitialized) {
    901     return -1;
    902   }
    903 
    904   AudioDeviceID playDevices[MaxNumberDevices];
    905   uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeOutput,
    906                                        playDevices, MaxNumberDevices);
    907   WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
    908                "  number of availiable waveform-audio output devices is %u",
    909                nDevices);
    910 
    911   if (index > (nDevices - 1)) {
    912     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
    913                  "  device index is out of range [0,%u]", (nDevices - 1));
    914     return -1;
    915   }
    916 
    917   _outputDeviceIndex = index;
    918   _outputDeviceIsSpecified = true;
    919 
    920   return 0;
    921 }
    922 
    923 int32_t AudioDeviceMac::SetPlayoutDevice(
    924     AudioDeviceModule::WindowsDeviceType /*device*/) {
    925   WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
    926                "WindowsDeviceType not supported");
    927   return -1;
    928 }
    929 
    930 int32_t AudioDeviceMac::PlayoutDeviceName(uint16_t index,
    931                                           char name[kAdmMaxDeviceNameSize],
    932                                           char guid[kAdmMaxGuidSize]) {
    933   const uint16_t nDevices(PlayoutDevices());
    934 
    935   if ((index > (nDevices - 1)) || (name == NULL)) {
    936     return -1;
    937   }
    938 
    939   memset(name, 0, kAdmMaxDeviceNameSize);
    940 
    941   if (guid != NULL) {
    942     memset(guid, 0, kAdmMaxGuidSize);
    943   }
    944 
    945   return GetDeviceName(kAudioDevicePropertyScopeOutput, index, name);
    946 }
    947 
    948 int32_t AudioDeviceMac::RecordingDeviceName(uint16_t index,
    949                                             char name[kAdmMaxDeviceNameSize],
    950                                             char guid[kAdmMaxGuidSize]) {
    951   const uint16_t nDevices(RecordingDevices());
    952 
    953   if ((index > (nDevices - 1)) || (name == NULL)) {
    954     return -1;
    955   }
    956 
    957   memset(name, 0, kAdmMaxDeviceNameSize);
    958 
    959   if (guid != NULL) {
    960     memset(guid, 0, kAdmMaxGuidSize);
    961   }
    962 
    963   return GetDeviceName(kAudioDevicePropertyScopeInput, index, name);
    964 }
    965 
    966 int16_t AudioDeviceMac::RecordingDevices() {
    967   AudioDeviceID recDevices[MaxNumberDevices];
    968   return GetNumberDevices(kAudioDevicePropertyScopeInput, recDevices,
    969                           MaxNumberDevices);
    970 }
    971 
    972 int32_t AudioDeviceMac::SetRecordingDevice(uint16_t index) {
    973   if (_recIsInitialized) {
    974     return -1;
    975   }
    976 
    977   AudioDeviceID recDevices[MaxNumberDevices];
    978   uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeInput,
    979                                        recDevices, MaxNumberDevices);
    980   WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
    981                "  number of availiable waveform-audio input devices is %u",
    982                nDevices);
    983 
    984   if (index > (nDevices - 1)) {
    985     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
    986                  "  device index is out of range [0,%u]", (nDevices - 1));
    987     return -1;
    988   }
    989 
    990   _inputDeviceIndex = index;
    991   _inputDeviceIsSpecified = true;
    992 
    993   return 0;
    994 }
    995 
    996 int32_t AudioDeviceMac::SetRecordingDevice(
    997     AudioDeviceModule::WindowsDeviceType /*device*/) {
    998   WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
    999                "WindowsDeviceType not supported");
   1000   return -1;
   1001 }
   1002 
   1003 int32_t AudioDeviceMac::PlayoutIsAvailable(bool& available) {
   1004   available = true;
   1005 
   1006   // Try to initialize the playout side
   1007   if (InitPlayout() == -1) {
   1008     available = false;
   1009   }
   1010 
   1011   // We destroy the IOProc created by InitPlayout() in implDeviceIOProc().
   1012   // We must actually start playout here in order to have the IOProc
   1013   // deleted by calling StopPlayout().
   1014   if (StartPlayout() == -1) {
   1015     available = false;
   1016   }
   1017 
   1018   // Cancel effect of initialization
   1019   if (StopPlayout() == -1) {
   1020     available = false;
   1021   }
   1022 
   1023   return 0;
   1024 }
   1025 
   1026 int32_t AudioDeviceMac::RecordingIsAvailable(bool& available) {
   1027   available = true;
   1028 
   1029   // Try to initialize the recording side
   1030   if (InitRecording() == -1) {
   1031     available = false;
   1032   }
   1033 
   1034   // We destroy the IOProc created by InitRecording() in implInDeviceIOProc().
   1035   // We must actually start recording here in order to have the IOProc
   1036   // deleted by calling StopRecording().
   1037   if (StartRecording() == -1) {
   1038     available = false;
   1039   }
   1040 
   1041   // Cancel effect of initialization
   1042   if (StopRecording() == -1) {
   1043     available = false;
   1044   }
   1045 
   1046   return 0;
   1047 }
   1048 
// Prepares the selected output device for playout: validates its stream
// format, configures the desired playout format/converter, installs
// property listeners, and (when needed) creates the render IOProc.
// Idempotent: returns 0 immediately if already initialized. Returns -1 if
// playout is running, no output device was selected, or a CoreAudio call
// fails. Caller-visible state set on success: _playIsInitialized.
int32_t AudioDeviceMac::InitPlayout() {
  CriticalSectionScoped lock(&_critSect);

  // Cannot (re)initialize while playout is active.
  if (_playing) {
    return -1;
  }

  // SetPlayoutDevice() must have been called first.
  if (!_outputDeviceIsSpecified) {
    return -1;
  }

  // Already initialized; nothing to do.
  if (_playIsInitialized) {
    return 0;
  }

  // Initialize the speaker (devices might have been added or removed)
  if (InitSpeaker() == -1) {
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 "  InitSpeaker() failed");
  }

  if (!MicrophoneIsInitialized()) {
    // Make this call to check if we are using
    // one or two devices (_twoDevices)
    bool available = false;
    if (MicrophoneIsAvailable(available) == -1) {
      WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                   "  MicrophoneIsAvailable() failed");
    }
  }

  // Discard any stale samples left in the render ring buffer.
  PaUtil_FlushRingBuffer(_paRenderBuffer);

  // Reset render-side bookkeeping before the device is (re)started.
  OSStatus err = noErr;
  UInt32 size = 0;
  _renderDelayOffsetSamples = 0;
  _renderDelayUs = 0;
  _renderLatencyUs = 0;
  _renderDeviceIsAlive = 1;
  _doStop = false;

  // The internal microphone of a MacBook Pro is located under the left speaker
  // grille. When the internal speakers are in use, we want to fully stereo
  // pan to the right.
  AudioObjectPropertyAddress propertyAddress = {
      kAudioDevicePropertyDataSource, kAudioDevicePropertyScopeOutput, 0};
  if (_macBookPro) {
    _macBookProPanRight = false;
    Boolean hasProperty =
        AudioObjectHasProperty(_outputDeviceID, &propertyAddress);
    if (hasProperty) {
      UInt32 dataSource = 0;
      size = sizeof(dataSource);
      WEBRTC_CA_LOG_WARN(AudioObjectGetPropertyData(
          _outputDeviceID, &propertyAddress, 0, NULL, &size, &dataSource));

      // 'ispk' is the four-char data-source code for the internal speakers.
      if (dataSource == 'ispk') {
        _macBookProPanRight = true;
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                     "MacBook Pro using internal speakers; stereo"
                     " panning right");
      } else {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                     "MacBook Pro not using internal speakers");
      }

      // Add a listener to determine if the status changes.
      WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
          _outputDeviceID, &propertyAddress, &objectListenerProc, this));
    }
  }

  // Get current stream description
  propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
  memset(&_outStreamFormat, 0, sizeof(_outStreamFormat));
  size = sizeof(_outStreamFormat);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      _outputDeviceID, &propertyAddress, 0, NULL, &size, &_outStreamFormat));

  // Only linear PCM output is supported.
  if (_outStreamFormat.mFormatID != kAudioFormatLinearPCM) {
    logCAMsg(kTraceError, kTraceAudioDevice, _id,
             "Unacceptable output stream format -> mFormatID",
             (const char*)&_outStreamFormat.mFormatID);
    return -1;
  }

  if (_outStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) {
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                 "Too many channels on output device (mChannelsPerFrame = %d)",
                 _outStreamFormat.mChannelsPerFrame);
    return -1;
  }

  if (_outStreamFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved) {
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                 "Non-interleaved audio data is not supported.",
                 "AudioHardware streams should not have this format.");
    return -1;
  }

  // Log the device's native output format for diagnostics.
  WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Ouput stream format:");
  WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
               "mSampleRate = %f, mChannelsPerFrame = %u",
               _outStreamFormat.mSampleRate,
               _outStreamFormat.mChannelsPerFrame);
  WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
               "mBytesPerPacket = %u, mFramesPerPacket = %u",
               _outStreamFormat.mBytesPerPacket,
               _outStreamFormat.mFramesPerPacket);
  WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
               "mBytesPerFrame = %u, mBitsPerChannel = %u",
               _outStreamFormat.mBytesPerFrame,
               _outStreamFormat.mBitsPerChannel);
  WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "mFormatFlags = %u",
               _outStreamFormat.mFormatFlags);
  logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID",
           (const char*)&_outStreamFormat.mFormatID);

  // Our preferred format to work with.
  if (_outStreamFormat.mChannelsPerFrame < 2) {
    // Disable stereo playout when we only have one channel on the device.
    _playChannels = 1;
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 "Stereo playout unavailable on this device");
  }
  WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat());

  // Listen for format changes.
  propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectAddPropertyListener(
      _outputDeviceID, &propertyAddress, &objectListenerProc, this));

  // Listen for processor overloads.
  propertyAddress.mSelector = kAudioDeviceProcessorOverload;
  WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
      _outputDeviceID, &propertyAddress, &objectListenerProc, this));

  // With separate input/output devices, or when recording has not yet
  // created the shared IOProc, create our own render IOProc. Otherwise the
  // IOProc created by InitRecording() is shared.
  if (_twoDevices || !_recIsInitialized) {
    WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(
        _outputDeviceID, deviceIOProc, this, &_deviceIOProcID));
  }

  _playIsInitialized = true;

  return 0;
}
   1195 
// Prepares the selected input device for recording: validates its stream
// format, sets up the capture format/converter, sizes the device I/O
// buffer, measures capture latency, installs property listeners, and
// creates the capture IOProc when needed. Idempotent: returns 0 if already
// initialized. Returns -1 if recording is running, no input device was
// selected, or a CoreAudio call fails. Sets _recIsInitialized on success.
int32_t AudioDeviceMac::InitRecording() {
  CriticalSectionScoped lock(&_critSect);

  // Cannot (re)initialize while recording is active.
  if (_recording) {
    return -1;
  }

  // SetRecordingDevice() must have been called first.
  if (!_inputDeviceIsSpecified) {
    return -1;
  }

  // Already initialized; nothing to do.
  if (_recIsInitialized) {
    return 0;
  }

  // Initialize the microphone (devices might have been added or removed)
  if (InitMicrophone() == -1) {
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 "  InitMicrophone() failed");
  }

  if (!SpeakerIsInitialized()) {
    // Make this call to check if we are using
    // one or two devices (_twoDevices)
    bool available = false;
    if (SpeakerIsAvailable(available) == -1) {
      WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                   "  SpeakerIsAvailable() failed");
    }
  }

  OSStatus err = noErr;
  UInt32 size = 0;

  // Discard any stale samples left in the capture ring buffer.
  PaUtil_FlushRingBuffer(_paCaptureBuffer);

  // Reset capture-side bookkeeping before the device is (re)started.
  _captureDelayUs = 0;
  _captureLatencyUs = 0;
  _captureDeviceIsAlive = 1;
  _doStopRec = false;

  // Get current stream description
  AudioObjectPropertyAddress propertyAddress = {
      kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeInput, 0};
  memset(&_inStreamFormat, 0, sizeof(_inStreamFormat));
  size = sizeof(_inStreamFormat);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      _inputDeviceID, &propertyAddress, 0, NULL, &size, &_inStreamFormat));

  // Only linear PCM input is supported.
  if (_inStreamFormat.mFormatID != kAudioFormatLinearPCM) {
    logCAMsg(kTraceError, kTraceAudioDevice, _id,
             "Unacceptable input stream format -> mFormatID",
             (const char*)&_inStreamFormat.mFormatID);
    return -1;
  }

  if (_inStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) {
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                 "Too many channels on input device (mChannelsPerFrame = %d)",
                 _inStreamFormat.mChannelsPerFrame);
    return -1;
  }

  // One IO block is N_BLOCKS_IO * 10 ms of samples; it must fit in the
  // capture ring buffer or the IOProc would overrun it.
  const int io_block_size_samples = _inStreamFormat.mChannelsPerFrame *
                                    _inStreamFormat.mSampleRate / 100 *
                                    N_BLOCKS_IO;
  if (io_block_size_samples > _captureBufSizeSamples) {
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                 "Input IO block size (%d) is larger than ring buffer (%u)",
                 io_block_size_samples, _captureBufSizeSamples);
    return -1;
  }

  // Log the device's native input format for diagnostics.
  WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " Input stream format:");
  WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
               " mSampleRate = %f, mChannelsPerFrame = %u",
               _inStreamFormat.mSampleRate, _inStreamFormat.mChannelsPerFrame);
  WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
               " mBytesPerPacket = %u, mFramesPerPacket = %u",
               _inStreamFormat.mBytesPerPacket,
               _inStreamFormat.mFramesPerPacket);
  WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
               " mBytesPerFrame = %u, mBitsPerChannel = %u",
               _inStreamFormat.mBytesPerFrame, _inStreamFormat.mBitsPerChannel);
  WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " mFormatFlags = %u",
               _inStreamFormat.mFormatFlags);
  logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID",
           (const char*)&_inStreamFormat.mFormatID);

  // Our preferred format to work with
  if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) {
    _inDesiredFormat.mChannelsPerFrame = 2;
  } else {
    // Disable stereo recording when we only have one channel on the device.
    _inDesiredFormat.mChannelsPerFrame = 1;
    _recChannels = 1;
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 "Stereo recording unavailable on this device");
  }

  if (_ptrAudioBuffer) {
    // Update audio buffer with the selected parameters
    _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
    _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels);
  }

  // Desired capture format: 16-bit signed integer PCM, packed, at the
  // module's fixed recording sample rate.
  _inDesiredFormat.mSampleRate = N_REC_SAMPLES_PER_SEC;
  _inDesiredFormat.mBytesPerPacket =
      _inDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
  _inDesiredFormat.mFramesPerPacket = 1;
  _inDesiredFormat.mBytesPerFrame =
      _inDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
  _inDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8;

  _inDesiredFormat.mFormatFlags =
      kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
#ifdef WEBRTC_ARCH_BIG_ENDIAN
  _inDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
#endif
  _inDesiredFormat.mFormatID = kAudioFormatLinearPCM;

  // Converter from the device's native format to our desired format.
  WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&_inStreamFormat, &_inDesiredFormat,
                                            &_captureConverter));

  // First try to set buffer size to desired value (10 ms * N_BLOCKS_IO)
  // TODO(xians): investigate this block.
  UInt32 bufByteCount =
      (UInt32)((_inStreamFormat.mSampleRate / 1000.0) * 10.0 * N_BLOCKS_IO *
               _inStreamFormat.mChannelsPerFrame * sizeof(Float32));
  if (_inStreamFormat.mFramesPerPacket != 0) {
    // Round up to a whole number of packets.
    if (bufByteCount % _inStreamFormat.mFramesPerPacket != 0) {
      bufByteCount =
          ((UInt32)(bufByteCount / _inStreamFormat.mFramesPerPacket) + 1) *
          _inStreamFormat.mFramesPerPacket;
    }
  }

  // Ensure the buffer size is within the acceptable range provided by the
  // device.
  propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
  AudioValueRange range;
  size = sizeof(range);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      _inputDeviceID, &propertyAddress, 0, NULL, &size, &range));
  if (range.mMinimum > bufByteCount) {
    bufByteCount = range.mMinimum;
  } else if (range.mMaximum < bufByteCount) {
    bufByteCount = range.mMaximum;
  }

  propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
  size = sizeof(bufByteCount);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
      _inputDeviceID, &propertyAddress, 0, NULL, size, &bufByteCount));

  // Get capture device latency
  propertyAddress.mSelector = kAudioDevicePropertyLatency;
  UInt32 latency = 0;
  size = sizeof(UInt32);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      _inputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
  // Convert from frames to microseconds at the device sample rate.
  _captureLatencyUs = (UInt32)((1.0e6 * latency) / _inStreamFormat.mSampleRate);

  // Get capture stream latency
  propertyAddress.mSelector = kAudioDevicePropertyStreams;
  AudioStreamID stream = 0;
  size = sizeof(AudioStreamID);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      _inputDeviceID, &propertyAddress, 0, NULL, &size, &stream));
  propertyAddress.mSelector = kAudioStreamPropertyLatency;
  size = sizeof(UInt32);
  latency = 0;
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      _inputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
  // Total capture latency = device latency + stream latency.
  _captureLatencyUs +=
      (UInt32)((1.0e6 * latency) / _inStreamFormat.mSampleRate);

  // Listen for format changes
  // TODO(xians): should we be using kAudioDevicePropertyDeviceHasChanged?
  propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectAddPropertyListener(
      _inputDeviceID, &propertyAddress, &objectListenerProc, this));

  // Listen for processor overloads
  propertyAddress.mSelector = kAudioDeviceProcessorOverload;
  WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
      _inputDeviceID, &propertyAddress, &objectListenerProc, this));

  // Separate devices get a dedicated capture IOProc; a shared device reuses
  // the IOProc created by InitPlayout(), creating it here only when playout
  // has not already done so.
  if (_twoDevices) {
    WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(
        _inputDeviceID, inDeviceIOProc, this, &_inDeviceIOProcID));
  } else if (!_playIsInitialized) {
    WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(
        _inputDeviceID, deviceIOProc, this, &_deviceIOProcID));
  }

  // Mark recording side as initialized
  _recIsInitialized = true;

  return 0;
}
   1397 
// Starts audio capture: spawns the capture worker thread and starts the
// appropriate CoreAudio IOProc. Requires InitRecording() and the module's
// Init() to have succeeded. Idempotent if already recording.
// Returns 0 on success, -1 on failure.
int32_t AudioDeviceMac::StartRecording() {
  CriticalSectionScoped lock(&_critSect);

  if (!_recIsInitialized) {
    return -1;
  }

  // Already capturing; nothing to do.
  if (_recording) {
    return 0;
  }

  if (!_initialized) {
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                 " Recording worker thread has not been started");
    return -1;
  }

  // Launch the real-time worker thread that drains the capture ring buffer.
  RTC_DCHECK(!capture_worker_thread_.get());
  capture_worker_thread_.reset(
      new rtc::PlatformThread(RunCapture, this, "CaptureWorkerThread"));
  RTC_DCHECK(capture_worker_thread_.get());
  capture_worker_thread_->Start();
  capture_worker_thread_->SetPriority(rtc::kRealtimePriority);

  OSStatus err = noErr;
  // With separate devices, start the dedicated capture IOProc. With a
  // shared device, start it only if playout has not already started it.
  if (_twoDevices) {
    WEBRTC_CA_RETURN_ON_ERR(
        AudioDeviceStart(_inputDeviceID, _inDeviceIOProcID));
  } else if (!_playing) {
    WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_inputDeviceID, _deviceIOProcID));
  }

  _recording = true;

  return 0;
}
   1434 
// Stops audio capture and tears down the capture side: signals the IOProc
// to stop the device, stops the worker thread, disposes the converter and
// removes property listeners. Safe to call when not initialized (returns 0).
// Note: the lock is temporarily released while waiting on events and when
// joining the worker thread to avoid deadlocks with the IOProc.
int32_t AudioDeviceMac::StopRecording() {
  CriticalSectionScoped lock(&_critSect);

  if (!_recIsInitialized) {
    return 0;
  }

  OSStatus err = noErr;

  // Stop device
  int32_t captureDeviceIsAlive = AtomicGet32(&_captureDeviceIsAlive);
  if (_twoDevices) {
    // Dedicated capture device: ask the capture IOProc to stop itself and
    // wait for its confirmation; force-stop on timeout.
    if (_recording && captureDeviceIsAlive == 1) {
      _recording = false;
      _doStopRec = true;  // Signal to io proc to stop audio device
      _critSect.Leave();  // Cannot be under lock, risk of deadlock
      if (kEventTimeout == _stopEventRec.Wait(2000)) {
        CriticalSectionScoped critScoped(&_critSect);
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     " Timed out stopping the capture IOProc. "
                     "We may have failed to detect a device removal.");

        WEBRTC_CA_LOG_WARN(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID));
        WEBRTC_CA_LOG_WARN(
            AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
      }
      _critSect.Enter();
      _doStopRec = false;
      WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " Recording stopped");
    }
  } else {
    // We signal a stop for a shared device even when rendering has
    // not yet ended. This is to ensure the IOProc will return early as
    // intended (by checking |_recording|) before accessing
    // resources we free below (e.g. the capture converter).
    //
    // In the case of a shared devcie, the IOProc will verify
    // rendering has ended before stopping itself.
    if (_recording && captureDeviceIsAlive == 1) {
      _recording = false;
      _doStop = true;     // Signal to io proc to stop audio device
      _critSect.Leave();  // Cannot be under lock, risk of deadlock
      if (kEventTimeout == _stopEvent.Wait(2000)) {
        CriticalSectionScoped critScoped(&_critSect);
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     " Timed out stopping the shared IOProc. "
                     "We may have failed to detect a device removal.");

        // We assume rendering on a shared device has stopped as well if
        // the IOProc times out.
        WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
        WEBRTC_CA_LOG_WARN(
            AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
      }
      _critSect.Enter();
      _doStop = false;
      WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                   " Recording stopped (shared)");
    }
  }

  // Setting this signal will allow the worker thread to be stopped.
  AtomicSet32(&_captureDeviceIsAlive, 0);

  // Join the worker thread outside the lock; it may be blocked waiting for
  // data guarded by _critSect.
  if (capture_worker_thread_.get()) {
    _critSect.Leave();
    capture_worker_thread_->Stop();
    capture_worker_thread_.reset();
    _critSect.Enter();
  }

  WEBRTC_CA_LOG_WARN(AudioConverterDispose(_captureConverter));

  // Remove listeners.
  AudioObjectPropertyAddress propertyAddress = {
      kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeInput, 0};
  WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
      _inputDeviceID, &propertyAddress, &objectListenerProc, this));

  propertyAddress.mSelector = kAudioDeviceProcessorOverload;
  WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
      _inputDeviceID, &propertyAddress, &objectListenerProc, this));

  _recIsInitialized = false;
  _recording = false;

  return 0;
}
   1523 
   1524 bool AudioDeviceMac::RecordingIsInitialized() const {
   1525   return (_recIsInitialized);
   1526 }
   1527 
   1528 bool AudioDeviceMac::Recording() const {
   1529   return (_recording);
   1530 }
   1531 
   1532 bool AudioDeviceMac::PlayoutIsInitialized() const {
   1533   return (_playIsInitialized);
   1534 }
   1535 
// Starts audio playout: spawns the render worker thread and starts the
// output IOProc. Requires InitPlayout() to have succeeded. Idempotent if
// already playing. Returns 0 on success, -1 on failure.
int32_t AudioDeviceMac::StartPlayout() {
  CriticalSectionScoped lock(&_critSect);

  if (!_playIsInitialized) {
    return -1;
  }

  // Already playing; nothing to do.
  if (_playing) {
    return 0;
  }

  // Launch the real-time worker thread that fills the render ring buffer.
  RTC_DCHECK(!render_worker_thread_.get());
  render_worker_thread_.reset(
      new rtc::PlatformThread(RunRender, this, "RenderWorkerThread"));
  render_worker_thread_->Start();
  render_worker_thread_->SetPriority(rtc::kRealtimePriority);

  // With separate devices, always start the output IOProc. With a shared
  // device, start it only if recording has not already started it.
  if (_twoDevices || !_recording) {
    OSStatus err = noErr;
    WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_outputDeviceID, _deviceIOProcID));
  }
  _playing = true;

  return 0;
}
   1561 
   1562 int32_t AudioDeviceMac::StopPlayout() {
   1563   CriticalSectionScoped lock(&_critSect);
   1564 
   1565   if (!_playIsInitialized) {
   1566     return 0;
   1567   }
   1568 
   1569   OSStatus err = noErr;
   1570 
   1571   int32_t renderDeviceIsAlive = AtomicGet32(&_renderDeviceIsAlive);
   1572   if (_playing && renderDeviceIsAlive == 1) {
   1573     // We signal a stop for a shared device even when capturing has not
   1574     // yet ended. This is to ensure the IOProc will return early as
   1575     // intended (by checking |_playing|) before accessing resources we
   1576     // free below (e.g. the render converter).
   1577     //
   1578     // In the case of a shared device, the IOProc will verify capturing
   1579     // has ended before stopping itself.
   1580     _playing = false;
   1581     _doStop = true;     // Signal to io proc to stop audio device
   1582     _critSect.Leave();  // Cannot be under lock, risk of deadlock
   1583     if (kEventTimeout == _stopEvent.Wait(2000)) {
   1584       CriticalSectionScoped critScoped(&_critSect);
   1585       WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
   1586                    " Timed out stopping the render IOProc. "
   1587                    "We may have failed to detect a device removal.");
   1588 
   1589       // We assume capturing on a shared device has stopped as well if the
   1590       // IOProc times out.
   1591       WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
   1592       WEBRTC_CA_LOG_WARN(
   1593           AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
   1594     }
   1595     _critSect.Enter();
   1596     _doStop = false;
   1597     WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, "Playout stopped");
   1598   }
   1599 
   1600   // Setting this signal will allow the worker thread to be stopped.
   1601   AtomicSet32(&_renderDeviceIsAlive, 0);
   1602   if (render_worker_thread_.get()) {
   1603     _critSect.Leave();
   1604     render_worker_thread_->Stop();
   1605     render_worker_thread_.reset();
   1606     _critSect.Enter();
   1607   }
   1608 
   1609   WEBRTC_CA_LOG_WARN(AudioConverterDispose(_renderConverter));
   1610 
   1611   // Remove listeners.
   1612   AudioObjectPropertyAddress propertyAddress = {
   1613       kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeOutput, 0};
   1614   WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
   1615       _outputDeviceID, &propertyAddress, &objectListenerProc, this));
   1616 
   1617   propertyAddress.mSelector = kAudioDeviceProcessorOverload;
   1618   WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
   1619       _outputDeviceID, &propertyAddress, &objectListenerProc, this));
   1620 
   1621   if (_macBookPro) {
   1622     Boolean hasProperty =
   1623         AudioObjectHasProperty(_outputDeviceID, &propertyAddress);
   1624     if (hasProperty) {
   1625       propertyAddress.mSelector = kAudioDevicePropertyDataSource;
   1626       WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
   1627           _outputDeviceID, &propertyAddress, &objectListenerProc, this));
   1628     }
   1629   }
   1630 
   1631   _playIsInitialized = false;
   1632   _playing = false;
   1633 
   1634   return 0;
   1635 }
   1636 
   1637 int32_t AudioDeviceMac::PlayoutDelay(uint16_t& delayMS) const {
   1638   int32_t renderDelayUs = AtomicGet32(&_renderDelayUs);
   1639   delayMS =
   1640       static_cast<uint16_t>(1e-3 * (renderDelayUs + _renderLatencyUs) + 0.5);
   1641   return 0;
   1642 }
   1643 
   1644 int32_t AudioDeviceMac::RecordingDelay(uint16_t& delayMS) const {
   1645   int32_t captureDelayUs = AtomicGet32(&_captureDelayUs);
   1646   delayMS =
   1647       static_cast<uint16_t>(1e-3 * (captureDelayUs + _captureLatencyUs) + 0.5);
   1648   return 0;
   1649 }
   1650 
   1651 bool AudioDeviceMac::Playing() const {
   1652   return (_playing);
   1653 }
   1654 
   1655 int32_t AudioDeviceMac::SetPlayoutBuffer(
   1656     const AudioDeviceModule::BufferType type,
   1657     uint16_t sizeMS) {
   1658   if (type != AudioDeviceModule::kFixedBufferSize) {
   1659     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
   1660                  " Adaptive buffer size not supported on this platform");
   1661     return -1;
   1662   }
   1663 
   1664   _playBufType = type;
   1665   _playBufDelayFixed = sizeMS;
   1666   return 0;
   1667 }
   1668 
   1669 int32_t AudioDeviceMac::PlayoutBuffer(AudioDeviceModule::BufferType& type,
   1670                                       uint16_t& sizeMS) const {
   1671   type = _playBufType;
   1672   sizeMS = _playBufDelayFixed;
   1673 
   1674   return 0;
   1675 }
   1676 
   1677 // Not implemented for Mac.
   1678 int32_t AudioDeviceMac::CPULoad(uint16_t& /*load*/) const {
   1679   WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
   1680                "  API call not supported on this platform");
   1681 
   1682   return -1;
   1683 }
   1684 
   1685 bool AudioDeviceMac::PlayoutWarning() const {
   1686   return (_playWarning > 0);
   1687 }
   1688 
   1689 bool AudioDeviceMac::PlayoutError() const {
   1690   return (_playError > 0);
   1691 }
   1692 
   1693 bool AudioDeviceMac::RecordingWarning() const {
   1694   return (_recWarning > 0);
   1695 }
   1696 
   1697 bool AudioDeviceMac::RecordingError() const {
   1698   return (_recError > 0);
   1699 }
   1700 
   1701 void AudioDeviceMac::ClearPlayoutWarning() {
   1702   _playWarning = 0;
   1703 }
   1704 
   1705 void AudioDeviceMac::ClearPlayoutError() {
   1706   _playError = 0;
   1707 }
   1708 
   1709 void AudioDeviceMac::ClearRecordingWarning() {
   1710   _recWarning = 0;
   1711 }
   1712 
   1713 void AudioDeviceMac::ClearRecordingError() {
   1714   _recError = 0;
   1715 }
   1716 
   1717 // ============================================================================
   1718 //                                 Private Methods
   1719 // ============================================================================
   1720 
   1721 int32_t AudioDeviceMac::GetNumberDevices(const AudioObjectPropertyScope scope,
   1722                                          AudioDeviceID scopedDeviceIds[],
   1723                                          const uint32_t deviceListLength) {
   1724   OSStatus err = noErr;
   1725 
   1726   AudioObjectPropertyAddress propertyAddress = {
   1727       kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal,
   1728       kAudioObjectPropertyElementMaster};
   1729   UInt32 size = 0;
   1730   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyDataSize(
   1731       kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size));
   1732   if (size == 0) {
   1733     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "No devices");
   1734     return 0;
   1735   }
   1736 
   1737   AudioDeviceID* deviceIds = (AudioDeviceID*)malloc(size);
   1738   UInt32 numberDevices = size / sizeof(AudioDeviceID);
   1739   AudioBufferList* bufferList = NULL;
   1740   UInt32 numberScopedDevices = 0;
   1741 
   1742   // First check if there is a default device and list it
   1743   UInt32 hardwareProperty = 0;
   1744   if (scope == kAudioDevicePropertyScopeOutput) {
   1745     hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice;
   1746   } else {
   1747     hardwareProperty = kAudioHardwarePropertyDefaultInputDevice;
   1748   }
   1749 
   1750   AudioObjectPropertyAddress propertyAddressDefault = {
   1751       hardwareProperty, kAudioObjectPropertyScopeGlobal,
   1752       kAudioObjectPropertyElementMaster};
   1753 
   1754   AudioDeviceID usedID;
   1755   UInt32 uintSize = sizeof(UInt32);
   1756   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
   1757                                                      &propertyAddressDefault, 0,
   1758                                                      NULL, &uintSize, &usedID));
   1759   if (usedID != kAudioDeviceUnknown) {
   1760     scopedDeviceIds[numberScopedDevices] = usedID;
   1761     numberScopedDevices++;
   1762   } else {
   1763     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
   1764                  "GetNumberDevices(): Default device unknown");
   1765   }
   1766 
   1767   // Then list the rest of the devices
   1768   bool listOK = true;
   1769 
   1770   WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(
   1771       kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, deviceIds));
   1772   if (err != noErr) {
   1773     listOK = false;
   1774   } else {
   1775     propertyAddress.mSelector = kAudioDevicePropertyStreamConfiguration;
   1776     propertyAddress.mScope = scope;
   1777     propertyAddress.mElement = 0;
   1778     for (UInt32 i = 0; i < numberDevices; i++) {
   1779       // Check for input channels
   1780       WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyDataSize(
   1781           deviceIds[i], &propertyAddress, 0, NULL, &size));
   1782       if (err == kAudioHardwareBadDeviceError) {
   1783         // This device doesn't actually exist; continue iterating.
   1784         continue;
   1785       } else if (err != noErr) {
   1786         listOK = false;
   1787         break;
   1788       }
   1789 
   1790       bufferList = (AudioBufferList*)malloc(size);
   1791       WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(
   1792           deviceIds[i], &propertyAddress, 0, NULL, &size, bufferList));
   1793       if (err != noErr) {
   1794         listOK = false;
   1795         break;
   1796       }
   1797 
   1798       if (bufferList->mNumberBuffers > 0) {
   1799         if (numberScopedDevices >= deviceListLength) {
   1800           WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
   1801                        "Device list is not long enough");
   1802           listOK = false;
   1803           break;
   1804         }
   1805 
   1806         scopedDeviceIds[numberScopedDevices] = deviceIds[i];
   1807         numberScopedDevices++;
   1808       }
   1809 
   1810       free(bufferList);
   1811       bufferList = NULL;
   1812     }  // for
   1813   }
   1814 
   1815   if (!listOK) {
   1816     if (deviceIds) {
   1817       free(deviceIds);
   1818       deviceIds = NULL;
   1819     }
   1820 
   1821     if (bufferList) {
   1822       free(bufferList);
   1823       bufferList = NULL;
   1824     }
   1825 
   1826     return -1;
   1827   }
   1828 
   1829   // Happy ending
   1830   if (deviceIds) {
   1831     free(deviceIds);
   1832     deviceIds = NULL;
   1833   }
   1834 
   1835   return numberScopedDevices;
   1836 }
   1837 
   1838 int32_t AudioDeviceMac::GetDeviceName(const AudioObjectPropertyScope scope,
   1839                                       const uint16_t index,
   1840                                       char* name) {
   1841   OSStatus err = noErr;
   1842   UInt32 len = kAdmMaxDeviceNameSize;
   1843   AudioDeviceID deviceIds[MaxNumberDevices];
   1844 
   1845   int numberDevices = GetNumberDevices(scope, deviceIds, MaxNumberDevices);
   1846   if (numberDevices < 0) {
   1847     return -1;
   1848   } else if (numberDevices == 0) {
   1849     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "No devices");
   1850     return -1;
   1851   }
   1852 
   1853   // If the number is below the number of devices, assume it's "WEBRTC ID"
   1854   // otherwise assume it's a CoreAudio ID
   1855   AudioDeviceID usedID;
   1856 
   1857   // Check if there is a default device
   1858   bool isDefaultDevice = false;
   1859   if (index == 0) {
   1860     UInt32 hardwareProperty = 0;
   1861     if (scope == kAudioDevicePropertyScopeOutput) {
   1862       hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice;
   1863     } else {
   1864       hardwareProperty = kAudioHardwarePropertyDefaultInputDevice;
   1865     }
   1866     AudioObjectPropertyAddress propertyAddress = {
   1867         hardwareProperty, kAudioObjectPropertyScopeGlobal,
   1868         kAudioObjectPropertyElementMaster};
   1869     UInt32 size = sizeof(UInt32);
   1870     WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
   1871         kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, &usedID));
   1872     if (usedID == kAudioDeviceUnknown) {
   1873       WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
   1874                    "GetDeviceName(): Default device unknown");
   1875     } else {
   1876       isDefaultDevice = true;
   1877     }
   1878   }
   1879 
   1880   AudioObjectPropertyAddress propertyAddress = {kAudioDevicePropertyDeviceName,
   1881                                                 scope, 0};
   1882 
   1883   if (isDefaultDevice) {
   1884     char devName[len];
   1885 
   1886     WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(usedID, &propertyAddress,
   1887                                                        0, NULL, &len, devName));
   1888 
   1889     sprintf(name, "default (%s)", devName);
   1890   } else {
   1891     if (index < numberDevices) {
   1892       usedID = deviceIds[index];
   1893     } else {
   1894       usedID = index;
   1895     }
   1896 
   1897     WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(usedID, &propertyAddress,
   1898                                                        0, NULL, &len, name));
   1899   }
   1900 
   1901   return 0;
   1902 }
   1903 
   1904 int32_t AudioDeviceMac::InitDevice(const uint16_t userDeviceIndex,
   1905                                    AudioDeviceID& deviceId,
   1906                                    const bool isInput) {
   1907   OSStatus err = noErr;
   1908   UInt32 size = 0;
   1909   AudioObjectPropertyScope deviceScope;
   1910   AudioObjectPropertySelector defaultDeviceSelector;
   1911   AudioDeviceID deviceIds[MaxNumberDevices];
   1912 
   1913   if (isInput) {
   1914     deviceScope = kAudioDevicePropertyScopeInput;
   1915     defaultDeviceSelector = kAudioHardwarePropertyDefaultInputDevice;
   1916   } else {
   1917     deviceScope = kAudioDevicePropertyScopeOutput;
   1918     defaultDeviceSelector = kAudioHardwarePropertyDefaultOutputDevice;
   1919   }
   1920 
   1921   AudioObjectPropertyAddress propertyAddress = {
   1922       defaultDeviceSelector, kAudioObjectPropertyScopeGlobal,
   1923       kAudioObjectPropertyElementMaster};
   1924 
   1925   // Get the actual device IDs
   1926   int numberDevices =
   1927       GetNumberDevices(deviceScope, deviceIds, MaxNumberDevices);
   1928   if (numberDevices < 0) {
   1929     return -1;
   1930   } else if (numberDevices == 0) {
   1931     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
   1932                  "InitDevice(): No devices");
   1933     return -1;
   1934   }
   1935 
   1936   bool isDefaultDevice = false;
   1937   deviceId = kAudioDeviceUnknown;
   1938   if (userDeviceIndex == 0) {
   1939     // Try to use default system device
   1940     size = sizeof(AudioDeviceID);
   1941     WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
   1942         kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, &deviceId));
   1943     if (deviceId == kAudioDeviceUnknown) {
   1944       WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
   1945                    " No default device exists");
   1946     } else {
   1947       isDefaultDevice = true;
   1948     }
   1949   }
   1950 
   1951   if (!isDefaultDevice) {
   1952     deviceId = deviceIds[userDeviceIndex];
   1953   }
   1954 
   1955   // Obtain device name and manufacturer for logging.
   1956   // Also use this as a test to ensure a user-set device ID is valid.
   1957   char devName[128];
   1958   char devManf[128];
   1959   memset(devName, 0, sizeof(devName));
   1960   memset(devManf, 0, sizeof(devManf));
   1961 
   1962   propertyAddress.mSelector = kAudioDevicePropertyDeviceName;
   1963   propertyAddress.mScope = deviceScope;
   1964   propertyAddress.mElement = 0;
   1965   size = sizeof(devName);
   1966   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, &propertyAddress,
   1967                                                      0, NULL, &size, devName));
   1968 
   1969   propertyAddress.mSelector = kAudioDevicePropertyDeviceManufacturer;
   1970   size = sizeof(devManf);
   1971   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, &propertyAddress,
   1972                                                      0, NULL, &size, devManf));
   1973 
   1974   if (isInput) {
   1975     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " Input device: %s %s",
   1976                  devManf, devName);
   1977   } else {
   1978     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " Output device: %s %s",
   1979                  devManf, devName);
   1980   }
   1981 
   1982   return 0;
   1983 }
   1984 
// Configures our preferred 16-bit linear-PCM playout format
// (_outDesiredFormat), creates the render AudioConverter, negotiates the
// device IO buffer size from _playBufDelayFixed, and accumulates the device
// and stream latencies into _renderLatencyUs. Returns 0 on success; -1 on
// any CoreAudio error (via WEBRTC_CA_RETURN_ON_ERR).
OSStatus AudioDeviceMac::SetDesiredPlayoutFormat() {
  // Our preferred format to work with.
  _outDesiredFormat.mSampleRate = N_PLAY_SAMPLES_PER_SEC;
  _outDesiredFormat.mChannelsPerFrame = _playChannels;

  if (_ptrAudioBuffer) {
    // Update audio buffer with the selected parameters.
    _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
    _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels);
  }

  // Samples the ring buffer holds beyond what the IO engine consumes per
  // cycle; used as an offset when the render delay is computed.
  _renderDelayOffsetSamples = _renderBufSizeSamples -
                              N_BUFFERS_OUT * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES *
                                  _outDesiredFormat.mChannelsPerFrame;

  _outDesiredFormat.mBytesPerPacket =
      _outDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
  // In uncompressed audio, a packet is one frame.
  _outDesiredFormat.mFramesPerPacket = 1;
  _outDesiredFormat.mBytesPerFrame =
      _outDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
  _outDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8;

  _outDesiredFormat.mFormatFlags =
      kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
#ifdef WEBRTC_ARCH_BIG_ENDIAN
  _outDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
#endif
  _outDesiredFormat.mFormatID = kAudioFormatLinearPCM;

  OSStatus err = noErr;
  // The converter translates from our desired format to the device's native
  // output stream format (_outStreamFormat).
  WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(
      &_outDesiredFormat, &_outStreamFormat, &_renderConverter));

  // Try to set buffer size to desired value (_playBufDelayFixed).
  UInt32 bufByteCount = static_cast<UInt32>(
      (_outStreamFormat.mSampleRate / 1000.0) * _playBufDelayFixed *
      _outStreamFormat.mChannelsPerFrame * sizeof(Float32));
  if (_outStreamFormat.mFramesPerPacket != 0) {
    // Round the byte count up to a whole number of packets.
    if (bufByteCount % _outStreamFormat.mFramesPerPacket != 0) {
      bufByteCount = (static_cast<UInt32>(bufByteCount /
                                          _outStreamFormat.mFramesPerPacket) +
                      1) *
                     _outStreamFormat.mFramesPerPacket;
    }
  }

  // Ensure the buffer size is within the range provided by the device.
  // NOTE: the selector used to initialize this address is immediately
  // overwritten below; only the scope/element fields are actually reused.
  AudioObjectPropertyAddress propertyAddress = {
      kAudioDevicePropertyDataSource, kAudioDevicePropertyScopeOutput, 0};
  propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
  AudioValueRange range;
  UInt32 size = sizeof(range);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      _outputDeviceID, &propertyAddress, 0, NULL, &size, &range));
  if (range.mMinimum > bufByteCount) {
    bufByteCount = range.mMinimum;
  } else if (range.mMaximum < bufByteCount) {
    bufByteCount = range.mMaximum;
  }

  propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
  size = sizeof(bufByteCount);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
      _outputDeviceID, &propertyAddress, 0, NULL, size, &bufByteCount));

  // Get render device latency.
  propertyAddress.mSelector = kAudioDevicePropertyLatency;
  UInt32 latency = 0;
  size = sizeof(UInt32);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      _outputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
  // Convert from frames at the device sample rate to microseconds.
  _renderLatencyUs =
      static_cast<uint32_t>((1.0e6 * latency) / _outStreamFormat.mSampleRate);

  // Get render stream latency.
  // Only the first stream's ID is fetched (size = one AudioStreamID).
  propertyAddress.mSelector = kAudioDevicePropertyStreams;
  AudioStreamID stream = 0;
  size = sizeof(AudioStreamID);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      _outputDeviceID, &propertyAddress, 0, NULL, &size, &stream));
  propertyAddress.mSelector = kAudioStreamPropertyLatency;
  size = sizeof(UInt32);
  latency = 0;
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      _outputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
  // Stream latency adds on top of the device latency.
  _renderLatencyUs +=
      static_cast<uint32_t>((1.0e6 * latency) / _outStreamFormat.mSampleRate);

  WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
               "  initial playout status: _renderDelayOffsetSamples=%d,"
               " _renderDelayUs=%d, _renderLatencyUs=%d",
               _renderDelayOffsetSamples, _renderDelayUs, _renderLatencyUs);
  return 0;
}
   2080 
   2081 OSStatus AudioDeviceMac::objectListenerProc(
   2082     AudioObjectID objectId,
   2083     UInt32 numberAddresses,
   2084     const AudioObjectPropertyAddress addresses[],
   2085     void* clientData) {
   2086   AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData;
   2087   RTC_DCHECK(ptrThis != NULL);
   2088 
   2089   ptrThis->implObjectListenerProc(objectId, numberAddresses, addresses);
   2090 
   2091   // AudioObjectPropertyListenerProc functions are supposed to return 0
   2092   return 0;
   2093 }
   2094 
   2095 OSStatus AudioDeviceMac::implObjectListenerProc(
   2096     const AudioObjectID objectId,
   2097     const UInt32 numberAddresses,
   2098     const AudioObjectPropertyAddress addresses[]) {
   2099   WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
   2100                "AudioDeviceMac::implObjectListenerProc()");
   2101 
   2102   for (UInt32 i = 0; i < numberAddresses; i++) {
   2103     if (addresses[i].mSelector == kAudioHardwarePropertyDevices) {
   2104       HandleDeviceChange();
   2105     } else if (addresses[i].mSelector == kAudioDevicePropertyStreamFormat) {
   2106       HandleStreamFormatChange(objectId, addresses[i]);
   2107     } else if (addresses[i].mSelector == kAudioDevicePropertyDataSource) {
   2108       HandleDataSourceChange(objectId, addresses[i]);
   2109     } else if (addresses[i].mSelector == kAudioDeviceProcessorOverload) {
   2110       HandleProcessorOverload(addresses[i]);
   2111     }
   2112   }
   2113 
   2114   return 0;
   2115 }
   2116 
// Called when the system device list changes (kAudioHardwarePropertyDevices).
// Checks whether the devices currently in use are still alive; when one has
// been removed, closes its mixer endpoint and raises _recError/_playError so
// the module's process thread can report the failure. Returns 0 on success,
// -1 on an unexpected CoreAudio error.
int32_t AudioDeviceMac::HandleDeviceChange() {
  OSStatus err = noErr;

  WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
               "kAudioHardwarePropertyDevices");

  // A device has changed. Check if our registered devices have been removed.
  // Ensure the devices have been initialized, meaning the IDs are valid.
  if (MicrophoneIsInitialized()) {
    AudioObjectPropertyAddress propertyAddress = {
        kAudioDevicePropertyDeviceIsAlive, kAudioDevicePropertyScopeInput, 0};
    UInt32 deviceIsAlive = 1;
    UInt32 size = sizeof(UInt32);
    err = AudioObjectGetPropertyData(_inputDeviceID, &propertyAddress, 0, NULL,
                                     &size, &deviceIsAlive);

    // kAudioHardwareBadDeviceError is expected when the device was just
    // unplugged; treat it the same as "not alive".
    if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) {
      WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                   "Capture device is not alive (probably removed)");
      AtomicSet32(&_captureDeviceIsAlive, 0);
      _mixerManager.CloseMicrophone();
      if (_recError == 1) {
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     "  pending recording error exists");
      }
      _recError = 1;  // triggers callback from module process thread
    } else if (err != noErr) {
      logCAMsg(kTraceError, kTraceAudioDevice, _id,
               "Error in AudioDeviceGetProperty()", (const char*)&err);
      return -1;
    }
  }

  // Same check for the render side.
  if (SpeakerIsInitialized()) {
    AudioObjectPropertyAddress propertyAddress = {
        kAudioDevicePropertyDeviceIsAlive, kAudioDevicePropertyScopeOutput, 0};
    UInt32 deviceIsAlive = 1;
    UInt32 size = sizeof(UInt32);
    err = AudioObjectGetPropertyData(_outputDeviceID, &propertyAddress, 0, NULL,
                                     &size, &deviceIsAlive);

    if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) {
      WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                   "Render device is not alive (probably removed)");
      AtomicSet32(&_renderDeviceIsAlive, 0);
      _mixerManager.CloseSpeaker();
      if (_playError == 1) {
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     "  pending playout error exists");
      }
      _playError = 1;  // triggers callback from module process thread
    } else if (err != noErr) {
      logCAMsg(kTraceError, kTraceAudioDevice, _id,
               "Error in AudioDeviceGetProperty()", (const char*)&err);
      return -1;
    }
  }

  return 0;
}
   2177 
// Called when a device's stream format changes. Validates the new format
// (must be linear PCM with no more than N_DEVICE_CHANNELS channels), logs
// it, then updates the cached in/out stream format and recreates the
// matching AudioConverter. Returns 0 on success or when the notification
// is for an unrelated device; -1 on an unacceptable format or CA error.
int32_t AudioDeviceMac::HandleStreamFormatChange(
    const AudioObjectID objectId,
    const AudioObjectPropertyAddress propertyAddress) {
  OSStatus err = noErr;

  WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, "Stream format changed");

  // Ignore notifications for devices we are not using.
  if (objectId != _inputDeviceID && objectId != _outputDeviceID) {
    return 0;
  }

  // Get the new device format
  AudioStreamBasicDescription streamFormat;
  UInt32 size = sizeof(streamFormat);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      objectId, &propertyAddress, 0, NULL, &size, &streamFormat));

  if (streamFormat.mFormatID != kAudioFormatLinearPCM) {
    logCAMsg(kTraceError, kTraceAudioDevice, _id,
             "Unacceptable input stream format -> mFormatID",
             (const char*)&streamFormat.mFormatID);
    return -1;
  }

  if (streamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) {
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                 "Too many channels on device (mChannelsPerFrame = %d)",
                 streamFormat.mChannelsPerFrame);
    return -1;
  }

  WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Stream format:");
  WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
               "mSampleRate = %f, mChannelsPerFrame = %u",
               streamFormat.mSampleRate, streamFormat.mChannelsPerFrame);
  WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
               "mBytesPerPacket = %u, mFramesPerPacket = %u",
               streamFormat.mBytesPerPacket, streamFormat.mFramesPerPacket);
  WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
               "mBytesPerFrame = %u, mBitsPerChannel = %u",
               streamFormat.mBytesPerFrame, streamFormat.mBitsPerChannel);
  WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "mFormatFlags = %u",
               streamFormat.mFormatFlags);
  logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID",
           (const char*)&streamFormat.mFormatID);

  if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) {
    // Input side: make sure one IO cycle's worth of samples still fits in
    // the capture ring buffer at the new rate/channel count.
    const int io_block_size_samples = streamFormat.mChannelsPerFrame *
                                      streamFormat.mSampleRate / 100 *
                                      N_BLOCKS_IO;
    if (io_block_size_samples > _captureBufSizeSamples) {
      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                   "Input IO block size (%d) is larger than ring buffer (%u)",
                   io_block_size_samples, _captureBufSizeSamples);
      return -1;
    }

    memcpy(&_inStreamFormat, &streamFormat, sizeof(streamFormat));

    if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) {
      _inDesiredFormat.mChannelsPerFrame = 2;
    } else {
      // Disable stereo recording when we only have one channel on the device.
      _inDesiredFormat.mChannelsPerFrame = 1;
      _recChannels = 1;
      WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                   "Stereo recording unavailable on this device");
    }

    if (_ptrAudioBuffer) {
      // Update audio buffer with the selected parameters
      _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
      _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels);
    }

    // Recreate the converter with the new format
    // TODO(xians): make this thread safe
    WEBRTC_CA_RETURN_ON_ERR(AudioConverterDispose(_captureConverter));

    WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&streamFormat, &_inDesiredFormat,
                                              &_captureConverter));
  } else {
    // Output side: cache the new format and rebuild the render converter
    // plus derived buffer/latency state.
    memcpy(&_outStreamFormat, &streamFormat, sizeof(streamFormat));

    // Our preferred format to work with
    if (_outStreamFormat.mChannelsPerFrame < 2) {
      _playChannels = 1;
      WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                   "Stereo playout unavailable on this device");
    }
    WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat());
  }
  return 0;
}
   2272 
// Called when a device's data source changes (e.g. output rerouted between
// internal speakers and headphones). Only acted on for MacBook Pro output:
// when the internal speakers are in use, _macBookProPanRight is set so
// rendering pans the stereo signal right (see the log message below).
int32_t AudioDeviceMac::HandleDataSourceChange(
    const AudioObjectID objectId,
    const AudioObjectPropertyAddress propertyAddress) {
  OSStatus err = noErr;

  if (_macBookPro &&
      propertyAddress.mScope == kAudioDevicePropertyScopeOutput) {
    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, "Data source changed");

    _macBookProPanRight = false;
    UInt32 dataSource = 0;
    UInt32 size = sizeof(UInt32);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
        objectId, &propertyAddress, 0, NULL, &size, &dataSource));
    // 'ispk' is the four-char data-source code for the internal speakers.
    if (dataSource == 'ispk') {
      _macBookProPanRight = true;
      WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                   "MacBook Pro using internal speakers; stereo panning right");
    } else {
      WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                   "MacBook Pro not using internal speakers");
    }
  }

  return 0;
}
   2299 int32_t AudioDeviceMac::HandleProcessorOverload(
   2300     const AudioObjectPropertyAddress propertyAddress) {
   2301   // TODO(xians): we probably want to notify the user in some way of the
   2302   // overload. However, the Windows interpretations of these errors seem to
   2303   // be more severe than what ProcessorOverload is thrown for.
   2304   //
   2305   // We don't log the notification, as it's sent from the HAL's IO thread. We
   2306   // don't want to slow it down even further.
   2307   if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) {
   2308     // WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "Capture processor
   2309     // overload");
   2310     //_callback->ProblemIsReported(
   2311     // SndCardStreamObserver::ERecordingProblem);
   2312   } else {
   2313     // WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
   2314     // "Render processor overload");
   2315     //_callback->ProblemIsReported(
   2316     // SndCardStreamObserver::EPlaybackProblem);
   2317   }
   2318 
   2319   return 0;
   2320 }
   2321 
   2322 // ============================================================================
   2323 //                                  Thread Methods
   2324 // ============================================================================
   2325 
   2326 OSStatus AudioDeviceMac::deviceIOProc(AudioDeviceID,
   2327                                       const AudioTimeStamp*,
   2328                                       const AudioBufferList* inputData,
   2329                                       const AudioTimeStamp* inputTime,
   2330                                       AudioBufferList* outputData,
   2331                                       const AudioTimeStamp* outputTime,
   2332                                       void* clientData) {
   2333   AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData;
   2334   RTC_DCHECK(ptrThis != NULL);
   2335 
   2336   ptrThis->implDeviceIOProc(inputData, inputTime, outputData, outputTime);
   2337 
   2338   // AudioDeviceIOProc functions are supposed to return 0
   2339   return 0;
   2340 }
   2341 
   2342 OSStatus AudioDeviceMac::outConverterProc(AudioConverterRef,
   2343                                           UInt32* numberDataPackets,
   2344                                           AudioBufferList* data,
   2345                                           AudioStreamPacketDescription**,
   2346                                           void* userData) {
   2347   AudioDeviceMac* ptrThis = (AudioDeviceMac*)userData;
   2348   RTC_DCHECK(ptrThis != NULL);
   2349 
   2350   return ptrThis->implOutConverterProc(numberDataPackets, data);
   2351 }
   2352 
   2353 OSStatus AudioDeviceMac::inDeviceIOProc(AudioDeviceID,
   2354                                         const AudioTimeStamp*,
   2355                                         const AudioBufferList* inputData,
   2356                                         const AudioTimeStamp* inputTime,
   2357                                         AudioBufferList*,
   2358                                         const AudioTimeStamp*,
   2359                                         void* clientData) {
   2360   AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData;
   2361   RTC_DCHECK(ptrThis != NULL);
   2362 
   2363   ptrThis->implInDeviceIOProc(inputData, inputTime);
   2364 
   2365   // AudioDeviceIOProc functions are supposed to return 0
   2366   return 0;
   2367 }
   2368 
   2369 OSStatus AudioDeviceMac::inConverterProc(
   2370     AudioConverterRef,
   2371     UInt32* numberDataPackets,
   2372     AudioBufferList* data,
   2373     AudioStreamPacketDescription** /*dataPacketDescription*/,
   2374     void* userData) {
   2375   AudioDeviceMac* ptrThis = static_cast<AudioDeviceMac*>(userData);
   2376   RTC_DCHECK(ptrThis != NULL);
   2377 
   2378   return ptrThis->implInConverterProc(numberDataPackets, data);
   2379 }
   2380 
// Per-callback body of the output (or shared input+output) device IOProc.
// Runs on the HAL's real-time I/O thread: it drives capture on a shared
// device, services the deferred stop request, fills |outputData| via the
// render converter, and publishes an updated render-delay estimate.
// Returns 0 on success, 1 if the converter fill failed.
OSStatus AudioDeviceMac::implDeviceIOProc(const AudioBufferList* inputData,
                                          const AudioTimeStamp* inputTime,
                                          AudioBufferList* outputData,
                                          const AudioTimeStamp* outputTime) {
  OSStatus err = noErr;
  // Host-clock timestamps in nanoseconds; |outputTime| is when this buffer
  // will actually hit the device, used for the delay estimate below.
  // NOTE(review): outputTimeNs - nowNs is computed on unsigned values and
  // assumes the output timestamp lies in the future — confirm.
  UInt64 outputTimeNs = AudioConvertHostTimeToNanos(outputTime->mHostTime);
  UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());

  // On a shared device a single IOProc serves both directions, so feed the
  // capture path from here as well.
  if (!_twoDevices && _recording) {
    implInDeviceIOProc(inputData, inputTime);
  }

  // Check if we should close down audio device
  // Double-checked locking optimization to remove locking overhead
  if (_doStop) {
    _critSect.Enter();
    if (_doStop) {
      if (_twoDevices || (!_recording && !_playing)) {
        // In the case of a shared device, the single driving ioProc
        // is stopped here
        WEBRTC_CA_LOG_ERR(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
        WEBRTC_CA_LOG_WARN(
            AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
        if (err == noErr) {
          WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                       " Playout or shared device stopped");
        }
      }

      _doStop = false;
      // Unblocks the control thread waiting for the stop handshake.
      _stopEvent.Set();
      _critSect.Leave();
      return 0;
    }
    _critSect.Leave();
  }

  if (!_playing) {
    // This can be the case when a shared device is capturing but not
    // rendering. We allow the checks above before returning to avoid a
    // timeout when capturing is stopped.
    return 0;
  }

  RTC_DCHECK(_outStreamFormat.mBytesPerFrame != 0);
  // Number of device-format frames requested for this callback.
  UInt32 size =
      outputData->mBuffers->mDataByteSize / _outStreamFormat.mBytesPerFrame;

  // Pull converted samples into |outputData|; outConverterProc supplies the
  // converter's input from the render ring buffer.
  // TODO(xians): signal an error somehow?
  err = AudioConverterFillComplexBuffer(_renderConverter, outConverterProc,
                                        this, &size, outputData, NULL);
  if (err != noErr) {
    if (err == 1) {
      // This is our own error.
      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                   " Error in AudioConverterFillComplexBuffer()");
      return 1;
    } else {
      logCAMsg(kTraceError, kTraceAudioDevice, _id,
               "Error in AudioConverterFillComplexBuffer()", (const char*)&err);
      return 1;
    }
  }

  // Samples still queued for playout; they add to the perceived delay.
  PaRingBufferSize bufSizeSamples =
      PaUtil_GetRingBufferReadAvailable(_paRenderBuffer);

  // Delay = time until the buffer is rendered (ns -> us via * 1e-3) plus the
  // ring-buffer backlog converted from samples to microseconds.
  int32_t renderDelayUs =
      static_cast<int32_t>(1e-3 * (outputTimeNs - nowNs) + 0.5);
  renderDelayUs += static_cast<int32_t>(
      (1.0e6 * bufSizeSamples) / _outDesiredFormat.mChannelsPerFrame /
          _outDesiredFormat.mSampleRate +
      0.5);

  // Published atomically; read from the capture worker thread.
  AtomicSet32(&_renderDelayUs, renderDelayUs);

  return 0;
}
   2459 
   2460 OSStatus AudioDeviceMac::implOutConverterProc(UInt32* numberDataPackets,
   2461                                               AudioBufferList* data) {
   2462   RTC_DCHECK(data->mNumberBuffers == 1);
   2463   PaRingBufferSize numSamples =
   2464       *numberDataPackets * _outDesiredFormat.mChannelsPerFrame;
   2465 
   2466   data->mBuffers->mNumberChannels = _outDesiredFormat.mChannelsPerFrame;
   2467   // Always give the converter as much as it wants, zero padding as required.
   2468   data->mBuffers->mDataByteSize =
   2469       *numberDataPackets * _outDesiredFormat.mBytesPerPacket;
   2470   data->mBuffers->mData = _renderConvertData;
   2471   memset(_renderConvertData, 0, sizeof(_renderConvertData));
   2472 
   2473   PaUtil_ReadRingBuffer(_paRenderBuffer, _renderConvertData, numSamples);
   2474 
   2475   kern_return_t kernErr = semaphore_signal_all(_renderSemaphore);
   2476   if (kernErr != KERN_SUCCESS) {
   2477     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
   2478                  " semaphore_signal_all() error: %d", kernErr);
   2479     return 1;
   2480   }
   2481 
   2482   return 0;
   2483 }
   2484 
// Per-callback body of the input device IOProc (also invoked from
// implDeviceIOProc when one device serves both directions). Runs on the
// HAL's real-time I/O thread: it services the deferred recording-stop
// request, publishes a capture-delay estimate, and copies the captured
// samples into the capture ring buffer for the worker thread.
OSStatus AudioDeviceMac::implInDeviceIOProc(const AudioBufferList* inputData,
                                            const AudioTimeStamp* inputTime) {
  OSStatus err = noErr;
  // Host-clock timestamps in nanoseconds; |inputTime| is when the samples
  // were captured, so now - inputTime is latency already accrued.
  UInt64 inputTimeNs = AudioConvertHostTimeToNanos(inputTime->mHostTime);
  UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());

  // Check if we should close down audio device
  // Double-checked locking optimization to remove locking overhead
  if (_doStopRec) {
    _critSect.Enter();
    if (_doStopRec) {
      // This will be signalled only when a shared device is not in use.
      WEBRTC_CA_LOG_ERR(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID));
      WEBRTC_CA_LOG_WARN(
          AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
      if (err == noErr) {
        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     " Recording device stopped");
      }

      _doStopRec = false;
      // Unblocks the control thread waiting for the stop handshake.
      _stopEventRec.Set();
      _critSect.Leave();
      return 0;
    }
    _critSect.Leave();
  }

  if (!_recording) {
    // Allow above checks to avoid a timeout on stopping capture.
    return 0;
  }

  // Samples already buffered but not yet consumed by the worker thread;
  // they add to the capture delay.
  PaRingBufferSize bufSizeSamples =
      PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer);

  // Delay = age of this input buffer (ns -> us via * 1e-3) plus the
  // ring-buffer backlog converted from samples to microseconds.
  int32_t captureDelayUs =
      static_cast<int32_t>(1e-3 * (nowNs - inputTimeNs) + 0.5);
  captureDelayUs += static_cast<int32_t>((1.0e6 * bufSizeSamples) /
                                             _inStreamFormat.mChannelsPerFrame /
                                             _inStreamFormat.mSampleRate +
                                         0.5);

  // Published atomically; read from the capture worker thread.
  AtomicSet32(&_captureDelayUs, captureDelayUs);

  RTC_DCHECK(inputData->mNumberBuffers == 1);
  // bytes * channels / bytesPerPacket == total interleaved samples
  // (bytesPerPacket covers one frame across all channels here).
  PaRingBufferSize numSamples = inputData->mBuffers->mDataByteSize *
                                _inStreamFormat.mChannelsPerFrame /
                                _inStreamFormat.mBytesPerPacket;
  PaUtil_WriteRingBuffer(_paCaptureBuffer, inputData->mBuffers->mData,
                         numSamples);

  // Wake the capture worker thread; a failed signal is logged but not fatal
  // since the worker also wakes on its timed wait.
  kern_return_t kernErr = semaphore_signal_all(_captureSemaphore);
  if (kernErr != KERN_SUCCESS) {
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                 " semaphore_signal_all() error: %d", kernErr);
  }

  return err;
}
   2545 
// Supplies input to the capture AudioConverter. Blocks (with a timed
// semaphore wait) until the capture ring buffer holds enough samples, then
// hands the converter a pointer directly into the ring buffer. Returns 0 on
// success, 1 (with *numberDataPackets = 0) when the capture device died and
// the worker thread should stop.
OSStatus AudioDeviceMac::implInConverterProc(UInt32* numberDataPackets,
                                             AudioBufferList* data) {
  RTC_DCHECK(data->mNumberBuffers == 1);
  PaRingBufferSize numSamples =
      *numberDataPackets * _inStreamFormat.mChannelsPerFrame;

  while (PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer) < numSamples) {
    mach_timespec_t timeout;
    timeout.tv_sec = 0;
    // NOTE(review): tv_nsec is nanoseconds, so TIMER_PERIOD_MS is presumably
    // defined in ns despite its name — confirm against the header.
    timeout.tv_nsec = TIMER_PERIOD_MS;

    kern_return_t kernErr = semaphore_timedwait(_captureSemaphore, timeout);
    if (kernErr == KERN_OPERATION_TIMED_OUT) {
      int32_t signal = AtomicGet32(&_captureDeviceIsAlive);
      if (signal == 0) {
        // The capture device is no longer alive; stop the worker thread.
        *numberDataPackets = 0;
        return 1;
      }
    } else if (kernErr != KERN_SUCCESS) {
      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                   " semaphore_wait() error: %d", kernErr);
    }
  }

  // Pass the read pointer directly to the converter to avoid a memcpy.
  // GetRingBufferReadRegions rewrites |numSamples| to the size of the first
  // contiguous region, so a wrap-around delivers fewer samples this call;
  // the converter will simply call back again for the rest.
  void* dummyPtr;
  PaRingBufferSize dummySize;
  PaUtil_GetRingBufferReadRegions(_paCaptureBuffer, numSamples,
                                  &data->mBuffers->mData, &numSamples,
                                  &dummyPtr, &dummySize);
  PaUtil_AdvanceRingBufferReadIndex(_paCaptureBuffer, numSamples);

  // Report back how many packets (frames) were actually delivered.
  data->mBuffers->mNumberChannels = _inStreamFormat.mChannelsPerFrame;
  *numberDataPackets = numSamples / _inStreamFormat.mChannelsPerFrame;
  data->mBuffers->mDataByteSize =
      *numberDataPackets * _inStreamFormat.mBytesPerPacket;

  return 0;
}
   2586 
   2587 bool AudioDeviceMac::RunRender(void* ptrThis) {
   2588   return static_cast<AudioDeviceMac*>(ptrThis)->RenderWorkerThread();
   2589 }
   2590 
   2591 bool AudioDeviceMac::RenderWorkerThread() {
   2592   PaRingBufferSize numSamples =
   2593       ENGINE_PLAY_BUF_SIZE_IN_SAMPLES * _outDesiredFormat.mChannelsPerFrame;
   2594   while (PaUtil_GetRingBufferWriteAvailable(_paRenderBuffer) -
   2595              _renderDelayOffsetSamples <
   2596          numSamples) {
   2597     mach_timespec_t timeout;
   2598     timeout.tv_sec = 0;
   2599     timeout.tv_nsec = TIMER_PERIOD_MS;
   2600 
   2601     kern_return_t kernErr = semaphore_timedwait(_renderSemaphore, timeout);
   2602     if (kernErr == KERN_OPERATION_TIMED_OUT) {
   2603       int32_t signal = AtomicGet32(&_renderDeviceIsAlive);
   2604       if (signal == 0) {
   2605         // The render device is no longer alive; stop the worker thread.
   2606         return false;
   2607       }
   2608     } else if (kernErr != KERN_SUCCESS) {
   2609       WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
   2610                    " semaphore_timedwait() error: %d", kernErr);
   2611     }
   2612   }
   2613 
   2614   int8_t playBuffer[4 * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES];
   2615 
   2616   if (!_ptrAudioBuffer) {
   2617     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
   2618                  "  capture AudioBuffer is invalid");
   2619     return false;
   2620   }
   2621 
   2622   // Ask for new PCM data to be played out using the AudioDeviceBuffer.
   2623   uint32_t nSamples =
   2624       _ptrAudioBuffer->RequestPlayoutData(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES);
   2625 
   2626   nSamples = _ptrAudioBuffer->GetPlayoutData(playBuffer);
   2627   if (nSamples != ENGINE_PLAY_BUF_SIZE_IN_SAMPLES) {
   2628     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
   2629                  "  invalid number of output samples(%d)", nSamples);
   2630   }
   2631 
   2632   uint32_t nOutSamples = nSamples * _outDesiredFormat.mChannelsPerFrame;
   2633 
   2634   SInt16* pPlayBuffer = (SInt16*)&playBuffer;
   2635   if (_macBookProPanRight && (_playChannels == 2)) {
   2636     // Mix entirely into the right channel and zero the left channel.
   2637     SInt32 sampleInt32 = 0;
   2638     for (uint32_t sampleIdx = 0; sampleIdx < nOutSamples; sampleIdx += 2) {
   2639       sampleInt32 = pPlayBuffer[sampleIdx];
   2640       sampleInt32 += pPlayBuffer[sampleIdx + 1];
   2641       sampleInt32 /= 2;
   2642 
   2643       if (sampleInt32 > 32767) {
   2644         sampleInt32 = 32767;
   2645       } else if (sampleInt32 < -32768) {
   2646         sampleInt32 = -32768;
   2647       }
   2648 
   2649       pPlayBuffer[sampleIdx] = 0;
   2650       pPlayBuffer[sampleIdx + 1] = static_cast<SInt16>(sampleInt32);
   2651     }
   2652   }
   2653 
   2654   PaUtil_WriteRingBuffer(_paRenderBuffer, pPlayBuffer, nOutSamples);
   2655 
   2656   return true;
   2657 }
   2658 
   2659 bool AudioDeviceMac::RunCapture(void* ptrThis) {
   2660   return static_cast<AudioDeviceMac*>(ptrThis)->CaptureWorkerThread();
   2661 }
   2662 
// One iteration of the capture worker loop: pull a full engine buffer of
// converted samples out of the capture path (blocking inside
// implInConverterProc until enough data is available), attach delay/AGC/
// typing metadata, and deliver it to the registered audio transport.
// Returns false when the capture device died or state is invalid, which
// terminates the worker thread.
bool AudioDeviceMac::CaptureWorkerThread() {
  OSStatus err = noErr;
  UInt32 noRecSamples =
      ENGINE_REC_BUF_SIZE_IN_SAMPLES * _inDesiredFormat.mChannelsPerFrame;
  // NOTE(review): variable-length array — a compiler extension, not
  // standard C++; relies on noRecSamples being modest and bounded.
  SInt16 recordBuffer[noRecSamples];
  UInt32 size = ENGINE_REC_BUF_SIZE_IN_SAMPLES;

  // Destination description handed to the converter: one interleaved buffer
  // backed by |recordBuffer|.
  AudioBufferList engineBuffer;
  engineBuffer.mNumberBuffers = 1;  // Interleaved channels.
  engineBuffer.mBuffers->mNumberChannels = _inDesiredFormat.mChannelsPerFrame;
  engineBuffer.mBuffers->mDataByteSize =
      _inDesiredFormat.mBytesPerPacket * noRecSamples;
  engineBuffer.mBuffers->mData = recordBuffer;

  // Blocks until inConverterProc can supply enough ring-buffer data, or
  // returns our private error code 1 when the device is gone.
  err = AudioConverterFillComplexBuffer(_captureConverter, inConverterProc,
                                        this, &size, &engineBuffer, NULL);
  if (err != noErr) {
    if (err == 1) {
      // This is our own error.
      return false;
    } else {
      logCAMsg(kTraceError, kTraceAudioDevice, _id,
               "Error in AudioConverterFillComplexBuffer()", (const char*)&err);
      return false;
    }
  }

  // Only deliver complete engine buffers; short reads are dropped here.
  // TODO(xians): what if the returned size is incorrect?
  if (size == ENGINE_REC_BUF_SIZE_IN_SAMPLES) {
    uint32_t currentMicLevel(0);
    uint32_t newMicLevel(0);
    int32_t msecOnPlaySide;
    int32_t msecOnRecordSide;

    // Delay estimates are published by the IOProcs; convert us -> ms and
    // add the device latencies measured at init time.
    int32_t captureDelayUs = AtomicGet32(&_captureDelayUs);
    int32_t renderDelayUs = AtomicGet32(&_renderDelayUs);

    msecOnPlaySide =
        static_cast<int32_t>(1e-3 * (renderDelayUs + _renderLatencyUs) + 0.5);
    msecOnRecordSide =
        static_cast<int32_t>(1e-3 * (captureDelayUs + _captureLatencyUs) + 0.5);

    if (!_ptrAudioBuffer) {
      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                   "  capture AudioBuffer is invalid");
      return false;
    }

    // store the recorded buffer (no action will be taken if the
    // #recorded samples is not a full buffer)
    _ptrAudioBuffer->SetRecordedBuffer((int8_t*)&recordBuffer, (uint32_t)size);

    if (AGC()) {
      // Use mod to ensure we check the volume on the first pass.
      if (get_mic_volume_counter_ms_ % kGetMicVolumeIntervalMs == 0) {
        get_mic_volume_counter_ms_ = 0;
        // store current mic level in the audio buffer if AGC is enabled
        if (MicrophoneVolume(currentMicLevel) == 0) {
          // this call does not affect the actual microphone volume
          _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
        }
      }
      get_mic_volume_counter_ms_ += kBufferSizeMs;
    }

    // Attach the play/record delay estimates (clock drift reported as 0).
    _ptrAudioBuffer->SetVQEData(msecOnPlaySide, msecOnRecordSide, 0);

    // Typing detection input for the noise-suppression pipeline.
    _ptrAudioBuffer->SetTypingStatus(KeyPressed());

    // deliver recorded samples at specified sample rate, mic level etc.
    // to the observer using callback
    _ptrAudioBuffer->DeliverRecordedData();

    if (AGC()) {
      newMicLevel = _ptrAudioBuffer->NewMicLevel();
      if (newMicLevel != 0) {
        // The VQE will only deliver non-zero microphone levels when
        // a change is needed.
        // Set this new mic level (received from the observer as return
        // value in the callback).
        WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id,
                     "  AGC change of volume: old=%u => new=%u",
                     currentMicLevel, newMicLevel);
        if (SetMicrophoneVolume(newMicLevel) == -1) {
          WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                       "  the required modification of the microphone "
                       "volume failed");
        }
      }
    }
  }

  return true;
}
   2757 
   2758 bool AudioDeviceMac::KeyPressed() {
   2759   bool key_down = false;
   2760   // Loop through all Mac virtual key constant values.
   2761   for (unsigned int key_index = 0; key_index < arraysize(prev_key_state_);
   2762        ++key_index) {
   2763     bool keyState =
   2764         CGEventSourceKeyState(kCGEventSourceStateHIDSystemState, key_index);
   2765     // A false -> true change in keymap means a key is pressed.
   2766     key_down |= (keyState && !prev_key_state_[key_index]);
   2767     // Save current state.
   2768     prev_key_state_[key_index] = keyState;
   2769   }
   2770   return key_down;
   2771 }
   2772 }  // namespace webrtc
   2773