Home | History | Annotate | Download | only in mac
      1 /*
      2  *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
      3  *
      4  *  Use of this source code is governed by a BSD-style license
      5  *  that can be found in the LICENSE file in the root of the source
      6  *  tree. An additional intellectual property rights grant can be found
      7  *  in the file PATENTS.  All contributing project authors may
      8  *  be found in the AUTHORS file in the root of the source tree.
      9  */
     10 
     11 #include "webrtc/modules/audio_device/audio_device_config.h"
     12 #include "webrtc/modules/audio_device/audio_device_utility.h"
     13 #include "webrtc/modules/audio_device/mac/audio_device_mac.h"
     14 
     15 #include "webrtc/modules/audio_device/mac/portaudio/pa_ringbuffer.h"
     16 #include "webrtc/system_wrappers/interface/event_wrapper.h"
     17 #include "webrtc/system_wrappers/interface/thread_wrapper.h"
     18 #include "webrtc/system_wrappers/interface/trace.h"
     19 
     20 #include <ApplicationServices/ApplicationServices.h>
     21 #include <assert.h>
     22 #include <libkern/OSAtomic.h>   // OSAtomicCompareAndSwap()
     23 #include <mach/mach.h>          // mach_task_self()
     24 #include <sys/sysctl.h>         // sysctlbyname()
     25 
     26 
     27 
     28 namespace webrtc
     29 {
     30 
// Evaluates a Core Audio expression; on failure logs the four-character
// error code via logCAMsg() and returns -1 from the enclosing function.
// Requires an OSStatus local named |err| and the member |_id| in scope.
#define WEBRTC_CA_RETURN_ON_ERR(expr)                                   \
    do {                                                                \
        err = expr;                                                     \
        if (err != noErr) {                                             \
            logCAMsg(kTraceError, kTraceAudioDevice, _id,               \
                "Error in " #expr, (const char *)&err);                 \
            return -1;                                                  \
        }                                                               \
    } while(0)

// Same as WEBRTC_CA_RETURN_ON_ERR but only logs at error level;
// execution continues after a failure.
#define WEBRTC_CA_LOG_ERR(expr)                                         \
    do {                                                                \
        err = expr;                                                     \
        if (err != noErr) {                                             \
            logCAMsg(kTraceError, kTraceAudioDevice, _id,               \
                "Error in " #expr, (const char *)&err);                 \
        }                                                               \
    } while(0)

// Same as WEBRTC_CA_LOG_ERR but logs at warning level.
#define WEBRTC_CA_LOG_WARN(expr)                                        \
    do {                                                                \
        err = expr;                                                     \
        if (err != noErr) {                                             \
            logCAMsg(kTraceWarning, kTraceAudioDevice, _id,             \
                "Error in " #expr, (const char *)&err);                 \
        }                                                               \
    } while(0)

// Number of elements in a statically-sized array.
#define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))

// Upper bound on the number of audio devices enumerated from the HAL.
enum
{
    MaxNumberDevices = 64
};
     65 
     66 void AudioDeviceMac::AtomicSet32(int32_t* theValue, int32_t newValue)
     67 {
     68     while (1)
     69     {
     70         int32_t oldValue = *theValue;
     71         if (OSAtomicCompareAndSwap32Barrier(oldValue, newValue, theValue)
     72             == true)
     73         {
     74             return;
     75         }
     76     }
     77 }
     78 
     79 int32_t AudioDeviceMac::AtomicGet32(int32_t* theValue)
     80 {
     81     while (1)
     82     {
     83         int32_t value = *theValue;
     84         if (OSAtomicCompareAndSwap32Barrier(value, value, theValue) == true)
     85         {
     86             return value;
     87         }
     88     }
     89 }
     90 
// CoreAudio errors are best interpreted as four character strings.
// Logs |msg| followed by the OSStatus error code |err| rendered as its
// four-character form, handling endianness of the in-memory bytes.
void AudioDeviceMac::logCAMsg(const TraceLevel level,
                              const TraceModule module,
                              const int32_t id, const char *msg,
                              const char *err)
{
    assert(msg != NULL);
    assert(err != NULL);

#ifdef WEBRTC_ARCH_BIG_ENDIAN
    // Big endian: the four bytes are already in readable order.
    WEBRTC_TRACE(level, module, id, "%s: %.4s", msg, err);
#else
    // We need to flip the characters in this case.
    WEBRTC_TRACE(level, module, id, "%s: %.1s%.1s%.1s%.1s", msg, err + 3, err
        + 2, err + 1, err);
#endif
}
    108 
// Constructor: sets every member to a safe default. Heavy resource
// allocation (ring buffers, worker threads, semaphores) is deferred to
// Init().
AudioDeviceMac::AudioDeviceMac(const int32_t id) :
    _ptrAudioBuffer(NULL),
    _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
    _stopEventRec(*EventWrapper::Create()),
    _stopEvent(*EventWrapper::Create()),
    _captureWorkerThread(NULL),
    _renderWorkerThread(NULL),
    _captureWorkerThreadId(0),
    _renderWorkerThreadId(0),
    _id(id),
    _mixerManager(id),
    _inputDeviceIndex(0),
    _outputDeviceIndex(0),
    _inputDeviceID(kAudioObjectUnknown),
    _outputDeviceID(kAudioObjectUnknown),
    _inputDeviceIsSpecified(false),
    _outputDeviceIsSpecified(false),
    _recChannels(N_REC_CHANNELS),
    _playChannels(N_PLAY_CHANNELS),
    _captureBufData(NULL),
    _renderBufData(NULL),
    _playBufType(AudioDeviceModule::kFixedBufferSize),
    _initialized(false),
    _isShutDown(false),
    _recording(false),
    _playing(false),
    _recIsInitialized(false),
    _playIsInitialized(false),
    _AGC(false),
    _renderDeviceIsAlive(1),
    _captureDeviceIsAlive(1),
    _twoDevices(true),
    _doStop(false),
    _doStopRec(false),
    _macBookPro(false),
    _macBookProPanRight(false),
    _captureLatencyUs(0),
    _renderLatencyUs(0),
    _captureDelayUs(0),
    _renderDelayUs(0),
    _renderDelayOffsetSamples(0),
    _playBufDelayFixed(20),
    _playWarning(0),
    _playError(0),
    _recWarning(0),
    _recError(0),
    _paCaptureBuffer(NULL),
    _paRenderBuffer(NULL),
    _captureBufSizeSamples(0),
    _renderBufSizeSamples(0),
    prev_key_state_()
{
    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
                 "%s created", __FUNCTION__);

    // Guard against a NULL return from EventWrapper::Create() above.
    // NOTE(review): taking the address of a reference is never NULL in
    // well-formed C++, so these asserts are effectively vacuous.
    assert(&_stopEvent != NULL);
    assert(&_stopEventRec != NULL);

    // Zero out the Core Audio stream-format descriptors; they are filled
    // in when the devices are initialized.
    memset(_renderConvertData, 0, sizeof(_renderConvertData));
    memset(&_outStreamFormat, 0, sizeof(AudioStreamBasicDescription));
    memset(&_outDesiredFormat, 0, sizeof(AudioStreamBasicDescription));
    memset(&_inStreamFormat, 0, sizeof(AudioStreamBasicDescription));
    memset(&_inDesiredFormat, 0, sizeof(AudioStreamBasicDescription));
}
    173 
    174 
    175 AudioDeviceMac::~AudioDeviceMac()
    176 {
    177     WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
    178                  "%s destroyed", __FUNCTION__);
    179 
    180     if (!_isShutDown)
    181     {
    182         Terminate();
    183     }
    184 
    185     if (_captureWorkerThread)
    186     {
    187         delete _captureWorkerThread;
    188         _captureWorkerThread = NULL;
    189     }
    190 
    191     if (_renderWorkerThread)
    192     {
    193         delete _renderWorkerThread;
    194         _renderWorkerThread = NULL;
    195     }
    196 
    197     if (_paRenderBuffer)
    198     {
    199         delete _paRenderBuffer;
    200         _paRenderBuffer = NULL;
    201     }
    202 
    203     if (_paCaptureBuffer)
    204     {
    205         delete _paCaptureBuffer;
    206         _paCaptureBuffer = NULL;
    207     }
    208 
    209     if (_renderBufData)
    210     {
    211         delete[] _renderBufData;
    212         _renderBufData = NULL;
    213     }
    214 
    215     if (_captureBufData)
    216     {
    217         delete[] _captureBufData;
    218         _captureBufData = NULL;
    219     }
    220 
    221     kern_return_t kernErr = KERN_SUCCESS;
    222     kernErr = semaphore_destroy(mach_task_self(), _renderSemaphore);
    223     if (kernErr != KERN_SUCCESS)
    224     {
    225         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
    226                      " semaphore_destroy() error: %d", kernErr);
    227     }
    228 
    229     kernErr = semaphore_destroy(mach_task_self(), _captureSemaphore);
    230     if (kernErr != KERN_SUCCESS)
    231     {
    232         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
    233                      " semaphore_destroy() error: %d", kernErr);
    234     }
    235 
    236     delete &_stopEvent;
    237     delete &_stopEventRec;
    238     delete &_critSect;
    239 }
    240 
    241 // ============================================================================
    242 //                                     API
    243 // ============================================================================
    244 
    245 void AudioDeviceMac::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
    246 {
    247 
    248     CriticalSectionScoped lock(&_critSect);
    249 
    250     _ptrAudioBuffer = audioBuffer;
    251 
    252     // inform the AudioBuffer about default settings for this implementation
    253     _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
    254     _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
    255     _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS);
    256     _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS);
    257 }
    258 
    259 int32_t AudioDeviceMac::ActiveAudioLayer(
    260     AudioDeviceModule::AudioLayer& audioLayer) const
    261 {
    262     audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
    263     return 0;
    264 }
    265 
    266 int32_t AudioDeviceMac::Init()
    267 {
    268 
    269     CriticalSectionScoped lock(&_critSect);
    270 
    271     if (_initialized)
    272     {
    273         return 0;
    274     }
    275 
    276     OSStatus err = noErr;
    277 
    278     _isShutDown = false;
    279 
    280     // PortAudio ring buffers require an elementCount which is a power of two.
    281     if (_renderBufData == NULL)
    282     {
    283         UInt32 powerOfTwo = 1;
    284         while (powerOfTwo < PLAY_BUF_SIZE_IN_SAMPLES)
    285         {
    286             powerOfTwo <<= 1;
    287         }
    288         _renderBufSizeSamples = powerOfTwo;
    289         _renderBufData = new SInt16[_renderBufSizeSamples];
    290     }
    291 
    292     if (_paRenderBuffer == NULL)
    293     {
    294         _paRenderBuffer = new PaUtilRingBuffer;
    295         ring_buffer_size_t bufSize = -1;
    296         bufSize = PaUtil_InitializeRingBuffer(_paRenderBuffer, sizeof(SInt16),
    297                                               _renderBufSizeSamples,
    298                                               _renderBufData);
    299         if (bufSize == -1)
    300         {
    301             WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice,
    302                          _id, " PaUtil_InitializeRingBuffer() error");
    303             return -1;
    304         }
    305     }
    306 
    307     if (_captureBufData == NULL)
    308     {
    309         UInt32 powerOfTwo = 1;
    310         while (powerOfTwo < REC_BUF_SIZE_IN_SAMPLES)
    311         {
    312             powerOfTwo <<= 1;
    313         }
    314         _captureBufSizeSamples = powerOfTwo;
    315         _captureBufData = new Float32[_captureBufSizeSamples];
    316     }
    317 
    318     if (_paCaptureBuffer == NULL)
    319     {
    320         _paCaptureBuffer = new PaUtilRingBuffer;
    321         ring_buffer_size_t bufSize = -1;
    322         bufSize = PaUtil_InitializeRingBuffer(_paCaptureBuffer,
    323                                               sizeof(Float32),
    324                                               _captureBufSizeSamples,
    325                                               _captureBufData);
    326         if (bufSize == -1)
    327         {
    328             WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice,
    329                          _id, " PaUtil_InitializeRingBuffer() error");
    330             return -1;
    331         }
    332     }
    333 
    334     if (_renderWorkerThread == NULL)
    335     {
    336         _renderWorkerThread
    337             = ThreadWrapper::CreateThread(RunRender, this, kRealtimePriority,
    338                                           "RenderWorkerThread");
    339         if (_renderWorkerThread == NULL)
    340         {
    341             WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice,
    342                          _id, " Render CreateThread() error");
    343             return -1;
    344         }
    345     }
    346 
    347     if (_captureWorkerThread == NULL)
    348     {
    349         _captureWorkerThread
    350             = ThreadWrapper::CreateThread(RunCapture, this, kRealtimePriority,
    351                                           "CaptureWorkerThread");
    352         if (_captureWorkerThread == NULL)
    353         {
    354             WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice,
    355                          _id, " Capture CreateThread() error");
    356             return -1;
    357         }
    358     }
    359 
    360     kern_return_t kernErr = KERN_SUCCESS;
    361     kernErr = semaphore_create(mach_task_self(), &_renderSemaphore,
    362                                SYNC_POLICY_FIFO, 0);
    363     if (kernErr != KERN_SUCCESS)
    364     {
    365         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
    366                      " semaphore_create() error: %d", kernErr);
    367         return -1;
    368     }
    369 
    370     kernErr = semaphore_create(mach_task_self(), &_captureSemaphore,
    371                                SYNC_POLICY_FIFO, 0);
    372     if (kernErr != KERN_SUCCESS)
    373     {
    374         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
    375                      " semaphore_create() error: %d", kernErr);
    376         return -1;
    377     }
    378 
    379     // Setting RunLoop to NULL here instructs HAL to manage its own thread for
    380     // notifications. This was the default behaviour on OS X 10.5 and earlier,
    381     // but now must be explicitly specified. HAL would otherwise try to use the
    382     // main thread to issue notifications.
    383     AudioObjectPropertyAddress propertyAddress = {
    384             kAudioHardwarePropertyRunLoop,
    385             kAudioObjectPropertyScopeGlobal,
    386             kAudioObjectPropertyElementMaster };
    387     CFRunLoopRef runLoop = NULL;
    388     UInt32 size = sizeof(CFRunLoopRef);
    389     WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(kAudioObjectSystemObject,
    390             &propertyAddress, 0, NULL, size, &runLoop));
    391 
    392     // Listen for any device changes.
    393     propertyAddress.mSelector = kAudioHardwarePropertyDevices;
    394     WEBRTC_CA_LOG_ERR(AudioObjectAddPropertyListener(kAudioObjectSystemObject,
    395             &propertyAddress, &objectListenerProc, this));
    396 
    397     // Determine if this is a MacBook Pro
    398     _macBookPro = false;
    399     _macBookProPanRight = false;
    400     char buf[128];
    401     size_t length = sizeof(buf);
    402     memset(buf, 0, length);
    403 
    404     int intErr = sysctlbyname("hw.model", buf, &length, NULL, 0);
    405     if (intErr != 0)
    406     {
    407         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
    408                      " Error in sysctlbyname(): %d", err);
    409     } else
    410     {
    411         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
    412                      " Hardware model: %s", buf);
    413         if (strncmp(buf, "MacBookPro", 10) == 0)
    414         {
    415             _macBookPro = true;
    416         }
    417     }
    418 
    419     _playWarning = 0;
    420     _playError = 0;
    421     _recWarning = 0;
    422     _recError = 0;
    423 
    424     _initialized = true;
    425 
    426     return 0;
    427 }
    428 
// Tears down the module: removes the HAL device listener, closes the
// mixer, and unloads audio hardware state. Recording and playout must be
// stopped by the caller first; otherwise -1 is returned.
int32_t AudioDeviceMac::Terminate()
{

    // Nothing to do if Init() never ran (or Terminate() already did).
    if (!_initialized)
    {
        return 0;
    }

    if (_recording)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " Recording must be stopped");
        return -1;
    }

    if (_playing)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " Playback must be stopped");
        return -1;
    }

    _critSect.Enter();

    _mixerManager.Close();

    OSStatus err = noErr;
    int retVal = 0;

    // Remove the device-change listener installed in Init().
    AudioObjectPropertyAddress propertyAddress = {
            kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal,
            kAudioObjectPropertyElementMaster };
    WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(kAudioObjectSystemObject,
            &propertyAddress, &objectListenerProc, this));

    // NOTE(review): AudioHardwareUnload() is deprecated in newer OS X
    // SDKs — confirm against the target SDK before relying on it.
    err = AudioHardwareUnload();
    if (err != noErr)
    {
        logCAMsg(kTraceError, kTraceAudioDevice, _id,
                 "Error in AudioHardwareUnload()", (const char*) &err);
        retVal = -1;
    }

    _critSect.Leave();

    _isShutDown = true;
    _initialized = false;
    _outputDeviceIsSpecified = false;
    _inputDeviceIsSpecified = false;

    return retVal;
}
    481 
    482 bool AudioDeviceMac::Initialized() const
    483 {
    484     return (_initialized);
    485 }
    486 
    487 int32_t AudioDeviceMac::SpeakerIsAvailable(bool& available)
    488 {
    489 
    490     bool wasInitialized = _mixerManager.SpeakerIsInitialized();
    491 
    492     // Make an attempt to open up the
    493     // output mixer corresponding to the currently selected output device.
    494     //
    495     if (!wasInitialized && InitSpeaker() == -1)
    496     {
    497         available = false;
    498         return 0;
    499     }
    500 
    501     // Given that InitSpeaker was successful, we know that a valid speaker
    502     // exists.
    503     available = true;
    504 
    505     // Close the initialized output mixer
    506     //
    507     if (!wasInitialized)
    508     {
    509         _mixerManager.CloseSpeaker();
    510     }
    511 
    512     return 0;
    513 }
    514 
    515 int32_t AudioDeviceMac::InitSpeaker()
    516 {
    517 
    518     CriticalSectionScoped lock(&_critSect);
    519 
    520     if (_playing)
    521     {
    522         return -1;
    523     }
    524 
    525     if (InitDevice(_outputDeviceIndex, _outputDeviceID, false) == -1)
    526     {
    527         return -1;
    528     }
    529 
    530     if (_inputDeviceID == _outputDeviceID)
    531     {
    532         _twoDevices = false;
    533     } else
    534     {
    535         _twoDevices = true;
    536     }
    537 
    538     if (_mixerManager.OpenSpeaker(_outputDeviceID) == -1)
    539     {
    540         return -1;
    541     }
    542 
    543     return 0;
    544 }
    545 
    546 int32_t AudioDeviceMac::MicrophoneIsAvailable(bool& available)
    547 {
    548 
    549     bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
    550 
    551     // Make an attempt to open up the
    552     // input mixer corresponding to the currently selected output device.
    553     //
    554     if (!wasInitialized && InitMicrophone() == -1)
    555     {
    556         available = false;
    557         return 0;
    558     }
    559 
    560     // Given that InitMicrophone was successful, we know that a valid microphone
    561     // exists.
    562     available = true;
    563 
    564     // Close the initialized input mixer
    565     //
    566     if (!wasInitialized)
    567     {
    568         _mixerManager.CloseMicrophone();
    569     }
    570 
    571     return 0;
    572 }
    573 
    574 int32_t AudioDeviceMac::InitMicrophone()
    575 {
    576 
    577     CriticalSectionScoped lock(&_critSect);
    578 
    579     if (_recording)
    580     {
    581         return -1;
    582     }
    583 
    584     if (InitDevice(_inputDeviceIndex, _inputDeviceID, true) == -1)
    585     {
    586         return -1;
    587     }
    588 
    589     if (_inputDeviceID == _outputDeviceID)
    590     {
    591         _twoDevices = false;
    592     } else
    593     {
    594         _twoDevices = true;
    595     }
    596 
    597     if (_mixerManager.OpenMicrophone(_inputDeviceID) == -1)
    598     {
    599         return -1;
    600     }
    601 
    602     return 0;
    603 }
    604 
    605 bool AudioDeviceMac::SpeakerIsInitialized() const
    606 {
    607     return (_mixerManager.SpeakerIsInitialized());
    608 }
    609 
    610 bool AudioDeviceMac::MicrophoneIsInitialized() const
    611 {
    612     return (_mixerManager.MicrophoneIsInitialized());
    613 }
    614 
    615 int32_t AudioDeviceMac::SpeakerVolumeIsAvailable(bool& available)
    616 {
    617 
    618     bool wasInitialized = _mixerManager.SpeakerIsInitialized();
    619 
    620     // Make an attempt to open up the
    621     // output mixer corresponding to the currently selected output device.
    622     //
    623     if (!wasInitialized && InitSpeaker() == -1)
    624     {
    625         // If we end up here it means that the selected speaker has no volume
    626         // control.
    627         available = false;
    628         return 0;
    629     }
    630 
    631     // Given that InitSpeaker was successful, we know that a volume control exists
    632     //
    633     available = true;
    634 
    635     // Close the initialized output mixer
    636     //
    637     if (!wasInitialized)
    638     {
    639         _mixerManager.CloseSpeaker();
    640     }
    641 
    642     return 0;
    643 }
    644 
    645 int32_t AudioDeviceMac::SetSpeakerVolume(uint32_t volume)
    646 {
    647 
    648     return (_mixerManager.SetSpeakerVolume(volume));
    649 }
    650 
    651 int32_t AudioDeviceMac::SpeakerVolume(uint32_t& volume) const
    652 {
    653 
    654     uint32_t level(0);
    655 
    656     if (_mixerManager.SpeakerVolume(level) == -1)
    657     {
    658         return -1;
    659     }
    660 
    661     volume = level;
    662     return 0;
    663 }
    664 
    665 int32_t AudioDeviceMac::SetWaveOutVolume(uint16_t volumeLeft,
    666                                          uint16_t volumeRight)
    667 {
    668 
    669     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
    670                  "  API call not supported on this platform");
    671     return -1;
    672 }
    673 
    674 int32_t
    675 AudioDeviceMac::WaveOutVolume(uint16_t& /*volumeLeft*/,
    676                               uint16_t& /*volumeRight*/) const
    677 {
    678 
    679     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
    680                  "  API call not supported on this platform");
    681     return -1;
    682 }
    683 
    684 int32_t AudioDeviceMac::MaxSpeakerVolume(uint32_t& maxVolume) const
    685 {
    686 
    687     uint32_t maxVol(0);
    688 
    689     if (_mixerManager.MaxSpeakerVolume(maxVol) == -1)
    690     {
    691         return -1;
    692     }
    693 
    694     maxVolume = maxVol;
    695     return 0;
    696 }
    697 
    698 int32_t AudioDeviceMac::MinSpeakerVolume(uint32_t& minVolume) const
    699 {
    700 
    701     uint32_t minVol(0);
    702 
    703     if (_mixerManager.MinSpeakerVolume(minVol) == -1)
    704     {
    705         return -1;
    706     }
    707 
    708     minVolume = minVol;
    709     return 0;
    710 }
    711 
    712 int32_t
    713 AudioDeviceMac::SpeakerVolumeStepSize(uint16_t& stepSize) const
    714 {
    715 
    716     uint16_t delta(0);
    717 
    718     if (_mixerManager.SpeakerVolumeStepSize(delta) == -1)
    719     {
    720         return -1;
    721     }
    722 
    723     stepSize = delta;
    724     return 0;
    725 }
    726 
    727 int32_t AudioDeviceMac::SpeakerMuteIsAvailable(bool& available)
    728 {
    729 
    730     bool isAvailable(false);
    731     bool wasInitialized = _mixerManager.SpeakerIsInitialized();
    732 
    733     // Make an attempt to open up the
    734     // output mixer corresponding to the currently selected output device.
    735     //
    736     if (!wasInitialized && InitSpeaker() == -1)
    737     {
    738         // If we end up here it means that the selected speaker has no volume
    739         // control, hence it is safe to state that there is no mute control
    740         // already at this stage.
    741         available = false;
    742         return 0;
    743     }
    744 
    745     // Check if the selected speaker has a mute control
    746     //
    747     _mixerManager.SpeakerMuteIsAvailable(isAvailable);
    748 
    749     available = isAvailable;
    750 
    751     // Close the initialized output mixer
    752     //
    753     if (!wasInitialized)
    754     {
    755         _mixerManager.CloseSpeaker();
    756     }
    757 
    758     return 0;
    759 }
    760 
    761 int32_t AudioDeviceMac::SetSpeakerMute(bool enable)
    762 {
    763     return (_mixerManager.SetSpeakerMute(enable));
    764 }
    765 
    766 int32_t AudioDeviceMac::SpeakerMute(bool& enabled) const
    767 {
    768 
    769     bool muted(0);
    770 
    771     if (_mixerManager.SpeakerMute(muted) == -1)
    772     {
    773         return -1;
    774     }
    775 
    776     enabled = muted;
    777     return 0;
    778 }
    779 
    780 int32_t AudioDeviceMac::MicrophoneMuteIsAvailable(bool& available)
    781 {
    782 
    783     bool isAvailable(false);
    784     bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
    785 
    786     // Make an attempt to open up the
    787     // input mixer corresponding to the currently selected input device.
    788     //
    789     if (!wasInitialized && InitMicrophone() == -1)
    790     {
    791         // If we end up here it means that the selected microphone has no volume
    792         // control, hence it is safe to state that there is no boost control
    793         // already at this stage.
    794         available = false;
    795         return 0;
    796     }
    797 
    798     // Check if the selected microphone has a mute control
    799     //
    800     _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
    801     available = isAvailable;
    802 
    803     // Close the initialized input mixer
    804     //
    805     if (!wasInitialized)
    806     {
    807         _mixerManager.CloseMicrophone();
    808     }
    809 
    810     return 0;
    811 }
    812 
    813 int32_t AudioDeviceMac::SetMicrophoneMute(bool enable)
    814 {
    815     return (_mixerManager.SetMicrophoneMute(enable));
    816 }
    817 
    818 int32_t AudioDeviceMac::MicrophoneMute(bool& enabled) const
    819 {
    820 
    821     bool muted(0);
    822 
    823     if (_mixerManager.MicrophoneMute(muted) == -1)
    824     {
    825         return -1;
    826     }
    827 
    828     enabled = muted;
    829     return 0;
    830 }
    831 
    832 int32_t AudioDeviceMac::MicrophoneBoostIsAvailable(bool& available)
    833 {
    834 
    835     bool isAvailable(false);
    836     bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
    837 
    838     // Enumerate all avaliable microphone and make an attempt to open up the
    839     // input mixer corresponding to the currently selected input device.
    840     //
    841     if (!wasInitialized && InitMicrophone() == -1)
    842     {
    843         // If we end up here it means that the selected microphone has no volume
    844         // control, hence it is safe to state that there is no boost control
    845         // already at this stage.
    846         available = false;
    847         return 0;
    848     }
    849 
    850     // Check if the selected microphone has a boost control
    851     //
    852     _mixerManager.MicrophoneBoostIsAvailable(isAvailable);
    853     available = isAvailable;
    854 
    855     // Close the initialized input mixer
    856     //
    857     if (!wasInitialized)
    858     {
    859         _mixerManager.CloseMicrophone();
    860     }
    861 
    862     return 0;
    863 }
    864 
    865 int32_t AudioDeviceMac::SetMicrophoneBoost(bool enable)
    866 {
    867 
    868     return (_mixerManager.SetMicrophoneBoost(enable));
    869 }
    870 
    871 int32_t AudioDeviceMac::MicrophoneBoost(bool& enabled) const
    872 {
    873 
    874     bool onOff(0);
    875 
    876     if (_mixerManager.MicrophoneBoost(onOff) == -1)
    877     {
    878         return -1;
    879     }
    880 
    881     enabled = onOff;
    882     return 0;
    883 }
    884 
    885 int32_t AudioDeviceMac::StereoRecordingIsAvailable(bool& available)
    886 {
    887 
    888     bool isAvailable(false);
    889     bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
    890 
    891     if (!wasInitialized && InitMicrophone() == -1)
    892     {
    893         // Cannot open the specified device
    894         available = false;
    895         return 0;
    896     }
    897 
    898     // Check if the selected microphone can record stereo
    899     //
    900     _mixerManager.StereoRecordingIsAvailable(isAvailable);
    901     available = isAvailable;
    902 
    903     // Close the initialized input mixer
    904     //
    905     if (!wasInitialized)
    906     {
    907         _mixerManager.CloseMicrophone();
    908     }
    909 
    910     return 0;
    911 }
    912 
    913 int32_t AudioDeviceMac::SetStereoRecording(bool enable)
    914 {
    915 
    916     if (enable)
    917         _recChannels = 2;
    918     else
    919         _recChannels = 1;
    920 
    921     return 0;
    922 }
    923 
    924 int32_t AudioDeviceMac::StereoRecording(bool& enabled) const
    925 {
    926 
    927     if (_recChannels == 2)
    928         enabled = true;
    929     else
    930         enabled = false;
    931 
    932     return 0;
    933 }
    934 
    935 int32_t AudioDeviceMac::StereoPlayoutIsAvailable(bool& available)
    936 {
    937 
    938     bool isAvailable(false);
    939     bool wasInitialized = _mixerManager.SpeakerIsInitialized();
    940 
    941     if (!wasInitialized && InitSpeaker() == -1)
    942     {
    943         // Cannot open the specified device
    944         available = false;
    945         return 0;
    946     }
    947 
    948     // Check if the selected microphone can record stereo
    949     //
    950     _mixerManager.StereoPlayoutIsAvailable(isAvailable);
    951     available = isAvailable;
    952 
    953     // Close the initialized input mixer
    954     //
    955     if (!wasInitialized)
    956     {
    957         _mixerManager.CloseSpeaker();
    958     }
    959 
    960     return 0;
    961 }
    962 
    963 int32_t AudioDeviceMac::SetStereoPlayout(bool enable)
    964 {
    965 
    966     if (enable)
    967         _playChannels = 2;
    968     else
    969         _playChannels = 1;
    970 
    971     return 0;
    972 }
    973 
    974 int32_t AudioDeviceMac::StereoPlayout(bool& enabled) const
    975 {
    976 
    977     if (_playChannels == 2)
    978         enabled = true;
    979     else
    980         enabled = false;
    981 
    982     return 0;
    983 }
    984 
    985 int32_t AudioDeviceMac::SetAGC(bool enable)
    986 {
    987 
    988     _AGC = enable;
    989 
    990     return 0;
    991 }
    992 
    993 bool AudioDeviceMac::AGC() const
    994 {
    995 
    996     return _AGC;
    997 }
    998 
    999 int32_t AudioDeviceMac::MicrophoneVolumeIsAvailable(bool& available)
   1000 {
   1001 
   1002     bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
   1003 
   1004     // Make an attempt to open up the
   1005     // input mixer corresponding to the currently selected output device.
   1006     //
   1007     if (!wasInitialized && InitMicrophone() == -1)
   1008     {
   1009         // If we end up here it means that the selected microphone has no volume
   1010         // control.
   1011         available = false;
   1012         return 0;
   1013     }
   1014 
   1015     // Given that InitMicrophone was successful, we know that a volume control
   1016     // exists
   1017     //
   1018     available = true;
   1019 
   1020     // Close the initialized input mixer
   1021     //
   1022     if (!wasInitialized)
   1023     {
   1024         _mixerManager.CloseMicrophone();
   1025     }
   1026 
   1027     return 0;
   1028 }
   1029 
   1030 int32_t AudioDeviceMac::SetMicrophoneVolume(uint32_t volume)
   1031 {
   1032 
   1033     return (_mixerManager.SetMicrophoneVolume(volume));
   1034 }
   1035 
   1036 int32_t AudioDeviceMac::MicrophoneVolume(uint32_t& volume) const
   1037 {
   1038 
   1039     uint32_t level(0);
   1040 
   1041     if (_mixerManager.MicrophoneVolume(level) == -1)
   1042     {
   1043         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
   1044                      "  failed to retrive current microphone level");
   1045         return -1;
   1046     }
   1047 
   1048     volume = level;
   1049     return 0;
   1050 }
   1051 
   1052 int32_t
   1053 AudioDeviceMac::MaxMicrophoneVolume(uint32_t& maxVolume) const
   1054 {
   1055 
   1056     uint32_t maxVol(0);
   1057 
   1058     if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1)
   1059     {
   1060         return -1;
   1061     }
   1062 
   1063     maxVolume = maxVol;
   1064     return 0;
   1065 }
   1066 
   1067 int32_t
   1068 AudioDeviceMac::MinMicrophoneVolume(uint32_t& minVolume) const
   1069 {
   1070 
   1071     uint32_t minVol(0);
   1072 
   1073     if (_mixerManager.MinMicrophoneVolume(minVol) == -1)
   1074     {
   1075         return -1;
   1076     }
   1077 
   1078     minVolume = minVol;
   1079     return 0;
   1080 }
   1081 
   1082 int32_t
   1083 AudioDeviceMac::MicrophoneVolumeStepSize(uint16_t& stepSize) const
   1084 {
   1085 
   1086     uint16_t delta(0);
   1087 
   1088     if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1)
   1089     {
   1090         return -1;
   1091     }
   1092 
   1093     stepSize = delta;
   1094     return 0;
   1095 }
   1096 
   1097 int16_t AudioDeviceMac::PlayoutDevices()
   1098 {
   1099 
   1100     AudioDeviceID playDevices[MaxNumberDevices];
   1101     return GetNumberDevices(kAudioDevicePropertyScopeOutput, playDevices,
   1102                             MaxNumberDevices);
   1103 }
   1104 
   1105 int32_t AudioDeviceMac::SetPlayoutDevice(uint16_t index)
   1106 {
   1107 
   1108     if (_playIsInitialized)
   1109     {
   1110         return -1;
   1111     }
   1112 
   1113     AudioDeviceID playDevices[MaxNumberDevices];
   1114     uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeOutput,
   1115                                          playDevices, MaxNumberDevices);
   1116     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
   1117                  "  number of availiable waveform-audio output devices is %u",
   1118                  nDevices);
   1119 
   1120     if (index > (nDevices - 1))
   1121     {
   1122         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
   1123                      "  device index is out of range [0,%u]", (nDevices - 1));
   1124         return -1;
   1125     }
   1126 
   1127     _outputDeviceIndex = index;
   1128     _outputDeviceIsSpecified = true;
   1129 
   1130     return 0;
   1131 }
   1132 
   1133 int32_t AudioDeviceMac::SetPlayoutDevice(
   1134     AudioDeviceModule::WindowsDeviceType /*device*/)
   1135 {
   1136     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
   1137                  "WindowsDeviceType not supported");
   1138     return -1;
   1139 }
   1140 
   1141 int32_t AudioDeviceMac::PlayoutDeviceName(
   1142     uint16_t index,
   1143     char name[kAdmMaxDeviceNameSize],
   1144     char guid[kAdmMaxGuidSize])
   1145 {
   1146 
   1147     const uint16_t nDevices(PlayoutDevices());
   1148 
   1149     if ((index > (nDevices - 1)) || (name == NULL))
   1150     {
   1151         return -1;
   1152     }
   1153 
   1154     memset(name, 0, kAdmMaxDeviceNameSize);
   1155 
   1156     if (guid != NULL)
   1157     {
   1158         memset(guid, 0, kAdmMaxGuidSize);
   1159     }
   1160 
   1161     return GetDeviceName(kAudioDevicePropertyScopeOutput, index, name);
   1162 }
   1163 
   1164 int32_t AudioDeviceMac::RecordingDeviceName(
   1165     uint16_t index,
   1166     char name[kAdmMaxDeviceNameSize],
   1167     char guid[kAdmMaxGuidSize])
   1168 {
   1169 
   1170     const uint16_t nDevices(RecordingDevices());
   1171 
   1172     if ((index > (nDevices - 1)) || (name == NULL))
   1173     {
   1174         return -1;
   1175     }
   1176 
   1177     memset(name, 0, kAdmMaxDeviceNameSize);
   1178 
   1179     if (guid != NULL)
   1180     {
   1181         memset(guid, 0, kAdmMaxGuidSize);
   1182     }
   1183 
   1184     return GetDeviceName(kAudioDevicePropertyScopeInput, index, name);
   1185 }
   1186 
   1187 int16_t AudioDeviceMac::RecordingDevices()
   1188 {
   1189 
   1190     AudioDeviceID recDevices[MaxNumberDevices];
   1191     return GetNumberDevices(kAudioDevicePropertyScopeInput, recDevices,
   1192                             MaxNumberDevices);
   1193 }
   1194 
   1195 int32_t AudioDeviceMac::SetRecordingDevice(uint16_t index)
   1196 {
   1197 
   1198     if (_recIsInitialized)
   1199     {
   1200         return -1;
   1201     }
   1202 
   1203     AudioDeviceID recDevices[MaxNumberDevices];
   1204     uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeInput,
   1205                                          recDevices, MaxNumberDevices);
   1206     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
   1207                  "  number of availiable waveform-audio input devices is %u",
   1208                  nDevices);
   1209 
   1210     if (index > (nDevices - 1))
   1211     {
   1212         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
   1213                      "  device index is out of range [0,%u]", (nDevices - 1));
   1214         return -1;
   1215     }
   1216 
   1217     _inputDeviceIndex = index;
   1218     _inputDeviceIsSpecified = true;
   1219 
   1220     return 0;
   1221 }
   1222 
   1223 
   1224 int32_t
   1225 AudioDeviceMac::SetRecordingDevice(AudioDeviceModule::WindowsDeviceType /*device*/)
   1226 {
   1227     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
   1228                  "WindowsDeviceType not supported");
   1229     return -1;
   1230 }
   1231 
   1232 int32_t AudioDeviceMac::PlayoutIsAvailable(bool& available)
   1233 {
   1234 
   1235     available = true;
   1236 
   1237     // Try to initialize the playout side
   1238     if (InitPlayout() == -1)
   1239     {
   1240         available = false;
   1241     }
   1242 
   1243     // We destroy the IOProc created by InitPlayout() in implDeviceIOProc().
   1244     // We must actually start playout here in order to have the IOProc
   1245     // deleted by calling StopPlayout().
   1246     if (StartPlayout() == -1)
   1247     {
   1248         available = false;
   1249     }
   1250 
   1251     // Cancel effect of initialization
   1252     if (StopPlayout() == -1)
   1253     {
   1254         available = false;
   1255     }
   1256 
   1257     return 0;
   1258 }
   1259 
   1260 int32_t AudioDeviceMac::RecordingIsAvailable(bool& available)
   1261 {
   1262 
   1263     available = true;
   1264 
   1265     // Try to initialize the recording side
   1266     if (InitRecording() == -1)
   1267     {
   1268         available = false;
   1269     }
   1270 
   1271     // We destroy the IOProc created by InitRecording() in implInDeviceIOProc().
   1272     // We must actually start recording here in order to have the IOProc
   1273     // deleted by calling StopRecording().
   1274     if (StartRecording() == -1)
   1275     {
   1276         available = false;
   1277     }
   1278 
   1279     // Cancel effect of initialization
   1280     if (StopRecording() == -1)
   1281     {
   1282         available = false;
   1283     }
   1284 
   1285     return 0;
   1286 }
   1287 
// Prepares the playout side under the device lock: validates state, flushes
// the render ring buffer, handles MacBook Pro internal-speaker panning,
// queries/validates the output stream format, configures the desired 16-bit
// PCM format and an AudioConverter, sets the device buffer size, accumulates
// latency estimates, registers property listeners, and creates the render
// IOProc when this side owns it.
// Returns 0 on success (or if already initialized), -1 on failure; the
// WEBRTC_CA_RETURN_ON_ERR macro logs and returns -1 on Core Audio errors.
int32_t AudioDeviceMac::InitPlayout()
{

    CriticalSectionScoped lock(&_critSect);

    // Cannot (re)initialize while actively playing.
    if (_playing)
    {
        return -1;
    }

    if (!_outputDeviceIsSpecified)
    {
        return -1;
    }

    // Idempotent: a second call is a successful no-op.
    if (_playIsInitialized)
    {
        return 0;
    }

    // Initialize the speaker (devices might have been added or removed)
    if (InitSpeaker() == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     "  InitSpeaker() failed");
    }

    if (!MicrophoneIsInitialized())
    {
        // Make this call to check if we are using
        // one or two devices (_twoDevices)
        bool available = false;
        if (MicrophoneIsAvailable(available) == -1)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                         "  MicrophoneIsAvailable() failed");
        }
    }

    // Discard any stale render data and reset delay/latency bookkeeping.
    PaUtil_FlushRingBuffer(_paRenderBuffer);

    OSStatus err = noErr;
    UInt32 size = 0;
    _renderDelayOffsetSamples = 0;
    _renderDelayUs = 0;
    _renderLatencyUs = 0;
    _renderDeviceIsAlive = 1;
    _doStop = false;

    // The internal microphone of a MacBook Pro is located under the left speaker
    // grille. When the internal speakers are in use, we want to fully stereo
    // pan to the right.
    AudioObjectPropertyAddress
        propertyAddress = { kAudioDevicePropertyDataSource,
                kAudioDevicePropertyScopeOutput, 0 };
    if (_macBookPro)
    {
        _macBookProPanRight = false;
        Boolean hasProperty = AudioObjectHasProperty(_outputDeviceID,
                                                     &propertyAddress);
        if (hasProperty)
        {
            UInt32 dataSource = 0;
            size = sizeof(dataSource);
            WEBRTC_CA_LOG_WARN(AudioObjectGetPropertyData(_outputDeviceID,
                    &propertyAddress, 0, NULL, &size, &dataSource));

            // 'ispk' is the data-source selector for the internal speakers
            // (see the trace messages below).
            if (dataSource == 'ispk')
            {
                _macBookProPanRight = true;
                WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice,
                             _id,
                             "MacBook Pro using internal speakers; stereo"
                             " panning right");
            } else
            {
                WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice,
                             _id, "MacBook Pro not using internal speakers");
            }

            // Add a listener to determine if the status changes.
            WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_outputDeviceID,
                    &propertyAddress, &objectListenerProc, this));
        }
    }

    // Get current stream description
    propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
    memset(&_outStreamFormat, 0, sizeof(_outStreamFormat));
    size = sizeof(_outStreamFormat);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
            &propertyAddress, 0, NULL, &size, &_outStreamFormat));

    // Only linear PCM device formats are supported.
    if (_outStreamFormat.mFormatID != kAudioFormatLinearPCM)
    {
        logCAMsg(kTraceError, kTraceAudioDevice, _id,
                 "Unacceptable output stream format -> mFormatID",
                 (const char *) &_outStreamFormat.mFormatID);
        return -1;
    }

    if (_outStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
            "Too many channels on output device (mChannelsPerFrame = %d)",
            _outStreamFormat.mChannelsPerFrame);
        return -1;
    }

    if (_outStreamFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "Non-interleaved audio data is not supported.",
                     "AudioHardware streams should not have this format.");
        return -1;
    }

    // Log the device's native stream format for diagnostics.
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 "Ouput stream format:");
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 "mSampleRate = %f, mChannelsPerFrame = %u",
                 _outStreamFormat.mSampleRate,
                 _outStreamFormat.mChannelsPerFrame);
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 "mBytesPerPacket = %u, mFramesPerPacket = %u",
                 _outStreamFormat.mBytesPerPacket,
                 _outStreamFormat.mFramesPerPacket);
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 "mBytesPerFrame = %u, mBitsPerChannel = %u",
                 _outStreamFormat.mBytesPerFrame,
                 _outStreamFormat.mBitsPerChannel);
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 "mFormatFlags = %u",
                 _outStreamFormat.mFormatFlags);
    logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID",
             (const char *) &_outStreamFormat.mFormatID);

    // Our preferred format to work with
    _outDesiredFormat.mSampleRate = N_PLAY_SAMPLES_PER_SEC;
    if (_outStreamFormat.mChannelsPerFrame >= 2 && (_playChannels == 2))
    {
        _outDesiredFormat.mChannelsPerFrame = 2;
    } else
    {
        // Disable stereo playout when we only have one channel on the device.
        _outDesiredFormat.mChannelsPerFrame = 1;
        _playChannels = 1;
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                     "Stereo playout unavailable on this device");
    }

    if (_ptrAudioBuffer)
    {
        // Update audio buffer with the selected parameters
        _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
        _ptrAudioBuffer->SetPlayoutChannels((uint8_t) _playChannels);
    }

    // Leave headroom in the ring buffer for N_BUFFERS_OUT engine buffers.
    _renderDelayOffsetSamples = _renderBufSizeSamples - N_BUFFERS_OUT
        * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES * _outDesiredFormat.mChannelsPerFrame;

    // Desired format: packed signed 16-bit linear PCM, one frame per packet.
    _outDesiredFormat.mBytesPerPacket = _outDesiredFormat.mChannelsPerFrame
        * sizeof(SInt16);
    _outDesiredFormat.mFramesPerPacket = 1; // In uncompressed audio,
    // a packet is one frame.
    _outDesiredFormat.mBytesPerFrame = _outDesiredFormat.mChannelsPerFrame
        * sizeof(SInt16);
    _outDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8;

    _outDesiredFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
        | kLinearPCMFormatFlagIsPacked;
#ifdef WEBRTC_ARCH_BIG_ENDIAN
    _outDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
#endif
    _outDesiredFormat.mFormatID = kAudioFormatLinearPCM;

    // Converter translates our desired format into the device's format.
    WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&_outDesiredFormat, &_outStreamFormat,
            &_renderConverter));

    // First try to set buffer size to desired value (_playBufDelayFixed)
    UInt32 bufByteCount = (UInt32)((_outStreamFormat.mSampleRate / 1000.0)
        * _playBufDelayFixed * _outStreamFormat.mChannelsPerFrame
        * sizeof(Float32));
    if (_outStreamFormat.mFramesPerPacket != 0)
    {
        // Round up to a whole number of packets.
        if (bufByteCount % _outStreamFormat.mFramesPerPacket != 0)
        {
            bufByteCount = ((UInt32)(bufByteCount
                / _outStreamFormat.mFramesPerPacket) + 1)
                * _outStreamFormat.mFramesPerPacket;
        }
    }

    // Ensure the buffer size is within the acceptable range provided by the device.
    propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
    AudioValueRange range;
    size = sizeof(range);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
            &propertyAddress, 0, NULL, &size, &range));
    if (range.mMinimum > bufByteCount)
    {
        bufByteCount = range.mMinimum;
    } else if (range.mMaximum < bufByteCount)
    {
        bufByteCount = range.mMaximum;
    }

    propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
    size = sizeof(bufByteCount);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_outputDeviceID,
            &propertyAddress, 0, NULL, size, &bufByteCount));

    // Get render device latency
    propertyAddress.mSelector = kAudioDevicePropertyLatency;
    UInt32 latency = 0;
    size = sizeof(UInt32);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
            &propertyAddress, 0, NULL, &size, &latency));
    // Convert from frames to microseconds at the device sample rate.
    _renderLatencyUs = (uint32_t) ((1.0e6 * latency)
        / _outStreamFormat.mSampleRate);

    // Get render stream latency
    propertyAddress.mSelector = kAudioDevicePropertyStreams;
    AudioStreamID stream = 0;
    size = sizeof(AudioStreamID);
    // NOTE(review): only the first stream's latency is queried and added here.
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
            &propertyAddress, 0, NULL, &size, &stream));
    propertyAddress.mSelector = kAudioStreamPropertyLatency;
    size = sizeof(UInt32);
    latency = 0;
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
            &propertyAddress, 0, NULL, &size, &latency));
    _renderLatencyUs += (uint32_t) ((1.0e6 * latency)
        / _outStreamFormat.mSampleRate);

    // Listen for format changes
    propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectAddPropertyListener(_outputDeviceID,
            &propertyAddress, &objectListenerProc, this));

    // Listen for processor overloads
    propertyAddress.mSelector = kAudioDeviceProcessorOverload;
    WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_outputDeviceID,
            &propertyAddress, &objectListenerProc, this));

    // Create the IOProc here only if we use two devices or the recording side
    // has not already created the shared one.
    if (_twoDevices || !_recIsInitialized)
    {
        WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(_outputDeviceID,
                deviceIOProc, this, &_deviceIOProcID));
    }

    // Mark playout side as initialized
    _playIsInitialized = true;

    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 "  initial playout status: _renderDelayOffsetSamples=%d,"
                 " _renderDelayUs=%d, _renderLatencyUs=%d",
                 _renderDelayOffsetSamples, _renderDelayUs, _renderLatencyUs);

    return 0;
}
   1549 
// Prepares the recording side under the device lock: validates state,
// flushes the capture ring buffer, queries/validates the input stream
// format, configures the desired 16-bit PCM format and an AudioConverter,
// sets the device buffer size, accumulates latency estimates, registers
// property listeners, and creates the capture IOProc (its own when two
// devices are used, the shared one otherwise).
// Returns 0 on success (or if already initialized), -1 on failure; the
// WEBRTC_CA_RETURN_ON_ERR macro logs and returns -1 on Core Audio errors.
int32_t AudioDeviceMac::InitRecording()
{

    CriticalSectionScoped lock(&_critSect);

    // Cannot (re)initialize while actively recording.
    if (_recording)
    {
        return -1;
    }

    if (!_inputDeviceIsSpecified)
    {
        return -1;
    }

    // Idempotent: a second call is a successful no-op.
    if (_recIsInitialized)
    {
        return 0;
    }

    // Initialize the microphone (devices might have been added or removed)
    if (InitMicrophone() == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     "  InitMicrophone() failed");
    }

    if (!SpeakerIsInitialized())
    {
        // Make this call to check if we are using
        // one or two devices (_twoDevices)
        bool available = false;
        if (SpeakerIsAvailable(available) == -1)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                         "  SpeakerIsAvailable() failed");
        }
    }

    OSStatus err = noErr;
    UInt32 size = 0;

    // Discard any stale capture data and reset delay/latency bookkeeping.
    PaUtil_FlushRingBuffer(_paCaptureBuffer);

    _captureDelayUs = 0;
    _captureLatencyUs = 0;
    _captureDeviceIsAlive = 1;
    _doStopRec = false;

    // Get current stream description
    AudioObjectPropertyAddress
        propertyAddress = { kAudioDevicePropertyStreamFormat,
                kAudioDevicePropertyScopeInput, 0 };
    memset(&_inStreamFormat, 0, sizeof(_inStreamFormat));
    size = sizeof(_inStreamFormat);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
            &propertyAddress, 0, NULL, &size, &_inStreamFormat));

    // Only linear PCM device formats are supported.
    if (_inStreamFormat.mFormatID != kAudioFormatLinearPCM)
    {
        logCAMsg(kTraceError, kTraceAudioDevice, _id,
                 "Unacceptable input stream format -> mFormatID",
                 (const char *) &_inStreamFormat.mFormatID);
        return -1;
    }

    if (_inStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
            "Too many channels on input device (mChannelsPerFrame = %d)",
            _inStreamFormat.mChannelsPerFrame);
        return -1;
    }

    // One IO block covers N_BLOCKS_IO * 10 ms of audio; it must fit in the
    // capture ring buffer.
    const int io_block_size_samples = _inStreamFormat.mChannelsPerFrame *
        _inStreamFormat.mSampleRate / 100 * N_BLOCKS_IO;
    if (io_block_size_samples > _captureBufSizeSamples)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
            "Input IO block size (%d) is larger than ring buffer (%u)",
            io_block_size_samples, _captureBufSizeSamples);
        return -1;
    }

    // Log the device's native stream format for diagnostics.
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 " Input stream format:");
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 " mSampleRate = %f, mChannelsPerFrame = %u",
                 _inStreamFormat.mSampleRate, _inStreamFormat.mChannelsPerFrame);
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 " mBytesPerPacket = %u, mFramesPerPacket = %u",
                 _inStreamFormat.mBytesPerPacket,
                 _inStreamFormat.mFramesPerPacket);
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 " mBytesPerFrame = %u, mBitsPerChannel = %u",
                 _inStreamFormat.mBytesPerFrame,
                 _inStreamFormat.mBitsPerChannel);
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 " mFormatFlags = %u",
                 _inStreamFormat.mFormatFlags);
    logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID",
             (const char *) &_inStreamFormat.mFormatID);

    // Our preferred format to work with
    if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2))
    {
        _inDesiredFormat.mChannelsPerFrame = 2;
    } else
    {
        // Disable stereo recording when we only have one channel on the device.
        _inDesiredFormat.mChannelsPerFrame = 1;
        _recChannels = 1;
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                     "Stereo recording unavailable on this device");
    }

    if (_ptrAudioBuffer)
    {
        // Update audio buffer with the selected parameters
        _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
        _ptrAudioBuffer->SetRecordingChannels((uint8_t) _recChannels);
    }

    // Desired format: packed signed 16-bit linear PCM, one frame per packet.
    _inDesiredFormat.mSampleRate = N_REC_SAMPLES_PER_SEC;
    _inDesiredFormat.mBytesPerPacket = _inDesiredFormat.mChannelsPerFrame
        * sizeof(SInt16);
    _inDesiredFormat.mFramesPerPacket = 1;
    _inDesiredFormat.mBytesPerFrame = _inDesiredFormat.mChannelsPerFrame
        * sizeof(SInt16);
    _inDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8;

    _inDesiredFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
        | kLinearPCMFormatFlagIsPacked;
#ifdef WEBRTC_ARCH_BIG_ENDIAN
    _inDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
#endif
    _inDesiredFormat.mFormatID = kAudioFormatLinearPCM;

    // Converter translates the device's format into our desired format.
    WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&_inStreamFormat, &_inDesiredFormat,
            &_captureConverter));

    // First try to set buffer size to desired value (10 ms * N_BLOCKS_IO)
    // TODO(xians): investigate this block.
    UInt32 bufByteCount = (UInt32)((_inStreamFormat.mSampleRate / 1000.0)
        * 10.0 * N_BLOCKS_IO * _inStreamFormat.mChannelsPerFrame
        * sizeof(Float32));
    if (_inStreamFormat.mFramesPerPacket != 0)
    {
        // Round up to a whole number of packets.
        if (bufByteCount % _inStreamFormat.mFramesPerPacket != 0)
        {
            bufByteCount = ((UInt32)(bufByteCount
                / _inStreamFormat.mFramesPerPacket) + 1)
                * _inStreamFormat.mFramesPerPacket;
        }
    }

    // Ensure the buffer size is within the acceptable range provided by the device.
    propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
    AudioValueRange range;
    size = sizeof(range);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
            &propertyAddress, 0, NULL, &size, &range));
    if (range.mMinimum > bufByteCount)
    {
        bufByteCount = range.mMinimum;
    } else if (range.mMaximum < bufByteCount)
    {
        bufByteCount = range.mMaximum;
    }

    propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
    size = sizeof(bufByteCount);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_inputDeviceID,
            &propertyAddress, 0, NULL, size, &bufByteCount));

    // Get capture device latency
    propertyAddress.mSelector = kAudioDevicePropertyLatency;
    UInt32 latency = 0;
    size = sizeof(UInt32);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
            &propertyAddress, 0, NULL, &size, &latency));
    // Convert from frames to microseconds at the device sample rate.
    _captureLatencyUs = (UInt32)((1.0e6 * latency)
        / _inStreamFormat.mSampleRate);

    // Get capture stream latency
    propertyAddress.mSelector = kAudioDevicePropertyStreams;
    AudioStreamID stream = 0;
    size = sizeof(AudioStreamID);
    // NOTE(review): only the first stream's latency is queried and added here.
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
            &propertyAddress, 0, NULL, &size, &stream));
    propertyAddress.mSelector = kAudioStreamPropertyLatency;
    size = sizeof(UInt32);
    latency = 0;
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
            &propertyAddress, 0, NULL, &size, &latency));
    _captureLatencyUs += (UInt32)((1.0e6 * latency)
        / _inStreamFormat.mSampleRate);

    // Listen for format changes
    // TODO(xians): should we be using kAudioDevicePropertyDeviceHasChanged?
    propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectAddPropertyListener(_inputDeviceID,
            &propertyAddress, &objectListenerProc, this));

    // Listen for processor overloads
    propertyAddress.mSelector = kAudioDeviceProcessorOverload;
    WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_inputDeviceID,
            &propertyAddress, &objectListenerProc, this));

    // With two devices the capture side gets its own IOProc; otherwise the
    // IOProc is shared and only created here if playout has not done so yet.
    if (_twoDevices)
    {
        WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(_inputDeviceID,
                inDeviceIOProc, this, &_inDeviceIOProcID));
    } else if (!_playIsInitialized)
    {
        WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(_inputDeviceID,
                deviceIOProc, this, &_deviceIOProcID));
    }

    // Mark recording side as initialized
    _recIsInitialized = true;

    return 0;
}
   1774 
// Starts recording: launches the capture worker thread and starts the audio
// device IOProc (capture-specific IOProc when two devices are used; the
// shared IOProc otherwise, and only if playout has not already started it).
// Requires InitRecording() to have succeeded. Returns 0 on success (or if
// already recording), -1 on failure.
int32_t AudioDeviceMac::StartRecording()
{

    CriticalSectionScoped lock(&_critSect);

    if (!_recIsInitialized)
    {
        return -1;
    }

    // Idempotent: already recording is a successful no-op.
    if (_recording)
    {
        return 0;
    }

    if (!_initialized)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " Recording worker thread has not been started");
        return -1;
    }

    OSStatus err = noErr;

    unsigned int threadID(0);
    if (_captureWorkerThread != NULL)
    {
        // NOTE(review): the return value of Start() is ignored; a thread
        // start failure is not detected here.
        _captureWorkerThread->Start(threadID);
    }
    _captureWorkerThreadId = threadID;

    // Start the device IOProc. When a single device is shared with playout
    // and it is already playing, the IOProc is running and must not be
    // started again.
    if (_twoDevices)
    {
        WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_inputDeviceID, _inDeviceIOProcID));
    } else if (!_playing)
    {
        WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_inputDeviceID, _deviceIOProcID));
    }

    _recording = true;

    return 0;
}
   1818 
   1819 int32_t AudioDeviceMac::StopRecording()
   1820 {
   1821 
   1822     CriticalSectionScoped lock(&_critSect);
   1823 
   1824     if (!_recIsInitialized)
   1825     {
   1826         return 0;
   1827     }
   1828 
   1829     OSStatus err = noErr;
   1830 
   1831     // Stop device
   1832     int32_t captureDeviceIsAlive = AtomicGet32(&_captureDeviceIsAlive);
   1833     if (_twoDevices)
   1834     {
   1835         if (_recording && captureDeviceIsAlive == 1)
   1836         {
   1837             _recording = false;
   1838             _doStopRec = true; // Signal to io proc to stop audio device
   1839             _critSect.Leave(); // Cannot be under lock, risk of deadlock
   1840             if (kEventTimeout == _stopEventRec.Wait(2000))
   1841             {
   1842                 CriticalSectionScoped critScoped(&_critSect);
   1843                 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
   1844                              " Timed out stopping the capture IOProc. "
   1845                              "We may have failed to detect a device removal.");
   1846 
   1847                 WEBRTC_CA_LOG_WARN(AudioDeviceStop(_inputDeviceID,
   1848                                                    _inDeviceIOProcID));
   1849                 WEBRTC_CA_LOG_WARN(
   1850                     AudioDeviceDestroyIOProcID(_inputDeviceID,
   1851                                                _inDeviceIOProcID));
   1852             }
   1853             _critSect.Enter();
   1854             _doStopRec = false;
   1855             WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
   1856                          " Recording stopped");
   1857         }
   1858     }
   1859     else
   1860     {
   1861         // We signal a stop for a shared device even when rendering has
   1862         // not yet ended. This is to ensure the IOProc will return early as
   1863         // intended (by checking |_recording|) before accessing
   1864         // resources we free below (e.g. the capture converter).
   1865         //
   1866         // In the case of a shared devcie, the IOProc will verify
   1867         // rendering has ended before stopping itself.
   1868         if (_recording && captureDeviceIsAlive == 1)
   1869         {
   1870             _recording = false;
   1871             _doStop = true; // Signal to io proc to stop audio device
   1872             _critSect.Leave(); // Cannot be under lock, risk of deadlock
   1873             if (kEventTimeout == _stopEvent.Wait(2000))
   1874             {
   1875                 CriticalSectionScoped critScoped(&_critSect);
   1876                 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
   1877                              " Timed out stopping the shared IOProc. "
   1878                              "We may have failed to detect a device removal.");
   1879 
   1880                 // We assume rendering on a shared device has stopped as well if
   1881                 // the IOProc times out.
   1882                 WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID,
   1883                                                    _deviceIOProcID));
   1884                 WEBRTC_CA_LOG_WARN(AudioDeviceDestroyIOProcID(_outputDeviceID,
   1885                                                               _deviceIOProcID));
   1886             }
   1887             _critSect.Enter();
   1888             _doStop = false;
   1889             WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
   1890                          " Recording stopped (shared)");
   1891         }
   1892     }
   1893 
   1894     // Setting this signal will allow the worker thread to be stopped.
   1895     AtomicSet32(&_captureDeviceIsAlive, 0);
   1896     _critSect.Leave();
   1897     if (_captureWorkerThread != NULL)
   1898     {
   1899         if (!_captureWorkerThread->Stop())
   1900         {
   1901             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
   1902                          " Timed out waiting for the render worker thread to "
   1903                              "stop.");
   1904         }
   1905     }
   1906     _critSect.Enter();
   1907 
   1908     WEBRTC_CA_LOG_WARN(AudioConverterDispose(_captureConverter));
   1909 
   1910     // Remove listeners.
   1911     AudioObjectPropertyAddress
   1912         propertyAddress = { kAudioDevicePropertyStreamFormat,
   1913                 kAudioDevicePropertyScopeInput, 0 };
   1914     WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_inputDeviceID,
   1915             &propertyAddress, &objectListenerProc, this));
   1916 
   1917     propertyAddress.mSelector = kAudioDeviceProcessorOverload;
   1918     WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_inputDeviceID,
   1919             &propertyAddress, &objectListenerProc, this));
   1920 
   1921     _recIsInitialized = false;
   1922     _recording = false;
   1923 
   1924     return 0;
   1925 }
   1926 
   1927 bool AudioDeviceMac::RecordingIsInitialized() const
   1928 {
   1929     return (_recIsInitialized);
   1930 }
   1931 
   1932 bool AudioDeviceMac::Recording() const
   1933 {
   1934     return (_recording);
   1935 }
   1936 
   1937 bool AudioDeviceMac::PlayoutIsInitialized() const
   1938 {
   1939     return (_playIsInitialized);
   1940 }
   1941 
   1942 int32_t AudioDeviceMac::StartPlayout()
   1943 {
   1944 
   1945     CriticalSectionScoped lock(&_critSect);
   1946 
   1947     if (!_playIsInitialized)
   1948     {
   1949         return -1;
   1950     }
   1951 
   1952     if (_playing)
   1953     {
   1954         return 0;
   1955     }
   1956 
   1957     OSStatus err = noErr;
   1958 
   1959     unsigned int threadID(0);
   1960     if (_renderWorkerThread != NULL)
   1961     {
   1962         _renderWorkerThread->Start(threadID);
   1963     }
   1964     _renderWorkerThreadId = threadID;
   1965 
   1966     if (_twoDevices || !_recording)
   1967     {
   1968         WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_outputDeviceID, _deviceIOProcID));
   1969     }
   1970     _playing = true;
   1971 
   1972     return 0;
   1973 }
   1974 
   1975 int32_t AudioDeviceMac::StopPlayout()
   1976 {
   1977 
   1978     CriticalSectionScoped lock(&_critSect);
   1979 
   1980     if (!_playIsInitialized)
   1981     {
   1982         return 0;
   1983     }
   1984 
   1985     OSStatus err = noErr;
   1986 
   1987     int32_t renderDeviceIsAlive = AtomicGet32(&_renderDeviceIsAlive);
   1988     if (_playing && renderDeviceIsAlive == 1)
   1989     {
   1990         // We signal a stop for a shared device even when capturing has not
   1991         // yet ended. This is to ensure the IOProc will return early as
   1992         // intended (by checking |_playing|) before accessing resources we
   1993         // free below (e.g. the render converter).
   1994         //
   1995         // In the case of a shared device, the IOProc will verify capturing
   1996         // has ended before stopping itself.
   1997         _playing = false;
   1998         _doStop = true; // Signal to io proc to stop audio device
   1999         _critSect.Leave(); // Cannot be under lock, risk of deadlock
   2000         if (kEventTimeout == _stopEvent.Wait(2000))
   2001         {
   2002             CriticalSectionScoped critScoped(&_critSect);
   2003             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
   2004                          " Timed out stopping the render IOProc. "
   2005                          "We may have failed to detect a device removal.");
   2006 
   2007             // We assume capturing on a shared device has stopped as well if the
   2008             // IOProc times out.
   2009             WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID,
   2010                                                _deviceIOProcID));
   2011             WEBRTC_CA_LOG_WARN(AudioDeviceDestroyIOProcID(_outputDeviceID,
   2012                                                           _deviceIOProcID));
   2013         }
   2014         _critSect.Enter();
   2015         _doStop = false;
   2016         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
   2017                      "Playout stopped");
   2018     }
   2019 
   2020     // Setting this signal will allow the worker thread to be stopped.
   2021     AtomicSet32(&_renderDeviceIsAlive, 0);
   2022     _critSect.Leave();
   2023     if (_renderWorkerThread != NULL)
   2024     {
   2025         if (!_renderWorkerThread->Stop())
   2026         {
   2027             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
   2028                          " Timed out waiting for the render worker thread to "
   2029                          "stop.");
   2030         }
   2031     }
   2032     _critSect.Enter();
   2033 
   2034     WEBRTC_CA_LOG_WARN(AudioConverterDispose(_renderConverter));
   2035 
   2036     // Remove listeners.
   2037     AudioObjectPropertyAddress propertyAddress = {
   2038             kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeOutput,
   2039             0 };
   2040     WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_outputDeviceID,
   2041             &propertyAddress, &objectListenerProc, this));
   2042 
   2043     propertyAddress.mSelector = kAudioDeviceProcessorOverload;
   2044     WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_outputDeviceID,
   2045             &propertyAddress, &objectListenerProc, this));
   2046 
   2047     if (_macBookPro)
   2048     {
   2049         Boolean hasProperty = AudioObjectHasProperty(_outputDeviceID,
   2050                                                      &propertyAddress);
   2051         if (hasProperty)
   2052         {
   2053             propertyAddress.mSelector = kAudioDevicePropertyDataSource;
   2054             WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_outputDeviceID,
   2055                     &propertyAddress, &objectListenerProc, this));
   2056         }
   2057     }
   2058 
   2059     _playIsInitialized = false;
   2060     _playing = false;
   2061 
   2062     return 0;
   2063 }
   2064 
   2065 int32_t AudioDeviceMac::PlayoutDelay(uint16_t& delayMS) const
   2066 {
   2067     int32_t renderDelayUs = AtomicGet32(&_renderDelayUs);
   2068     delayMS = static_cast<uint16_t> (1e-3 * (renderDelayUs + _renderLatencyUs) +
   2069                                      0.5);
   2070     return 0;
   2071 }
   2072 
   2073 int32_t AudioDeviceMac::RecordingDelay(uint16_t& delayMS) const
   2074 {
   2075     int32_t captureDelayUs = AtomicGet32(&_captureDelayUs);
   2076     delayMS = static_cast<uint16_t> (1e-3 * (captureDelayUs +
   2077                                              _captureLatencyUs) + 0.5);
   2078     return 0;
   2079 }
   2080 
   2081 bool AudioDeviceMac::Playing() const
   2082 {
   2083     return (_playing);
   2084 }
   2085 
   2086 int32_t AudioDeviceMac::SetPlayoutBuffer(
   2087     const AudioDeviceModule::BufferType type,
   2088     uint16_t sizeMS)
   2089 {
   2090 
   2091     if (type != AudioDeviceModule::kFixedBufferSize)
   2092     {
   2093         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
   2094                      " Adaptive buffer size not supported on this platform");
   2095         return -1;
   2096     }
   2097 
   2098     _playBufType = type;
   2099     _playBufDelayFixed = sizeMS;
   2100     return 0;
   2101 }
   2102 
   2103 int32_t AudioDeviceMac::PlayoutBuffer(
   2104     AudioDeviceModule::BufferType& type,
   2105     uint16_t& sizeMS) const
   2106 {
   2107 
   2108     type = _playBufType;
   2109     sizeMS = _playBufDelayFixed;
   2110 
   2111     return 0;
   2112 }
   2113 
   2114 // Not implemented for Mac.
   2115 int32_t AudioDeviceMac::CPULoad(uint16_t& /*load*/) const
   2116 {
   2117 
   2118     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
   2119                  "  API call not supported on this platform");
   2120 
   2121     return -1;
   2122 }
   2123 
   2124 bool AudioDeviceMac::PlayoutWarning() const
   2125 {
   2126     return (_playWarning > 0);
   2127 }
   2128 
   2129 bool AudioDeviceMac::PlayoutError() const
   2130 {
   2131     return (_playError > 0);
   2132 }
   2133 
   2134 bool AudioDeviceMac::RecordingWarning() const
   2135 {
   2136     return (_recWarning > 0);
   2137 }
   2138 
   2139 bool AudioDeviceMac::RecordingError() const
   2140 {
   2141     return (_recError > 0);
   2142 }
   2143 
   2144 void AudioDeviceMac::ClearPlayoutWarning()
   2145 {
   2146     _playWarning = 0;
   2147 }
   2148 
   2149 void AudioDeviceMac::ClearPlayoutError()
   2150 {
   2151     _playError = 0;
   2152 }
   2153 
   2154 void AudioDeviceMac::ClearRecordingWarning()
   2155 {
   2156     _recWarning = 0;
   2157 }
   2158 
   2159 void AudioDeviceMac::ClearRecordingError()
   2160 {
   2161     _recError = 0;
   2162 }
   2163 
   2164 // ============================================================================
   2165 //                                 Private Methods
   2166 // ============================================================================
   2167 
   2168 int32_t
   2169 AudioDeviceMac::GetNumberDevices(const AudioObjectPropertyScope scope,
   2170                                  AudioDeviceID scopedDeviceIds[],
   2171                                  const uint32_t deviceListLength)
   2172 {
   2173     OSStatus err = noErr;
   2174 
   2175     AudioObjectPropertyAddress propertyAddress = {
   2176             kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal,
   2177             kAudioObjectPropertyElementMaster };
   2178     UInt32 size = 0;
   2179     WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyDataSize(kAudioObjectSystemObject,
   2180             &propertyAddress, 0, NULL, &size));
   2181     if (size == 0)
   2182     {
   2183         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
   2184                      "No devices");
   2185         return 0;
   2186     }
   2187 
   2188     AudioDeviceID* deviceIds = (AudioDeviceID*) malloc(size);
   2189     UInt32 numberDevices = size / sizeof(AudioDeviceID);
   2190     AudioBufferList* bufferList = NULL;
   2191     UInt32 numberScopedDevices = 0;
   2192 
   2193     // First check if there is a default device and list it
   2194     UInt32 hardwareProperty = 0;
   2195     if (scope == kAudioDevicePropertyScopeOutput)
   2196     {
   2197         hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice;
   2198     } else
   2199     {
   2200         hardwareProperty = kAudioHardwarePropertyDefaultInputDevice;
   2201     }
   2202 
   2203     AudioObjectPropertyAddress
   2204         propertyAddressDefault = { hardwareProperty,
   2205                 kAudioObjectPropertyScopeGlobal,
   2206                 kAudioObjectPropertyElementMaster };
   2207 
   2208     AudioDeviceID usedID;
   2209     UInt32 uintSize = sizeof(UInt32);
   2210     WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
   2211             &propertyAddressDefault, 0, NULL, &uintSize, &usedID));
   2212     if (usedID != kAudioDeviceUnknown)
   2213     {
   2214         scopedDeviceIds[numberScopedDevices] = usedID;
   2215         numberScopedDevices++;
   2216     } else
   2217     {
   2218         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
   2219                      "GetNumberDevices(): Default device unknown");
   2220     }
   2221 
   2222     // Then list the rest of the devices
   2223     bool listOK = true;
   2224 
   2225     WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
   2226             &propertyAddress, 0, NULL, &size, deviceIds));
   2227     if (err != noErr)
   2228     {
   2229         listOK = false;
   2230     } else
   2231     {
   2232         propertyAddress.mSelector = kAudioDevicePropertyStreamConfiguration;
   2233         propertyAddress.mScope = scope;
   2234         propertyAddress.mElement = 0;
   2235         for (UInt32 i = 0; i < numberDevices; i++)
   2236         {
   2237             // Check for input channels
   2238             WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyDataSize(deviceIds[i],
   2239                     &propertyAddress, 0, NULL, &size));
   2240             if (err == kAudioHardwareBadDeviceError)
   2241             {
   2242                 // This device doesn't actually exist; continue iterating.
   2243                 continue;
   2244             } else if (err != noErr)
   2245             {
   2246                 listOK = false;
   2247                 break;
   2248             }
   2249 
   2250             bufferList = (AudioBufferList*) malloc(size);
   2251             WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(deviceIds[i],
   2252                     &propertyAddress, 0, NULL, &size, bufferList));
   2253             if (err != noErr)
   2254             {
   2255                 listOK = false;
   2256                 break;
   2257             }
   2258 
   2259             if (bufferList->mNumberBuffers > 0)
   2260             {
   2261                 if (numberScopedDevices >= deviceListLength)
   2262                 {
   2263                     WEBRTC_TRACE(kTraceError,
   2264                                  kTraceAudioDevice, _id,
   2265                                  "Device list is not long enough");
   2266                     listOK = false;
   2267                     break;
   2268                 }
   2269 
   2270                 scopedDeviceIds[numberScopedDevices] = deviceIds[i];
   2271                 numberScopedDevices++;
   2272             }
   2273 
   2274             free(bufferList);
   2275             bufferList = NULL;
   2276         }  // for
   2277     }
   2278 
   2279     if (!listOK)
   2280     {
   2281         if (deviceIds)
   2282         {
   2283             free(deviceIds);
   2284             deviceIds = NULL;
   2285         }
   2286 
   2287         if (bufferList)
   2288         {
   2289             free(bufferList);
   2290             bufferList = NULL;
   2291         }
   2292 
   2293         return -1;
   2294     }
   2295 
   2296     // Happy ending
   2297     if (deviceIds)
   2298     {
   2299         free(deviceIds);
   2300         deviceIds = NULL;
   2301     }
   2302 
   2303     return numberScopedDevices;
   2304 }
   2305 
   2306 int32_t
   2307 AudioDeviceMac::GetDeviceName(const AudioObjectPropertyScope scope,
   2308                               const uint16_t index,
   2309                               char* name)
   2310 {
   2311     OSStatus err = noErr;
   2312     UInt32 len = kAdmMaxDeviceNameSize;
   2313     AudioDeviceID deviceIds[MaxNumberDevices];
   2314 
   2315     int numberDevices = GetNumberDevices(scope, deviceIds, MaxNumberDevices);
   2316     if (numberDevices < 0)
   2317     {
   2318         return -1;
   2319     } else if (numberDevices == 0)
   2320     {
   2321         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
   2322                      "No devices");
   2323         return -1;
   2324     }
   2325 
   2326     // If the number is below the number of devices, assume it's "WEBRTC ID"
   2327     // otherwise assume it's a CoreAudio ID
   2328     AudioDeviceID usedID;
   2329 
   2330     // Check if there is a default device
   2331     bool isDefaultDevice = false;
   2332     if (index == 0)
   2333     {
   2334         UInt32 hardwareProperty = 0;
   2335         if (scope == kAudioDevicePropertyScopeOutput)
   2336         {
   2337             hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice;
   2338         } else
   2339         {
   2340             hardwareProperty = kAudioHardwarePropertyDefaultInputDevice;
   2341         }
   2342         AudioObjectPropertyAddress propertyAddress = { hardwareProperty,
   2343                 kAudioObjectPropertyScopeGlobal,
   2344                 kAudioObjectPropertyElementMaster };
   2345         UInt32 size = sizeof(UInt32);
   2346         WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
   2347                 &propertyAddress, 0, NULL, &size, &usedID));
   2348         if (usedID == kAudioDeviceUnknown)
   2349         {
   2350             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
   2351                          "GetDeviceName(): Default device unknown");
   2352         } else
   2353         {
   2354             isDefaultDevice = true;
   2355         }
   2356     }
   2357 
   2358     AudioObjectPropertyAddress propertyAddress = {
   2359             kAudioDevicePropertyDeviceName, scope, 0 };
   2360 
   2361     if (isDefaultDevice)
   2362     {
   2363         char devName[len];
   2364 
   2365         WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(usedID,
   2366                 &propertyAddress, 0, NULL, &len, devName));
   2367 
   2368         sprintf(name, "default (%s)", devName);
   2369     } else
   2370     {
   2371         if (index < numberDevices)
   2372         {
   2373             usedID = deviceIds[index];
   2374         } else
   2375         {
   2376             usedID = index;
   2377         }
   2378 
   2379         WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(usedID,
   2380                 &propertyAddress, 0, NULL, &len, name));
   2381     }
   2382 
   2383     return 0;
   2384 }
   2385 
   2386 int32_t AudioDeviceMac::InitDevice(const uint16_t userDeviceIndex,
   2387                                    AudioDeviceID& deviceId,
   2388                                    const bool isInput)
   2389 {
   2390     OSStatus err = noErr;
   2391     UInt32 size = 0;
   2392     AudioObjectPropertyScope deviceScope;
   2393     AudioObjectPropertySelector defaultDeviceSelector;
   2394     AudioDeviceID deviceIds[MaxNumberDevices];
   2395 
   2396     if (isInput)
   2397     {
   2398         deviceScope = kAudioDevicePropertyScopeInput;
   2399         defaultDeviceSelector = kAudioHardwarePropertyDefaultInputDevice;
   2400     } else
   2401     {
   2402         deviceScope = kAudioDevicePropertyScopeOutput;
   2403         defaultDeviceSelector = kAudioHardwarePropertyDefaultOutputDevice;
   2404     }
   2405 
   2406     AudioObjectPropertyAddress
   2407         propertyAddress = { defaultDeviceSelector,
   2408                 kAudioObjectPropertyScopeGlobal,
   2409                 kAudioObjectPropertyElementMaster };
   2410 
   2411     // Get the actual device IDs
   2412     int numberDevices = GetNumberDevices(deviceScope, deviceIds,
   2413                                          MaxNumberDevices);
   2414     if (numberDevices < 0)
   2415     {
   2416         return -1;
   2417     } else if (numberDevices == 0)
   2418     {
   2419         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
   2420                      "InitDevice(): No devices");
   2421         return -1;
   2422     }
   2423 
   2424     bool isDefaultDevice = false;
   2425     deviceId = kAudioDeviceUnknown;
   2426     if (userDeviceIndex == 0)
   2427     {
   2428         // Try to use default system device
   2429         size = sizeof(AudioDeviceID);
   2430         WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
   2431                 &propertyAddress, 0, NULL, &size, &deviceId));
   2432         if (deviceId == kAudioDeviceUnknown)
   2433         {
   2434             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
   2435                          " No default device exists");
   2436         } else
   2437         {
   2438             isDefaultDevice = true;
   2439         }
   2440     }
   2441 
   2442     if (!isDefaultDevice)
   2443     {
   2444         deviceId = deviceIds[userDeviceIndex];
   2445     }
   2446 
   2447     // Obtain device name and manufacturer for logging.
   2448     // Also use this as a test to ensure a user-set device ID is valid.
   2449     char devName[128];
   2450     char devManf[128];
   2451     memset(devName, 0, sizeof(devName));
   2452     memset(devManf, 0, sizeof(devManf));
   2453 
   2454     propertyAddress.mSelector = kAudioDevicePropertyDeviceName;
   2455     propertyAddress.mScope = deviceScope;
   2456     propertyAddress.mElement = 0;
   2457     size = sizeof(devName);
   2458     WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId,
   2459             &propertyAddress, 0, NULL, &size, devName));
   2460 
   2461     propertyAddress.mSelector = kAudioDevicePropertyDeviceManufacturer;
   2462     size = sizeof(devManf);
   2463     WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId,
   2464             &propertyAddress, 0, NULL, &size, devManf));
   2465 
   2466     if (isInput)
   2467     {
   2468         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
   2469                      " Input device: %s %s", devManf, devName);
   2470     } else
   2471     {
   2472         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
   2473                      " Output device: %s %s", devManf, devName);
   2474     }
   2475 
   2476     return 0;
   2477 }
   2478 
   2479 OSStatus AudioDeviceMac::objectListenerProc(
   2480     AudioObjectID objectId,
   2481     UInt32 numberAddresses,
   2482     const AudioObjectPropertyAddress addresses[],
   2483     void* clientData)
   2484 {
   2485     AudioDeviceMac *ptrThis = (AudioDeviceMac *) clientData;
   2486     assert(ptrThis != NULL);
   2487 
   2488     ptrThis->implObjectListenerProc(objectId, numberAddresses, addresses);
   2489 
   2490     // AudioObjectPropertyListenerProc functions are supposed to return 0
   2491     return 0;
   2492 }
   2493 
   2494 OSStatus AudioDeviceMac::implObjectListenerProc(
   2495     const AudioObjectID objectId,
   2496     const UInt32 numberAddresses,
   2497     const AudioObjectPropertyAddress addresses[])
   2498 {
   2499     WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
   2500                  "AudioDeviceMac::implObjectListenerProc()");
   2501 
   2502     for (UInt32 i = 0; i < numberAddresses; i++)
   2503     {
   2504         if (addresses[i].mSelector == kAudioHardwarePropertyDevices)
   2505         {
   2506             HandleDeviceChange();
   2507         } else if (addresses[i].mSelector == kAudioDevicePropertyStreamFormat)
   2508         {
   2509             HandleStreamFormatChange(objectId, addresses[i]);
   2510         } else if (addresses[i].mSelector == kAudioDevicePropertyDataSource)
   2511         {
   2512             HandleDataSourceChange(objectId, addresses[i]);
   2513         } else if (addresses[i].mSelector == kAudioDeviceProcessorOverload)
   2514         {
   2515             HandleProcessorOverload(addresses[i]);
   2516         }
   2517     }
   2518 
   2519     return 0;
   2520 }
   2521 
// Invoked (via implObjectListenerProc) when kAudioHardwarePropertyDevices
// fires, i.e. the set of audio devices changed. Probes both registered
// devices for kAudioDevicePropertyDeviceIsAlive; a dead/removed device has
// its alive flag cleared atomically (so the IOProc and worker threads back
// off), its mixer side closed, and an error flag raised that the module
// process thread reports via callback. Returns 0, or -1 if the liveness
// query itself fails with an unexpected error.
int32_t AudioDeviceMac::HandleDeviceChange()
{
    OSStatus err = noErr;

    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 "kAudioHardwarePropertyDevices");

    // A device has changed. Check if our registered devices have been removed.
    // Ensure the devices have been initialized, meaning the IDs are valid.
    if (MicrophoneIsInitialized())
    {
        AudioObjectPropertyAddress propertyAddress = {
                kAudioDevicePropertyDeviceIsAlive,
                kAudioDevicePropertyScopeInput, 0 };
        UInt32 deviceIsAlive = 1;
        UInt32 size = sizeof(UInt32);
        // Deliberately not using WEBRTC_CA_RETURN_ON_ERR here:
        // kAudioHardwareBadDeviceError is an expected outcome (device gone)
        // and must be handled, not treated as a fatal error.
        err = AudioObjectGetPropertyData(_inputDeviceID, &propertyAddress, 0,
                                         NULL, &size, &deviceIsAlive);

        if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                         "Capture device is not alive (probably removed)");
            AtomicSet32(&_captureDeviceIsAlive, 0);
            _mixerManager.CloseMicrophone();
            if (_recError == 1)
            {
                // A previous error was never consumed by the process thread.
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
                             _id, "  pending recording error exists");
            }
            _recError = 1; // triggers callback from module process thread
        } else if (err != noErr)
        {
            logCAMsg(kTraceError, kTraceAudioDevice, _id,
                     "Error in AudioDeviceGetProperty()", (const char*) &err);
            return -1;
        }
    }

    // Same liveness check for the render device.
    if (SpeakerIsInitialized())
    {
        AudioObjectPropertyAddress propertyAddress = {
                kAudioDevicePropertyDeviceIsAlive,
                kAudioDevicePropertyScopeOutput, 0 };
        UInt32 deviceIsAlive = 1;
        UInt32 size = sizeof(UInt32);
        err = AudioObjectGetPropertyData(_outputDeviceID, &propertyAddress, 0,
                                         NULL, &size, &deviceIsAlive);

        if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                         "Render device is not alive (probably removed)");
            AtomicSet32(&_renderDeviceIsAlive, 0);
            _mixerManager.CloseSpeaker();
            if (_playError == 1)
            {
                // A previous error was never consumed by the process thread.
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
                             _id, "  pending playout error exists");
            }
            _playError = 1; // triggers callback from module process thread
        } else if (err != noErr)
        {
            logCAMsg(kTraceError, kTraceAudioDevice, _id,
                     "Error in AudioDeviceGetProperty()", (const char*) &err);
            return -1;
        }
    }

    return 0;
}
   2593 
// Handles a stream-format change notification for either the capture or the
// render device. Reads the device's new AudioStreamBasicDescription,
// validates it (linear PCM, channel count within N_DEVICE_CHANNELS), then
// updates the cached format, the mono/stereo selection, the AudioDeviceBuffer
// parameters and recreates the matching AudioConverter.
// Returns 0 on success (or when objectId is not one of our devices) and -1
// on an unacceptable format or a CoreAudio error.
int32_t AudioDeviceMac::HandleStreamFormatChange(
    const AudioObjectID objectId,
    const AudioObjectPropertyAddress propertyAddress)
{
    OSStatus err = noErr;

    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 "Stream format changed");

    // Ignore notifications for devices we are not currently using.
    if (objectId != _inputDeviceID && objectId != _outputDeviceID)
    {
        return 0;
    }

    // Get the new device format
    AudioStreamBasicDescription streamFormat;
    UInt32 size = sizeof(streamFormat);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(objectId,
            &propertyAddress, 0, NULL, &size, &streamFormat));

    // Only linear PCM device formats are supported.
    if (streamFormat.mFormatID != kAudioFormatLinearPCM)
    {
        logCAMsg(kTraceError, kTraceAudioDevice, _id,
                 "Unacceptable input stream format -> mFormatID",
                 (const char *) &streamFormat.mFormatID);
        return -1;
    }

    if (streamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "Too many channels on device (mChannelsPerFrame = %d)",
                     streamFormat.mChannelsPerFrame);
        return -1;
    }

    // Dump the new format for debugging purposes.
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 "Stream format:");
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 "mSampleRate = %f, mChannelsPerFrame = %u",
                 streamFormat.mSampleRate, streamFormat.mChannelsPerFrame);
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 "mBytesPerPacket = %u, mFramesPerPacket = %u",
                 streamFormat.mBytesPerPacket, streamFormat.mFramesPerPacket);
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 "mBytesPerFrame = %u, mBitsPerChannel = %u",
                 streamFormat.mBytesPerFrame, streamFormat.mBitsPerChannel);
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 "mFormatFlags = %u",
                 streamFormat.mFormatFlags);
    logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID",
             (const char *) &streamFormat.mFormatID);

    if (propertyAddress.mScope == kAudioDevicePropertyScopeInput)
    {
        // Reject formats whose IO block would not fit in the capture ring
        // buffer (sample count per N_BLOCKS_IO 10 ms blocks at the new rate).
        const int io_block_size_samples = streamFormat.mChannelsPerFrame *
            streamFormat.mSampleRate / 100 * N_BLOCKS_IO;
        if (io_block_size_samples > _captureBufSizeSamples)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                "Input IO block size (%d) is larger than ring buffer (%u)",
                io_block_size_samples, _captureBufSizeSamples);
            return -1;

        }

        memcpy(&_inStreamFormat, &streamFormat, sizeof(streamFormat));

        if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2))
        {
            _inDesiredFormat.mChannelsPerFrame = 2;
        } else
        {
            // Disable stereo recording when we only have one channel on the device.
            _inDesiredFormat.mChannelsPerFrame = 1;
            _recChannels = 1;
            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                         "Stereo recording unavailable on this device");
        }

        if (_ptrAudioBuffer)
        {
            // Update audio buffer with the selected parameters
            _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
            _ptrAudioBuffer->SetRecordingChannels((uint8_t) _recChannels);
        }

        // Recreate the converter with the new format
        // TODO(xians): make this thread safe
        WEBRTC_CA_RETURN_ON_ERR(AudioConverterDispose(_captureConverter));

        WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&streamFormat, &_inDesiredFormat,
                &_captureConverter));
    } else
    {
        memcpy(&_outStreamFormat, &streamFormat, sizeof(streamFormat));

        if (_outStreamFormat.mChannelsPerFrame >= 2 && (_playChannels == 2))
        {
            _outDesiredFormat.mChannelsPerFrame = 2;
        } else
        {
            // Disable stereo playout when we only have one channel on the device.
            _outDesiredFormat.mChannelsPerFrame = 1;
            _playChannels = 1;
            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                         "Stereo playout unavailable on this device");
        }

        if (_ptrAudioBuffer)
        {
            // Update audio buffer with the selected parameters
            _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
            _ptrAudioBuffer->SetPlayoutChannels((uint8_t) _playChannels);
        }

        // Keep the render-delay baseline in sync with the new channel count:
        // the offset is the ring-buffer headroom beyond N_BUFFERS_OUT engine
        // buffers worth of samples.
        _renderDelayOffsetSamples = _renderBufSizeSamples - N_BUFFERS_OUT
            * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES
            * _outDesiredFormat.mChannelsPerFrame;

        // Recreate the converter with the new format
        // TODO(xians): make this thread safe
        WEBRTC_CA_RETURN_ON_ERR(AudioConverterDispose(_renderConverter));

        WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&_outDesiredFormat, &streamFormat,
                &_renderConverter));
    }

    return 0;
}
   2724 
// Handles a data-source change notification on the output device. Only acts
// on MacBook Pro hardware: when the active output data source is the internal
// speakers ('ispk'), _macBookProPanRight is set so the render path mixes all
// audio into the right channel (see RenderWorkerThread). NOTE(review): the
// underlying reason is presumably a hardware quirk of MacBook Pro internal
// speakers — confirm against hardware documentation.
// Returns 0 on success, -1 (via WEBRTC_CA_RETURN_ON_ERR) on CoreAudio errors.
int32_t AudioDeviceMac::HandleDataSourceChange(
    const AudioObjectID objectId,
    const AudioObjectPropertyAddress propertyAddress)
{
    OSStatus err = noErr;

    if (_macBookPro && propertyAddress.mScope
        == kAudioDevicePropertyScopeOutput)
    {
        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     "Data source changed");

        _macBookProPanRight = false;
        UInt32 dataSource = 0;
        UInt32 size = sizeof(UInt32);
        WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(objectId,
                &propertyAddress, 0, NULL, &size, &dataSource));
        // 'ispk' is the four-char code for the internal speaker data source.
        if (dataSource == 'ispk')
        {
            _macBookProPanRight = true;
            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                         "MacBook Pro using internal speakers; stereo panning right");
        } else
        {
            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                         "MacBook Pro not using internal speakers");
        }
    }

    return 0;
}
   2756 int32_t AudioDeviceMac::HandleProcessorOverload(
   2757     const AudioObjectPropertyAddress propertyAddress)
   2758 {
   2759     // TODO(xians): we probably want to notify the user in some way of the
   2760     // overload. However, the Windows interpretations of these errors seem to
   2761     // be more severe than what ProcessorOverload is thrown for.
   2762     //
   2763     // We don't log the notification, as it's sent from the HAL's IO thread. We
   2764     // don't want to slow it down even further.
   2765     if (propertyAddress.mScope == kAudioDevicePropertyScopeInput)
   2766     {
   2767         //WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "Capture processor
   2768         // overload");
   2769         //_callback->ProblemIsReported(
   2770         // SndCardStreamObserver::ERecordingProblem);
   2771     } else
   2772     {
   2773         //WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
   2774         // "Render processor overload");
   2775         //_callback->ProblemIsReported(
   2776         // SndCardStreamObserver::EPlaybackProblem);
   2777     }
   2778 
   2779     return 0;
   2780 }
   2781 
   2782 // ============================================================================
   2783 //                                  Thread Methods
   2784 // ============================================================================
   2785 
   2786 OSStatus AudioDeviceMac::deviceIOProc(AudioDeviceID, const AudioTimeStamp*,
   2787                                       const AudioBufferList* inputData,
   2788                                       const AudioTimeStamp* inputTime,
   2789                                       AudioBufferList* outputData,
   2790                                       const AudioTimeStamp* outputTime,
   2791                                       void *clientData)
   2792 {
   2793     AudioDeviceMac *ptrThis = (AudioDeviceMac *) clientData;
   2794     assert(ptrThis != NULL);
   2795 
   2796     ptrThis->implDeviceIOProc(inputData, inputTime, outputData, outputTime);
   2797 
   2798     // AudioDeviceIOProc functions are supposed to return 0
   2799     return 0;
   2800 }
   2801 
   2802 OSStatus AudioDeviceMac::outConverterProc(AudioConverterRef,
   2803                                           UInt32 *numberDataPackets,
   2804                                           AudioBufferList *data,
   2805                                           AudioStreamPacketDescription **,
   2806                                           void *userData)
   2807 {
   2808     AudioDeviceMac *ptrThis = (AudioDeviceMac *) userData;
   2809     assert(ptrThis != NULL);
   2810 
   2811     return ptrThis->implOutConverterProc(numberDataPackets, data);
   2812 }
   2813 
   2814 OSStatus AudioDeviceMac::inDeviceIOProc(AudioDeviceID, const AudioTimeStamp*,
   2815                                         const AudioBufferList* inputData,
   2816                                         const AudioTimeStamp* inputTime,
   2817                                         AudioBufferList*,
   2818                                         const AudioTimeStamp*, void* clientData)
   2819 {
   2820     AudioDeviceMac *ptrThis = (AudioDeviceMac *) clientData;
   2821     assert(ptrThis != NULL);
   2822 
   2823     ptrThis->implInDeviceIOProc(inputData, inputTime);
   2824 
   2825     // AudioDeviceIOProc functions are supposed to return 0
   2826     return 0;
   2827 }
   2828 
   2829 OSStatus AudioDeviceMac::inConverterProc(
   2830     AudioConverterRef,
   2831     UInt32 *numberDataPackets,
   2832     AudioBufferList *data,
   2833     AudioStreamPacketDescription ** /*dataPacketDescription*/,
   2834     void *userData)
   2835 {
   2836     AudioDeviceMac *ptrThis = static_cast<AudioDeviceMac*> (userData);
   2837     assert(ptrThis != NULL);
   2838 
   2839     return ptrThis->implInConverterProc(numberDataPackets, data);
   2840 }
   2841 
// Render (or shared-device) IOProc body; runs on the HAL's real-time IO
// thread. Drives capture too when input and output share one device,
// services deferred stop requests, fills |outputData| through the render
// converter from the render ring buffer, and publishes the current render
// delay estimate for the AEC.
// Returns 0 normally; 1 if the converter fill failed.
OSStatus AudioDeviceMac::implDeviceIOProc(const AudioBufferList *inputData,
                                          const AudioTimeStamp *inputTime,
                                          AudioBufferList *outputData,
                                          const AudioTimeStamp *outputTime)
{
    OSStatus err = noErr;
    UInt64 outputTimeNs = AudioConvertHostTimeToNanos(outputTime->mHostTime);
    UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());

    // On a shared device this single IOProc also delivers the capture side.
    if (!_twoDevices && _recording)
    {
        implInDeviceIOProc(inputData, inputTime);
    }

    // Check if we should close down audio device
    // Double-checked locking optimization to remove locking overhead
    if (_doStop)
    {
        _critSect.Enter();
        if (_doStop)
        {
            if (_twoDevices || (!_recording && !_playing))
            {
               // In the case of a shared device, the single driving ioProc
               // is stopped here
               WEBRTC_CA_LOG_ERR(AudioDeviceStop(_outputDeviceID,
                                                 _deviceIOProcID));
               WEBRTC_CA_LOG_WARN(AudioDeviceDestroyIOProcID(_outputDeviceID,
                                                             _deviceIOProcID));
               if (err == noErr)
               {
                  WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
                               _id, " Playout or shared device stopped");
               }
            }

            _doStop = false;
            // Wake the thread waiting in StopPlayout()/StopRecording().
            _stopEvent.Set();
            _critSect.Leave();
            return 0;
        }
        _critSect.Leave();
    }

    if (!_playing)
    {
        // This can be the case when a shared device is capturing but not
        // rendering. We allow the checks above before returning to avoid a
        // timeout when capturing is stopped.
        return 0;
    }

    assert(_outStreamFormat.mBytesPerFrame != 0);
    // Number of device frames the HAL is asking us to produce.
    UInt32 size = outputData->mBuffers->mDataByteSize
        / _outStreamFormat.mBytesPerFrame;

    // TODO(xians): signal an error somehow?
    // Pull converted audio from the render ring buffer (via outConverterProc)
    // directly into the HAL's output buffers.
    err = AudioConverterFillComplexBuffer(_renderConverter, outConverterProc,
                                          this, &size, outputData, NULL);
    if (err != noErr)
    {
        if (err == 1)
        {
            // This is our own error.
            // (implOutConverterProc returns 1 when semaphore signalling fails.)
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " Error in AudioConverterFillComplexBuffer()");
            return 1;
        } else
        {
            logCAMsg(kTraceError, kTraceAudioDevice, _id,
                     "Error in AudioConverterFillComplexBuffer()",
                     (const char *) &err);
            return 1;
        }
    }

    ring_buffer_size_t bufSizeSamples =
        PaUtil_GetRingBufferReadAvailable(_paRenderBuffer);

    // Render delay = time until this buffer is played out, plus the audio
    // still queued in the ring buffer (samples -> microseconds).
    int32_t renderDelayUs = static_cast<int32_t> (1e-3 * (outputTimeNs - nowNs)
        + 0.5);
    renderDelayUs += static_cast<int32_t> ((1.0e6 * bufSizeSamples)
        / _outDesiredFormat.mChannelsPerFrame / _outDesiredFormat.mSampleRate
        + 0.5);

    AtomicSet32(&_renderDelayUs, renderDelayUs);

    return 0;
}
   2931 
// AudioConverter input callback for the render path: supplies the converter
// with *numberDataPackets packets of desired-format audio read from the
// render ring buffer, then signals the render worker thread that space has
// been freed. Runs on the HAL IO thread (via implDeviceIOProc).
// Returns 0 on success, 1 if signalling the render semaphore fails.
OSStatus AudioDeviceMac::implOutConverterProc(UInt32 *numberDataPackets,
                                              AudioBufferList *data)
{
    assert(data->mNumberBuffers == 1);
    ring_buffer_size_t numSamples = *numberDataPackets
        * _outDesiredFormat.mChannelsPerFrame;

    data->mBuffers->mNumberChannels = _outDesiredFormat.mChannelsPerFrame;
    // Always give the converter as much as it wants, zero padding as required.
    data->mBuffers->mDataByteSize = *numberDataPackets
        * _outDesiredFormat.mBytesPerPacket;
    data->mBuffers->mData = _renderConvertData;
    // Pre-zero so any underrun (ring buffer short of numSamples) plays
    // silence rather than stale data.
    memset(_renderConvertData, 0, sizeof(_renderConvertData));

    PaUtil_ReadRingBuffer(_paRenderBuffer, _renderConvertData, numSamples);

    // Wake all waiters: the render worker blocks on this semaphore until
    // there is room to write another engine buffer.
    kern_return_t kernErr = semaphore_signal_all(_renderSemaphore);
    if (kernErr != KERN_SUCCESS)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " semaphore_signal_all() error: %d", kernErr);
        return 1;
    }

    return 0;
}
   2958 
// Capture IOProc body; runs on the HAL's real-time IO thread (directly for a
// dedicated input device, or via implDeviceIOProc for a shared device).
// Services deferred stop requests, publishes the capture delay estimate,
// writes the captured frames into the capture ring buffer and wakes the
// capture worker thread.
OSStatus AudioDeviceMac::implInDeviceIOProc(const AudioBufferList *inputData,
                                            const AudioTimeStamp *inputTime)
{
    OSStatus err = noErr;
    UInt64 inputTimeNs = AudioConvertHostTimeToNanos(inputTime->mHostTime);
    UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());

    // Check if we should close down audio device
    // Double-checked locking optimization to remove locking overhead
    if (_doStopRec)
    {
        _critSect.Enter();
        if (_doStopRec)
        {
            // This will be signalled only when a shared device is not in use.
            WEBRTC_CA_LOG_ERR(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID));
            WEBRTC_CA_LOG_WARN(AudioDeviceDestroyIOProcID(_inputDeviceID,
                                                          _inDeviceIOProcID));
            if (err == noErr)
            {
                WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
                             _id, " Recording device stopped");
            }

            _doStopRec = false;
            // Wake the thread waiting in StopRecording().
            _stopEventRec.Set();
            _critSect.Leave();
            return 0;
        }
        _critSect.Leave();
    }

    if (!_recording)
    {
        // Allow above checks to avoid a timeout on stopping capture.
        return 0;
    }

    ring_buffer_size_t bufSizeSamples =
        PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer);

    // Capture delay = age of this input buffer plus the audio already queued
    // in the ring buffer (samples -> microseconds).
    int32_t captureDelayUs = static_cast<int32_t> (1e-3 * (nowNs - inputTimeNs)
        + 0.5);
    captureDelayUs
        += static_cast<int32_t> ((1.0e6 * bufSizeSamples)
            / _inStreamFormat.mChannelsPerFrame / _inStreamFormat.mSampleRate
            + 0.5);

    AtomicSet32(&_captureDelayUs, captureDelayUs);

    assert(inputData->mNumberBuffers == 1);
    // Convert the buffer's byte size to a sample count in device format.
    ring_buffer_size_t numSamples = inputData->mBuffers->mDataByteSize
        * _inStreamFormat.mChannelsPerFrame / _inStreamFormat.mBytesPerPacket;
    PaUtil_WriteRingBuffer(_paCaptureBuffer, inputData->mBuffers->mData,
                           numSamples);

    // Wake all waiters: the capture worker blocks on this semaphore until
    // enough samples are available to convert.
    kern_return_t kernErr = semaphore_signal_all(_captureSemaphore);
    if (kernErr != KERN_SUCCESS)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " semaphore_signal_all() error: %d", kernErr);
    }

    return err;
}
   3024 
// AudioConverter input callback for the capture path: blocks (on the capture
// worker thread) until the capture ring buffer holds enough device-format
// samples, then hands the converter a pointer directly into the ring buffer.
// Returns 0 on success, 1 (with *numberDataPackets = 0) to abort the
// conversion when the capture device has died.
OSStatus AudioDeviceMac::implInConverterProc(UInt32 *numberDataPackets,
                                             AudioBufferList *data)
{
    assert(data->mNumberBuffers == 1);
    ring_buffer_size_t numSamples = *numberDataPackets
        * _inStreamFormat.mChannelsPerFrame;

    while (PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer) < numSamples)
    {
        mach_timespec_t timeout;
        timeout.tv_sec = 0;
        // tv_nsec expects nanoseconds; TIMER_PERIOD_MS is assumed to hold a
        // nanosecond value despite the _MS suffix — confirm its definition.
        timeout.tv_nsec = TIMER_PERIOD_MS;

        kern_return_t kernErr = semaphore_timedwait(_captureSemaphore, timeout);
        if (kernErr == KERN_OPERATION_TIMED_OUT)
        {
            // On timeout, check whether the device disappeared while waiting.
            int32_t signal = AtomicGet32(&_captureDeviceIsAlive);
            if (signal == 0)
            {
                // The capture device is no longer alive; stop the worker thread.
                *numberDataPackets = 0;
                return 1;
            }
        } else if (kernErr != KERN_SUCCESS)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " semaphore_wait() error: %d", kernErr);
        }
    }

    // Pass the read pointer directly to the converter to avoid a memcpy.
    void* dummyPtr;
    ring_buffer_size_t dummySize;
    // Note: numSamples is updated to the size of the first contiguous
    // region, which may be less than requested near the buffer wrap.
    PaUtil_GetRingBufferReadRegions(_paCaptureBuffer, numSamples,
                                    &data->mBuffers->mData, &numSamples,
                                    &dummyPtr, &dummySize);
    PaUtil_AdvanceRingBufferReadIndex(_paCaptureBuffer, numSamples);

    data->mBuffers->mNumberChannels = _inStreamFormat.mChannelsPerFrame;
    *numberDataPackets = numSamples / _inStreamFormat.mChannelsPerFrame;
    data->mBuffers->mDataByteSize = *numberDataPackets
        * _inStreamFormat.mBytesPerPacket;

    return 0;
}
   3070 
   3071 bool AudioDeviceMac::RunRender(void* ptrThis)
   3072 {
   3073     return static_cast<AudioDeviceMac*> (ptrThis)->RenderWorkerThread();
   3074 }
   3075 
// Render worker loop body (called repeatedly by the thread wrapper). Waits
// until the render ring buffer has room for one engine buffer, pulls new PCM
// from the AudioDeviceBuffer, optionally pans it to the right channel for
// MacBook Pro internal speakers, and writes it to the ring buffer.
// Returns false to stop the thread (buffer invalid or device dead).
bool AudioDeviceMac::RenderWorkerThread()
{
    ring_buffer_size_t numSamples = ENGINE_PLAY_BUF_SIZE_IN_SAMPLES
        * _outDesiredFormat.mChannelsPerFrame;
    // Block until there is room for a full engine buffer, keeping
    // _renderDelayOffsetSamples of headroom in reserve.
    while (PaUtil_GetRingBufferWriteAvailable(_paRenderBuffer)
        - _renderDelayOffsetSamples < numSamples)
    {
        mach_timespec_t timeout;
        timeout.tv_sec = 0;
        // tv_nsec expects nanoseconds; TIMER_PERIOD_MS is assumed to hold a
        // nanosecond value despite the _MS suffix — confirm its definition.
        timeout.tv_nsec = TIMER_PERIOD_MS;

        kern_return_t kernErr = semaphore_timedwait(_renderSemaphore, timeout);
        if (kernErr == KERN_OPERATION_TIMED_OUT)
        {
            // On timeout, check whether the device disappeared while waiting.
            int32_t signal = AtomicGet32(&_renderDeviceIsAlive);
            if (signal == 0)
            {
                // The render device is no longer alive; stop the worker thread.
                return false;
            }
        } else if (kernErr != KERN_SUCCESS)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " semaphore_timedwait() error: %d", kernErr);
        }
    }

    // Worst-case engine buffer: presumably 2 channels * 16-bit samples
    // (4 bytes per sample frame) — confirm against GetPlayoutData contract.
    int8_t playBuffer[4 * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES];

    if (!_ptrAudioBuffer)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  capture AudioBuffer is invalid");
        return false;
    }

    // Ask for new PCM data to be played out using the AudioDeviceBuffer.
    uint32_t nSamples =
        _ptrAudioBuffer->RequestPlayoutData(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES);

    nSamples = _ptrAudioBuffer->GetPlayoutData(playBuffer);
    if (nSamples != ENGINE_PLAY_BUF_SIZE_IN_SAMPLES)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  invalid number of output samples(%d)", nSamples);
    }

    uint32_t nOutSamples = nSamples * _outDesiredFormat.mChannelsPerFrame;

    SInt16 *pPlayBuffer = (SInt16 *) &playBuffer;
    if (_macBookProPanRight && (_playChannels == 2))
    {
        // Mix entirely into the right channel and zero the left channel.
        SInt32 sampleInt32 = 0;
        for (uint32_t sampleIdx = 0; sampleIdx < nOutSamples; sampleIdx
            += 2)
        {
            // Average L+R in 32-bit to avoid intermediate overflow, then
            // clamp to the 16-bit range.
            sampleInt32 = pPlayBuffer[sampleIdx];
            sampleInt32 += pPlayBuffer[sampleIdx + 1];
            sampleInt32 /= 2;

            if (sampleInt32 > 32767)
            {
                sampleInt32 = 32767;
            } else if (sampleInt32 < -32768)
            {
                sampleInt32 = -32768;
            }

            pPlayBuffer[sampleIdx] = 0;
            pPlayBuffer[sampleIdx + 1] = static_cast<SInt16> (sampleInt32);
        }
    }

    PaUtil_WriteRingBuffer(_paRenderBuffer, pPlayBuffer, nOutSamples);

    return true;
}
   3154 
   3155 bool AudioDeviceMac::RunCapture(void* ptrThis)
   3156 {
   3157     return static_cast<AudioDeviceMac*> (ptrThis)->CaptureWorkerThread();
   3158 }
   3159 
// Capture worker loop body (called repeatedly by the thread wrapper). Pulls
// one engine buffer of converted audio from the capture ring buffer (via the
// capture converter, which blocks in implInConverterProc until enough data is
// available), attaches delay/mic-level/typing metadata, delivers it to the
// registered observer, and applies any AGC volume change it returns.
// Returns false to stop the thread (converter aborted or buffer invalid).
bool AudioDeviceMac::CaptureWorkerThread()
{
    OSStatus err = noErr;
    UInt32 noRecSamples = ENGINE_REC_BUF_SIZE_IN_SAMPLES
        * _inDesiredFormat.mChannelsPerFrame;
    // NOTE(review): variable-length array — a compiler extension in C++,
    // accepted by GCC/Clang which this platform code targets.
    SInt16 recordBuffer[noRecSamples];
    UInt32 size = ENGINE_REC_BUF_SIZE_IN_SAMPLES;

    AudioBufferList engineBuffer;
    engineBuffer.mNumberBuffers = 1; // Interleaved channels.
    engineBuffer.mBuffers->mNumberChannels = _inDesiredFormat.mChannelsPerFrame;
    engineBuffer.mBuffers->mDataByteSize = _inDesiredFormat.mBytesPerPacket
        * noRecSamples;
    engineBuffer.mBuffers->mData = recordBuffer;

    // Blocks inside inConverterProc until the ring buffer has enough samples.
    err = AudioConverterFillComplexBuffer(_captureConverter, inConverterProc,
                                          this, &size, &engineBuffer, NULL);
    if (err != noErr)
    {
        if (err == 1)
        {
            // This is our own error.
            // (implInConverterProc returns 1 when the device has died.)
            return false;
        } else
        {
            logCAMsg(kTraceError, kTraceAudioDevice, _id,
                     "Error in AudioConverterFillComplexBuffer()",
                     (const char *) &err);
            return false;
        }
    }

    // TODO(xians): what if the returned size is incorrect?
    if (size == ENGINE_REC_BUF_SIZE_IN_SAMPLES)
    {
        uint32_t currentMicLevel(0);
        uint32_t newMicLevel(0);
        int32_t msecOnPlaySide;
        int32_t msecOnRecordSide;

        // Snapshot the delay estimates published by the IO procs.
        int32_t captureDelayUs = AtomicGet32(&_captureDelayUs);
        int32_t renderDelayUs = AtomicGet32(&_renderDelayUs);

        // Total one-way delays in milliseconds (device delay + latency).
        msecOnPlaySide = static_cast<int32_t> (1e-3 * (renderDelayUs +
                                                       _renderLatencyUs) + 0.5);
        msecOnRecordSide = static_cast<int32_t> (1e-3 * (captureDelayUs +
                                                         _captureLatencyUs) +
                                                 0.5);

        if (!_ptrAudioBuffer)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  capture AudioBuffer is invalid");
            return false;
        }

        // store the recorded buffer (no action will be taken if the
        // #recorded samples is not a full buffer)
        _ptrAudioBuffer->SetRecordedBuffer((int8_t*) &recordBuffer,
                                           (uint32_t) size);

        if (AGC())
        {
            // store current mic level in the audio buffer if AGC is enabled
            if (MicrophoneVolume(currentMicLevel) == 0)
            {
                // this call does not affect the actual microphone volume
                _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
            }
        }

        _ptrAudioBuffer->SetVQEData(msecOnPlaySide, msecOnRecordSide, 0);

        _ptrAudioBuffer->SetTypingStatus(KeyPressed());

        // deliver recorded samples at specified sample rate, mic level etc.
        // to the observer using callback
        _ptrAudioBuffer->DeliverRecordedData();

        if (AGC())
        {
            newMicLevel = _ptrAudioBuffer->NewMicLevel();
            if (newMicLevel != 0)
            {
                // The VQE will only deliver non-zero microphone levels when
                // a change is needed.
                // Set this new mic level (received from the observer as return
                // value in the callback).
                WEBRTC_TRACE(kTraceStream, kTraceAudioDevice,
                             _id, "  AGC change of volume: old=%u => new=%u",
                             currentMicLevel, newMicLevel);
                if (SetMicrophoneVolume(newMicLevel) == -1)
                {
                    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                                 "  the required modification of the microphone "
                                 "volume failed");
                }
            }
        }
    }

    return true;
}
   3263 
   3264 bool AudioDeviceMac::KeyPressed() {
   3265   bool key_down = false;
   3266   // Loop through all Mac virtual key constant values.
   3267   for (unsigned int key_index = 0;
   3268                     key_index < ARRAY_SIZE(prev_key_state_);
   3269                     ++key_index) {
   3270     bool keyState = CGEventSourceKeyState(
   3271                              kCGEventSourceStateHIDSystemState,
   3272                              key_index);
   3273     // A false -> true change in keymap means a key is pressed.
   3274     key_down |= (keyState && !prev_key_state_[key_index]);
   3275     // Save current state.
   3276     prev_key_state_[key_index] = keyState;
   3277   }
   3278   return key_down;
   3279 }
   3280 }  // namespace webrtc
   3281