// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/audio/mac/audio_synchronized_mac.h"

#include <CoreServices/CoreServices.h>
#include <algorithm>

#include "base/basictypes.h"
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/mac/mac_logging.h"
#include "media/audio/mac/audio_manager_mac.h"
#include "media/base/channel_mixer.h"

namespace media {

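// Rough usage sketch (illustrative only; streams are normally created and
// released through the AudioManager, and the callback object comes from the
// client):
//
//   AudioSynchronizedStream* stream = new AudioSynchronizedStream(
//       manager, params, input_device_id, output_device_id);
//   if (stream->Open())
//     stream->Start(source_callback);
//   ...
//   stream->Stop();
//   stream->Close();  // Informs the manager, which may delete the stream.
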
static const int kHardwareBufferSize = 128;
static const int kFifoSize = 16384;

// TODO(crogers): handle the non-stereo case.
static const int kChannels = 2;

// This value was determined empirically for minimum latency while still
// guarding against FIFO under-runs.
static const int kBaseTargetFifoFrames = 256 + 64;

// If the input and output sample-rates don't match, we need to maintain an
// additional safety margin due to callback timing jitter and the varispeed
// buffering.  This value was empirically tuned.
static const int kAdditionalTargetFifoFrames = 128;
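
// Overview of the data flow implemented below: the input AUHAL captures
// audio from the input device and pushes it into |fifo_|.  The output unit
// pulls audio through a varispeed (rate-converting) unit, which in turn
// consumes frames from |fifo_|.  The varispeed playback rate is continuously
// adjusted so the FIFO level stays near |target_fifo_frames_|, absorbing
// clock drift and sample-rate differences between the two devices.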

static void ZeroBufferList(AudioBufferList* buffer_list) {
  for (size_t i = 0; i < buffer_list->mNumberBuffers; ++i)
    memset(buffer_list->mBuffers[i].mData,
           0,
           buffer_list->mBuffers[i].mDataByteSize);
}

static void WrapBufferList(AudioBufferList* buffer_list,
                           AudioBus* bus,
                           int frames) {
  DCHECK(buffer_list);
  DCHECK(bus);
  int channels = bus->channels();
  int buffer_list_channels = buffer_list->mNumberBuffers;

  // Copy pointers from AudioBufferList.
  int source_idx = 0;
  for (int i = 0; i < channels; ++i) {
    bus->SetChannelData(
        i, static_cast<float*>(buffer_list->mBuffers[source_idx].mData));

    // It's ok to pass in a |buffer_list| with fewer channels, in which
    // case we just duplicate the last channel.
    if (source_idx < buffer_list_channels - 1)
      ++source_idx;
  }

  // Finally set the actual length.
  bus->set_frames(frames);
}

AudioSynchronizedStream::AudioSynchronizedStream(
    AudioManagerMac* manager,
    const AudioParameters& params,
    AudioDeviceID input_id,
    AudioDeviceID output_id)
    : manager_(manager),
      params_(params),
      input_sample_rate_(0),
      output_sample_rate_(0),
      input_id_(input_id),
      output_id_(output_id),
      input_buffer_list_(NULL),
      fifo_(kChannels, kFifoSize),
      target_fifo_frames_(kBaseTargetFifoFrames),
      average_delta_(0.0),
      fifo_rate_compensation_(1.0),
      input_unit_(0),
      varispeed_unit_(0),
      output_unit_(0),
      first_input_time_(-1),
      is_running_(false),
      hardware_buffer_size_(kHardwareBufferSize),
      channels_(kChannels) {
  VLOG(1) << "AudioSynchronizedStream::AudioSynchronizedStream()";
}

AudioSynchronizedStream::~AudioSynchronizedStream() {
  DCHECK(!input_unit_);
  DCHECK(!output_unit_);
  DCHECK(!varispeed_unit_);
}

bool AudioSynchronizedStream::Open() {
  if (params_.channels() != kChannels) {
    LOG(ERROR) << "Only stereo output is currently supported.";
    return false;
  }

  // Create the input, output, and varispeed AudioUnits.
  OSStatus result = CreateAudioUnits();
  if (result != noErr) {
    LOG(ERROR) << "Cannot create AudioUnits.";
    return false;
  }

  result = SetupInput(input_id_);
  if (result != noErr) {
    LOG(ERROR) << "Error configuring input AudioUnit.";
    return false;
  }

  result = SetupOutput(output_id_);
  if (result != noErr) {
    LOG(ERROR) << "Error configuring output AudioUnit.";
    return false;
  }

  result = SetupCallbacks();
  if (result != noErr) {
    LOG(ERROR) << "Error setting up callbacks on AudioUnits.";
    return false;
  }

  result = SetupStreamFormats();
  if (result != noErr) {
    LOG(ERROR) << "Error configuring stream formats on AudioUnits.";
    return false;
  }

  AllocateInputData();

  // Final initialization of the AudioUnits.
  result = AudioUnitInitialize(input_unit_);
  if (result != noErr) {
    LOG(ERROR) << "Error initializing input AudioUnit.";
    return false;
  }

  result = AudioUnitInitialize(output_unit_);
  if (result != noErr) {
    LOG(ERROR) << "Error initializing output AudioUnit.";
    return false;
  }

  result = AudioUnitInitialize(varispeed_unit_);
  if (result != noErr) {
    LOG(ERROR) << "Error initializing varispeed AudioUnit.";
    return false;
  }

  if (input_sample_rate_ != output_sample_rate_) {
    // Add extra safety margin.
    target_fifo_frames_ += kAdditionalTargetFifoFrames;
  }

  // Buffer initial silence corresponding to target I/O buffering.
  fifo_.Clear();
  scoped_ptr<AudioBus> silence =
      AudioBus::Create(channels_, target_fifo_frames_);
  silence->Zero();
  fifo_.Push(silence.get());
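  // The FIFO now holds |target_fifo_frames_| frames of silence, so the first
  // few output callbacks have data to consume while the input side spins up.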

  return true;
}

void AudioSynchronizedStream::Close() {
  DCHECK(!is_running_);

  if (input_buffer_list_) {
    free(input_buffer_list_);
    input_buffer_list_ = 0;
    input_bus_.reset(NULL);
    wrapper_bus_.reset(NULL);
  }

  if (input_unit_) {
    AudioUnitUninitialize(input_unit_);
    CloseComponent(input_unit_);
  }

  if (output_unit_) {
    AudioUnitUninitialize(output_unit_);
    CloseComponent(output_unit_);
  }

  if (varispeed_unit_) {
    AudioUnitUninitialize(varispeed_unit_);
    CloseComponent(varispeed_unit_);
  }

  input_unit_ = NULL;
  output_unit_ = NULL;
  varispeed_unit_ = NULL;

  // Inform the audio manager that we have been closed. This can cause our
  // destruction.
  manager_->ReleaseOutputStream(this);
}

void AudioSynchronizedStream::Start(AudioSourceCallback* callback) {
  DCHECK(callback);
  DCHECK(input_unit_);
  DCHECK(output_unit_);
  DCHECK(varispeed_unit_);

  if (is_running_ || !input_unit_ || !output_unit_ || !varispeed_unit_)
    return;

  source_ = callback;

  // Reset state variables each time we Start().
  fifo_rate_compensation_ = 1.0;
  average_delta_ = 0.0;

  OSStatus result = noErr;

  if (!is_running_) {
    first_input_time_ = -1;

    result = AudioOutputUnitStart(input_unit_);
    OSSTATUS_DCHECK(result == noErr, result);

    if (result == noErr) {
      result = AudioOutputUnitStart(output_unit_);
      OSSTATUS_DCHECK(result == noErr, result);
    }
  }

  is_running_ = true;
}

void AudioSynchronizedStream::Stop() {
  OSStatus result = noErr;
  if (is_running_) {
    result = AudioOutputUnitStop(input_unit_);
    OSSTATUS_DCHECK(result == noErr, result);

    if (result == noErr) {
      result = AudioOutputUnitStop(output_unit_);
      OSSTATUS_DCHECK(result == noErr, result);
    }
  }

  if (result == noErr)
    is_running_ = false;
}

bool AudioSynchronizedStream::IsRunning() {
  return is_running_;
}

// TODO(crogers): implement - or remove from AudioOutputStream.
void AudioSynchronizedStream::SetVolume(double volume) {}
void AudioSynchronizedStream::GetVolume(double* volume) {}

OSStatus AudioSynchronizedStream::SetOutputDeviceAsCurrent(
    AudioDeviceID output_id) {
  OSStatus result = noErr;

  // Get the default output device if device is unknown.
  if (output_id == kAudioDeviceUnknown) {
    AudioObjectPropertyAddress pa;
    pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
    pa.mScope = kAudioObjectPropertyScopeGlobal;
    pa.mElement = kAudioObjectPropertyElementMaster;
    UInt32 size = sizeof(output_id);

    result = AudioObjectGetPropertyData(
        kAudioObjectSystemObject,
        &pa,
        0,
        0,
        &size,
        &output_id);

    OSSTATUS_DCHECK(result == noErr, result);
    if (result != noErr)
      return result;
  }

  // Set the device's I/O buffer size (in frames).
  UInt32 frame_size = hardware_buffer_size_;
  AudioObjectPropertyAddress pa;
  pa.mSelector = kAudioDevicePropertyBufferFrameSize;
  pa.mScope = kAudioDevicePropertyScopeInput;
  pa.mElement = kAudioObjectPropertyElementMaster;
  result = AudioObjectSetPropertyData(
      output_id,
      &pa,
      0,
      0,
      sizeof(frame_size),
      &frame_size);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  output_info_.Initialize(output_id, false);

  // Set the Current Device to the Default Output Unit.
  result = AudioUnitSetProperty(
      output_unit_,
      kAudioOutputUnitProperty_CurrentDevice,
      kAudioUnitScope_Global,
      0,
      &output_info_.id_,
      sizeof(output_info_.id_));

  OSSTATUS_DCHECK(result == noErr, result);
  return result;
}

OSStatus AudioSynchronizedStream::SetInputDeviceAsCurrent(
    AudioDeviceID input_id) {
  OSStatus result = noErr;

  // Get the default input device if device is unknown.
  if (input_id == kAudioDeviceUnknown) {
    AudioObjectPropertyAddress pa;
    pa.mSelector = kAudioHardwarePropertyDefaultInputDevice;
    pa.mScope = kAudioObjectPropertyScopeGlobal;
    pa.mElement = kAudioObjectPropertyElementMaster;
    UInt32 size = sizeof(input_id);

    result = AudioObjectGetPropertyData(
        kAudioObjectSystemObject,
        &pa,
        0,
        0,
        &size,
        &input_id);

    OSSTATUS_DCHECK(result == noErr, result);
    if (result != noErr)
      return result;
  }

  // Set the device's I/O buffer size (in frames).
  UInt32 frame_size = hardware_buffer_size_;
  AudioObjectPropertyAddress pa;
  pa.mSelector = kAudioDevicePropertyBufferFrameSize;
  pa.mScope = kAudioDevicePropertyScopeInput;
  pa.mElement = kAudioObjectPropertyElementMaster;
  result = AudioObjectSetPropertyData(
      input_id,
      &pa,
      0,
      0,
      sizeof(frame_size),
      &frame_size);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  input_info_.Initialize(input_id, true);

  // Set the Current Device to the AUHAL.
  // This should be done only after I/O has been enabled on the AUHAL.
  result = AudioUnitSetProperty(
      input_unit_,
      kAudioOutputUnitProperty_CurrentDevice,
      kAudioUnitScope_Global,
      0,
      &input_info_.id_,
      sizeof(input_info_.id_));

  OSSTATUS_DCHECK(result == noErr, result);
  return result;
}

OSStatus AudioSynchronizedStream::CreateAudioUnits() {
  // Q: Why do we need a varispeed unit?
  // A: If the input device and the output device are running at
  // different sample rates and/or on different clocks, we will need
  // to compensate to avoid a pitch change and to avoid buffer under- and
  // over-runs.
  ComponentDescription varispeed_desc;
  varispeed_desc.componentType = kAudioUnitType_FormatConverter;
  varispeed_desc.componentSubType = kAudioUnitSubType_Varispeed;
  varispeed_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  varispeed_desc.componentFlags = 0;
  varispeed_desc.componentFlagsMask = 0;

  Component varispeed_comp = FindNextComponent(NULL, &varispeed_desc);
  if (varispeed_comp == NULL)
    return -1;

  OSStatus result = OpenAComponent(varispeed_comp, &varispeed_unit_);
  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Open input AudioUnit.
  ComponentDescription input_desc;
  input_desc.componentType = kAudioUnitType_Output;
  input_desc.componentSubType = kAudioUnitSubType_HALOutput;
  input_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  input_desc.componentFlags = 0;
  input_desc.componentFlagsMask = 0;

  Component input_comp = FindNextComponent(NULL, &input_desc);
  if (input_comp == NULL)
    return -1;

  result = OpenAComponent(input_comp, &input_unit_);
  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Open output AudioUnit.
  ComponentDescription output_desc;
  output_desc.componentType = kAudioUnitType_Output;
  output_desc.componentSubType = kAudioUnitSubType_DefaultOutput;
  output_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  output_desc.componentFlags = 0;
  output_desc.componentFlagsMask = 0;

  Component output_comp = FindNextComponent(NULL, &output_desc);
  if (output_comp == NULL)
    return -1;

  result = OpenAComponent(output_comp, &output_unit_);
  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  return noErr;
}

OSStatus AudioSynchronizedStream::SetupInput(AudioDeviceID input_id) {
  // The AUHAL used for input needs to be initialized
  // before anything is done to it.
  OSStatus result = AudioUnitInitialize(input_unit_);
  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // We must enable the Audio Unit (AUHAL) for input and disable output
  // BEFORE setting the AUHAL's current device.
  result = EnableIO();
  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  result = SetInputDeviceAsCurrent(input_id);
  OSSTATUS_DCHECK(result == noErr, result);

  return result;
}

OSStatus AudioSynchronizedStream::EnableIO() {
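  // On the AUHAL, element 1 is the input (capture) bus and element 0 is the
  // output (render) bus; I/O is enabled or disabled per element.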
  // Enable input on the AUHAL.
  UInt32 enable_io = 1;
  OSStatus result = AudioUnitSetProperty(
      input_unit_,
      kAudioOutputUnitProperty_EnableIO,
      kAudioUnitScope_Input,
      1,  // input element
      &enable_io,
      sizeof(enable_io));

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Disable Output on the AUHAL.
  enable_io = 0;
  result = AudioUnitSetProperty(
      input_unit_,
      kAudioOutputUnitProperty_EnableIO,
      kAudioUnitScope_Output,
      0,  // output element
      &enable_io,
      sizeof(enable_io));

  OSSTATUS_DCHECK(result == noErr, result);
  return result;
}

OSStatus AudioSynchronizedStream::SetupOutput(AudioDeviceID output_id) {
  OSStatus result = noErr;

  result = SetOutputDeviceAsCurrent(output_id);
  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Tell the output unit not to reset timestamps.
  // Otherwise sample rate changes will cause sync loss.
  UInt32 start_at_zero = 0;
  result = AudioUnitSetProperty(
      output_unit_,
      kAudioOutputUnitProperty_StartTimestampsAtZero,
      kAudioUnitScope_Global,
      0,
      &start_at_zero,
      sizeof(start_at_zero));

  OSSTATUS_DCHECK(result == noErr, result);

  return result;
}

OSStatus AudioSynchronizedStream::SetupCallbacks() {
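  // Two callback styles are used here: the input AUHAL gets an input
  // callback (kAudioOutputUnitProperty_SetInputCallback), which fires when
  // captured data is available and is then pulled with AudioUnitRender();
  // the output and varispeed units get render callbacks
  // (kAudioUnitProperty_SetRenderCallback), which are asked to produce audio.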
  // Set the input callback.
  AURenderCallbackStruct callback;
  callback.inputProc = InputProc;
  callback.inputProcRefCon = this;
  OSStatus result = AudioUnitSetProperty(
      input_unit_,
      kAudioOutputUnitProperty_SetInputCallback,
      kAudioUnitScope_Global,
      0,
      &callback,
      sizeof(callback));

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Set the output callback.
  callback.inputProc = OutputProc;
  callback.inputProcRefCon = this;
  result = AudioUnitSetProperty(
      output_unit_,
      kAudioUnitProperty_SetRenderCallback,
      kAudioUnitScope_Input,
      0,
      &callback,
      sizeof(callback));

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Set the varispeed callback.
  callback.inputProc = VarispeedProc;
  callback.inputProcRefCon = this;
  result = AudioUnitSetProperty(
      varispeed_unit_,
      kAudioUnitProperty_SetRenderCallback,
      kAudioUnitScope_Input,
      0,
      &callback,
      sizeof(callback));

  OSSTATUS_DCHECK(result == noErr, result);

  return result;
}

OSStatus AudioSynchronizedStream::SetupStreamFormats() {
  AudioStreamBasicDescription asbd, asbd_dev1_in, asbd_dev2_out;

  // Get the stream format of the input device (hardware side of the
  // input AUHAL).
  UInt32 property_size = sizeof(asbd_dev1_in);
  OSStatus result = AudioUnitGetProperty(
      input_unit_,
      kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Input,
      1,
      &asbd_dev1_in,
      &property_size);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Get the client-side stream format of the input AUHAL.
  property_size = sizeof(asbd);
  result = AudioUnitGetProperty(
      input_unit_,
      kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Output,
      1,
      &asbd,
      &property_size);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Get the stream format of the output device (hardware side of the
  // output unit).
  property_size = sizeof(asbd_dev2_out);
  result = AudioUnitGetProperty(
      output_unit_,
      kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Output,
      0,
      &asbd_dev2_out,
      &property_size);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Use a single channel count throughout the AudioUnit chain.  For the
  // simple case handled here, use the lower of the input device's and the
  // output device's channel counts.
  asbd.mChannelsPerFrame = std::min(asbd_dev1_in.mChannelsPerFrame,
                                    asbd_dev2_out.mChannelsPerFrame);
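
  // |asbd| is used as the client-side format for the whole chain: below, it
  // is first given the input device's sample rate (and applied to the input
  // AUHAL's output scope and the varispeed input), and then the output
  // device's sample rate (and applied to the varispeed output and the output
  // unit's input scope).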

  // We must get the sample rate of the input device and set it to the
  // stream format of AUHAL.
  Float64 rate = 0;
  property_size = sizeof(rate);

  AudioObjectPropertyAddress pa;
  pa.mSelector = kAudioDevicePropertyNominalSampleRate;
  pa.mScope = kAudioObjectPropertyScopeWildcard;
  pa.mElement = kAudioObjectPropertyElementMaster;
  result = AudioObjectGetPropertyData(
      input_info_.id_,
      &pa,
      0,
      0,
      &property_size,
      &rate);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  input_sample_rate_ = rate;

  asbd.mSampleRate = rate;
  property_size = sizeof(asbd);

  // Set the new formats to the AUs...
  result = AudioUnitSetProperty(
      input_unit_,
      kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Output,
      1,
      &asbd,
      property_size);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  result = AudioUnitSetProperty(
      varispeed_unit_,
      kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Input,
      0,
      &asbd,
      property_size);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Set the correct sample rate for the output device,
  // but keep the channel count the same.
  property_size = sizeof(rate);

  pa.mSelector = kAudioDevicePropertyNominalSampleRate;
  pa.mScope = kAudioObjectPropertyScopeWildcard;
  pa.mElement = kAudioObjectPropertyElementMaster;
  result = AudioObjectGetPropertyData(
      output_info_.id_,
      &pa,
      0,
      0,
      &property_size,
      &rate);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  output_sample_rate_ = rate;

  // The requested sample-rate must match the hardware sample-rate.
  if (output_sample_rate_ != params_.sample_rate()) {
    LOG(ERROR) << "Requested sample-rate: " << params_.sample_rate()
        <<  " must match the hardware sample-rate: " << output_sample_rate_;
    return kAudioDeviceUnsupportedFormatError;
  }

  asbd.mSampleRate = rate;
  property_size = sizeof(asbd);

  // Set the new audio stream formats for the rest of the AUs...
  result = AudioUnitSetProperty(
      varispeed_unit_,
      kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Output,
      0,
      &asbd,
      property_size);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  result = AudioUnitSetProperty(
      output_unit_,
      kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Input,
      0,
      &asbd,
      property_size);

  OSSTATUS_DCHECK(result == noErr, result);
  return result;
}

void AudioSynchronizedStream::AllocateInputData() {
  // Get the native number of input channels that the hardware supports.
  int hardware_channels = 0;
  bool got_hardware_channels = AudioManagerMac::GetDeviceChannels(
      input_id_, kAudioDevicePropertyScopeInput, &hardware_channels);
  if (!got_hardware_channels || hardware_channels > 2) {
    // Only mono and stereo are supported on the input side.  If we fail to
    // get the native channel count, or it is greater than 2, open the device
    // in stereo mode.
    hardware_channels = 2;
  }

  // Allocate storage for the AudioBufferList used for the
  // input data from the input AudioUnit.
  // We allocate enough space for one AudioBuffer per channel.
  size_t malloc_size = offsetof(AudioBufferList, mBuffers[0]) +
      (sizeof(AudioBuffer) * hardware_channels);

  input_buffer_list_ = static_cast<AudioBufferList*>(malloc(malloc_size));
  input_buffer_list_->mNumberBuffers = hardware_channels;

  input_bus_ = AudioBus::Create(hardware_channels, hardware_buffer_size_);
  wrapper_bus_ = AudioBus::CreateWrapper(channels_);
  if (hardware_channels != params_.input_channels()) {
    ChannelLayout hardware_channel_layout =
        GuessChannelLayout(hardware_channels);
    ChannelLayout requested_channel_layout =
        GuessChannelLayout(params_.input_channels());
    channel_mixer_.reset(new ChannelMixer(hardware_channel_layout,
                                          requested_channel_layout));
    mixer_bus_ = AudioBus::Create(params_.input_channels(),
                                  hardware_buffer_size_);
  }
  // Point the AudioBufferList buffers at the deinterleaved channel memory
  // owned by |input_bus_|.
  UInt32 buffer_size_bytes = input_bus_->frames() * sizeof(Float32);
  for (size_t i = 0; i < input_buffer_list_->mNumberBuffers; ++i) {
    input_buffer_list_->mBuffers[i].mNumberChannels = 1;
    input_buffer_list_->mBuffers[i].mDataByteSize = buffer_size_bytes;
    input_buffer_list_->mBuffers[i].mData = input_bus_->channel(i);
  }
}

OSStatus AudioSynchronizedStream::HandleInputCallback(
    AudioUnitRenderActionFlags* io_action_flags,
    const AudioTimeStamp* time_stamp,
    UInt32 bus_number,
    UInt32 number_of_frames,
    AudioBufferList* io_data) {
  TRACE_EVENT0("audio", "AudioSynchronizedStream::HandleInputCallback");

  if (first_input_time_ < 0.0)
    first_input_time_ = time_stamp->mSampleTime;

  // Get the new audio input data.
  OSStatus result = AudioUnitRender(
      input_unit_,
      io_action_flags,
      time_stamp,
      bus_number,
      number_of_frames,
      input_buffer_list_);

  // TODO(xians): Add back the DCHECK after synchronized IO supports all
  // combinations of input and output params. See http://issue/246521.
  if (result != noErr)
    return result;

  // Buffer input into FIFO.
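  // If the FIFO is too full to accept a complete block (e.g. the output side
  // has stalled), the new input data is simply dropped.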
  int available_frames = fifo_.max_frames() - fifo_.frames();
  if (input_bus_->frames() <= available_frames) {
    if (channel_mixer_) {
      channel_mixer_->Transform(input_bus_.get(), mixer_bus_.get());
      fifo_.Push(mixer_bus_.get());
    } else {
      fifo_.Push(input_bus_.get());
    }
  }

  return result;
}

OSStatus AudioSynchronizedStream::HandleVarispeedCallback(
    AudioUnitRenderActionFlags* io_action_flags,
    const AudioTimeStamp* time_stamp,
    UInt32 bus_number,
    UInt32 number_of_frames,
    AudioBufferList* io_data) {
  // Create a wrapper bus on the AudioBufferList.
  WrapBufferList(io_data, wrapper_bus_.get(), number_of_frames);

  if (fifo_.frames() < static_cast<int>(number_of_frames)) {
    // We don't DCHECK here, since this is a possible run-time condition
    // if the machine is bogged down.
    wrapper_bus_->Zero();
    return noErr;
  }

  // Read from the FIFO to feed the varispeed.
  fifo_.Consume(wrapper_bus_.get(), 0, number_of_frames);
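  // The varispeed unit pulls this data at the playback rate set in
  // HandleOutputCallback(), so the FIFO drains slightly faster or slower than
  // the input side fills it, keeping its level near |target_fifo_frames_|.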

  return noErr;
}

OSStatus AudioSynchronizedStream::HandleOutputCallback(
    AudioUnitRenderActionFlags* io_action_flags,
    const AudioTimeStamp* time_stamp,
    UInt32 bus_number,
    UInt32 number_of_frames,
    AudioBufferList* io_data) {
  // Input callback hasn't run yet or we've suddenly changed sample-rates
  // -> silence.
  if (first_input_time_ < 0.0 ||
      static_cast<int>(number_of_frames) != params_.frames_per_buffer()) {
    ZeroBufferList(io_data);
    return noErr;
  }

  // Use the varispeed playback rate to offset small discrepancies
  // in hardware clocks, and also any differences in sample-rate
  // between input and output devices.

  // Calculate a varispeed rate scalar factor to compensate for drift between
  // input and output.  We use the actual number of frames still in the FIFO
  // compared with the ideal value of |target_fifo_frames_|.
  int delta = fifo_.frames() - target_fifo_frames_;

  // Average |delta| because it can jitter back/forth quite frequently
  // by +/- the hardware buffer-size *if* the input and output callbacks are
  // happening at almost exactly the same time.  Also, if the input and output
  // sample-rates are different then |delta| will jitter quite a bit due to
  // the rate conversion happening in the varispeed, plus the jittering of
  // the callbacks.  The average value is what's important here.
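  // The 0.1 coefficient below implements a simple one-pole (exponential)
  // moving average of |delta|.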
  average_delta_ += (delta - average_delta_) * 0.1;

  // Compute a rate compensation which always attracts us back to the
  // |target_fifo_frames_| over a period of kCorrectionTimeSeconds.
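  // For example, assuming a 48kHz output device: correction_time_frames is
  // 4800, and if the FIFO is running about 48 frames above target the
  // playback rate becomes (4800 + 48) / 4800 = 1.01, draining the surplus
  // over roughly kCorrectionTimeSeconds.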
  const double kCorrectionTimeSeconds = 0.1;
  double correction_time_frames = kCorrectionTimeSeconds * output_sample_rate_;
  fifo_rate_compensation_ =
      (correction_time_frames + average_delta_) / correction_time_frames;

  // Adjust for FIFO drift.
  OSStatus result = AudioUnitSetParameter(
      varispeed_unit_,
      kVarispeedParam_PlaybackRate,
      kAudioUnitScope_Global,
      0,
      fifo_rate_compensation_,
      0);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Render to the output using the varispeed.
  result = AudioUnitRender(
      varispeed_unit_,
      io_action_flags,
      time_stamp,
      0,
      number_of_frames,
      io_data);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Create a wrapper bus on the AudioBufferList.
  WrapBufferList(io_data, wrapper_bus_.get(), number_of_frames);

  // Process in-place!
  source_->OnMoreIOData(wrapper_bus_.get(),
                        wrapper_bus_.get(),
                        AudioBuffersState(0, 0));

  return noErr;
}

OSStatus AudioSynchronizedStream::InputProc(
    void* user_data,
    AudioUnitRenderActionFlags* io_action_flags,
    const AudioTimeStamp* time_stamp,
    UInt32 bus_number,
    UInt32 number_of_frames,
    AudioBufferList* io_data) {
  AudioSynchronizedStream* stream =
      static_cast<AudioSynchronizedStream*>(user_data);
  DCHECK(stream);

  return stream->HandleInputCallback(
      io_action_flags,
      time_stamp,
      bus_number,
      number_of_frames,
      io_data);
}

OSStatus AudioSynchronizedStream::VarispeedProc(
    void* user_data,
    AudioUnitRenderActionFlags* io_action_flags,
    const AudioTimeStamp* time_stamp,
    UInt32 bus_number,
    UInt32 number_of_frames,
    AudioBufferList* io_data) {
  AudioSynchronizedStream* stream =
      static_cast<AudioSynchronizedStream*>(user_data);
  DCHECK(stream);

  return stream->HandleVarispeedCallback(
      io_action_flags,
      time_stamp,
      bus_number,
      number_of_frames,
      io_data);
}

OSStatus AudioSynchronizedStream::OutputProc(
    void* user_data,
    AudioUnitRenderActionFlags* io_action_flags,
    const AudioTimeStamp* time_stamp,
    UInt32 bus_number,
    UInt32 number_of_frames,
    AudioBufferList* io_data) {
  AudioSynchronizedStream* stream =
      static_cast<AudioSynchronizedStream*>(user_data);
  DCHECK(stream);

  return stream->HandleOutputCallback(
      io_action_flags,
      time_stamp,
      bus_number,
      number_of_frames,
      io_data);
}

void AudioSynchronizedStream::AudioDeviceInfo::Initialize(
    AudioDeviceID id, bool is_input) {
  id_ = id;
  is_input_ = is_input;
  if (id_ == kAudioDeviceUnknown)
    return;

  UInt32 property_size = sizeof(buffer_size_frames_);

  AudioObjectPropertyAddress pa;
  pa.mSelector = kAudioDevicePropertyBufferFrameSize;
  pa.mScope = kAudioObjectPropertyScopeWildcard;
  pa.mElement = kAudioObjectPropertyElementMaster;
  OSStatus result = AudioObjectGetPropertyData(
      id_,
      &pa,
      0,
      0,
      &property_size,
      &buffer_size_frames_);

  OSSTATUS_DCHECK(result == noErr, result);
}

}  // namespace media