// Home | History | Annotate | Download | only in mac
      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "media/audio/mac/audio_unified_mac.h"
      6 
      7 #include <CoreServices/CoreServices.h>
      8 
      9 #include "base/basictypes.h"
     10 #include "base/logging.h"
     11 #include "base/mac/mac_logging.h"
     12 #include "media/audio/audio_util.h"
     13 #include "media/audio/mac/audio_manager_mac.h"
     14 
     15 namespace media {
     16 
     17 // TODO(crogers): support more than hard-coded stereo input.
     18 // Ideally we would like to receive this value as a constructor argument.
     19 static const int kDefaultInputChannels = 2;
     20 
     21 AudioHardwareUnifiedStream::AudioHardwareUnifiedStream(
     22     AudioManagerMac* manager, const AudioParameters& params)
     23     : manager_(manager),
     24       source_(NULL),
     25       client_input_channels_(kDefaultInputChannels),
     26       volume_(1.0f),
     27       input_channels_(0),
     28       output_channels_(0),
     29       input_channels_per_frame_(0),
     30       output_channels_per_frame_(0),
     31       io_proc_id_(0),
     32       device_(kAudioObjectUnknown),
     33       is_playing_(false) {
     34   DCHECK(manager_);
     35 
     36   // A frame is one sample across all channels. In interleaved audio the per
     37   // frame fields identify the set of n |channels|. In uncompressed audio, a
     38   // packet is always one frame.
     39   format_.mSampleRate = params.sample_rate();
     40   format_.mFormatID = kAudioFormatLinearPCM;
     41   format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
     42                          kLinearPCMFormatFlagIsSignedInteger;
     43   format_.mBitsPerChannel = params.bits_per_sample();
     44   format_.mChannelsPerFrame = params.channels();
     45   format_.mFramesPerPacket = 1;
     46   format_.mBytesPerPacket = (format_.mBitsPerChannel * params.channels()) / 8;
     47   format_.mBytesPerFrame = format_.mBytesPerPacket;
     48   format_.mReserved = 0;
     49 
     50   // Calculate the number of sample frames per callback.
     51   number_of_frames_ = params.GetBytesPerBuffer() / format_.mBytesPerPacket;
     52 
     53   input_bus_ = AudioBus::Create(client_input_channels_,
     54                                 params.frames_per_buffer());
     55   output_bus_ = AudioBus::Create(params);
     56 }
     57 
AudioHardwareUnifiedStream::~AudioHardwareUnifiedStream() {
  // Close() must run before destruction; it resets |device_| to
  // kAudioObjectUnknown after tearing down the IOProc.
  DCHECK_EQ(device_, kAudioObjectUnknown);
}
     61 
     62 bool AudioHardwareUnifiedStream::Open() {
     63   // Obtain the current output device selected by the user.
     64   AudioObjectPropertyAddress pa;
     65   pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
     66   pa.mScope = kAudioObjectPropertyScopeGlobal;
     67   pa.mElement = kAudioObjectPropertyElementMaster;
     68 
     69   UInt32 size = sizeof(device_);
     70 
     71   OSStatus result = AudioObjectGetPropertyData(
     72       kAudioObjectSystemObject,
     73       &pa,
     74       0,
     75       0,
     76       &size,
     77       &device_);
     78 
     79   if ((result != kAudioHardwareNoError) || (device_ == kAudioDeviceUnknown)) {
     80     LOG(ERROR) << "Cannot open unified AudioDevice.";
     81     return false;
     82   }
     83 
     84   // The requested sample-rate must match the hardware sample-rate.
     85   Float64 sample_rate = 0.0;
     86   size = sizeof(sample_rate);
     87 
     88   pa.mSelector = kAudioDevicePropertyNominalSampleRate;
     89   pa.mScope = kAudioObjectPropertyScopeWildcard;
     90   pa.mElement = kAudioObjectPropertyElementMaster;
     91 
     92   result = AudioObjectGetPropertyData(
     93       device_,
     94       &pa,
     95       0,
     96       0,
     97       &size,
     98       &sample_rate);
     99 
    100   if (result != noErr || sample_rate != format_.mSampleRate) {
    101     LOG(ERROR) << "Requested sample-rate: " << format_.mSampleRate
    102         <<  " must match the hardware sample-rate: " << sample_rate;
    103     return false;
    104   }
    105 
    106   // Configure buffer frame size.
    107   UInt32 frame_size = number_of_frames_;
    108 
    109   pa.mSelector = kAudioDevicePropertyBufferFrameSize;
    110   pa.mScope = kAudioDevicePropertyScopeInput;
    111   pa.mElement = kAudioObjectPropertyElementMaster;
    112   result = AudioObjectSetPropertyData(
    113       device_,
    114       &pa,
    115       0,
    116       0,
    117       sizeof(frame_size),
    118       &frame_size);
    119 
    120   if (result != noErr) {
    121     LOG(ERROR) << "Unable to set input buffer frame size: "  << frame_size;
    122     return false;
    123   }
    124 
    125   pa.mScope = kAudioDevicePropertyScopeOutput;
    126   result = AudioObjectSetPropertyData(
    127       device_,
    128       &pa,
    129       0,
    130       0,
    131       sizeof(frame_size),
    132       &frame_size);
    133 
    134   if (result != noErr) {
    135     LOG(ERROR) << "Unable to set output buffer frame size: "  << frame_size;
    136     return false;
    137   }
    138 
    139   DVLOG(1) << "Sample rate: " << sample_rate;
    140   DVLOG(1) << "Frame size: " << frame_size;
    141 
    142   // Determine the number of input and output channels.
    143   // We handle both the interleaved and non-interleaved cases.
    144 
    145   // Get input stream configuration.
    146   pa.mSelector = kAudioDevicePropertyStreamConfiguration;
    147   pa.mScope = kAudioDevicePropertyScopeInput;
    148   pa.mElement = kAudioObjectPropertyElementMaster;
    149 
    150   result = AudioObjectGetPropertyDataSize(device_, &pa, 0, 0, &size);
    151   OSSTATUS_DCHECK(result == noErr, result);
    152 
    153   if (result == noErr && size > 0) {
    154     // Allocate storage.
    155     scoped_ptr<uint8[]> input_list_storage(new uint8[size]);
    156     AudioBufferList& input_list =
    157         *reinterpret_cast<AudioBufferList*>(input_list_storage.get());
    158 
    159     result = AudioObjectGetPropertyData(
    160         device_,
    161         &pa,
    162         0,
    163         0,
    164         &size,
    165         &input_list);
    166     OSSTATUS_DCHECK(result == noErr, result);
    167 
    168     if (result == noErr) {
    169       // Determine number of input channels.
    170       input_channels_per_frame_ = input_list.mNumberBuffers > 0 ?
    171           input_list.mBuffers[0].mNumberChannels : 0;
    172       if (input_channels_per_frame_ == 1 && input_list.mNumberBuffers > 1) {
    173         // Non-interleaved.
    174         input_channels_ = input_list.mNumberBuffers;
    175       } else {
    176         // Interleaved.
    177         input_channels_ = input_channels_per_frame_;
    178       }
    179     }
    180   }
    181 
    182   DVLOG(1) << "Input channels: " << input_channels_;
    183   DVLOG(1) << "Input channels per frame: " << input_channels_per_frame_;
    184 
    185   // The hardware must have at least the requested input channels.
    186   if (result != noErr || client_input_channels_ > input_channels_) {
    187     LOG(ERROR) << "AudioDevice does not support requested input channels.";
    188     return false;
    189   }
    190 
    191   // Get output stream configuration.
    192   pa.mSelector = kAudioDevicePropertyStreamConfiguration;
    193   pa.mScope = kAudioDevicePropertyScopeOutput;
    194   pa.mElement = kAudioObjectPropertyElementMaster;
    195 
    196   result = AudioObjectGetPropertyDataSize(device_, &pa, 0, 0, &size);
    197   OSSTATUS_DCHECK(result == noErr, result);
    198 
    199   if (result == noErr && size > 0) {
    200     // Allocate storage.
    201     scoped_ptr<uint8[]> output_list_storage(new uint8[size]);
    202     AudioBufferList& output_list =
    203         *reinterpret_cast<AudioBufferList*>(output_list_storage.get());
    204 
    205     result = AudioObjectGetPropertyData(
    206         device_,
    207         &pa,
    208         0,
    209         0,
    210         &size,
    211         &output_list);
    212     OSSTATUS_DCHECK(result == noErr, result);
    213 
    214     if (result == noErr) {
    215       // Determine number of output channels.
    216       output_channels_per_frame_ = output_list.mBuffers[0].mNumberChannels;
    217       if (output_channels_per_frame_ == 1 && output_list.mNumberBuffers > 1) {
    218         // Non-interleaved.
    219         output_channels_ = output_list.mNumberBuffers;
    220       } else {
    221         // Interleaved.
    222         output_channels_ = output_channels_per_frame_;
    223       }
    224     }
    225   }
    226 
    227   DVLOG(1) << "Output channels: " << output_channels_;
    228   DVLOG(1) << "Output channels per frame: " << output_channels_per_frame_;
    229 
    230   // The hardware must have at least the requested output channels.
    231   if (result != noErr ||
    232       output_channels_ < static_cast<int>(format_.mChannelsPerFrame)) {
    233     LOG(ERROR) << "AudioDevice does not support requested output channels.";
    234     return false;
    235   }
    236 
    237   // Setup the I/O proc.
    238   result = AudioDeviceCreateIOProcID(device_, RenderProc, this, &io_proc_id_);
    239   if (result != noErr) {
    240     LOG(ERROR) << "Error creating IOProc.";
    241     return false;
    242   }
    243 
    244   return true;
    245 }
    246 
// Tears down the IOProc and hands the stream back to the manager. The stream
// must be stopped first. Note the statement order below is significant.
void AudioHardwareUnifiedStream::Close() {
  DCHECK(!is_playing_);

  OSStatus result = AudioDeviceDestroyIOProcID(device_, io_proc_id_);
  OSSTATUS_DCHECK(result == noErr, result);

  // Mark the device as released so the destructor's DCHECK passes.
  io_proc_id_ = 0;
  device_ = kAudioObjectUnknown;

  // Inform the audio manager that we have been closed. This can cause our
  // destruction.
  manager_->ReleaseOutputStream(this);
}
    260 
    261 void AudioHardwareUnifiedStream::Start(AudioSourceCallback* callback) {
    262   DCHECK(callback);
    263   DCHECK_NE(device_, kAudioObjectUnknown);
    264   DCHECK(!is_playing_);
    265   if (device_ == kAudioObjectUnknown || is_playing_)
    266     return;
    267 
    268   source_ = callback;
    269 
    270   OSStatus result = AudioDeviceStart(device_, io_proc_id_);
    271   OSSTATUS_DCHECK(result == noErr, result);
    272 
    273   if (result == noErr)
    274     is_playing_ = true;
    275 }
    276 
    277 void AudioHardwareUnifiedStream::Stop() {
    278   if (!is_playing_)
    279     return;
    280 
    281   if (device_ != kAudioObjectUnknown) {
    282     OSStatus result = AudioDeviceStop(device_, io_proc_id_);
    283     OSSTATUS_DCHECK(result == noErr, result);
    284   }
    285 
    286   is_playing_ = false;
    287   source_ = NULL;
    288 }
    289 
// Caches the requested volume; the hardware volume property is not yet
// applied to the device.
void AudioHardwareUnifiedStream::SetVolume(double volume) {
  volume_ = static_cast<float>(volume);
  // TODO(crogers): set volume property
}
    294 
// Returns the value cached by SetVolume(), not the actual device volume.
void AudioHardwareUnifiedStream::GetVolume(double* volume) {
  *volume = volume_;
}
    298 
    299 // Pulls on our provider with optional input, asking it to render output.
    300 // Note to future hackers of this function: Do not add locks here because this
    301 // is running on a real-time thread (for low-latency).
OSStatus AudioHardwareUnifiedStream::Render(
    AudioDeviceID device,
    const AudioTimeStamp* now,
    const AudioBufferList* input_data,
    const AudioTimeStamp* input_time,
    AudioBufferList* output_data,
    const AudioTimeStamp* output_time) {
  // Convert the input data accounting for possible interleaving.
  // TODO(crogers): it's better to simply memcpy() if source is already planar.
  // NOTE(review): assumes the hardware delivers Float32 samples — the
  // static_cast<float*> below is only valid if so; confirm against the
  // device's physical stream format.
  if (input_channels_ >= client_input_channels_) {
    for (int channel_index = 0; channel_index < client_input_channels_;
         ++channel_index) {
      float* source;

      int source_channel_index = channel_index;

      if (input_channels_per_frame_ > 1) {
        // Interleaved: all channels live in mBuffers[0]; start at this
        // channel's offset and step by channels-per-frame below.
        source = static_cast<float*>(input_data->mBuffers[0].mData) +
            source_channel_index;
      } else {
        // Non-interleaved: one buffer per channel; stride of 1 falls out of
        // input_channels_per_frame_ == 1 in the copy loop below.
        source = static_cast<float*>(
            input_data->mBuffers[source_channel_index].mData);
      }

      // De-interleave into the planar input bus handed to the client.
      float* p = input_bus_->channel(channel_index);
      for (int i = 0; i < number_of_frames_; ++i) {
        p[i] = *source;
        source += input_channels_per_frame_;
      }
    }
  } else if (input_channels_) {
    // Fewer hardware channels than the client expects: hand it silence.
    // (Open() rejects this configuration, so this is a defensive branch.)
    input_bus_->Zero();
  }

  // Give the client optional input data and have it render the output data.
  source_->OnMoreIOData(input_bus_.get(),
                        output_bus_.get(),
                        AudioBuffersState(0, 0));

  // TODO(crogers): handle final Core Audio 5.1 layout for 5.1 audio.

  // Handle interleaving as necessary.
  // TODO(crogers): it's better to simply memcpy() if dest is already planar.

  // Interleave (or plane-copy) the client-rendered output into the device's
  // output buffers; mirrors the input conversion above.
  for (int channel_index = 0;
       channel_index < static_cast<int>(format_.mChannelsPerFrame);
       ++channel_index) {
    float* dest;

    int dest_channel_index = channel_index;

    if (output_channels_per_frame_ > 1) {
      // Interleaved.
      dest = static_cast<float*>(output_data->mBuffers[0].mData) +
          dest_channel_index;
    } else {
      // Non-interleaved.
      dest = static_cast<float*>(
          output_data->mBuffers[dest_channel_index].mData);
    }

    float* p = output_bus_->channel(channel_index);
    for (int i = 0; i < number_of_frames_; ++i) {
      *dest = p[i];
      dest += output_channels_per_frame_;
    }
  }

  return noErr;
}
    374 
    375 OSStatus AudioHardwareUnifiedStream::RenderProc(
    376     AudioDeviceID device,
    377     const AudioTimeStamp* now,
    378     const AudioBufferList* input_data,
    379     const AudioTimeStamp* input_time,
    380     AudioBufferList* output_data,
    381     const AudioTimeStamp* output_time,
    382     void* user_data) {
    383   AudioHardwareUnifiedStream* audio_output =
    384       static_cast<AudioHardwareUnifiedStream*>(user_data);
    385   DCHECK(audio_output);
    386   if (!audio_output)
    387     return -1;
    388 
    389   return audio_output->Render(
    390       device,
    391       now,
    392       input_data,
    393       input_time,
    394       output_data,
    395       output_time);
    396 }
    397 
    398 }  // namespace media
    399