/*
 *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>  // assert()
#include <string.h>  // memset(), memcpy()

#include "audio_buffer.h"

#include "module_common_types.h"

namespace webrtc {
namespace {

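// Number of samples per channel in one 10 ms frame at each supported rate.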
enum {
  kSamplesPer8kHzChannel = 80,
  kSamplesPer16kHzChannel = 160,
  kSamplesPer32kHzChannel = 320
};

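// Downmixes a stereo pair to mono by averaging the two channels, saturating
// the result to the 16-bit range.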
void StereoToMono(const WebRtc_Word16* left, const WebRtc_Word16* right,
                  WebRtc_Word16* out, int samples_per_channel) {
  WebRtc_Word32 data_int32 = 0;
  for (int i = 0; i < samples_per_channel; i++) {
    data_int32 = (left[i] + right[i]) >> 1;
    if (data_int32 > 32767) {
      data_int32 = 32767;
    } else if (data_int32 < -32768) {
      data_int32 = -32768;
    }

    out[i] = static_cast<WebRtc_Word16>(data_int32);
  }
}
}  // namespace

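// Holds one channel of full-band audio, sized for the largest supported
// frame (10 ms at 32 kHz).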
struct AudioChannel {
  AudioChannel() {
    memset(data, 0, sizeof(data));
  }

  WebRtc_Word16 data[kSamplesPer32kHzChannel];
};

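// Holds the band-split version of one channel (low and high bands) along
// with the analysis and synthesis filter states for the band-splitting
// filters.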
struct SplitAudioChannel {
  SplitAudioChannel() {
    memset(low_pass_data, 0, sizeof(low_pass_data));
    memset(high_pass_data, 0, sizeof(high_pass_data));
    memset(analysis_filter_state1, 0, sizeof(analysis_filter_state1));
    memset(analysis_filter_state2, 0, sizeof(analysis_filter_state2));
    memset(synthesis_filter_state1, 0, sizeof(synthesis_filter_state1));
    memset(synthesis_filter_state2, 0, sizeof(synthesis_filter_state2));
  }

  WebRtc_Word16 low_pass_data[kSamplesPer16kHzChannel];
  WebRtc_Word16 high_pass_data[kSamplesPer16kHzChannel];

  WebRtc_Word32 analysis_filter_state1[6];
  WebRtc_Word32 analysis_filter_state2[6];
  WebRtc_Word32 synthesis_filter_state1[6];
  WebRtc_Word32 synthesis_filter_state2[6];
};

// TODO(ajm): check range of input parameters?
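// Buffers are allocated up front for the maximum channel count; the
// deinterleaved and mixed buffers are only needed when more than one channel
// is possible. Split-band buffers are allocated only for 32 kHz input, where
// each 320-sample channel is split into two 160-sample bands.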
AudioBuffer::AudioBuffer(WebRtc_Word32 max_num_channels,
                         WebRtc_Word32 samples_per_channel)
    : max_num_channels_(max_num_channels),
      num_channels_(0),
      num_mixed_channels_(0),
      num_mixed_low_pass_channels_(0),
      samples_per_channel_(samples_per_channel),
      samples_per_split_channel_(samples_per_channel),
      reference_copied_(false),
      data_(NULL),
      channels_(NULL),
      split_channels_(NULL),
      mixed_low_pass_channels_(NULL),
      low_pass_reference_channels_(NULL) {
  if (max_num_channels_ > 1) {
    channels_ = new AudioChannel[max_num_channels_];
    mixed_low_pass_channels_ = new AudioChannel[max_num_channels_];
  }
  low_pass_reference_channels_ = new AudioChannel[max_num_channels_];

  if (samples_per_channel_ == kSamplesPer32kHzChannel) {
    split_channels_ = new SplitAudioChannel[max_num_channels_];
    samples_per_split_channel_ = kSamplesPer16kHzChannel;
  }
}

AudioBuffer::~AudioBuffer() {
  if (channels_ != NULL) {
    delete [] channels_;
  }

  if (mixed_low_pass_channels_ != NULL) {
    delete [] mixed_low_pass_channels_;
  }

  if (low_pass_reference_channels_ != NULL) {
    delete [] low_pass_reference_channels_;
  }

  if (split_channels_ != NULL) {
    delete [] split_channels_;
  }
}

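// Returns the full-band data for |channel|. For mono input this is the
// frame's own payload buffer (no copy was made); otherwise it is the
// deinterleaved per-channel buffer.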
WebRtc_Word16* AudioBuffer::data(WebRtc_Word32 channel) const {
  assert(channel >= 0 && channel < num_channels_);
  if (data_ != NULL) {
    return data_;
  }

  return channels_[channel].data;
}

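// Split-band accessors. When no band split was performed (input below
// 32 kHz), low_pass_split_data() falls back to the full-band data and
// high_pass_split_data() returns NULL.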
WebRtc_Word16* AudioBuffer::low_pass_split_data(WebRtc_Word32 channel) const {
  assert(channel >= 0 && channel < num_channels_);
  if (split_channels_ == NULL) {
    return data(channel);
  }

  return split_channels_[channel].low_pass_data;
}

WebRtc_Word16* AudioBuffer::high_pass_split_data(WebRtc_Word32 channel) const {
  assert(channel >= 0 && channel < num_channels_);
  if (split_channels_ == NULL) {
    return NULL;
  }

  return split_channels_[channel].high_pass_data;
}

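// Accessors for the mixed-down low-pass data and the saved low-pass
// reference. low_pass_reference() returns NULL until
// CopyLowPassToReference() has been called for the current frame.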
WebRtc_Word16* AudioBuffer::mixed_low_pass_data(WebRtc_Word32 channel) const {
  assert(channel >= 0 && channel < num_mixed_low_pass_channels_);

  return mixed_low_pass_channels_[channel].data;
}

WebRtc_Word16* AudioBuffer::low_pass_reference(WebRtc_Word32 channel) const {
  assert(channel >= 0 && channel < num_channels_);
  if (!reference_copied_) {
    return NULL;
  }

  return low_pass_reference_channels_[channel].data;
}

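// Raw per-channel filter-state accessors for the band-splitting filters.
// Only valid when split buffers exist, i.e. for 32 kHz input.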
WebRtc_Word32* AudioBuffer::analysis_filter_state1(WebRtc_Word32 channel) const {
  assert(channel >= 0 && channel < num_channels_);
  return split_channels_[channel].analysis_filter_state1;
}

WebRtc_Word32* AudioBuffer::analysis_filter_state2(WebRtc_Word32 channel) const {
  assert(channel >= 0 && channel < num_channels_);
  return split_channels_[channel].analysis_filter_state2;
}

WebRtc_Word32* AudioBuffer::synthesis_filter_state1(WebRtc_Word32 channel) const {
  assert(channel >= 0 && channel < num_channels_);
  return split_channels_[channel].synthesis_filter_state1;
}

WebRtc_Word32* AudioBuffer::synthesis_filter_state2(WebRtc_Word32 channel) const {
  assert(channel >= 0 && channel < num_channels_);
  return split_channels_[channel].synthesis_filter_state2;
}

WebRtc_Word32 AudioBuffer::num_channels() const {
  return num_channels_;
}

WebRtc_Word32 AudioBuffer::samples_per_channel() const {
  return samples_per_channel_;
}

WebRtc_Word32 AudioBuffer::samples_per_split_channel() const {
  return samples_per_split_channel_;
}

// TODO(ajm): Do deinterleaving and mixing in one step?
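// Imports one audio frame for processing. Per-frame state (mix and reference
// flags) is reset. Mono frames are referenced directly via |data_|;
// multi-channel frames are deinterleaved into the per-channel buffers.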
void AudioBuffer::DeinterleaveFrom(AudioFrame* audioFrame) {
  assert(audioFrame->_audioChannel <= max_num_channels_);
  assert(audioFrame->_payloadDataLengthInSamples == samples_per_channel_);

  num_channels_ = audioFrame->_audioChannel;
  num_mixed_channels_ = 0;
  num_mixed_low_pass_channels_ = 0;
  reference_copied_ = false;
  // Clear any pointer taken from a previous mono frame so that data() does
  // not return stale memory when the channel count changes between frames.
  data_ = NULL;

  if (num_channels_ == 1) {
    // We can get away with a pointer assignment in this case.
    data_ = audioFrame->_payloadData;
    return;
  }

  for (int i = 0; i < num_channels_; i++) {
    WebRtc_Word16* deinterleaved = channels_[i].data;
    WebRtc_Word16* interleaved = audioFrame->_payloadData;
    WebRtc_Word32 interleaved_idx = i;
    for (int j = 0; j < samples_per_channel_; j++) {
      deinterleaved[j] = interleaved[interleaved_idx];
      interleaved_idx += num_channels_;
    }
  }
}

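// Writes the processed audio back into the frame. Mono data only needs a
// copy if it was produced by Mix(); otherwise it already lives in the
// frame's payload. Multi-channel data is re-interleaved.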
void AudioBuffer::InterleaveTo(AudioFrame* audioFrame) const {
  assert(audioFrame->_audioChannel == num_channels_);
  assert(audioFrame->_payloadDataLengthInSamples == samples_per_channel_);

  if (num_channels_ == 1) {
    if (num_mixed_channels_ == 1) {
      memcpy(audioFrame->_payloadData,
             channels_[0].data,
             sizeof(WebRtc_Word16) * samples_per_channel_);
    } else {
      // These should point to the same buffer in this case.
      assert(data_ == audioFrame->_payloadData);
    }

    return;
  }

  for (int i = 0; i < num_channels_; i++) {
    WebRtc_Word16* deinterleaved = channels_[i].data;
    WebRtc_Word16* interleaved = audioFrame->_payloadData;
    WebRtc_Word32 interleaved_idx = i;
    for (int j = 0; j < samples_per_channel_; j++) {
      interleaved[interleaved_idx] = deinterleaved[j];
      interleaved_idx += num_channels_;
    }
  }
}

// TODO(ajm): would be good to support the no-mix case with pointer assignment.
// TODO(ajm): handle mixing to multiple channels?
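// Mixes the full-band channels down in place: channel 0 is overwritten with
// the average of channels 0 and 1, and the buffer is treated as mono from
// then on.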
void AudioBuffer::Mix(WebRtc_Word32 num_mixed_channels) {
  // We currently only support the stereo to mono case.
  assert(num_channels_ == 2);
  assert(num_mixed_channels == 1);

  StereoToMono(channels_[0].data,
               channels_[1].data,
               channels_[0].data,
               samples_per_channel_);

  num_channels_ = num_mixed_channels;
  num_mixed_channels_ = num_mixed_channels;
}

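// Mixes the low-pass bands of both channels into a separate buffer, leaving
// the per-channel split data untouched.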
void AudioBuffer::CopyAndMixLowPass(WebRtc_Word32 num_mixed_channels) {
  // We currently only support the stereo to mono case.
  assert(num_channels_ == 2);
  assert(num_mixed_channels == 1);

  StereoToMono(low_pass_split_data(0),
               low_pass_split_data(1),
               mixed_low_pass_channels_[0].data,
               samples_per_split_channel_);

  num_mixed_low_pass_channels_ = num_mixed_channels;
}

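// Saves a copy of each channel's low-pass band so it can be retrieved later
// through low_pass_reference().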
void AudioBuffer::CopyLowPassToReference() {
  reference_copied_ = true;
  for (int i = 0; i < num_channels_; i++) {
    memcpy(low_pass_reference_channels_[i].data,
           low_pass_split_data(i),
           sizeof(WebRtc_Word16) * samples_per_split_channel_);
  }
}
}  // namespace webrtc