/external/webrtc/webrtc/modules/audio_conference_mixer/source/
audio_frame_manipulator.h
  15 class AudioFrame;
  17 // Updates the audioFrame's energy (based on its samples).
  18 void CalculateEnergy(AudioFrame& audioFrame);
  20 // Apply linear step function that ramps in/out the audio samples in audioFrame
  21 void RampIn(AudioFrame& audioFrame);
  22 void RampOut(AudioFrame& audioFrame);
audio_frame_manipulator.cc
  42 void CalculateEnergy(AudioFrame& audioFrame)
  44 audioFrame.energy_ = 0;
  45 for(size_t position = 0; position < audioFrame.samples_per_channel_;
  49 audioFrame.energy_ += audioFrame.data_[position] *
  50 audioFrame.data_[position];
  54 void RampIn(AudioFrame& audioFrame)
  56 assert(rampSize <= audioFrame.samples_per_channel_)
  [all...]
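The header and implementation together describe a sum-of-squares energy measure and a linear fade applied in place. A self-contained sketch of the same two operations, assuming a pared-down stand-in for AudioFrame and an illustrative rampSize (the real constant is elided above):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Hypothetical stand-in with only the AudioFrame fields these helpers touch.
    struct Frame {
      int16_t data_[1920];
      size_t samples_per_channel_;
      uint32_t energy_;
    };

    // Sum of squared samples, as CalculateEnergy() does above.
    void CalculateEnergy(Frame& frame) {
      frame.energy_ = 0;
      for (size_t i = 0; i < frame.samples_per_channel_; ++i) {
        // The square of an int16 always fits in an int32.
        int32_t sq = static_cast<int32_t>(frame.data_[i]) * frame.data_[i];
        frame.energy_ += static_cast<uint32_t>(sq);
      }
    }

    // Linear fade from silence to full scale over the first rampSize samples;
    // RampOut would be the mirror image.
    void RampIn(Frame& frame) {
      const size_t rampSize = 128;  // assumed constant for illustration
      assert(rampSize <= frame.samples_per_channel_);
      for (size_t i = 0; i < rampSize; ++i)
        frame.data_[i] = static_cast<int16_t>(
            frame.data_[i] * static_cast<int32_t>(i) / static_cast<int32_t>(rampSize));
    }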
audio_conference_mixer_impl.cc
  24 AudioFrame* audioFrame;
  35 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) {
  142 MemoryPool<AudioFrame>::CreateMemoryPool(_audioFramePool,
  174 MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool);
  264 // Get an AudioFrame for mixing from the memory pool.
  265 AudioFrame* mixedAudio = NULL;
  286 AudioFrame::kNormalSpeech,
  287 AudioFrame::kVadPassive, num_mixed_channels)
  [all...]
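MixFrames() combines one participant's frame into the mixed output, optionally through a limiter. The heart of any such mixer is a saturating sample-wise add; a minimal sketch (MixInto is our name, and the real function's use_limiter path is not shown):

    #include <cstddef>
    #include <cstdint>
    #include <limits>

    // Illustrative sample-wise mix with int16 saturation on overflow.
    void MixInto(int16_t* mixed, const int16_t* src, size_t n) {
      for (size_t i = 0; i < n; ++i) {
        int32_t sum = static_cast<int32_t>(mixed[i]) + src[i];
        if (sum > std::numeric_limits<int16_t>::max())
          sum = std::numeric_limits<int16_t>::max();
        if (sum < std::numeric_limits<int16_t>::min())
          sum = std::numeric_limits<int16_t>::min();
        mixed[i] = static_cast<int16_t>(sum);
      }
    }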
/external/webrtc/webrtc/modules/audio_coding/test/ |
SpatialAudio.cc
  153 AudioFrame audioFrame;
  161 _inFile.Read10MsData(audioFrame);
  162 for (size_t n = 0; n < audioFrame.samples_per_channel_; n++) {
  163 audioFrame.data_[n] = (int16_t) floor(
  164 audioFrame.data_[n] * leftPanning + 0.5);
  166 CHECK_ERROR(_acmLeft->Add10MsData(audioFrame));
  168 for (size_t n = 0; n < audioFrame.samples_per_channel_; n++) {
  169 audioFrame.data_[n] = (int16_t) floor(
  170 audioFrame.data_[n] * rightToLeftRatio + 0.5)
  [all...]
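The spatial test pans a mono frame by scaling every sample by a channel gain and rounding to nearest, exactly the floor(x * gain + 0.5) idiom above. As a standalone helper (ApplyGain is an illustrative name):

    #include <cmath>
    #include <cstddef>
    #include <cstdint>

    // Scale int16 samples by `gain`, rounding to nearest as floor(x + 0.5) does
    // for the non-negative offsets used above.
    void ApplyGain(int16_t* data, size_t n, double gain) {
      for (size_t i = 0; i < n; ++i)
        data[i] = static_cast<int16_t>(std::floor(data[i] * gain + 0.5));
    }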
TwoWayCommunication.cc
  251 AudioFrame audioFrame;
  262 EXPECT_GT(_inFileA.Read10MsData(audioFrame), 0);
  263 EXPECT_GE(_acmA->Add10MsData(audioFrame), 0);
  264 EXPECT_GE(_acmRefA->Add10MsData(audioFrame), 0);
  266 EXPECT_GT(_inFileB.Read10MsData(audioFrame), 0);
  268 EXPECT_GE(_acmB->Add10MsData(audioFrame), 0);
  269 EXPECT_GE(_acmRefB->Add10MsData(audioFrame), 0);
  270 EXPECT_EQ(0, _acmA->PlayoutData10Ms(outFreqHzA, &audioFrame));
  271 _outFileA.Write10MsData(audioFrame);
  [all...]
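This file and the ACM tests that follow (iSACTest.cc, EncodeDecodeTest.cc, TestRedFec.cc, APITest.cc) all drive the same 10 ms pump: read a frame from a PCM file, feed the encoder with Add10MsData(), pull decoded audio with PlayoutData10Ms(), and write it out. A sketch of that loop, assuming the PCMFile and AudioCodingModule interfaces used in the snippets (include paths may differ by tree revision):

    #include "webrtc/modules/audio_coding/include/audio_coding_module.h"
    #include "webrtc/modules/audio_coding/test/PCMFile.h"

    // Schematic 10 ms pump shared by the ACM tests in this directory.
    void Pump(webrtc::AudioCodingModule* acm, webrtc::PCMFile& in,
              webrtc::PCMFile& out, int playoutFreqHz) {
      webrtc::AudioFrame frame;
      while (!in.EndOfFile()) {
        in.Read10MsData(frame);                       // one 10 ms frame of PCM
        acm->Add10MsData(frame);                      // encoder side
        acm->PlayoutData10Ms(playoutFreqHz, &frame);  // decoder side
        out.Write10MsData(frame);                     // persist decoded output
      }
    }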
iSACTest.cc
  198 AudioFrame audioFrame;
  199 EXPECT_GT(_inFileA.Read10MsData(audioFrame), 0);
  200 EXPECT_GE(_acmA->Add10MsData(audioFrame), 0);
  201 EXPECT_GE(_acmB->Add10MsData(audioFrame), 0);
  202 EXPECT_EQ(0, _acmA->PlayoutData10Ms(32000, &audioFrame));
  203 _outFileA.Write10MsData(audioFrame);
  204 EXPECT_EQ(0, _acmB->PlayoutData10Ms(32000, &audioFrame));
  205 _outFileB.Write10MsData(audioFrame);
EncodeDecodeTest.cc
  210 AudioFrame audioFrame;
  212 int32_t ok =_acm->PlayoutData10Ms(_frequency, &audioFrame);
  220 _pcmFile.Write10MsData(audioFrame.data_,
  221 audioFrame.samples_per_channel_ * audioFrame.num_channels_);
TestRedFec.cc
  454 AudioFrame audioFrame;
  462 EXPECT_GT(_inFileA.Read10MsData(audioFrame), 0);
  463 EXPECT_GE(_acmA->Add10MsData(audioFrame), 0);
  464 EXPECT_EQ(0, _acmB->PlayoutData10Ms(outFreqHzB, &audioFrame));
  465 _outFileB.Write10MsData(audioFrame.data_, audioFrame.samples_per_channel_);
APITest.cc
  325 AudioFrame audioFrame;
  326 if (_acmA->PlayoutData10Ms(_outFreqHzA, &audioFrame) < 0) {
  337 _outFileA.Write10MsData(audioFrame);
  345 AudioFrame audioFrame;
  346 if (_acmB->PlayoutData10Ms(_outFreqHzB, &audioFrame) < 0) {
  358 _outFileB.Write10MsData(audioFrame);
  366 AudioFrame audioFrame;
  [all...]
/external/webrtc/webrtc/voice_engine/ |
level_indicator.cc
  48 void AudioLevel::ComputeLevel(const AudioFrame& audioFrame)
  54 audioFrame.data_,
  55 audioFrame.samples_per_channel_*audioFrame.num_channels_);
level_indicator.h
  19 class AudioFrame;
  38 void ComputeLevel(const AudioFrame& audioFrame);
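ComputeLevel() measures the level of the whole interleaved buffer, i.e. samples_per_channel_ * num_channels_ values in one pass; WebRTC hands that span to a max-absolute-value routine. A plain C++ equivalent of that scan (the helper name is ours, not the SPL function's):

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    // Peak level of an interleaved int16 buffer, consuming
    // samples_per_channel_ * num_channels_ values as ComputeLevel() does.
    int16_t MaxAbsLevel(const int16_t* data, size_t length) {
      int32_t maxAbs = 0;
      for (size_t i = 0; i < length; ++i) {
        int32_t a = std::abs(static_cast<int32_t>(data[i]));  // widen before abs
        if (a > maxAbs) maxAbs = a;
      }
      // abs(-32768) = 32768 does not fit in int16_t; clamp to full scale.
      return static_cast<int16_t>(maxAbs > 32767 ? 32767 : maxAbs);
    }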
channel.cc
  523 int32_t Channel::GetAudioFrame(int32_t id, AudioFrame* audioFrame)
  531 if (audio_coding_->PlayoutData10Ms(audioFrame->sample_rate_hz_,
  532 audioFrame) == -1)
  546 UpdateRxVadDetection(*audioFrame);
  550 audioFrame->id_ = VoEChannelId(audioFrame->id_);
  552 _outputSpeechType = audioFrame->speech_type_;
  557 int err = rx_audioproc_->ProcessStream(audioFrame);
  572 &audioFrame->data_[0]
  [all...]
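GetAudioFrame() is the per-channel pull path: decode 10 ms into the caller's frame at the frame's current rate, run receive-side VAD and processing, then retag the frame with the channel id. Reduced to its two essential calls, assuming the ProcessStream(AudioFrame*) overload the snippet itself uses (includes and error logging elided):

    // Skeleton of the pull path in Channel::GetAudioFrame(); the real method
    // also mixes file audio, feeds external media processing, and updates
    // output level metrics.
    int32_t PullOneFrame(webrtc::AudioCodingModule* acm,
                         webrtc::AudioProcessing* rx_apm,
                         webrtc::AudioFrame* frame) {
      // Decode 10 ms at the frame's current sample rate into the caller's buffer.
      if (acm->PlayoutData10Ms(frame->sample_rate_hz_, frame) == -1)
        return -1;
      // Receive-side audio processing, as rx_audioproc_->ProcessStream() above.
      if (rx_apm->ProcessStream(frame) != 0)
        return -1;
      return 0;
    }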
output_mixer.h
  67 AudioFrame* audioFrame);
  91 const AudioFrame& generalAudioFrame,
  92 const AudioFrame** uniqueAudioFrames,
  116 AudioFrame _audioFrame;
channel.h
  308 int UpdateRxVadDetection(AudioFrame& audioFrame);
  401 int32_t GetAudioFrame(int32_t id, AudioFrame* audioFrame) override;
  447 uint32_t Demultiplex(const AudioFrame& audioFrame);
  483 int32_t MixAudioWithFile(AudioFrame& audioFrame, int mixingFrequency);
  517 AudioFrame _audioFrame;
  585 AudioFrame::SpeechType _outputSpeechType
  [all...]
/external/webrtc/webrtc/modules/audio_conference_mixer/include/ |
audio_conference_mixer_defines.h
  24 // The implementation of this function should update audioFrame with new
  29 AudioFrame* audioFrame) = 0;
  34 // This function specifies the sampling frequency needed for the AudioFrame
  48 // Note that uniqueAudioFrames is an array of AudioFrame pointers with the
  51 const AudioFrame& generalAudioFrame,
  52 const AudioFrame** uniqueAudioFrames,
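These pure-virtual callbacks are what a conference participant implements so the mixer can pull 10 ms of audio from it on demand. What such a GetAudioFrame() implementation must leave in the mixer-supplied frame, sketched as a free function (field names follow the snippets in this listing; the AudioFrame include is assumed, and a real participant derives from this header's participant class and copies real audio rather than silence):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Fill the mixer-supplied frame with 10 ms of silence at 16 kHz.
    int32_t FillFrame(int32_t id, webrtc::AudioFrame* audioFrame) {
      const int sampleRateHz = 16000;
      const size_t samples = sampleRateHz / 100;  // 10 ms worth of samples
      audioFrame->id_ = id;
      audioFrame->sample_rate_hz_ = sampleRateHz;
      audioFrame->samples_per_channel_ = samples;
      audioFrame->num_channels_ = 1;
      audioFrame->speech_type_ = webrtc::AudioFrame::kNormalSpeech;
      audioFrame->vad_activity_ = webrtc::AudioFrame::kVadPassive;
      std::memset(audioFrame->data_, 0, samples * sizeof(int16_t));
      return 0;
    }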
/external/webrtc/webrtc/modules/utility/source/ |
coder.cc
  51 int32_t AudioCoder::Decode(AudioFrame& decodedAudio,
  71 int32_t AudioCoder::PlayoutData(AudioFrame& decodedAudio,
  77 int32_t AudioCoder::Encode(const AudioFrame& audio,
  83 AudioFrame audioFrame;
  84 audioFrame.CopyFrom(audio);
  85 audioFrame.timestamp_ = _encodeTimestamp;
  86 _encodeTimestamp += static_cast<uint32_t>(audioFrame.samples_per_channel_);
  91 if(_acm->Add10MsData((AudioFrame&)audioFrame) == -1
  [all...]
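Encode() works on a copy so the caller's frame keeps its own timestamp, then stamps a running counter and advances it by one frame's worth of samples, which is how RTP audio timestamps count. The same bookkeeping as a small wrapper (EncodeFrame and the pointer-held counter are illustrative names):

    // Copy, stamp, advance: the timestamp bookkeeping from AudioCoder::Encode().
    int32_t EncodeFrame(webrtc::AudioCodingModule* acm,
                        const webrtc::AudioFrame& audio,
                        uint32_t* encodeTimestamp) {
      webrtc::AudioFrame copy;
      copy.CopyFrom(audio);                  // deep copy; caller's frame unchanged
      copy.timestamp_ = *encodeTimestamp;
      *encodeTimestamp += static_cast<uint32_t>(copy.samples_per_channel_);
      return acm->Add10MsData(copy);
    }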
/frameworks/base/tests/Camera2Tests/SmartCamera/SimpleCamera/src/androidx/media/filterfw/decoder/ |
AudioTrackDecoder.java
  105 public void grabSample(FrameValue audioFrame) {
  107 if (audioFrame != null) {
  110 audioFrame.setValue(sample);
  111 audioFrame.setTimestamp(mAudioPresentationTimeUs * 1000);
/frameworks/av/media/libstagefright/webm/ |
WebmFrameThread.cpp
  207 const sp<WebmFrame> audioFrame = mAudioFrames.peek();
  208 ALOGV("a frame: %p", audioFrame.get());
  210 if (videoFrame->mEos && audioFrame->mEos) {
  214 if (*audioFrame < *videoFrame) {
  217 outstandingFrames.push_back(audioFrame);
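The sink thread interleaves the audio and video queues by comparing head frames (WebmFrame's operator< orders by timestamp) and emitting the earlier one until both sides hit EOS. The core two-queue merge step in generic form (types are simplified stand-ins; the real code uses refcounted WebmFrame pointers):

    #include <cstdint>
    #include <deque>

    struct MediaFrame {
      int64_t timeUs;  // presentation time; WebmFrame orders by this via operator<
      bool eos;
    };

    // Pop whichever head frame is earlier, as the muxer loop above does.
    // Callers ensure both queues are non-empty, as the thread loop guarantees.
    MediaFrame PopEarlier(std::deque<MediaFrame>& audio,
                          std::deque<MediaFrame>& video) {
      MediaFrame out;
      if (audio.front().timeUs < video.front().timeUs) {
        out = audio.front();
        audio.pop_front();
      } else {
        out = video.front();
        video.pop_front();
      }
      return out;
    }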
/external/webrtc/webrtc/modules/audio_processing/ |
audio_buffer.h
  105 void set_activity(AudioFrame::VADActivity activity);
  106 AudioFrame::VADActivity activity() const;
  109 void DeinterleaveFrom(AudioFrame* audioFrame);
  112 void InterleaveTo(AudioFrame* frame, bool data_changed);
  146 AudioFrame::VADActivity activity_;
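DeinterleaveFrom()/InterleaveTo() convert between the AudioFrame's interleaved int16 layout (L R L R ...) and the per-channel planes the audio-processing modules operate on. The basic forward transform (container choice is illustrative; the real buffer writes into preallocated channel arrays):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Interleaved [L R L R ...] -> one contiguous plane per channel.
    void Deinterleave(const int16_t* interleaved, size_t samples_per_channel,
                      size_t num_channels,
                      std::vector<std::vector<int16_t>>* planes) {
      planes->assign(num_channels, std::vector<int16_t>(samples_per_channel));
      for (size_t ch = 0; ch < num_channels; ++ch)
        for (size_t i = 0; i < samples_per_channel; ++i)
          (*planes)[ch][i] = interleaved[i * num_channels + ch];
    }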