Home | Sort by relevance | Sort by last modified time
    Searched refs:audioFrame (Results 1 - 21 of 21) sorted by relevance

  /external/chromium_org/third_party/webrtc/modules/audio_conference_mixer/source/
audio_frame_manipulator.h 15 class AudioFrame;
17 // Updates the audioFrame's energy (based on its samples).
18 void CalculateEnergy(AudioFrame& audioFrame);
20 // Apply linear step function that ramps in/out the audio samples in audioFrame
21 void RampIn(AudioFrame& audioFrame);
22 void RampOut(AudioFrame& audioFrame);
audio_frame_manipulator.cc 42 void CalculateEnergy(AudioFrame& audioFrame)
44 audioFrame.energy_ = 0;
45 for(int position = 0; position < audioFrame.samples_per_channel_;
49 audioFrame.energy_ += audioFrame.data_[position] *
50 audioFrame.data_[position];
54 void RampIn(AudioFrame& audioFrame)
56 assert(rampSize <= audioFrame.samples_per_channel_)
    [all...]
audio_conference_mixer_impl.cc 24 AudioFrame* audioFrame;
35 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) {
64 const AudioFrame& frame) {
158 MemoryPool<AudioFrame>::CreateMemoryPool(_audioFramePool,
190 MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool);
286 // Get an AudioFrame for mixing from the memory pool.
287 AudioFrame* mixedAudio = NULL;
310 AudioFrame::kNormalSpeech
    [all...]
  /external/chromium_org/third_party/webrtc/modules/audio_coding/main/test/
SpatialAudio.cc 153 AudioFrame audioFrame;
161 _inFile.Read10MsData(audioFrame);
162 for (int n = 0; n < audioFrame.samples_per_channel_; n++) {
163 audioFrame.data_[n] = (int16_t) floor(
164 audioFrame.data_[n] * leftPanning + 0.5);
166 CHECK_ERROR(_acmLeft->Add10MsData(audioFrame));
168 for (int n = 0; n < audioFrame.samples_per_channel_; n++) {
169 audioFrame.data_[n] = (int16_t) floor(
170 audioFrame.data_[n] * rightToLeftRatio + 0.5)
    [all...]
TwoWayCommunication.cc 273 AudioFrame audioFrame;
288 EXPECT_GT(_inFileA.Read10MsData(audioFrame), 0);
289 EXPECT_EQ(0, _acmA->Add10MsData(audioFrame));
290 EXPECT_EQ(0, _acmRefA->Add10MsData(audioFrame));
292 EXPECT_GT(_inFileB.Read10MsData(audioFrame), 0);
296 EXPECT_EQ(0, _acmB->Add10MsData(audioFrame));
298 EXPECT_EQ(-1, _acmB->Add10MsData(audioFrame));
307 EXPECT_EQ(0, _acmRefB->Add10MsData(audioFrame));
311 EXPECT_EQ(0, _acmA->PlayoutData10Ms(outFreqHzA, &audioFrame));
    [all...]
TestVADDTX.cc 235 AudioFrame audioFrame;
242 _inFileA.Read10MsData(audioFrame);
243 audioFrame.timestamp_ = timestampA;
245 EXPECT_EQ(0, _acmA->Add10MsData(audioFrame));
247 EXPECT_EQ(0, _acmB->PlayoutData10Ms(outFreqHzB, &audioFrame));
248 _outFileB.Write10MsData(audioFrame.data_, audioFrame.samples_per_channel_);
TestRedFec.cc 295 AudioFrame audioFrame;
302 EXPECT_GT(_inFileA.Read10MsData(audioFrame), 0);
303 EXPECT_EQ(0, _acmA->Add10MsData(audioFrame));
305 EXPECT_EQ(0, _acmB->PlayoutData10Ms(outFreqHzB, &audioFrame));
306 _outFileB.Write10MsData(audioFrame.data_, audioFrame.samples_per_channel_);
iSACTest.cc 247 AudioFrame audioFrame;
248 EXPECT_GT(_inFileA.Read10MsData(audioFrame), 0);
249 EXPECT_EQ(0, _acmA->Add10MsData(audioFrame));
250 EXPECT_EQ(0, _acmB->Add10MsData(audioFrame));
253 EXPECT_EQ(0, _acmA->PlayoutData10Ms(32000, &audioFrame));
254 _outFileA.Write10MsData(audioFrame);
255 EXPECT_EQ(0, _acmB->PlayoutData10Ms(32000, &audioFrame));
256 _outFileB.Write10MsData(audioFrame);
EncodeDecodeTest.cc 211 AudioFrame audioFrame;
213 int32_t ok =_acm->PlayoutData10Ms(_frequency, &audioFrame);
221 _pcmFile.Write10MsData(audioFrame.data_,
222 audioFrame.samples_per_channel_ * audioFrame.num_channels_);
APITest.cc 337 AudioFrame audioFrame;
338 if (_acmA->PlayoutData10Ms(_outFreqHzA, &audioFrame) < 0) {
349 _outFileA.Write10MsData(audioFrame);
357 AudioFrame audioFrame;
358 if (_acmB->PlayoutData10Ms(_outFreqHzB, &audioFrame) < 0) {
370 _outFileB.Write10MsData(audioFrame);
378 AudioFrame audioFrame;
    [all...]
  /external/chromium_org/third_party/webrtc/voice_engine/
level_indicator.cc 48 void AudioLevel::ComputeLevel(const AudioFrame& audioFrame)
54 audioFrame.data_,
55 audioFrame.samples_per_channel_*audioFrame.num_channels_);
level_indicator.h 19 class AudioFrame;
38 void ComputeLevel(const AudioFrame& audioFrame);
output_mixer.h 68 AudioFrame* audioFrame);
92 const AudioFrame& generalAudioFrame,
93 const AudioFrame** uniqueAudioFrames,
131 AudioFrame _audioFrame;
channel.cc 494 isAlive = (_outputSpeechType != AudioFrame::kPLCCNG);
576 int32_t Channel::GetAudioFrame(int32_t id, AudioFrame& audioFrame)
582 if (audio_coding_->PlayoutData10Ms(audioFrame.sample_rate_hz_,
583 &audioFrame) == -1)
597 UpdateRxVadDetection(audioFrame);
601 audioFrame.id_ = VoEChannelId(audioFrame.id_);
603 _outputSpeechType = audioFrame.speech_type_;
608 int err = rx_audioproc_->ProcessStream(&audioFrame);
    [all...]
channel.h 303 int UpdateRxVadDetection(AudioFrame& audioFrame);
420 int32_t GetAudioFrame(int32_t id, AudioFrame& audioFrame);
471 uint32_t Demultiplex(const AudioFrame& audioFrame);
498 int32_t MixAudioWithFile(AudioFrame& audioFrame, int mixingFrequency);
531 AudioFrame _audioFrame;
609 AudioFrame::SpeechType _outputSpeechType
    [all...]
  /external/chromium_org/third_party/webrtc/modules/utility/source/
coder.cc 54 int32_t AudioCoder::Decode(AudioFrame& decodedAudio,
74 int32_t AudioCoder::PlayoutData(AudioFrame& decodedAudio,
80 int32_t AudioCoder::Encode(const AudioFrame& audio,
86 AudioFrame audioFrame;
87 audioFrame.CopyFrom(audio);
88 audioFrame.timestamp_ = _encodeTimestamp;
89 _encodeTimestamp += audioFrame.samples_per_channel_;
94 if(_acm->Add10MsData((AudioFrame&)audioFrame) == -1
    [all...]
  /frameworks/base/tests/Camera2Tests/SmartCamera/SimpleCamera/src/androidx/media/filterfw/decoder/
AudioTrackDecoder.java 105 public void grabSample(FrameValue audioFrame) {
107 if (audioFrame != null) {
110 audioFrame.setValue(sample);
111 audioFrame.setTimestamp(mAudioPresentationTimeUs * 1000);
  /external/webrtc/src/modules/audio_processing/
audio_buffer.h 44 void set_activity(AudioFrame::VADActivity activity);
45 AudioFrame::VADActivity activity() const;
49 void DeinterleaveFrom(AudioFrame* audioFrame);
50 void InterleaveTo(AudioFrame* audioFrame) const;
53 void InterleaveTo(AudioFrame* frame, bool data_changed) const;
69 AudioFrame::VADActivity activity_;
  /external/chromium_org/third_party/webrtc/modules/audio_conference_mixer/interface/
audio_conference_mixer_defines.h 24 // The implementation of this function should update audioFrame with new
28 virtual int32_t GetAudioFrame(const int32_t id, AudioFrame& audioFrame) = 0;
33 // This function specifies the sampling frequency needed for the AudioFrame
79 // Note that uniqueAudioFrames is an array of AudioFrame pointers with the
82 const AudioFrame& generalAudioFrame,
83 const AudioFrame** uniqueAudioFrames,
  /frameworks/av/media/libstagefright/webm/
WebmFrameThread.cpp 207 const sp<WebmFrame> audioFrame = mAudioFrames.peek();
208 ALOGV("a frame: %p", audioFrame.get());
210 if (videoFrame->mEos && audioFrame->mEos) {
214 if (*audioFrame < *videoFrame) {
217 outstandingFrames.push_back(audioFrame);
  /external/chromium_org/third_party/webrtc/modules/audio_processing/
audio_buffer.h 85 void set_activity(AudioFrame::VADActivity activity);
86 AudioFrame::VADActivity activity() const;
89 void DeinterleaveFrom(AudioFrame* audioFrame);
92 void InterleaveTo(AudioFrame* frame, bool data_changed) const;
115 AudioFrame::VADActivity activity_;

Completed in 322 milliseconds