/external/webkit/Source/WebCore/platform/audio/ |
FFTConvolver.h |
    39 // fftSize must be a power of two
    40 FFTConvolver(size_t fftSize);
    42 // For now, with multiple calls to Process(), framesToProcess MUST add up EXACTLY to fftSize / 2
    46 // The input to output latency is equal to fftSize / 2
    53 size_t fftSize() const { return m_frame.fftSize(); }
    58 // Buffer input until we get fftSize / 2 samples then do an FFT
|
FFTConvolver.cpp |
    41 FFTConvolver::FFTConvolver(size_t fftSize)
    42 : m_frame(fftSize)
    44 , m_inputBuffer(fftSize) // 2nd half of buffer is always zeroed
    45 , m_outputBuffer(fftSize)
    46 , m_lastOverlapBuffer(fftSize / 2)
    52 // FIXME: make so framesToProcess is not required to fit evenly into fftSize/2
    79 size_t halfSize = fftSize() / 2;
|
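The FFTConvolver entries above describe a fixed-block convolver: input is buffered in chunks of fftSize / 2 frames, each full chunk triggers one FFT, and the output therefore lags the input by fftSize / 2 samples. The following is a minimal sketch of that blocking and latency idea only, not WebCore's convolver; the class name and the pass-through "processing" step are hypothetical stand-ins for the real FFT multiply and overlap-add.

    #include <cstddef>
    #include <vector>

    class BlockedProcessor {
    public:
        explicit BlockedProcessor(size_t fftSize)
            : m_halfSize(fftSize / 2)
            , m_input(m_halfSize, 0.0f)
            , m_pending(m_halfSize, 0.0f) // output computed from the previous half-block
            , m_fill(0) { }

        // As in FFTConvolver.h line 42, framesToProcess is assumed to add up to
        // multiples of fftSize / 2 across calls.
        void process(const float* source, float* destination, size_t framesToProcess)
        {
            for (size_t i = 0; i < framesToProcess; ++i) {
                destination[i] = m_pending[m_fill]; // emit a sample computed fftSize / 2 frames ago
                m_input[m_fill] = source[i];        // buffer the incoming sample
                if (++m_fill == m_halfSize) {
                    // A real convolver would zero-pad m_input to fftSize, take the FFT,
                    // multiply by the kernel spectrum, inverse-transform, and overlap-add
                    // the second half into the next block. Here we just pass data through.
                    m_pending = m_input;
                    m_fill = 0;
                }
            }
        }

    private:
        size_t m_halfSize;
        std::vector<float> m_input;
        std::vector<float> m_pending;
        size_t m_fill;
    };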
FFTFrame.h |
    60 FFTFrame(unsigned fftSize);
    82 void doPaddedFFT(float* data, size_t dataSize); // zero-padding with dataSize <= fftSize
    86 unsigned fftSize() const { return m_FFTSize; }
    99 static FFTSetup fftSetupForSize(unsigned fftSize);
    117 static DFTI_DESCRIPTOR_HANDLE descriptorHandleForSize(unsigned fftSize);
    146 static fftwf_plan fftwPlanForSize(unsigned fftSize, Direction,
|
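FFTFrame is the platform-independent wrapper around whichever FFT backend is compiled in (the vDSP, MKL, and FFTW variants appear further down), and doPaddedFFT() zero-pads a block shorter than the transform before running it. Below is a hedged sketch of just that padding step; doForwardTransform() is a hypothetical placeholder for the backend call.

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Illustrative only: mimic the zero-padding contract of FFTFrame::doPaddedFFT()
    // (FFTFrame.h line 82: dataSize <= fftSize).
    void paddedForwardFFT(const float* data, size_t dataSize, size_t fftSize)
    {
        std::vector<float> padded(fftSize, 0.0f);                 // zero-initialized, like AudioFloatArray
        std::copy(data, data + std::min(dataSize, fftSize), padded.begin());
        // doForwardTransform(padded.data());                     // platform FFT (vDSP / MKL / FFTW) would run here
    }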
HRTFKernel.h |
    54 static PassRefPtr<HRTFKernel> create(AudioChannel* channel, size_t fftSize, double sampleRate, bool bassBoost)
    56 return adoptRef(new HRTFKernel(channel, fftSize, sampleRate, bassBoost));
    69 size_t fftSize() const { return m_fftFrame->fftSize(); }
    80 HRTFKernel(AudioChannel* channel, size_t fftSize, double sampleRate, bool bassBoost);
|
ReverbConvolverStage.cpp |
    47 size_t fftSize, size_t renderPhase, size_t renderSliceSize, ReverbAccumulationBuffer* accumulationBuffer)
    48 : m_fftKernel(fftSize)
    58 m_convolver = adoptPtr(new FFTConvolver(fftSize));
    64 // But, the FFT convolution itself incurs fftSize / 2 latency, so subtract this out...
    65 size_t halfSize = fftSize / 2;
    81 m_preDelayBuffer.resize(m_preDelayLength < fftSize ? fftSize : m_preDelayLength);
    133 // An expensive FFT will happen every fftSize / 2 frames.
|
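ReverbConvolverStage positions one slice of the impulse response at a given delay, but the FFT convolution already contributes fftSize / 2 frames of latency, so that amount is subtracted from the stage's own pre-delay (lines 64-65), and the pre-delay buffer is never sized smaller than one FFT block (line 81). A small sketch of that arithmetic, with hypothetical parameter names (stageDelay is an assumption, not WebCore's exact variable):

    #include <cstddef>

    // Hypothetical sketch of the pre-delay bookkeeping around ReverbConvolverStage.cpp
    // lines 64-81. The block convolver supplies fftSize / 2 frames of delay "for free".
    size_t stagePreDelayFrames(size_t stageDelay, size_t fftSize)
    {
        size_t halfSize = fftSize / 2;
        return stageDelay >= halfSize ? stageDelay - halfSize : 0;
    }

    size_t preDelayBufferFrames(size_t preDelayLength, size_t fftSize)
    {
        // Matches line 81: hold at least one full FFT block even for short pre-delays.
        return preDelayLength < fftSize ? fftSize : preDelayLength;
    }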
ReverbConvolver.cpp |
    93 size_t fftSize = m_minFFTSize;
    95 size_t stageSize = fftSize / 2;
    105 OwnPtr<ReverbConvolverStage> stage(new ReverbConvolverStage(response, totalResponseLength, reverbTotalLatency, stageOffset, stageSize, fftSize, renderPhase, renderSliceSize, &m_accumulationBuffer));
    119 fftSize *= 2;
    120 if (hasRealtimeConstraint && !isBackgroundStage && fftSize > m_maxRealtimeFFTSize)
    121 fftSize = m_maxRealtimeFFTSize;
    122 if (fftSize > m_maxFFTSize)
    123 fftSize = m_maxFFTSize;
|
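ReverbConvolver splits a long impulse response into stages whose FFT sizes double as the stages get further from time zero: early stages use small, low-latency FFTs, later stages larger and more efficient ones, clamped by a realtime limit and an absolute maximum (lines 119-123). Here is a self-contained sketch of that sizing loop; all of the constants are illustrative assumptions, not the engine's actual values.

    #include <cstddef>
    #include <cstdio>

    int main()
    {
        // Assumed values, for illustration only.
        const size_t minFFTSize = 256;
        const size_t maxRealtimeFFTSize = 4096;
        const size_t maxFFTSize = 32768;
        const size_t totalResponseLength = 48000;   // e.g. a one-second response at 48 kHz
        const bool hasRealtimeConstraint = true;
        const bool isBackgroundStage = false;       // assume every stage runs on the realtime thread

        size_t fftSize = minFFTSize;
        size_t stageOffset = 0;
        while (stageOffset < totalResponseLength) {
            size_t stageSize = fftSize / 2;         // each stage covers fftSize / 2 response frames
            std::printf("stage @ %6zu frames: stageSize=%5zu fftSize=%5zu\n",
                        stageOffset, stageSize, fftSize);
            stageOffset += stageSize;

            fftSize *= 2;                           // later stages can afford bigger FFTs
            if (hasRealtimeConstraint && !isBackgroundStage && fftSize > maxRealtimeFFTSize)
                fftSize = maxRealtimeFFTSize;
            if (fftSize > maxFFTSize)
                fftSize = maxFFTSize;
        }
        return 0;
    }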
FFTFrame.cpp |
    48 AudioFloatArray paddedResponse(fftSize()); // zero-initialized
    57 OwnPtr<FFTFrame> newFrame = adoptPtr(new FFTFrame(frame1.fftSize()));
    62 int fftSize = newFrame->fftSize();
    63 AudioFloatArray buffer(fftSize);
    65 buffer.zeroRange(fftSize / 2, fftSize);
    85 m_FFTSize = frame1.fftSize();
    184 int halfSize = fftSize() / 2;
    186 const double kSamplePhaseDelay = (2.0 * piDouble) / double(fftSize());
    [all...]
|
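The constant at line 186, kSamplePhaseDelay = 2*pi / fftSize, is the per-bin phase increment that a one-sample delay produces: delaying a signal by d samples multiplies DFT bin k by e^(-j * 2*pi * k * d / fftSize). The sketch below is not the WebCore routine; it simply applies such a constant delay to a split real/imaginary spectrum, which is the same relationship that underlies the group-delay handling used by HRTFKernel further down.

    #include <cmath>
    #include <cstddef>

    // Sketch only: rotate each bin of a half-complex spectrum so the time-domain
    // signal is delayed by delayFrames samples. samplePhaseDelay = 2*pi / fftSize
    // (FFTFrame.cpp line 186) is the phase step per bin per sample of delay.
    void addConstantDelay(float* realP, float* imagP, size_t fftSize, double delayFrames)
    {
        const double kPi = 3.14159265358979323846;
        const double samplePhaseDelay = (2.0 * kPi) / static_cast<double>(fftSize);
        const size_t halfSize = fftSize / 2;
        for (size_t k = 1; k < halfSize; ++k) {      // DC and Nyquist packing left alone
            double phase = -samplePhaseDelay * static_cast<double>(k) * delayFrames;
            double c = std::cos(phase);
            double s = std::sin(phase);
            double re = realP[k];
            double im = imagP[k];
            realP[k] = static_cast<float>(re * c - im * s);
            imagP[k] = static_cast<float>(re * s + im * c);
        }
    }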
HRTFKernel.cpp |
    68 HRTFKernel::HRTFKernel(AudioChannel* channel, size_t fftSize, double sampleRate, bool bassBoost)
    75 m_frameDelay = extractAverageGroupDelay(channel, fftSize / 2);
    90 size_t truncatedResponseLength = min(responseLength, fftSize / 2); // truncate if necessary to max impulse response length allowed by FFT
    102 m_fftFrame = adoptPtr(new FFTFrame(fftSize));
    108 OwnPtr<AudioChannel> channel = adoptPtr(new AudioChannel(fftSize()));
|
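HRTFKernel keeps at most fftSize / 2 samples of the measured impulse response (line 90): the other half of the FFT frame has to stay zero so the frequency-domain multiply in FFTConvolver behaves as linear rather than circular convolution, and the average group delay measured at line 75 is kept separately in m_frameDelay. A one-liner sketch of the sizing rule, with a hypothetical helper name:

    #include <algorithm>
    #include <cstddef>

    // Mirrors HRTFKernel.cpp line 90: the response must fit in half the FFT frame.
    size_t usableResponseLength(size_t responseLength, size_t fftSize)
    {
        return std::min(responseLength, fftSize / 2);
    }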
HRTFPanner.h | 43 size_t fftSize() { return fftSizeForSampleRate(m_sampleRate); }
|
ReverbConvolverStage.h | 49 size_t fftSize, size_t renderPhase, size_t renderSliceSize, ReverbAccumulationBuffer* accumulationBuffer);
|
HRTFElevation.cpp |
    121 // Note that depending on the fftSize returned by the panner, we may be truncating the impulse response we just loaded in.
    122 const size_t fftSize = HRTFPanner::fftSizeForSampleRate(sampleRate);
    123 kernelL = HRTFKernel::create(leftEarImpulseResponse, fftSize, sampleRate, true);
    124 kernelR = HRTFKernel::create(rightEarImpulseResponse, fftSize, sampleRate, true);
|
/cts/apps/CtsVerifier/jni/audioquality/ |
CompareSpectra.cpp |
    70 /* Return a pointer to 1+(fftSize/2) spectrum magnitude values
    74 fftSize points is used to compute the spectra. The overall signal
    77 double* getAverageSpectrum(short* pcm, int numSamples, int fftSize,
    79 if (numSamples < fftSize) return NULL;
    80 int numFrames = 1 + ((2 * (numSamples - fftSize)) / fftSize);
    81 int numMag = 1 + (fftSize / 2);
    82 float* re = new float[fftSize];
    83 float* im = new float[fftSize];
    88 Window wind(fftSize);
    [all...]
|
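getAverageSpectrum() walks the PCM in fftSize-point frames that overlap by half (the frame-count formula at line 80 corresponds to a hop of fftSize / 2), windows each frame, and averages the 1 + fftSize/2 magnitude bins. Below is a sketch of just the framing arithmetic; the FFT, window, and averaging are elided, and the sample counts are assumptions for illustration.

    #include <cstdio>

    int main()
    {
        const int fftSize = 512;       // assumed transform length
        const int numSamples = 4096;   // assumed PCM length
        if (numSamples < fftSize)
            return 1;                  // getAverageSpectrum() returns NULL in this case (line 79)

        int hop = fftSize / 2;         // 50% overlap between successive frames
        int numFrames = 1 + ((2 * (numSamples - fftSize)) / fftSize);   // line 80
        int numMag = 1 + (fftSize / 2);                                  // line 81: DC .. Nyquist

        for (int frame = 0; frame < numFrames; ++frame) {
            int start = frame * hop;   // never runs past numSamples - fftSize
            std::printf("frame %d covers samples [%d, %d)\n", frame, start, start + fftSize);
        }
        std::printf("%d magnitude values per frame\n", numMag);
        return 0;
    }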
/external/webkit/Source/WebCore/platform/audio/fftw/ |
FFTFrameFFTW.cpp |
    49 unsigned unpackedFFTWDataSize(unsigned fftSize)
    51 return fftSize / 2 + 1;
    57 // Normal constructor: allocates for a given fftSize.
    58 FFTFrame::FFTFrame(unsigned fftSize)
    59 : m_FFTSize(fftSize)
    60 , m_log2FFTSize(static_cast<unsigned>(log2(fftSize)))
    63 , m_data(2 * (3 + unpackedFFTWDataSize(fftSize))) // enough space for real and imaginary data plus 16-byte alignment padding
    81 m_forwardPlan = fftwPlanForSize(fftSize, Forward,
    83 m_backwardPlan = fftwPlanForSize(fftSize, Backward,
    102 , m_data(2 * (3 + unpackedFFTWDataSize(fftSize()))) // enough space for real and imaginary data plus 16-byte alignment padding
    [all...]
|
/external/webkit/Source/WebCore/platform/audio/mkl/ |
FFTFrameMKL.cpp |
    42 DFTI_DESCRIPTOR_HANDLE createDescriptorHandle(int fftSize)
    47 MKL_LONG status = DftiCreateDescriptor(&handle, DFTI_SINGLE, DFTI_REAL, 1, fftSize);
    67 double scale = 1.0 / (2.0 * fftSize);
    88 // Normal constructor: allocates for a given fftSize.
    89 FFTFrame::FFTFrame(unsigned fftSize)
    90 : m_FFTSize(fftSize)
    91 , m_log2FFTSize(static_cast<unsigned>(log2(fftSize)))
    93 , m_complexData(fftSize)
    94 , m_realData(fftSize / 2)
    95 , m_imagData(fftSize / 2)
    [all...]
|
/external/webkit/Source/WebCore/platform/audio/mac/ |
FFTFrameMac.cpp |
    43 // Normal constructor: allocates for a given fftSize
    44 FFTFrame::FFTFrame(unsigned fftSize)
    45 : m_realData(fftSize)
    46 , m_imagData(fftSize)
    48 m_FFTSize = fftSize;
    49 m_log2FFTSize = static_cast<unsigned>(log2(fftSize));
    55 m_FFTSetup = fftSetupForSize(fftSize);
    150 FFTSetup FFTFrame::fftSetupForSize(unsigned fftSize)
    157 int pow2size = static_cast<int>(log2(fftSize));
|
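All three backend files wrap the same real-to-complex transform. A real input of fftSize points has only fftSize / 2 + 1 distinct complex bins (DC through Nyquist), which is exactly what unpackedFFTWDataSize() returns; the vDSP path wants log2(fftSize) for its setup, and the MKL descriptor bakes in a 1 / (2 * fftSize) scale (line 67). A small sketch of those size and scale relationships, purely illustrative and with hypothetical helper names:

    #include <cmath>
    #include <cstddef>

    // Illustrative helpers for the sizes and scales seen in the FFTW / MKL / vDSP backends.
    size_t unpackedRealFFTBins(size_t fftSize)
    {
        return fftSize / 2 + 1;                 // DC .. Nyquist for a length-fftSize real input
    }

    unsigned log2FFTSize(size_t fftSize)
    {
        // fftSize is a power of two (FFTConvolver.h line 39); vDSP's setup wants its log2.
        return static_cast<unsigned>(std::log2(static_cast<double>(fftSize)));
    }

    double mklDescriptorScale(size_t fftSize)
    {
        return 1.0 / (2.0 * static_cast<double>(fftSize));   // the scale at FFTFrameMKL.cpp line 67
    }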
/external/webkit/Source/WebCore/webaudio/ |
RealtimeAnalyserNode.idl | 30 attribute unsigned long fftSize;
|
RealtimeAnalyser.cpp |
    108 // FIXME : allow to work with non-FFTSize divisible chunking
    153 size_t fftSize = this->fftSize();
    155 AudioFloatArray temporaryBuffer(fftSize);
    159 // Take the previous fftSize values from the input buffer and copy into the temporary buffer.
    162 for (unsigned i = 0; i < fftSize; ++i)
    163 tempP[i] = inputBuffer[(i + writeIndex - fftSize + InputBufferSize) % InputBufferSize];
    166 applyWindow(tempP, fftSize);
    266 unsigned fftSize = this->fftSize();
    [all...]
|
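RealtimeAnalyser keeps incoming audio in a circular input buffer and, when an FFT is due, copies the most recent fftSize samples out of it, counting back from writeIndex and wrapping with a modulo, then windows them (lines 159-166). The following is a self-contained sketch of that unwrapping step only; the surrounding buffer management and the window itself are assumed.

    #include <cstddef>
    #include <vector>

    // Sketch of pulling the newest fftSize samples out of a ring buffer, as in
    // RealtimeAnalyser.cpp lines 159-166. inputBuffer.size() must be >= fftSize.
    void copyLatestFrames(const std::vector<float>& inputBuffer, size_t writeIndex,
                          size_t fftSize, std::vector<float>& temp)
    {
        const size_t InputBufferSize = inputBuffer.size();
        temp.resize(fftSize);
        for (size_t i = 0; i < fftSize; ++i) {
            // writeIndex points just past the newest sample; step back fftSize samples.
            // Adding InputBufferSize keeps the index non-negative before the modulo.
            temp[i] = inputBuffer[(i + writeIndex - fftSize + InputBufferSize) % InputBufferSize];
        }
        // applyWindow(temp.data(), fftSize);   // a window would be applied next, as at line 166
    }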
RealtimeAnalyserNode.h | 48 unsigned int fftSize() const { return m_analyser.fftSize(); }
|
RealtimeAnalyser.h | 50 size_t fftSize() const { return m_fftSize; }
|
/frameworks/base/include/media/ |
Visualizer.h |
    87 // fftSize) is 0.
    91 uint32_t fftSize,
|
/frameworks/base/media/libmedia/ |
Visualizer.cpp |
    259 uint32_t fftSize = 0;
    266 fftSize = mCaptureSize;
    268 mCaptureCallBack(mCaptureCbkUser, waveSize, wavePtr, fftSize, fftPtr, mSampleRate);
|
/frameworks/base/media/jni/audioeffect/ |
android_media_Visualizer.cpp |
    99 uint32_t fftSize,
    138 if (fftSize != 0 && fft != NULL) {
    139 jbyteArray jArray = env->NewByteArray(fftSize);
    142 memcpy(nArray, fft, fftSize);
|
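On the native side the visualizer hands its capture callback a buffer of fftSize FFT bytes (Visualizer.cpp line 268), and the JNI glue copies that buffer into a freshly allocated Java byte[] of the same length (android_media_Visualizer.cpp lines 138-142). Below is a stripped-down sketch of that copy, assuming a valid JNIEnv*; it uses SetByteArrayRegion instead of copying through a raw element pointer as the real glue does, and it omits the dispatch of the callback method into Java and the error handling around it.

    #include <jni.h>
    #include <cstdint>

    // Hypothetical sketch: package native FFT capture data as a Java byte[] via JNI,
    // mirroring the NewByteArray-plus-copy pattern at android_media_Visualizer.cpp lines 138-142.
    jbyteArray makeFftByteArray(JNIEnv* env, const uint8_t* fft, uint32_t fftSize)
    {
        if (fftSize == 0 || fft == nullptr)
            return nullptr;                       // matches the "fftSize != 0 && fft != NULL" guard at line 138
        jbyteArray jArray = env->NewByteArray(static_cast<jsize>(fftSize));
        if (jArray == nullptr)
            return nullptr;                       // allocation failed; Java will see the pending OutOfMemoryError
        env->SetByteArrayRegion(jArray, 0, static_cast<jsize>(fftSize),
                                reinterpret_cast<const jbyte*>(fft));
        return jArray;
    }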