/device/samsung/manta/ |
media_profiles.xml | 37 <!ATTLIST Audio sampleRate CDATA #REQUIRED> 91 sampleRate="44100" 104 sampleRate="44100" 118 sampleRate="44100" 133 sampleRate="44100" 147 sampleRate="44100" 161 sampleRate="44100" 183 sampleRate="44100" 196 sampleRate="44100" 211 sampleRate="44100 [all...] |
/hardware/libhardware_legacy/audio/ |
A2dpAudioInterface.cpp | 64 uint32_t devices, int *format, uint32_t *channels, uint32_t *sampleRate, status_t *status) 68 return mHardwareInterface->openOutputStream(devices, format, channels, sampleRate, status); 82 if ((err = out->set(devices, format, channels, sampleRate)) == NO_ERROR) { 107 uint32_t devices, int *format, uint32_t *channels, uint32_t *sampleRate, status_t *status, 110 return mHardwareInterface->openInputStream(devices, format, channels, sampleRate, status, acoustics); 202 size_t A2dpAudioInterface::getInputBufferSize(uint32_t sampleRate, int format, int channelCount) 204 return mHardwareInterface->getInputBufferSize(sampleRate, format, channelCount); 247 if (lRate == 0) lRate = sampleRate(); 252 (lRate != sampleRate())){ 255 if (pRate) *pRate = sampleRate(); [all...] |
AudioHardwareGeneric.cpp | 68 uint32_t devices, int *format, uint32_t *channels, uint32_t *sampleRate, status_t *status) 82 status_t lStatus = out->set(this, mFd, devices, format, channels, sampleRate); 102 uint32_t devices, int *format, uint32_t *channels, uint32_t *sampleRate, 122 status_t lStatus = in->set(this, mFd, devices, format, channels, sampleRate, acoustics); 207 if (lRate == 0) lRate = sampleRate(); 212 (lRate != sampleRate())) { 215 if (pRate) *pRate = sampleRate(); 252 snprintf(buffer, SIZE, "\tsample rate: %d\n", sampleRate()); 325 (*pRate != sampleRate())) { 329 *pRate = sampleRate(); [all...] |
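The two libhardware_legacy files above share one negotiation pattern: openOutputStream()/openInputStream() take format, channels and sampleRate as in/out pointers, a value of 0 means "use the hardware default" (the lRate == 0 fallback at line 207), and the values actually accepted are written back through the pointers (*pRate = sampleRate()). A minimal caller sketch, assuming the usual AudioHardwareInterface/AudioStreamOut declarations from this tree; the helper itself is hypothetical:

    #include <hardware_legacy/AudioHardwareInterface.h>

    using namespace android;

    // Hypothetical helper, not part of the tree: open an output stream with the
    // hardware defaults and read back what the HAL actually granted.
    AudioStreamOut* openDefaultOutput(AudioHardwareInterface* hw, uint32_t devices)
    {
        int      format     = 0;          // 0 == let the HAL pick its default
        uint32_t channels   = 0;
        uint32_t sampleRate = 0;
        status_t status     = NO_ERROR;

        AudioStreamOut* out =
                hw->openOutputStream(devices, &format, &channels, &sampleRate, &status);
        if (out == NULL || status != NO_ERROR)
            return NULL;

        // sampleRate, channels and format now hold the negotiated values.
        return out;
    }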
/external/chromium_org/third_party/WebKit/Source/modules/webaudio/ |
AsyncAudioDecoder.h | 47 void decodeAsync(ArrayBuffer* audioData, float sampleRate, PassOwnPtr<AudioBufferCallback> successCallback, PassOwnPtr<AudioBufferCallback> errorCallback); 50 static void decode(ArrayBuffer* audioData, float sampleRate, AudioBufferCallback* successCallback, AudioBufferCallback* errorCallback);
|
AudioSourceNode.h | 38 AudioSourceNode(AudioContext* context, float sampleRate) 39 : AudioNode(context, sampleRate)
|
ChannelMergerNode.h | 41 static PassRefPtr<ChannelMergerNode> create(AudioContext*, float sampleRate, unsigned numberOfInputs); 56 ChannelMergerNode(AudioContext*, float sampleRate, unsigned numberOfInputs);
|
ChannelSplitterNode.h | 37 static PassRefPtr<ChannelSplitterNode> create(AudioContext*, float sampleRate, unsigned numberOfOutputs); 47 ChannelSplitterNode(AudioContext*, float sampleRate, unsigned numberOfOutputs);
|
DelayProcessor.cpp | 35 DelayProcessor::DelayProcessor(AudioContext* context, float sampleRate, unsigned numberOfChannels, double maxDelayTime) 36 : AudioDSPKernelProcessor(sampleRate, numberOfChannels)
|
AudioScheduledSourceNode.cpp | 45 AudioScheduledSourceNode::AudioScheduledSourceNode(AudioContext* context, float sampleRate) 46 : AudioSourceNode(context, sampleRate) 68 double sampleRate = this->sampleRate(); 76 size_t startFrame = AudioUtilities::timeToSampleFrame(m_startTime, sampleRate); 77 size_t endFrame = m_endTime == UnknownTime ? 0 : AudioUtilities::timeToSampleFrame(m_endTime, sampleRate);
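AudioUtilities::timeToSampleFrame(), used above to turn the scheduled start/stop times into frame indices, is essentially seconds multiplied by the context rate; a rough stand-alone equivalent (the rounding behaviour is assumed, not quoted from the source):

    #include <cmath>
    #include <cstddef>

    // Approximate stand-in for AudioUtilities::timeToSampleFrame().
    static size_t timeToSampleFrame(double timeSeconds, double sampleRate)
    {
        return static_cast<size_t>(std::round(timeSeconds * sampleRate));
    }

    // e.g. start(1.5) in a 44100 Hz context schedules frame 66150.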
|
AudioParam.cpp | 158 double sampleRate = context()->sampleRate(); 160 double endTime = startTime + numberOfValues / sampleRate; 164 m_value = m_timeline.valuesForTimeRange(startTime, endTime, narrowPrecisionToFloat(m_value), values, numberOfValues, sampleRate, sampleRate);
|
ConvolverNode.cpp | 48 ConvolverNode::ConvolverNode(AudioContext* context, float sampleRate) 49 : AudioNode(context, sampleRate) 139 bufferBus->setSampleRate(buffer->sampleRate()); 163 return m_reverb ? m_reverb->impulseResponseLength() / static_cast<double>(sampleRate()) : 0; 173 return m_reverb ? m_reverb->latencyFrames() / static_cast<double>(sampleRate()) : 0;
|
MediaElementAudioSourceNode.cpp | 49 : AudioSourceNode(context, context->sampleRate()) 86 if (sourceSampleRate != sampleRate()) { 87 double scaleFactor = sourceSampleRate / sampleRate(); 120 ASSERT(m_sourceSampleRate != sampleRate()); 124 ASSERT(m_sourceSampleRate == sampleRate());
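The scaleFactor above is simply the ratio of the media element's native rate to the context rate; whenever it is not 1 the node must resample. Illustrative arithmetic only (the helper name is made up):

    // A 48 kHz <audio> element feeding a 44.1 kHz AudioContext: the node has to
    // consume roughly 1.088 source frames for every frame it renders.
    double resamplingScaleFactor(double sourceSampleRate, double contextSampleRate)
    {
        return sourceSampleRate / contextSampleRate;   // 48000 / 44100 ≈ 1.088
    }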
|
/external/chromium_org/third_party/WebKit/Source/testing/runner/ |
MockWebAudioDevice.h | 42 explicit MockWebAudioDevice(double sampleRate); 47 virtual double sampleRate();
|
/external/srec/srec/cfront/ |
ca_wave.c | 72 ASSERT(hFrontend->config->waveobj->samplerate); 80 hWave->data.samplerate = hFrontend->config->waveobj->samplerate; 84 hFrontend->config->waveobj->samplerate / FRAMERATE, 132 hWave->data.samplerate = 0; 161 int samplerate, 179 hWave->data.samplerate = samplerate;
|
/frameworks/av/cmds/stagefright/ |
SineSource.cpp | 12 SineSource::SineSource(int32_t sampleRate, int32_t numChannels) 14 mSampleRate(sampleRate),
|
/frameworks/av/media/libnbaio/ |
AudioStreamInSource.cpp | 47 uint32_t sampleRate = mStream->common.get_sample_rate(&mStream->common); 50 mFormat = Format_from_SR_C(sampleRate, popcount(channelMask));
|
/frameworks/base/core/java/android/speech/srec/ |
WaveHeader.java | 29 * <li> sampleRate - usually 8000, 11025, 16000, 22050, or 44100 hz. 69 * @param sampleRate typically 8000, 11025, 16000, 22050, or 44100 hz. 73 public WaveHeader(short format, short numChannels, int sampleRate, short bitsPerSample, int numBytes) { 75 mSampleRate = sampleRate; 129 * @param sampleRate sample rate, typically 8000, 11025, 16000, 22050, or 44100 hz. 132 public WaveHeader setSampleRate(int sampleRate) { 133 mSampleRate = sampleRate; 272 "WaveHeader format=%d numChannels=%d sampleRate=%d bitsPerSample=%d numBytes=%d",
|
/frameworks/opt/net/voip/src/jni/rtp/ |
GsmCodec.cpp | 42 int set(int sampleRate, const char *fmtp) { 43 return (sampleRate == 8000 && mEncode && mDecode) ? 160 : -1;
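The 160 returned above is the GSM full-rate frame size: the codec works on 20 ms frames, and at the only rate it accepts, 8000 Hz, that is 8000 / 50 = 160 samples per frame. The same check as a stand-alone sketch (the function name is illustrative):

    // Sketch of the test in GsmCodec::set(): GSM full-rate runs at 8 kHz only,
    // 50 frames per second, so each frame covers 160 samples.
    int gsmSamplesPerFrame(int sampleRate)
    {
        return sampleRate == 8000 ? sampleRate / 50 : -1;   // 160, or -1 if unsupported
    }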
|
/packages/apps/VoiceDialer/src/com/android/voicedialer/ |
RecognizerEngine.java | 110 * @param sampleRate the same rate coming from the mic or micFile 113 File micFile, int sampleRate) { 124 if (mSampleRate != sampleRate) { 131 mSampleRate = sampleRate; 138 if (sampleRate == 8000) { 152 mic = new MicrophoneInputStream(sampleRate, sampleRate * 15); 159 if (mLogger != null) mic = mLogger.logInputStream(mic, sampleRate);
|
/external/chromium_org/third_party/WebKit/Source/platform/audio/ |
HRTFElevation.cpp | 99 bool HRTFElevation::calculateSymmetricKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName, 104 bool success = calculateKernelsForAzimuthElevation(azimuth, elevation, sampleRate, subjectName, kernelL1, kernelR1); 113 success = calculateKernelsForAzimuthElevation(symmetricAzimuth, elevation, sampleRate, subjectName, kernelL2, kernelR2); 124 bool HRTFElevation::calculateKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName, 172 RefPtr<AudioBus> response(AudioBus::createBySampleRateConverting(preSampleRateConvertedResponse.get(), false, sampleRate)); 178 RefPtr<AudioBus> impulseResponse(AudioBus::loadPlatformResource(resourceName.utf8().data(), sampleRate)); 185 size_t expectedLength = static_cast<size_t>(256 * (sampleRate / 44100.0)); 198 const size_t fftSize = HRTFPanner::fftSizeForSampleRate(sampleRate); 199 kernelL = HRTFKernel::create(leftEarImpulseResponse, fftSize, sampleRate); 200 kernelR = HRTFKernel::create(rightEarImpulseResponse, fftSize, sampleRate); [all...] |
HRTFKernel.cpp | 70 HRTFKernel::HRTFKernel(AudioChannel* channel, size_t fftSize, float sampleRate) 72 , m_sampleRate(sampleRate) 86 unsigned numberOfFadeOutFrames = static_cast<unsigned>(sampleRate / 4410); // 10 sample-frames @44.1KHz sample-rate 121 float sampleRate1 = kernel1->sampleRate(); 122 float sampleRate2 = kernel2->sampleRate();
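Both HRTF files above scale constants defined against the 44.1 kHz rate of the bundled impulse-response resources: HRTFElevation expects 256 * (sampleRate / 44100.0) frames per response, and HRTFKernel fades out over sampleRate / 4410 frames (the 10 frames at 44.1 kHz noted in the comment). A worked example for a 48 kHz context, mirroring the integer truncation of the originals (helper names are illustrative):

    #include <cstddef>

    static size_t hrtfExpectedLength(double sampleRate)
    {
        return static_cast<size_t>(256 * (sampleRate / 44100.0));   // 48000 -> 278
    }

    static unsigned hrtfFadeOutFrames(float sampleRate)
    {
        return static_cast<unsigned>(sampleRate / 4410);            // 48000 -> 10
    }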
|
/frameworks/av/media/libstagefright/rtsp/ |
APacketSource.cpp | 473 int32_t sampleRate, numChannels; 475 desc.c_str(), &sampleRate, &numChannels); 477 mFormat->setInt32(kKeySampleRate, sampleRate); 489 int32_t sampleRate, numChannels; 491 desc.c_str(), &sampleRate, &numChannels); 493 mFormat->setInt32(kKeySampleRate, sampleRate); 496 if (sampleRate != 8000 || numChannels != 1) { 502 int32_t sampleRate, numChannels; 504 desc.c_str(), &sampleRate, &numChannels); 506 mFormat->setInt32(kKeySampleRate, sampleRate); [all...] |
/device/asus/grouper/ |
media_profiles.xml | 39 <!ATTLIST Audio sampleRate CDATA #REQUIRED> 101 sampleRate="8000" 114 sampleRate="8000" 127 sampleRate="44100" 144 sampleRate="8000" 161 sampleRate="8000" 178 sampleRate="44100"
|
/external/chromium_org/media/base/android/java/src/org/chromium/media/ |
WebAudioMediaCodecBridge.java | 70 int sampleRate = format.getInteger(MediaFormat.KEY_SAMPLE_RATE); 150 Log.d(LOG_TAG, "Final: Rate: " + sampleRate + 157 sampleRate, 178 sampleRate = newFormat.getInteger(MediaFormat.KEY_SAMPLE_RATE); 199 int sampleRate,
|
/external/chromium_org/third_party/WebKit/Source/platform/audio/chromium/ |
AudioDestinationChromium.cpp | 48 PassOwnPtr<AudioDestination> AudioDestination::create(AudioIOCallback& callback, const String& inputDeviceId, unsigned numberOfInputChannels, unsigned numberOfOutputChannels, float sampleRate) 50 return adoptPtr(new AudioDestinationChromium(callback, inputDeviceId, numberOfInputChannels, numberOfOutputChannels, sampleRate)); 53 AudioDestinationChromium::AudioDestinationChromium(AudioIOCallback& callback, const String& inputDeviceId, unsigned numberOfInputChannels, unsigned numberOfOutputChannels, float sampleRate) 58 , m_sampleRate(sampleRate) 85 m_audioDevice = adoptPtr(blink::Platform::current()->createAudioDevice(m_callbackBufferSize, numberOfInputChannels, numberOfOutputChannels, sampleRate, this, inputDeviceId));
|