/frameworks/av/services/audiopolicy/common/managerdefinitions/src/ |
Gains.cpp | 17 #define LOG_TAG "APM::Gains" 27 #include "Gains.h" 36 Gains::sDefaultVolumeCurve[Volume::VOLCNT] = { 42 Gains::sDefaultMediaVolumeCurve[Volume::VOLCNT] = { 47 Gains::sExtMediaSystemVolumeCurve[Volume::VOLCNT] = { 52 Gains::sSpeakerMediaVolumeCurve[Volume::VOLCNT] = { 57 Gains::sSpeakerMediaVolumeCurveDrc[Volume::VOLCNT] = { 62 Gains::sSpeakerSonificationVolumeCurve[Volume::VOLCNT] = { 67 Gains::sSpeakerSonificationVolumeCurveDrc[Volume::VOLCNT] = { 77 Gains::sDefaultSystemVolumeCurve[Volume::VOLCNT] = [all...] |
StreamDescriptor.cpp | 28 #include "Gains.h" 152 return Gains::volIndexToDb(streamDesc.getVolumeCurvePoint(category), 177 Gains::sVolumeProfiles[i][j]); 184 Gains::sDefaultSystemVolumeCurveDrc); 186 Gains::sSpeakerSonificationVolumeCurveDrc); 188 Gains::sSpeakerSonificationVolumeCurveDrc); 190 Gains::sSpeakerSonificationVolumeCurveDrc); 192 Gains::sSpeakerMediaVolumeCurveDrc); 194 Gains::sSpeakerMediaVolumeCurveDrc); 203 Gains::sVolumeProfiles[streamSrc][j]) [all...] |
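Gains::volIndexToDb() maps a stream's volume index onto a dB attenuation by interpolating between the (index, dB) points of curves like the ones declared above. A minimal Python sketch of that interpolation, using made-up curve points rather than the real APM tables:

    # Illustrative only: each point is (index_percent, attenuation_dB), loosely
    # mirroring the VolumeCurvePoint arrays declared in Gains.cpp.
    SPEAKER_MEDIA_CURVE = [(1, -56.0), (20, -34.0), (60, -11.0), (100, 0.0)]

    def vol_index_to_db(curve, index, index_min, index_max):
        """Linearly interpolate a stream volume index onto a (index%, dB) curve."""
        # Normalize the index into the 1..100 range the curve points use.
        pos = 1 + 99 * (index - index_min) / float(index_max - index_min)
        if pos <= curve[0][0]:
            return curve[0][1]
        for (x0, db0), (x1, db1) in zip(curve, curve[1:]):
            if pos <= x1:
                return db0 + (db1 - db0) * (pos - x0) / float(x1 - x0)
        return curve[-1][1]

    print(vol_index_to_db(SPEAKER_MEDIA_CURVE, 7, 0, 15))   # about -18.4 dB on this made-up curve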
/cts/apps/CameraITS/tests/scene1/ |
test_auto_vs_manual.py | 40 sens, exp, gains, xform, focus = cam.do_3a(get_results=True) 43 print "AWB gains", gains 54 gains_a = cap_auto["metadata"]["android.colorCorrection.gains"] 55 print "Auto gains:", gains_a 61 req["android.colorCorrection.gains"] = gains 67 gains_m1 = cap_man1["metadata"]["android.colorCorrection.gains"] 68 print "Manual wb gains:", gains_m1 82 gains_m2 = cap_man2["metadata"]["android.colorCorrection.gains"] [all...] |
test_3a.py | 28 sens, exp, gains, xform, focus = cam.do_3a(get_results=True) 30 print "AWB: gains", gains, "transform", xform 34 assert(len(gains) == 4)
|
test_capture_result.py | 49 # The camera HAL may not support different gains for two G channels. 65 "android.colorCorrection.gains": manual_gains, 117 gains = cap_res["android.colorCorrection.gains"] 124 print "Gains:", gains 135 assert(len(gains) == 4) 137 assert(all([g > 0 for g in gains])) 141 assert(any([not is_close_float(gains[i], manual_gains[i]) 161 gains = cap_res["android.colorCorrection.gains" [all...] |
test_param_color_correction.py | 28 Takes shots with different transform and gains values, and tests that 29 they look correspondingly different. The transform and gains are chosen 56 # Gains: 60 gains = [[1,1,1,1], [2,1,1,1], [1,1,1,1]] 67 # 1. With unit gains, and identity transform. 69 # 3. With unit gains, and a transform that boosts blue. 72 req["android.colorCorrection.gains"] = gains[i]
|
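All of the scene1 tests above drive android.colorCorrection.gains with a 4-element [R, G_even, G_odd, B] list and read the same key back from the capture metadata. A small Python sketch of that request-and-verify pattern; the raw dict and the mode value are illustrative, since the real tests build their requests through the its helper modules:

    manual_gains = [2.0, 1.0, 1.0, 3.0]           # [R, G_even, G_odd, B]

    req = {
        "android.colorCorrection.mode": 0,        # TRANSFORM_MATRIX, so manual gains apply
        "android.colorCorrection.gains": manual_gains,
    }

    def is_close_float(a, b, tol=0.05):
        return abs(a - b) < tol

    def check_reported_gains(cap_metadata, requested):
        gains = cap_metadata["android.colorCorrection.gains"]
        assert len(gains) == 4
        assert all(g > 0 for g in gains)
        # test_capture_result.py notes the HAL may not support different gains for
        # the two G channels, so only a loose per-channel comparison is done here.
        assert all(is_close_float(g, r) for g, r in zip(gains, requested))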
/external/webrtc/webrtc/modules/audio_coding/codecs/isac/main/source/ |
pitch_estimator.h | 29 double *gains); 41 double *gains); 47 double *gains); 53 double *gains); 60 double *gains);
|
pitch_gain_tables.h | 25 /* cdf for quantized pitch filter gains */ 34 /* mean values of pitch filter gains */
|
pitch_filter.c | 101 * damper_state_dg : state of damping filter for different trial gains. 214 /* Update filter parameters based on the pitch-gains and pitch-lags. */ 240 * Filter a frame of 30 milliseconds, given pitch-lags and pitch-gains. 245 * gains : pointer to pitch-gains, 4 gains per frame. 252 * pitch-gains. 261 double* lags, double* gains, PitchFilterOperation mode, 294 gains[n] *= -kEnhancer; 305 old_gain = gains[0]; 319 gain_delta = (gains[m] - old_gain) / PITCH_GRAN_PER_SUBFRAME [all...] |
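pitch_filter.c does not switch gains abruptly: within each subframe the filter gain ramps from the previous value toward the new one in PITCH_GRAN_PER_SUBFRAME equal steps, which is what the gain_delta line around 319 computes. A hedged Python sketch of just that ramp, with an assumed granularity constant:

    PITCH_GRAN_PER_SUBFRAME = 5    # illustrative; the real constant is defined in the iSAC headers

    def ramp_gains(gains, initial_gain):
        """Ramp toward each of the 4 per-frame gains in equal per-granule steps."""
        old_gain = initial_gain
        out = []
        for target in gains:
            gain_delta = (target - old_gain) / float(PITCH_GRAN_PER_SUBFRAME)
            g = old_gain
            for _ in range(PITCH_GRAN_PER_SUBFRAME):
                g += gain_delta
                out.append(g)
            old_gain = target
        return out

    print(ramp_gains([0.6, 0.2, 0.5, 0.4], initial_gain=0.0))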
/external/webrtc/webrtc/modules/audio_processing/vad/ |
pitch_internal.cc | 29 double* gains, 39 gains[n] = log(gains[n] + 1e-12); 41 // Interpolate lags and gains. 42 PitchInterpolation(*log_old_gain, gains, log_pitch_gain); 43 *log_old_gain = gains[num_in_frames - 1];
|
pitch_internal_unittest.cc | 23 double gains[] = {0.6, 0.2, 0.5, 0.4}; local 31 double expected_log_old_gain = log(gains[kNumInputParameters - 1]); 40 GetSubframesPitchParameters(kSamplingRateHz, gains, lags, kNumInputParameters,
|
pitch_internal.h | 17 double* gains,
|
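GetSubframesPitchParameters() moves the pitch gains into the log domain with a 1e-12 floor, interpolates them, and keeps the last log-gain as state for the next call, which is exactly what the unittest's expected_log_old_gain checks. A short Python sketch of the log-and-carry-state part (the interpolation itself is omitted):

    import math

    def log_gains_and_update_state(gains, old_log_gain):
        """Convert pitch gains to the log domain and carry the last one as state."""
        # old_log_gain would feed PitchInterpolation(), which this sketch omits.
        log_gains = [math.log(g + 1e-12) for g in gains]   # 1e-12 floor avoids log(0)
        new_old_log_gain = log_gains[-1]   # mirrors *log_old_gain = gains[num_in_frames - 1]
        return log_gains, new_old_log_gain

    log_gains, state = log_gains_and_update_state([0.6, 0.2, 0.5, 0.4], old_log_gain=0.0)
    assert abs(state - math.log(0.4)) < 1e-9   # the value expected_log_old_gain checks in the unittest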
/external/libopus/silk/float/ |
process_gains_FLP.c | 35 /* Processing of gains */ 51 psEncCtrl->Gains[ k ] *= s; 59 /* Soft limit on ratio residual energy and squared gains */ 60 gain = psEncCtrl->Gains[ k ]; 62 psEncCtrl->Gains[ k ] = silk_min_float( gain, 32767.0f ); 65 /* Prepare gains for noise shaping quantization */ 67 pGains_Q16[ k ] = (opus_int32)( psEncCtrl->Gains[ k ] * 65536.0f ); 70 /* Save unquantized gains and gain Index */ 74 /* Quantize gains */ 78 /* Overwrite unquantized gains with quantized gains and convert back to Q0 from Q16 * [all...] |
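process_gains_FLP.c keeps the subframe gains as floats, caps them at 32767.0, and only converts to Q16 integers (gain * 65536) when handing them to the noise-shaping quantizer. A sketch of that float-to-Q16 handoff, ignoring the soft-limiting step the listing elides:

    def gains_to_q16(gains_float):
        """Cap the float subframe gains and convert them to Q16 integers."""
        q16 = []
        for g in gains_float:
            g = min(g, 32767.0)            # same hard cap as silk_min_float(gain, 32767.0f)
            q16.append(int(g * 65536.0))   # Q16: 1.0 maps to 65536
        return q16

    print(gains_to_q16([0.5, 1.0, 40000.0]))   # [32768, 65536, 2147418112]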
residual_energy_FLP.c | 95 const silk_float gains[], /* I Quantization gains */ 109 nrgs[ 0 ] = ( silk_float )( gains[ 0 ] * gains[ 0 ] * silk_energy_FLP( LPC_res_ptr + 0 * shift, subfr_length ) ); 110 nrgs[ 1 ] = ( silk_float )( gains[ 1 ] * gains[ 1 ] * silk_energy_FLP( LPC_res_ptr + 1 * shift, subfr_length ) ); 114 nrgs[ 2 ] = ( silk_float )( gains[ 2 ] * gains[ 2 ] * silk_energy_FLP( LPC_res_ptr + 0 * shift, subfr_length ) ); 115 nrgs[ 3 ] = ( silk_float )( gains[ 3 ] * gains[ 3 ] * silk_energy_FLP( LPC_res_ptr + 1 * shift, subfr_length ) ) [all...] |
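residual_energy_FLP.c weights each subframe's LPC-residual energy by the square of its quantization gain, nrgs[k] = gains[k]^2 * energy. A simplified Python version of that scaling, with a plain sum of squares standing in for silk_energy_FLP():

    def residual_energies(lpc_residual, gains, subfr_length):
        """Energy of each residual subframe, scaled by the squared subframe gain."""
        nrgs = []
        for k, g in enumerate(gains):
            subfr = lpc_residual[k * subfr_length:(k + 1) * subfr_length]
            energy = sum(x * x for x in subfr)    # stand-in for silk_energy_FLP()
            nrgs.append(g * g * energy)
        return nrgs

    print(residual_energies([0.1, -0.2, 0.3, 0.05], gains=[2.0, 0.5], subfr_length=2))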
/external/libopus/silk/fixed/ |
residual_energy_FIX.c | 42 const opus_int32 gains[ MAX_NB_SUBFR ], /* I Quantization gains */ 81 /* Apply the squared subframe gains */ 83 /* Fully upscale gains and energies */ 85 lz2 = silk_CLZ32( gains[ i ] ) - 1; 87 tmp32 = silk_LSHIFT32( gains[ i ], lz2 ); 89 /* Find squared gains */
|
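The fixed-point variant cannot square a Q16 gain directly without losing precision, so it first normalizes each gain with a count-leading-zeros shift (lz2 = silk_CLZ32(gains[i]) - 1) and carries the shift along to rescale the squared value. A Python sketch of that normalize-then-square step; the exponent bookkeeping that follows in residual_energy_FIX.c is left out:

    def clz32(x):
        """Count leading zeros of a positive value in a 32-bit word, like silk_CLZ32()."""
        assert 0 < x < (1 << 32)
        return 32 - x.bit_length()

    def square_gain_upscaled(gain_q16):
        """Shift the gain up to just below the sign bit, then square it."""
        lz2 = clz32(gain_q16) - 1               # leave one bit of headroom
        upscaled = gain_q16 << lz2              # silk_LSHIFT32(gains[i], lz2)
        squared = (upscaled * upscaled) >> 32   # keep the top 32 bits of the 64-bit product
        return squared, lz2                     # the shift is compensated later in the exponent

    print(square_gain_upscaled(1 << 16))        # unity gain in Q16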
/cts/apps/CameraITS/tests/inprog/ |
test_3a_remote.py | 50 gains = cap["metadata"]["android.colorCorrection.gains"] 58 print "Gains:", gains
|
/external/webrtc/webrtc/modules/audio_processing/intelligibility/ |
intelligibility_enhancer.h | 29 // audio streams and modifies the render stream with a set of gains per 111 // Computes and sets modified gains. 117 // Transforms freq gains to ERB gains. 130 // Analytically solves quadratic for optimal gains given |lambda|. 131 // Negative gains are set to 0. Stores the results in |sols|. 149 const int analysis_rate_; // Num blocks before gains recalculated. 151 const bool active_; // Whether render gains are being updated. 163 rtc::scoped_ptr<float[]> gains_eq_; // Pre-filter modified gains.
|
/system/media/audio_utils/include/audio_utils/ |
minifloat.h | 58 /** A pair of unity gains expressed as a gain_minifloat_packed_t */ 62 * Convert a float to the internal representation used for gains. 70 * Details on internal representation of gains, based on mini-floats: 79 /** Convert the internal representation used for gains to float */
|
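minifloat.h stores each channel gain as an unsigned mini-float and packs a stereo pair of them into one gain_minifloat_packed_t. The header defines the actual bit layout; purely to illustrate the idea, here is a tiny Python encoder/decoder for a hypothetical unsigned minifloat with 3 exponent bits and 13 mantissa bits (the widths, bias, and denormal handling are assumptions, not the real format):

    import math

    EXP_BITS, MANT_BITS = 3, 13                  # assumed widths, purely for illustration
    BIAS = (1 << (EXP_BITS - 1)) - 1             # 3
    MANT_MAX = (1 << MANT_BITS) - 1

    def gain_to_minifloat(g):
        """Encode a non-negative gain into the hypothetical 16-bit minifloat above."""
        if g <= 0.0:
            return 0
        m, e = math.frexp(g)                     # g == m * 2**e with 0.5 <= m < 1
        exp_field = e - 1 + BIAS
        if exp_field <= 0:                       # too small for a normal value: denormal
            return min(MANT_MAX, int(round(g / 2.0 ** (1 - BIAS - MANT_BITS))))
        if exp_field >= (1 << EXP_BITS):         # too large: saturate
            return (((1 << EXP_BITS) - 1) << MANT_BITS) | MANT_MAX
        mant_field = int((m * 2 - 1) * (1 << MANT_BITS))   # truncate the fraction
        return (exp_field << MANT_BITS) | mant_field

    def minifloat_to_gain(v):
        exp_field, mant_field = v >> MANT_BITS, v & MANT_MAX
        if exp_field == 0:                       # denormal range
            return mant_field * 2.0 ** (1 - BIAS - MANT_BITS)
        return (1 + mant_field / float(1 << MANT_BITS)) * 2.0 ** (exp_field - BIAS)

    assert minifloat_to_gain(gain_to_minifloat(1.0)) == 1.0   # unity gain round-trips exactly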
/cts/apps/CameraITS/pymodules/its/ |
dng.py | 23 def compute_cm_fm(illuminant, gains, ccm, cal): 27 standard A illuminant, the HAL will produce the WB gains and transform, 28 in the android.colorCorrection.gains and android.colorCorrection.transform 32 This function is used to take the per-unit gains, ccm, and calibration 41 gains: White balance gains, as a list of 4 floats. 87 # G is formed by taking the r,g,b gains and putting them into a 89 G = numpy.array([[gains[0],0,0], [0,gains[1],0], [0,0,gains[3]]] [all...] |
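dng.py turns the 4-element [R, G_even, G_odd, B] white-balance gains into a 3x3 diagonal matrix, using the R gain, one of the two G gains, and the B gain (indices 0, 1, 3), before folding it into the CM/FM computation with the CCM and calibration matrix. Just that matrix-forming step, restated with numpy:

    import numpy

    def wb_gains_to_diag(gains):
        """Build the 3x3 diagonal WB matrix from the [R, G_even, G_odd, B] gains."""
        # Only one of the two green gains is used; index 3 is the blue gain.
        return numpy.diag([gains[0], gains[1], gains[3]])

    G = wb_gains_to_diag([2.1, 1.0, 1.0, 1.7])
    print(G)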
/external/webrtc/webrtc/modules/audio_coding/codecs/isac/fix/source/ |
pitch_gain_tables.h | 24 /* cdf for quantized pitch filter gains */ 32 /* mean values of pitch filter gains in Q12*/
|
/external/webrtc/webrtc/modules/audio_processing/agc/legacy/ |
digital_agc.c | 30 // gains = round(2^16*10.^(0.05 * (MinGain + B * ( log(exp(-Knee*A)+exp(-Knee*B)) - log(1+exp(-Knee*B)) ) / log(1/(1+exp(Knee*B)))))); 31 // fprintf(1, '\t%i, %i, %i, %i,\n', gains); 33 // in = 10*log10(lvl); out = 20*log10(gains/65536); 300 // array for gains (one value per ms, incl start & end) 301 int32_t gains[11]; local 416 gains[0] = stt->gain; 457 gains[k + 1] = stt->gainTable[zeros] + (tmp32 >> 12); 507 if ((gains[k + 1] - stt->gainTable[0]) > 8388608) 510 tmp32 = (gains[k + 1] - stt->gainTable[0]) >> 8; 514 tmp32 = (gains[k+1] - stt->gainTable[0]) * (178 + gain_adj) [all...] |
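The MATLAB comment at the top of digital_agc.c documents how the Q16 gain table is generated: gains = round(2^16 * 10^(0.05 * ...)). Below is a numpy transcription of that one expression; the inputs (lvl, A, B, MinGain, Knee) are filled in with illustrative values because the listing only shows the gains line itself:

    import numpy as np

    # Illustrative inputs; the real definitions appear in the comment block just
    # above this expression in digital_agc.c.
    MinGain, MaxGain, CompRatio, Knee = 0.0, 6.0, 3.0, 1.0
    zeros = np.arange(32)
    lvl = 2.0 ** (1 - zeros)
    A = -10.0 * np.log10(lvl) * (CompRatio - 1.0) / CompRatio
    B = MaxGain - MinGain

    # Direct transcription of the commented MATLAB line (result is in Q16).
    gains = np.round(2 ** 16 * 10 ** (0.05 * (MinGain + B *
            (np.log(np.exp(-Knee * A) + np.exp(-Knee * B)) - np.log(1 + np.exp(-Knee * B)))
            / np.log(1 / (1 + np.exp(Knee * B))))))

    # The comment's own sanity check: input level vs. output gain, both in dB.
    in_db, out_db = 10 * np.log10(lvl), 20 * np.log10(gains / 65536.0)
    print(gains[:4].astype(int))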
/cts/tests/camera/src/android/hardware/camera2/cts/rs/ |
raw_converter.rs | 43 // Interpolate gain map to find per-channel gains at a given pixel 218 float4 gains = 1.f; 220 gains = getGain(i, j); 226 g = gains.x; 230 g = gains.y; 234 g = gains.z; 238 g = gains.w; 243 g = gains.y; 247 g = gains.x; 251 g = gains.w [all...] |
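raw_converter.rs interpolates the lens-shading gain map to a float4 per pixel and then selects one component of it according to where the pixel sits in the sensor's 2x2 Bayer block. A sketch of that selection for an RGGB mosaic; the component ordering is an assumption standing in for the gains.x/.y/.z/.w cases in the switch:

    def gain_for_pixel(gains4, x, y):
        """Pick the per-channel lens-shading gain for pixel (x, y) of an RGGB mosaic.

        gains4 is the interpolated gain-map sample, assumed ordered to match the
        2x2 Bayer block: [top-left, top-right, bottom-left, bottom-right].
        """
        return gains4[2 * (y & 1) + (x & 1)]

    # Top-left of each 2x2 block is R for RGGB, so (0, 0) picks the R-plane gain.
    print(gain_for_pixel([1.9, 1.0, 1.0, 2.3], x=0, y=0))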
/external/libopus/celt/ |
celt.c | 122 static const opus_val16 gains[3][3] = { local 134 g00 = MULT16_16_Q15(g0, gains[tapset0][0]); 135 g01 = MULT16_16_Q15(g0, gains[tapset0][1]); 136 g02 = MULT16_16_Q15(g0, gains[tapset0][2]); 137 g10 = MULT16_16_Q15(g1, gains[tapset1][0]); 138 g11 = MULT16_16_Q15(g1, gains[tapset1][1]); 139 g12 = MULT16_16_Q15(g1, gains[tapset1][2]);
|
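celt.c scales the decoded comb-filter gains g0 and g1 by a small per-tapset table of tap weights using MULT16_16_Q15, a 16x16-bit multiply kept in Q15. The same operation in Python, as done in the fixed-point build (the float build simply multiplies); the tap-weight values here are made up:

    def mult16_16_q15(a, b):
        """Q15 multiply, (a * b) >> 15, as MULT16_16_Q15 does in the fixed-point build."""
        return (a * b) >> 15

    Q15 = 1 << 15
    g0 = int(0.75 * Q15)                                       # decoded postfilter gain, Q15
    taps = [int(0.3 * Q15), int(0.25 * Q15), int(0.1 * Q15)]   # illustrative tap weights
    g00, g01, g02 = (mult16_16_q15(g0, t) for t in taps)
    print(g00, g01, g02)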
/frameworks/base/media/java/android/media/ |
AudioMixPort.java | 35 int[] formats, AudioGain[] gains) { 37 formats, gains);
|
/cts/apps/CameraITS/tests/inprog/scene2/ |
test_dng_tags.py | 47 gains = cap["metadata"]["android.colorCorrection.gains"] 51 print "HAL reported gains:\n", numpy.array(gains) 60 # the HAL-reported WB gains, CCM, and calibration matrix. 61 cm, fm = its.dng.compute_cm_fm(dng_illum[i], gains, ccm, cal)
|