/cts/apps/CameraITS/tests/scene1/ |
test_auto_vs_manual.py | 40 sens, exp, gains, xform, focus = cam.do_3a(get_results=True) 43 print "AWB gains", gains 54 gains_a = cap_auto["metadata"]["android.colorCorrection.gains"] 55 print "Auto gains:", gains_a 61 req["android.colorCorrection.gains"] = gains 67 gains_m1 = cap_man1["metadata"]["android.colorCorrection.gains"] 68 print "Manual wb gains:", gains_m1 82 gains_m2 = cap_man2["metadata"]["android.colorCorrection.gains"] [all...] |
test_3a.py | 28 sens, exp, gains, xform, focus = cam.do_3a(get_results=True) 30 print "AWB: gains", gains, "transform", xform 34 assert(len(gains) == 4)
|
test_capture_result.py | 48 # The camera HAL may not support different gains for two G channels. 64 "android.colorCorrection.gains": manual_gains, 116 gains = cap_res["android.colorCorrection.gains"] 123 print "Gains:", gains 134 assert(len(gains) == 4) 136 assert(all([g > 0 for g in gains])) 140 assert(any([not is_close_float(gains[i], manual_gains[i]) 160 gains = cap_res["android.colorCorrection.gains" [all...] |
test_param_color_correction.py | 28 Takes shots with different transform and gains values, and tests that 29 they look correspondingly different. The transform and gains are chosen 56 # Gains: 60 gains = [[1,1,1,1], [2,1,1,1], [1,1,1,1]] 67 # 1. With unit gains, and identity transform. 69 # 3. With unit gains, and a transform that boosts blue. 72 req["android.colorCorrection.gains"] = gains[i]
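The scene1 tests above drive manual white balance through "android.colorCorrection.gains" (four floats, R / G_even / G_odd / B) and "android.colorCorrection.transform" (a 3x3 CCM). A minimal Python sketch of how such a request might be assembled, assuming a plain dict-based request and a simplified nested-list transform; the helper name and the mode value are illustrative, not taken from the ITS sources.

    # Hypothetical sketch: build a manual white-balance capture request using
    # the metadata keys quoted in the tests above. The capture plumbing
    # (its.device, do_capture) is omitted; only the request dict is shown.
    def manual_wb_request(gains, transform):
        """gains: 4 floats [R, G_even, G_odd, B]; transform: 3x3 CCM rows."""
        return {
            "android.colorCorrection.mode": 0,        # TRANSFORM_MATRIX (assumed value)
            "android.colorCorrection.gains": gains,
            "android.colorCorrection.transform": transform,
        }

    identity = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
    unit_req = manual_wb_request([1, 1, 1, 1], identity)   # baseline shot
    red_req  = manual_wb_request([2, 1, 1, 1], identity)   # red channel boosted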
|
/external/chromium_org/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/ |
pitch_estimator.h | 29 double *gains); 41 double *gains); 47 double *gains); 53 double *gains); 60 double *gains);
|
pitch_gain_tables.h | 25 /* cdf for quantized pitch filter gains */ 34 /* mean values of pitch filter gains */
|
pitch_filter.c | 101 * damper_state_dg : state of damping filter for different trial gains. 214 /* Update filter parameters based on the pitch-gains and pitch-lags. */ 240 * Filter a frame of 30 milliseconds, given pitch-lags and pitch-gains. 245 * gains : pointer to pitch-gains, 4 gains per frame. 252 * pitch-gains. 261 double* lags, double* gains, PitchFilterOperation mode, 294 gains[n] *= -kEnhancer; 305 old_gain = gains[0]; 319 gain_delta = (gains[m] - old_gain) / PITCH_GRAN_PER_SUBFRAME [all...] |
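The gain_delta line quoted above shows how the pitch filter ramps linearly from the previous gain toward each of the four per-subframe gains, one step per granule. A small stand-alone sketch of that interpolation; PITCH_GRAN_PER_SUBFRAME is replaced by an illustrative constant.

    # Illustrative sketch of per-subframe gain ramping, loosely following the
    # gain_delta computation quoted above. GRAN_PER_SUBFRAME is a stand-in.
    GRAN_PER_SUBFRAME = 5

    def ramp_gains(gains, initial_gain=0.0):
        """Return one interpolated gain per granule, ramping from the
        previous subframe's gain toward each new one."""
        out, old_gain = [], initial_gain
        for target in gains:                         # 4 gains per frame
            delta = (target - old_gain) / GRAN_PER_SUBFRAME
            out.extend(old_gain + step * delta
                       for step in range(1, GRAN_PER_SUBFRAME + 1))
            old_gain = target
        return out

    print(ramp_gains([0.5, 0.8, 0.8, 0.3]))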
/external/webrtc/src/modules/audio_coding/codecs/isac/main/source/ |
pitch_estimator.h | 29 double *gains); 41 double *gains); 47 double *gains); 53 double *gains); 60 double *gains);
|
pitch_gain_tables.h | 25 /* cdf for quantized pitch filter gains */ 34 /* mean values of pitch filter gains */
|
pitch_filter.c | 101 * damper_state_dg : state of damping filter for different trial gains. 214 /* Update filter parameters based on the pitch-gains and pitch-lags. */ 240 * Filter a frame of 30 milliseconds, given pitch-lags and pitch-gains. 245 * gains : pointer to pitch-gains, 4 gains per frame. 252 * pitch-gains. 261 double* lags, double* gains, PitchFilterOperation mode, 294 gains[n] *= -kEnhancer; 305 old_gain = gains[0]; 319 gain_delta = (gains[m] - old_gain) / PITCH_GRAN_PER_SUBFRAME [all...] |
/external/chromium_org/third_party/opus/src/silk/float/ |
process_gains_FLP.c | 35 /* Processing of gains */ 51 psEncCtrl->Gains[ k ] *= s; 59 /* Soft limit on ratio residual energy and squared gains */ 60 gain = psEncCtrl->Gains[ k ]; 62 psEncCtrl->Gains[ k ] = silk_min_float( gain, 32767.0f ); 65 /* Prepare gains for noise shaping quantization */ 67 pGains_Q16[ k ] = (opus_int32)( psEncCtrl->Gains[ k ] * 65536.0f ); 70 /* Save unquantized gains and gain Index */ 74 /* Quantize gains */ 78 /* Overwrite unquantized gains with quantized gains and convert back to Q0 from Q16 * [all...] |
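process_gains_FLP.c caps each float gain and then scales it by 65536 to produce the Q16 values handed to the noise-shaping quantizer. A hedged Python equivalent of just that clamp-and-scale step; the 32767.0 ceiling and the 65536 factor come from the snippet, everything else (including the quantization itself) is omitted.

    # Sketch of the float -> Q16 gain preparation quoted above.
    def prepare_gains_q16(gains):
        q16 = []
        for g in gains:
            g = min(g, 32767.0)             # soft limit, as in the source
            q16.append(int(g * 65536.0))    # Q16: 16 fractional bits
        return q16

    print(prepare_gains_q16([0.75, 1.0, 2.5]))   # [49152, 65536, 163840]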
residual_energy_FLP.c | 95 const silk_float gains[], /* I Quantization gains */ 109 nrgs[ 0 ] = ( silk_float )( gains[ 0 ] * gains[ 0 ] * silk_energy_FLP( LPC_res_ptr + 0 * shift, subfr_length ) ); 110 nrgs[ 1 ] = ( silk_float )( gains[ 1 ] * gains[ 1 ] * silk_energy_FLP( LPC_res_ptr + 1 * shift, subfr_length ) ); 114 nrgs[ 2 ] = ( silk_float )( gains[ 2 ] * gains[ 2 ] * silk_energy_FLP( LPC_res_ptr + 0 * shift, subfr_length ) ); 115 nrgs[ 3 ] = ( silk_float )( gains[ 3 ] * gains[ 3 ] * silk_energy_FLP( LPC_res_ptr + 1 * shift, subfr_length ) ) [all...] |
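residual_energy_FLP.c weights the energy of each LPC-residual subframe by the square of its quantization gain: nrgs[i] = gains[i]^2 * energy(subframe). A numpy sketch of that computation; the subframe slicing is simplified compared with the shift/offset bookkeeping in the source.

    import numpy as np

    # Sketch: per-subframe residual energy weighted by squared gains,
    # mirroring nrgs[i] = gains[i]^2 * silk_energy_FLP(subframe).
    def residual_energies(lpc_res, gains, subfr_length):
        nrgs = []
        for i, g in enumerate(gains):
            sub = np.asarray(lpc_res[i * subfr_length:(i + 1) * subfr_length])
            nrgs.append((g * g) * float(np.dot(sub, sub)))
        return nrgs

    res = np.random.randn(4 * 80)                  # 4 subframes of 80 samples
    print(residual_energies(res, [0.5, 1.0, 1.0, 2.0], 80))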
/external/libopus/silk/float/ |
process_gains_FLP.c | 35 /* Processing of gains */ 51 psEncCtrl->Gains[ k ] *= s; 59 /* Soft limit on ratio residual energy and squared gains */ 60 gain = psEncCtrl->Gains[ k ]; 62 psEncCtrl->Gains[ k ] = silk_min_float( gain, 32767.0f ); 65 /* Prepare gains for noise shaping quantization */ 67 pGains_Q16[ k ] = (opus_int32)( psEncCtrl->Gains[ k ] * 65536.0f ); 70 /* Save unquantized gains and gain Index */ 74 /* Quantize gains */ 78 /* Overwrite unquantized gains with quantized gains and convert back to Q0 from Q16 * [all...] |
residual_energy_FLP.c | 95 const silk_float gains[], /* I Quantization gains */ 109 nrgs[ 0 ] = ( silk_float )( gains[ 0 ] * gains[ 0 ] * silk_energy_FLP( LPC_res_ptr + 0 * shift, subfr_length ) ); 110 nrgs[ 1 ] = ( silk_float )( gains[ 1 ] * gains[ 1 ] * silk_energy_FLP( LPC_res_ptr + 1 * shift, subfr_length ) ); 114 nrgs[ 2 ] = ( silk_float )( gains[ 2 ] * gains[ 2 ] * silk_energy_FLP( LPC_res_ptr + 0 * shift, subfr_length ) ); 115 nrgs[ 3 ] = ( silk_float )( gains[ 3 ] * gains[ 3 ] * silk_energy_FLP( LPC_res_ptr + 1 * shift, subfr_length ) ) [all...] |
/external/chromium_org/third_party/opus/src/silk/fixed/ |
residual_energy_FIX.c | 42 const opus_int32 gains[ MAX_NB_SUBFR ], /* I Quantization gains */ 81 /* Apply the squared subframe gains */ 83 /* Fully upscale gains and energies */ 85 lz2 = silk_CLZ32( gains[ i ] ) - 1; 87 tmp32 = silk_LSHIFT32( gains[ i ], lz2 ); 89 /* Find squared gains */
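The fixed-point variant first shifts each gain up by (leading zeros - 1) so that the subsequent squaring uses as much of the 32-bit range as possible. A rough Python illustration of that upscaling step only; silk_CLZ32/silk_LSHIFT32 are replaced with plain integer operations and the later energy combination is not shown.

    # Rough illustration of the gain upscaling quoted above; not SILK macros.
    def clz32(x):
        return 32 - x.bit_length() if x > 0 else 32

    def upscale_gain(gain):
        lz2 = clz32(gain) - 1                 # headroom, minus one guard bit
        tmp32 = (gain << lz2) & 0xFFFFFFFF    # silk_LSHIFT32 analogue
        return tmp32, lz2                     # the shift must be undone later

    print(upscale_gain(70000))                # Q16-ish gain, ~15 leading zeros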
|
/external/libopus/silk/fixed/ |
residual_energy_FIX.c | 42 const opus_int32 gains[ MAX_NB_SUBFR ], /* I Quantization gains */ 81 /* Apply the squared subframe gains */ 83 /* Fully upscale gains and energies */ 85 lz2 = silk_CLZ32( gains[ i ] ) - 1; 87 tmp32 = silk_LSHIFT32( gains[ i ], lz2 ); 89 /* Find squared gains */
|
/cts/apps/CameraITS/tests/dng_noise_model/ |
dng_noise_model.py | 62 req["android.colorCorrection.gains"] = awb_gains 135 gains = [d[0] for d in lines] 138 mS,bS = numpy.polyfit(gains, Ss, 1) 139 mO,bO = numpy.polyfit(gains, Os, 1) 143 pylab.plot(gains, [10*o for o in Os], 'r', label="Measured") 144 pylab.plot([gains[0],gains[-1]], 145 [10*mO*gains[0]+10*bO, 10*mO*gains[-1]+10*bO],'r--',label="Fit") 146 pylab.plot(gains, Ss, 'b', label="Measured" [all...] |
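dng_noise_model.py fits the measured scale (S) and offset (O) noise-model parameters linearly against gain with numpy.polyfit and then plots the measurements next to the fit. A compact sketch of the fitting step, using made-up sample values in place of the measured data.

    import numpy

    # Illustrative data only; real values come from the captured shots.
    gains = [1.0, 2.0, 4.0, 8.0]
    Ss    = [1.1e-4, 2.3e-4, 4.4e-4, 9.0e-4]   # scale parameter per gain
    Os    = [2.0e-7, 5.0e-7, 1.6e-6, 6.1e-6]   # offset parameter per gain

    mS, bS = numpy.polyfit(gains, Ss, 1)       # S ~= mS * gain + bS
    mO, bO = numpy.polyfit(gains, Os, 1)       # O ~= mO * gain + bO
    print("S fit:", mS, bS, "O fit:", mO, bO)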
/cts/apps/CameraITS/tests/inprog/ |
test_3a_remote.py | 50 gains = cap["metadata"]["android.colorCorrection.gains"] 58 print "Gains:", gains
|
/system/media/audio_utils/include/audio_utils/ |
minifloat.h | 56 /* A pair of unity gains expressed as a gain_minifloat_packed_t */ 59 /* Convert a float to the internal representation used for gains. 67 * Details on internal representation of gains, based on mini-floats: 76 /* Convert the internal representation used for gains to float */
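minifloat.h stores gains in a compact mini-float form and packs two of them into one gain_minifloat_packed_t. The exact bit layout is not visible in the snippet, so the sketch below encodes a gain with a generic 4-bit-exponent / 4-bit-mantissa format purely to illustrate the idea; it is not the audio_utils representation.

    import math

    # Generic mini-float illustration (NOT the audio_utils layout):
    # unsigned, 4-bit biased exponent, 4-bit mantissa with implicit leading 1.
    def encode_minifloat(gain, exp_bits=4, man_bits=4, bias=7):
        if gain <= 0.0:
            return 0
        m, e = math.frexp(gain)                    # gain = m * 2**e, m in [0.5, 1)
        exp = max(0, min((1 << exp_bits) - 1, e + bias))
        man = min(int(round((2 * m - 1) * (1 << man_bits))),
                  (1 << man_bits) - 1)             # fractional part of mantissa
        return (exp << man_bits) | man

    print(hex(encode_minifloat(1.0)))              # unity gain under these assumptions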
|
/cts/apps/CameraITS/pymodules/its/ |
dng.py | 23 def compute_cm_fm(illuminant, gains, ccm, cal): 27 standard A illuminant, the HAL will produce the WB gains and transform, 28 in the android.colorCorrection.gains and android.colorCorrection.transform 32 This function is used to take the per-unit gains, ccm, and calibration 41 gains: White balance gains, as a list of 4 floats. 87 # G is formed by taking the r,g,b gains and putting them into a 89 G = numpy.array([[gains[0],0,0], [0,gains[1],0], [0,0,gains[3]]] [all...] |
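In dng.py, the white-balance gains enter the color math as a diagonal matrix built from the R gain, the first G gain, and the B gain (gains[0], gains[1], gains[3] of the RGGB quad). A small numpy sketch of just that step and its effect on an RGB vector; the rest of compute_cm_fm is omitted.

    import numpy

    # Diagonal WB gain matrix as in the dng.py snippet: RGGB gains are
    # [R, G_even, G_odd, B]; the matrix uses R, the first G, and B.
    def wb_gain_matrix(gains):
        return numpy.array([[gains[0], 0, 0],
                            [0, gains[1], 0],
                            [0, 0, gains[3]]])

    G = wb_gain_matrix([2.0, 1.0, 1.0, 1.5])
    print(G.dot(numpy.array([0.25, 0.50, 0.30])))   # scales to [0.5, 0.5, 0.45]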
/external/chromium_org/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/ |
pitch_gain_tables.h | 24 /* cdf for quantized pitch filter gains */ 32 /* mean values of pitch filter gains in Q12*/
|
/external/webrtc/src/modules/audio_coding/codecs/isac/fix/source/ |
pitch_gain_tables.h | 25 /* cdf for quantized pitch filter gains */ 33 /* mean values of pitch filter gains in Q12*/
|
/frameworks/base/media/java/android/media/ |
AudioMixPort.java | 30 int[] formats, AudioGain[] gains) { 31 super(handle, role, samplingRates, channelMasks, formats, gains);
|
/external/chromium_org/third_party/webrtc/modules/audio_processing/agc/ |
digital_agc.c | 30 // gains = round(2^16*10.^(0.05 * (MinGain + B * ( log(exp(-Knee*A)+exp(-Knee*B)) - log(1+exp(-Knee*B)) ) / log(1/(1+exp(Knee*B)))))); 31 // fprintf(1, '\t%i, %i, %i, %i,\n', gains); 33 // in = 10*log10(lvl); out = 20*log10(gains/65536); 303 // array for gains (one value per ms, incl start & end) 304 int32_t gains[11]; local 427 gains[0] = stt->gain; 468 gains[k + 1] = stt->gainTable[zeros] + WEBRTC_SPL_RSHIFT_W32(tmp32, 12); 518 if ((gains[k + 1] - stt->gainTable[0]) > 8388608) 521 tmp32 = WEBRTC_SPL_RSHIFT_W32((gains[k+1] - stt->gainTable[0]), 8); 525 tmp32 = WEBRTC_SPL_MUL((gains[k+1] - stt->gainTable[0]), (178 + gain_adj)) [all...] |
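The comment block at the top of digital_agc.c records the MATLAB expression used to precompute the gain table (gains = round(2^16*10.^(0.05 * ...))). Below is a direct Python transcription of that expression; Knee, MinGain, B, and the range of A are placeholder values, since the snippet does not show the constants actually used.

    import numpy as np

    # Placeholder constants; the real ones are not visible in the snippet.
    Knee, MinGain, B = 1.0, 0.0, 30.0
    A = np.linspace(0.0, 1.0, 32)              # assumed per-entry variable

    gains = np.round(2**16 * 10.0**(0.05 * (MinGain + B *
            (np.log(np.exp(-Knee*A) + np.exp(-Knee*B)) - np.log(1 + np.exp(-Knee*B)))
            / np.log(1.0 / (1 + np.exp(Knee*B))))))

    print(gains[:4].astype(int))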
/external/webrtc/src/modules/audio_processing/agc/ |
digital_agc.c | 30 // gains = round(2^16*10.^(0.05 * (MinGain + B * ( log(exp(-Knee*A)+exp(-Knee*B)) - log(1+exp(-Knee*B)) ) / log(1/(1+exp(Knee*B)))))); 31 // fprintf(1, '\t%i, %i, %i, %i,\n', gains); 33 // in = 10*log10(lvl); out = 20*log10(gains/65536); 308 // array for gains (one value per ms, incl start & end) 309 WebRtc_Word32 gains[11]; local 427 gains[0] = stt->gain; 468 gains[k + 1] = stt->gainTable[zeros] + WEBRTC_SPL_RSHIFT_W32(tmp32, 12); 513 if ((gains[k + 1] - stt->gainTable[0]) > 8388608) 516 tmp32 = WEBRTC_SPL_RSHIFT_W32((gains[k+1] - stt->gainTable[0]), 8); 520 tmp32 = WEBRTC_SPL_MUL((gains[k+1] - stt->gainTable[0]), (178 + gain_adj)) [all...] |