/frameworks/av/media/libstagefright/codecs/amrwbenc/src/
cor_h_x.c
    42  Word32 L_tmp, y32[L_SUBFR], L_tot;
    54  L_tmp = 1; /* 1 -> to avoid null dn[] */
    58  L_tmp += vo_L_mult(*p1++, *p2++);
    60  y32[i] = L_tmp;
    61  L_tmp = (L_tmp > 0)? L_tmp:-L_tmp;
    62  if(L_tmp > L_max)
    64  L_max = L_tmp;
      [all...]

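These cor_h_x.c hits show a recurring amrwbenc pattern: a correlation accumulated in a 32-bit L_tmp seeded with 1 (so the result vector is never all-zero), with the running maximum magnitude kept so the caller can pick one block-scaling shift afterwards. A minimal sketch of that pattern; vo_L_mult in the source is the saturating fractional multiply (a*b)<<1, replaced here by plain shifts, and L_SUB stands in for the real L_SUBFR of 64:

    #include <stdint.h>

    typedef int16_t Word16;
    typedef int32_t Word32;

    #define L_SUB 64   /* stand-in for L_SUBFR */

    /* y32[i] = 1 + sum_j x[j] * h[j-i], tracking the largest |y32[i]| so a
     * later pass can rescale everything with a single shift. */
    static void cor_h_x_sketch(const Word16 h[], const Word16 x[], Word32 y32[])
    {
        Word32 L_max = 0;
        for (int i = 0; i < L_SUB; i++) {
            Word32 L_tmp = 1;                    /* 1 -> avoid a null dn[] */
            for (int j = i; j < L_SUB; j++)
                L_tmp += ((Word32)x[j] * h[j - i]) << 1;  /* fractional MAC */
            y32[i] = L_tmp;
            Word32 mag = (L_tmp > 0) ? L_tmp : -L_tmp;
            if (mag > L_max)
                L_max = mag;
        }
        (void)L_max;  /* the real code turns L_max into a shift via norm_l() */
    }
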
deemph.c
    38  Word32 L_tmp;
    40  L_tmp = L_deposit_h(x[0]);
    41  L_tmp = L_mac(L_tmp, *mem, mu);
    42  x[0] = vo_round(L_tmp);
    46  L_tmp = L_deposit_h(x[i]);
    47  L_tmp = L_mac(L_tmp, x[i - 1], mu);
    48  x[i] = voround(L_tmp);
    65  Word32 L_tmp;
      [all...]

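deemph.c is the classic first-order deemphasis y[n] = x[n] + mu*y[n-1] with mu in Q15; L_deposit_h is a <<16 and L_mac(a, b, c) is the saturating a + ((b*c)<<1). A standalone sketch with plain, non-saturating arithmetic:

    #include <stdint.h>

    typedef int16_t Word16;
    typedef int32_t Word32;

    /* y[n] = x[n] + mu * y[n-1], mu in Q15, in place, rounded to 16 bits.
     * Because the buffer is overwritten as we go, x[i-1] is already the
     * filtered output, exactly as in the reference. */
    static void deemph_sketch(Word16 x[], Word16 mu, int L, Word16 *mem)
    {
        Word32 L_tmp = ((Word32)x[0] << 16) + (((Word32)*mem * mu) << 1);
        x[0] = (Word16)((L_tmp + 0x8000) >> 16);       /* round */
        for (int i = 1; i < L; i++) {
            L_tmp = ((Word32)x[i] << 16) + (((Word32)x[i - 1] * mu) << 1);
            x[i] = (Word16)((L_tmp + 0x8000) >> 16);
        }
        *mem = x[L - 1];                               /* carry y[-1] forward */
    }
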
syn_filt.c
    40  Word32 L_tmp;
    54  L_tmp = vo_mult32(a0, x[i]);
    55  L_tmp -= vo_mult32((*p1++), (*p2--));
    56  L_tmp -= vo_mult32((*p1++), (*p2--));
    57  L_tmp -= vo_mult32((*p1++), (*p2--));
    58  L_tmp -= vo_mult32((*p1++), (*p2--));
    59  L_tmp -= vo_mult32((*p1++), (*p2--));
    60  L_tmp -= vo_mult32((*p1++), (*p2--));
    61  L_tmp -= vo_mult32((*p1++), (*p2--));
    62  L_tmp -= vo_mult32((*p1++), (*p2--))
      [all...]

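syn_filt.c runs the synthesis filter 1/A(z): each output is a[0]*x[i] minus the a[1..16]-weighted previous outputs, which the source fully unrolls into sixteen vo_mult32 lines. A loop-based sketch of the same recursion, assuming a[] in Q12 and lg <= 256; the reference pre-halves a[0] and compensates in its final shift, while here a[0] is used whole and the accumulator is shifted from Q12 to Q16 before rounding:

    #include <stdint.h>

    typedef int16_t Word16;
    typedef int32_t Word32;

    #define M 16   /* LP order in AMR-WB */

    /* y[i] = a[0]*x[i] - sum_{j=1..M} a[j]*y[i-j], a[] in Q12. */
    static void syn_filt_sketch(const Word16 a[M + 1], const Word16 x[],
                                Word16 y[], int lg, Word16 mem[M])
    {
        Word16 buf[M + 256];              /* history + output, lg <= 256 */
        for (int i = 0; i < M; i++)
            buf[i] = mem[i];              /* mem[] holds y[-M..-1] */
        Word16 *yy = buf + M;

        for (int i = 0; i < lg; i++) {
            Word32 L_tmp = (Word32)a[0] * x[i];
            for (int j = 1; j <= M; j++)
                L_tmp -= (Word32)a[j] * yy[i - j];
            yy[i] = (Word16)(((L_tmp << 4) + 0x8000) >> 16);  /* round */
        }
        for (int i = 0; i < lg; i++)
            y[i] = yy[i];
        for (int i = 0; i < M; i++)
            mem[i] = yy[lg - M + i];      /* save the last M outputs */
    }
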
hp_wsp.c
    59  Word32 L_tmp;
    63  L_tmp = ((mem[i] << 16) + (mem[i + 1]<<1));
    64  L_tmp = L_shl(L_tmp, exp);
    65  mem[i] = L_tmp >> 16;
    66  mem[i + 1] = (L_tmp & 0xffff)>>1;
    71  L_tmp = L_deposit_h(mem[i]); /* x[i] */
    72  L_tmp = L_shl(L_tmp, exp);
    73  mem[i] = vo_round(L_tmp);
      [all...]

preemph.c
    36  Word32 i, L_tmp;
    42  L_tmp = L_deposit_h(x[i]);
    43  L_tmp -= (x[i - 1] * mu)<<1;
    44  x[i] = (L_tmp + 0x8000)>>16;
    47  L_tmp = L_deposit_h(x[0]);
    48  L_tmp -= ((*mem) * mu)<<1;
    49  x[0] = (L_tmp + 0x8000)>>16;
    65  Word32 i, L_tmp;
    71  L_tmp = L_deposit_h(x[i]);
    72  L_tmp -= (x[i - 1] * mu)<<1
      [all...]

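preemph.c is the mirror image: y[n] = x[n] - mu*x[n-1], run in place from the end of the buffer backwards so x[i-1] is still an unfiltered input sample, with the (L_tmp + 0x8000)>>16 rounding visible in the hits. A sketch with plain arithmetic:

    #include <stdint.h>

    typedef int16_t Word16;
    typedef int32_t Word32;

    /* y[n] = x[n] - mu * x[n-1], mu in Q15, processed backwards in place. */
    static void preemph_sketch(Word16 x[], Word16 mu, int lg, Word16 *mem)
    {
        Word16 new_mem = x[lg - 1];                   /* save before overwrite */
        for (int i = lg - 1; i > 0; i--) {
            Word32 L_tmp = ((Word32)x[i] << 16) - (((Word32)x[i - 1] * mu) << 1);
            x[i] = (Word16)((L_tmp + 0x8000) >> 16);  /* round to 16 bits */
        }
        Word32 L_tmp = ((Word32)x[0] << 16) - (((Word32)*mem * mu) << 1);
        x[0] = (Word16)((L_tmp + 0x8000) >> 16);
        *mem = new_mem;
    }
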
hp6k.c
    60  Word32 i, L_tmp;
    69  L_tmp = (x[i] + x[i+ 30]) * fir_6k_7k[0];
    70  L_tmp += (x[i+1] + x[i + 29]) * fir_6k_7k[1];
    71  L_tmp += (x[i+2] + x[i + 28]) * fir_6k_7k[2];
    72  L_tmp += (x[i+3] + x[i + 27]) * fir_6k_7k[3];
    73  L_tmp += (x[i+4] + x[i + 26]) * fir_6k_7k[4];
    74  L_tmp += (x[i+5] + x[i + 25]) * fir_6k_7k[5];
    75  L_tmp += (x[i+6] + x[i + 24]) * fir_6k_7k[6];
    76  L_tmp += (x[i+7] + x[i + 23]) * fir_6k_7k[7];
    77  L_tmp += (x[i+8] + x[i + 22]) * fir_6k_7k[8]
      [all...]

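hp6k.c exploits the symmetry of its 31-tap linear-phase FIR: fir[k] equals fir[30-k], so each coefficient multiplies the sum of the two mirrored samples, halving the multiply count. A sketch, assuming Q15 placeholder coefficients rather than the real fir_6k_7k[] table; x[] must hold lg + 30 samples of delay line:

    #include <stdint.h>

    typedef int16_t Word16;
    typedef int32_t Word32;

    #define L_FIR 31   /* 31-tap linear-phase FIR, as in hp6k.c */

    /* L_tmp += (x[i+k] + x[i+30-k]) * fir[k] for the 15 mirrored pairs,
     * plus the centre tap, then round from Q15 back to 16 bits. */
    static void sym_fir_sketch(const Word16 x[], Word16 y[], int lg,
                               const Word16 fir_q15[L_FIR])
    {
        for (int i = 0; i < lg; i++) {
            Word32 L_tmp = 0;
            for (int k = 0; k < (L_FIR - 1) / 2; k++)
                L_tmp += ((Word32)x[i + k] + x[i + L_FIR - 1 - k]) * fir_q15[k];
            L_tmp += (Word32)x[i + (L_FIR - 1) / 2] * fir_q15[(L_FIR - 1) / 2];
            y[i] = (Word16)((L_tmp + 0x4000) >> 15);  /* Q15 round */
        }
    }
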
scale.c
    34  Word32 L_tmp;
    39  L_tmp = L_shl2(x[i], 16 + exp);
    40  x[i] = extract_h(L_add(L_tmp, 0x8000));
    48  L_tmp = x[i] << 16;
    49  L_tmp >>= exp;
    50  x[i] = (L_tmp + 0x8000)>>16;

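scale.c scales a whole block by 2^exp: scaling up must saturate (L_shl2 in the source is a saturating left shift), scaling down rounds. A sketch with an explicit, hypothetical sat_shl16() standing in for the saturating shift:

    #include <stdint.h>

    typedef int16_t Word16;
    typedef int32_t Word32;

    /* Saturating 16-bit left shift, stand-in for the codec's L_shl2 path. */
    static Word16 sat_shl16(Word16 v, int sh)
    {
        Word32 w = (Word32)v << sh;
        if (w > 32767)  w = 32767;
        if (w < -32768) w = -32768;
        return (Word16)w;
    }

    /* Multiply the block by 2^exp: clip on the way up, round on the way down. */
    static void scale_sketch(Word16 x[], int lg, int exp)
    {
        if (exp > 0) {
            for (int i = 0; i < lg; i++)
                x[i] = sat_shl16(x[i], exp);             /* may clip */
        } else {
            for (int i = 0; i < lg; i++) {
                Word32 L_tmp = (Word32)x[i] << 16;       /* to the high word */
                L_tmp >>= -exp;                          /* arithmetic shift */
                x[i] = (Word16)((L_tmp + 0x8000) >> 16); /* round back */
            }
        }
    }
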
lp_dec2.c
    43  Word32 L_tmp;
    58  L_tmp = ((*p_x++) * h_fir[0]);
    59  L_tmp += ((*p_x++) * h_fir[1]);
    60  L_tmp += ((*p_x++) * h_fir[2]);
    61  L_tmp += ((*p_x++) * h_fir[3]);
    62  L_tmp += ((*p_x++) * h_fir[4]);
    63  x[j] = (L_tmp + 0x4000)>>15;

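lp_dec2.c decimates by 2: every other output of a 5-tap Q15 FIR is kept, with the previous four input samples carried in mem[]. A sketch; h_fir_q15[] is a placeholder for the real h_fir[] table, and l <= 256 is assumed:

    #include <stdint.h>
    #include <string.h>

    typedef int16_t Word16;
    typedef int32_t Word32;

    #define L_FIR5 5
    #define L_MEM  (L_FIR5 - 1)

    /* Low-pass with the 5-tap FIR, keep every other output, in place. */
    static void decim2_sketch(Word16 x[], int l, Word16 mem[L_MEM],
                              const Word16 h_fir_q15[L_FIR5])
    {
        Word16 buf[L_MEM + 256];                        /* assumes l <= 256 */
        memcpy(buf, mem, sizeof(Word16) * L_MEM);
        memcpy(buf + L_MEM, x, sizeof(Word16) * l);
        memcpy(mem, buf + l, sizeof(Word16) * L_MEM);   /* save new state */

        for (int i = 0, j = 0; i < l; i += 2, j++) {    /* decimate by 2 */
            const Word16 *p_x = &buf[i];
            Word32 L_tmp = 0;
            for (int k = 0; k < L_FIR5; k++)
                L_tmp += (Word32)(*p_x++) * h_fir_q15[k];
            x[j] = (Word16)((L_tmp + 0x4000) >> 15);    /* Q15 round */
        }
    }
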
q_gain2.c
    83  Word32 i, j, L_tmp, dist_min;
   173  * L_tmp = MEAN_ENER - 10log10(energy of code/ L_subfr) *
   177  L_tmp = Dot_product12_asm(code, code, L_subfr, &exp_code);
   179  L_tmp = Dot_product12(code, code, L_subfr, &exp_code);
   181  /* exp_code: -18 (code in Q9), -6 (/L_subfr), -31 (L_tmp Q31->Q0) */
   184  Log2(L_tmp, &exp, &frac);
   186  L_tmp = Mpy_32_16(exp, frac, -24660); /* x -3.0103(Q13) -> Q14 */
   188  L_tmp += (MEAN_ENER * 8192)<<1; /* + MEAN_ENER in Q14 */
   194  L_tmp = (L_tmp << 10); /* From Q14 to Q24 *
      [all...]

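The q_gain2.c hits trace the fixed-point log path used to quantize the code gain: Dot_product12 returns a normalized energy plus exponent, Log2 splits log2(E) into an integer part and a Q15 fraction, and multiplying by -24660 (-3.0103 in Q13, since 10*log10(2) = 3.0103) converts that to -10*log10(E) in Q14. A rough standalone sketch of only the log2-to-dB conversion; log2_fx is a hypothetical, linearly-approximated stand-in for the codec's table-driven Log2(), and the exp_code exponent bookkeeping is omitted:

    #include <stdint.h>

    typedef int16_t Word16;
    typedef int32_t Word32;

    /* Split log2(L_x) into integer part and Q15 fraction (L_x > 0).
     * The fraction is a linear approximation of the mantissa's log. */
    static void log2_fx(Word32 L_x, Word16 *exp, Word16 *frac)
    {
        int e = 30;
        if (L_x <= 0) { *exp = 0; *frac = 0; return; }
        while (L_x < ((Word32)1 << 30)) { L_x <<= 1; e--; }  /* normalize */
        *exp = (Word16)e;
        *frac = (Word16)((L_x >> 15) & 0x7FFF);
    }

    /* 10*log10(energy) in Q14: log2(E) in Q15 times 3.0103 in Q13. */
    static Word32 ten_log10_q14(Word32 energy)
    {
        Word16 e, f;
        log2_fx(energy, &e, &f);
        Word32 log2_q15 = ((Word32)e << 15) | f;
        return (Word32)(((int64_t)log2_q15 * 24660) >> 14);
    }
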
hp400.c
    65  Word32 L_tmp;
    81  L_tmp = 8192L; /* rounding to maximise precision */
    82  L_tmp += y1_lo * a[1];
    83  L_tmp += y2_lo * a[2];
    84  L_tmp = L_tmp >> 14;
    85  L_tmp += (y1_hi * a[1] + y2_hi * a[2] + (x0 + x2)* b[0] + x1 * b[1]) << 1;
    86  L_tmp <<= 1; /* coeff Q12 --> Q13 */
    89  y1_hi = (Word16)(L_tmp>>16);
    90  y1_lo = (Word16)((L_tmp & 0xffff)>>1)
      [all...]

hp50.c
    66  Word32 L_tmp;
    83  L_tmp = 8192 ; /* rounding to maximise precision */
    84  L_tmp += y1_lo * a[1];
    85  L_tmp += y2_lo * a[2];
    86  L_tmp = L_tmp >> 14;
    87  L_tmp += (y1_hi * a[1] + y2_hi * a[2] + (x0 + x2) * b[0] + x1 * b[1]) << 1;
    88  L_tmp <<= 2; /* coeff Q12 --> Q13 */
    91  y1_hi = (Word16)(L_tmp>>16);
    92  y1_lo = (Word16)((L_tmp & 0xffff)>>1)
      [all...]

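hp400.c and hp50.c are second-order IIR high-pass filters whose 32-bit recursive state is kept as 16-bit (hi, lo) pairs so every product stays 16x16; the 8192 seed pre-rounds the low-part partial sum before it is shifted down by 14 and merged with the high part. hp400.c shifts the merged result left by 1, hp50.c by 2. A one-sample sketch of the structure, with placeholder Q12 a[]/b[] coefficients and no saturation:

    #include <stdint.h>

    typedef int16_t Word16;
    typedef int32_t Word32;

    /* One output of y = b0*(x0 + x2) + b1*x1 + a1*y1 + a2*y2, where y1 and
     * y2 are stored as (hi, lo) 16-bit halves. */
    static Word32 biquad_step(Word16 x0, Word16 x1, Word16 x2,
                              Word16 y1_hi, Word16 y1_lo,
                              Word16 y2_hi, Word16 y2_lo,
                              const Word16 a[3], const Word16 b[2])
    {
        Word32 L_tmp = 8192;                 /* pre-round the low partial sum */
        L_tmp += (Word32)y1_lo * a[1];
        L_tmp += (Word32)y2_lo * a[2];
        L_tmp >>= 14;                        /* align low part with high part */
        L_tmp += ((Word32)y1_hi * a[1] + (Word32)y2_hi * a[2]
                  + ((Word32)x0 + x2) * b[0] + (Word32)x1 * b[1]) << 1;
        return L_tmp << 2;                   /* hp50.c; hp400.c shifts by 1 */
    }

    /* The caller then splits the result back into halves:
     *   y1_hi = (Word16)(L_tmp >> 16);
     *   y1_lo = (Word16)((L_tmp & 0xffff) >> 1);                           */
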
pitch_f4.c
   173  Word32 L_tmp, L_tmp1, L_tmp2;
   185  L_tmp = 0;
   188  L_tmp += (xn[i] * xn[i]);
   189  L_tmp += (xn[i+1] * xn[i+1]);
   190  L_tmp += (xn[i+2] * xn[i+2]);
   191  L_tmp += (xn[i+3] * xn[i+3]);
   194  L_tmp = (L_tmp << 1) + 1;
   195  exp = norm_l(L_tmp);
   205  L_tmp = 0
      [all...]

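The pitch_f4.c hits are the energy half of the normalized correlation: accumulate xn.xn with a 4x-unrolled loop, force the sum odd and nonzero with (L_tmp << 1) + 1 so norm_l() and the later division are safe, then normalize to get a block exponent. A sketch with a hand-rolled norm_l; the reference uses saturating shifts, and L is assumed a multiple of 4:

    #include <stdint.h>

    typedef int16_t Word16;
    typedef int32_t Word32;

    /* Shift count that brings bit 30 of L_x to the top of the magnitude. */
    static int norm_l_sketch(Word32 L_x)
    {
        int n = 0;
        if (L_x < 0) L_x = ~L_x;
        while (L_x < (Word32)0x40000000 && n < 31) { L_x <<= 1; n++; }
        return n;
    }

    /* Returns the normalized energy mantissa, writes the exponent. */
    static Word32 energy_with_exp(const Word16 xn[], int L, int *exp)
    {
        Word32 L_tmp = 0;
        for (int i = 0; i < L; i += 4) {          /* 4x unrolled, as upstream */
            L_tmp += (Word32)xn[i]     * xn[i];
            L_tmp += (Word32)xn[i + 1] * xn[i + 1];
            L_tmp += (Word32)xn[i + 2] * xn[i + 2];
            L_tmp += (Word32)xn[i + 3] * xn[i + 3];
        }
        L_tmp = (L_tmp << 1) + 1;                 /* never zero: norm is safe */
        *exp = norm_l_sketch(L_tmp);
        return L_tmp << *exp;                     /* normalized mantissa */
    }
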
pit_shrp.c
    35  Word32 L_tmp;
    40  L_tmp = (*x_ptr << 15);
    41  L_tmp += *x++ * sharp;
    42  *x_ptr++ = ((L_tmp + 0x4000)>>15);

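pit_shrp.c is pitch sharpening: x[i] += sharp * x[i - pit_lag] with sharp in Q15, done at 32-bit precision via the <<15 / +0x4000 / >>15 round trip seen in the hits. A sketch with indices instead of the source's two walking pointers:

    #include <stdint.h>

    typedef int16_t Word16;
    typedef int32_t Word32;

    /* x[i] += x[i - pit_lag] * sharp, sharp in Q15, rounded back to 16 bits. */
    static void pit_shrp_sketch(Word16 x[], int pit_lag, Word16 sharp, int length)
    {
        for (int i = pit_lag; i < length; i++) {
            Word32 L_tmp = (Word32)x[i] << 15;
            L_tmp += (Word32)x[i - pit_lag] * sharp;
            x[i] = (Word16)((L_tmp + 0x4000) >> 15);   /* round */
        }
    }
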
updt_tar.c
    36  Word32 L_tmp;
    40  L_tmp = x[i] << 15;
    41  L_tmp -= (y[i] * gain)<<1;
    42  x2[i] = extract_h(L_shl2(L_tmp, 1));

int_lpc.c
    42  Word32 L_tmp;
    51  L_tmp = (isp_old[i] * fac_old)<<1;
    52  L_tmp += (isp_new[i] * fac_new)<<1;
    53  isp[i] = (L_tmp + 0x8000)>>16;

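int_lpc.c interpolates ISP vectors between frames: a Q15 weighted average with fac_old + fac_new = 32768 (1.0 in Q15), rounded back to 16 bits. A sketch:

    #include <stdint.h>

    typedef int16_t Word16;
    typedef int32_t Word32;

    #define M 16   /* ISP vector length in AMR-WB */

    /* isp = fac_old * isp_old + fac_new * isp_new, all Q15 weights. */
    static void isp_interp_sketch(const Word16 isp_old[M], const Word16 isp_new[M],
                                  Word16 fac_old, Word16 fac_new, Word16 isp[M])
    {
        for (int i = 0; i < M; i++) {
            Word32 L_tmp = ((Word32)isp_old[i] * fac_old) << 1;
            L_tmp += ((Word32)isp_new[i] * fac_new) << 1;
            isp[i] = (Word16)((L_tmp + 0x8000) >> 16);   /* round to Q15 */
        }
    }
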
isp_isf.c
    40  Word32 L_tmp;
    52  L_tmp = vo_L_mult(vo_sub(isp[i], table[ind]), slope[ind]);
    53  isf[i] = vo_round((L_tmp << 4)); /* (isp[i]-table[ind])*slope[ind])>>11 */
    68  Word32 i, ind, L_tmp;
    82  L_tmp = vo_L_mult(vo_sub(table[ind + 1], table[ind]), offset);
    83  isp[i] = add1(table[ind], (Word16)((L_tmp >> 8)));

gpclip.c
    95  Word32 L_tmp;
    96  L_tmp = (29491 * mem[1])<<1;
    97  L_tmp += (3277 * gain_pit)<<1;
    99  gain = extract_h(L_tmp);

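gpclip.c keeps a smoothed pitch-gain history for the gain-clipping logic: mem = 0.9*mem + 0.1*gain_pit, with 0.9 = 29491/2^15 and 0.1 = 3277/2^15, exactly as the hits show. A sketch of the update:

    #include <stdint.h>

    typedef int16_t Word16;
    typedef int32_t Word32;

    /* One-pole smoother of the pitch gain, Q15 weights summing to 1.0. */
    static Word16 gp_clip_update_sketch(Word16 mem_gain, Word16 gain_pit)
    {
        Word32 L_tmp = ((Word32)29491 * mem_gain) << 1;   /* 0.9 * mem  */
        L_tmp += ((Word32)3277 * gain_pit) << 1;          /* 0.1 * gain */
        return (Word16)(L_tmp >> 16);                     /* extract_h() */
    }
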
/frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/
Norm_Corr_opt.s
    73  MOV r6, #0 @L_tmp = 0
    81  SMLABB r6, r9, r9, r6 @L_tmp += (xn[i] * xn[i])
    82  SMLATT r6, r9, r9, r6 @L_tmp += (xn[i+1] * xn[i+1])
    92  ADD r9, r7, r6, LSL #1 @L_tmp = (L_tmp << 1) + 1
    94  SUB r6, r7, #1 @exp = norm_l(L_tmp)
   104  MOV r5, #0 @L_tmp = 0
   116  SMLABB r5, r10, r11, r5 @L_tmp += xn[i] * excf[i]
   117  SMLATT r5, r10, r11, r5 @L_tmp += xn[i+1] * excf[i+1]
   127  @r5 --- L_tmp, r6 --- L_tmp
      [all...]

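The ARMv5E version packs two Word16 samples per 32-bit load and uses the DSP multiply-accumulates SMLABB (bottom halfword times bottom halfword) and SMLATT (top times top) to do two MACs per loaded word. A C rendering of what the two highlighted instructions compute:

    #include <stdint.h>

    typedef int16_t Word16;
    typedef int32_t Word32;

    /* Equivalent of "SMLABB r6, r9, r9, r6 / SMLATT r6, r9, r9, r6":
     * r9 carries xn[i] in its low half and xn[i+1] in its high half. */
    static Word32 smla_pair_sketch(Word32 acc, uint32_t packed)
    {
        Word16 lo = (Word16)(packed & 0xFFFF);       /* xn[i]   */
        Word16 hi = (Word16)(packed >> 16);          /* xn[i+1] */
        acc += (Word32)lo * lo;                      /* SMLABB  */
        acc += (Word32)hi * hi;                      /* SMLATT  */
        return acc;
    }
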
Deemph_32_opt.s
    44  MOV r10, r6, LSL #16 @L_tmp = x_hi[0]<<16
    47  ADD r12, r10, r7, LSL #4 @L_tmp += x_lo[0] << 4
    48  MOV r10, r12, LSL #3 @L_tmp <<= 3
    53  MOV r12, r10, LSL #1 @L_tmp = L_mac(L_tmp, *mem, fac)
    55  MOV r14, r10, ASR #16 @y[0] = round(L_tmp)
    66  MOV r14, r10, ASR #16 @y[1] = round(L_tmp)

/frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/
Norm_Corr_neon.s
   100  QADD r9, r6, r7 @L_tmp = (L_tmp << 1) + 1;
   102  SUB r6, r7, #1 @exp = norm_l(L_tmp)
   121  VMULL.S16 Q11, D0, D8 @L_tmp += x[] * excf[]
   162  VPADD.S32 D22, D22, D22 @D22[0] --- L_tmp << 1
   167  @r5 --- L_tmp, r6 --- L_tmp1
   169  ADD r5, r10, r5, LSL #1 @L_tmp = (L_tmp << 1) + 1
   176  SUB r10, r10, #1 @exp = norm_l(L_tmp)
   178  MOV r5, r5, LSL r10 @L_tmp = (L_tmp << exp
      [all...]

Deemph_32_neon.s
    44  MOV r10, r6, LSL #16 @L_tmp = x_hi[0]<<16
    47  ADD r12, r10, r7, LSL #4 @L_tmp += x_lo[0] << 4
    48  MOV r10, r12, LSL #3 @L_tmp <<= 3
    53  MOV r12, r10, LSL #1 @L_tmp = L_mac(L_tmp, *mem, fac)
    55  MOV r14, r10, ASR #16 @y[0] = round(L_tmp)
    66  MOV r14, r10, ASR #16 @y[1] = round(L_tmp)

Syn_filt_32_neon.s
    67  @L_tmp = L_msu(L_tmp, sig_lo[i - j], a[j])@
    85  @L_tmp = L_msu(L_tmp, sig_hi[i - j], a[j])@
   105  VADD.S32 Q12, Q12, Q10 @L_tmp = L_tmp - (result1 >>= 11) - (result2 <<= 1)
   108  VSHL.S32 Q12, Q12, #3 @L_tmp <<= 3
   111  VSHRN.S32 D20, Q12, #16 @sig_hi[i] = L_tmp >> 16@
   113  VSHR.S32 Q12, Q12, #4 @L_tmp >>= 4
   116  VMOV.S32 r11, D24[0] @r11 --- L_tmp >>=
      [all...]

/frameworks/av/media/libstagefright/codecs/amrwb/src/
pvamrwbdecoder.cpp
   323  int32 L_tmp, L_gain_code;
   449  L_tmp = mul_16by16_to_int32(isf_tmp[i], sub_int16(32767, interpol_frac[j]));
   450  L_tmp = mac_16by16_to_int32(L_tmp, isf[i], interpol_frac[j]);
   451  HfIsf[i] = amr_wb_round(L_tmp);
   531  L_tmp = 0;
   535  L_tmp = mac_16by16_to_int32(L_tmp, tmp, tmp);
   537  tmp = extract_h(shl_int32(L_tmp, 8));
   538  tmp = mult_int16(tmp, 26214); /* tmp = L_tmp*0.8/256 *
      [all...]

deemphasis_32.cpp
   130  int32 L_tmp;
   133  L_tmp = ((int32)x_hi[0]) << 16;
   134  L_tmp += ((int32)x_lo[0]) << 4;
   135  L_tmp = shl_int32(L_tmp, 3);
   137  L_tmp = fxp_mac_16by16(*mem, mu, L_tmp),
   139  L_tmp = shl_int32(L_tmp, 1); /* saturation can occur here */
   140  y[0] = amr_wb_round(L_tmp);
      [all...]

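The decoder's deemphasis_32.cpp takes its input as (hi, lo) 16-bit pairs: hi carries the top 16 bits and lo four more significant bits, so the 32-bit sample is rebuilt as (hi << 16) + (lo << 4) and shifted left by 3 to restore working scale before the mu*y[-1] feedback. A sketch of the reconstruction; the reference uses the saturating shl_int32:

    #include <stdint.h>

    typedef int16_t Word16;
    typedef int32_t Word32;

    /* Rebuild one 32-bit sample from its double-precision (hi, lo) halves. */
    static Word32 rebuild_sample_sketch(Word16 x_hi, Word16 x_lo)
    {
        Word32 L_tmp = ((Word32)x_hi) << 16;
        L_tmp += ((Word32)x_lo) << 4;
        return L_tmp << 3;       /* plain shift stands in for shl_int32() */
    }
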
dec_gain2_amr_wb.cpp
   190  int32 L_tmp;
   203  * L_tmp = 1.0 / sqrt(energy of code/ L_subfr)
   206  L_tmp = Dot_product12(code, code, L_subfr, &exp);
   209  one_ov_sqrt_norm(&L_tmp, &exp);
   211  gcode_inov = extract_h(shl_int32(L_tmp, exp - 3)); /* g_code_inov in Q12 */
   259  L_tmp = tmp;
   260  L_tmp += tmp1;
   264  L_tmp += tmp;
   265  L_tmp += tmp1;
   267  qua_ener = (int16)(L_tmp >> 3)
      [all...]