    Searched full:tmp1 (Results 226 - 250 of 1307)

1 2 3 4 5 6 7 8 9 10 11 >>

  /external/llvm/test/CodeGen/Thumb/
rev.ll 6 %tmp1 = lshr i32 %X, 8
9 %tmp2 = and i32 %tmp1, 16711680
11 %tmp9 = and i32 %tmp1, 255
22 %tmp1 = lshr i32 %X, 8
23 %tmp1.upgrd.1 = trunc i32 %tmp1 to i16
25 %tmp2 = and i16 %tmp1.upgrd.1, 255
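The rev.ll hits come from an LLVM CodeGen test that checks the shift-and-mask byte-reordering idiom is lowered to the Thumb REV/REV16 instructions. As a minimal illustration (not part of the test itself), this is the C form of the 32-bit byte-swap pattern those IR lines encode:

    #include <stdint.h>

    /* Byte-reverse a 32-bit word with shifts and masks; compilers that
     * recognize this idiom emit a single REV instruction on Thumb/ARM. */
    static inline uint32_t bswap32(uint32_t x) {
        return ((x & 0x000000ffu) << 24) |
               ((x & 0x0000ff00u) <<  8) |
               ((x & 0x00ff0000u) >>  8) |
               ((x & 0xff000000u) >> 24);
    }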
  /external/libvpx/libvpx/vpx_dsp/mips/
idct8x8_msa.c 45 v4i32 tmp0, tmp1, tmp2, tmp3; local
58 DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3);
59 SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, DCT_CONST_BITS);
60 PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1);
70 DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3);
71 SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, DCT_CONST_BITS);
72 PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1);
80 DOTP_SH2_SW(s0, s0, k1, k0, tmp0, tmp1);
81 SRARI_W2_SW(tmp0, tmp1, DCT_CONST_BITS);
82 PCKEV_H2_SH(zero, tmp0, zero, tmp1, s2, s3)
    [all...]
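The idct8x8_msa.c hits use the MIPS MSA pattern of widening dot products (DOTP_SH4_SW), a rounded right shift by DCT_CONST_BITS (SRARI_W4_SW), and a pack back to 16-bit lanes (PCKEV_H2_SH). A scalar sketch of the round-shift-and-narrow step, assuming the usual libvpx value of 14 for DCT_CONST_BITS:

    #include <stdint.h>

    /* Scalar analogue of SRARI followed by PCKEV: add half of the divisor,
     * shift right arithmetically, and narrow the 32-bit lane back to 16 bits. */
    static inline int16_t round_shift_narrow(int32_t x, int bits) {
        return (int16_t)((x + (1 << (bits - 1))) >> bits);
    }

    /* e.g. round_shift_narrow(product, 14) for DCT_CONST_BITS == 14 */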
vpx_convolve8_vert_msa.c 73 v16u8 tmp0, tmp1; local
106 tmp1 = PCKEV_XORI128_UB(out2_r, out3_r);
107 ST8x4_UB(tmp0, tmp1, dst, dst_stride);
129 v16u8 tmp0, tmp1, tmp2, tmp3; local
177 tmp0, tmp1, tmp2, tmp3);
178 XORI_B4_128_UB(tmp0, tmp1, tmp2, tmp3);
179 ST_UB4(tmp0, tmp1, tmp2, tmp3, dst, dst_stride);
210 v16u8 tmp0, tmp1, tmp2, tmp3; local
261 out3_r, tmp0, tmp1, tmp2, tmp3);
262 XORI_B4_128_UB(tmp0, tmp1, tmp2, tmp3)
307 v8u16 tmp0, tmp1; local
330 v8u16 tmp0, tmp1, tmp2, tmp3; local
372 v8u16 tmp0, tmp1, tmp2, tmp3; local
396 v8u16 tmp0, tmp1, tmp2, tmp3; local
448 v8u16 tmp0, tmp1, tmp2, tmp3; local
496 v8u16 tmp0, tmp1, tmp2, tmp3; local
564 v8u16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; local
    [all...]
loopfilter_msa.h 176 v8u16 tmp0, tmp1, tmp2; \
182 tmp1 = tmp0 + p3_in + p2_in; \
183 p2_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp1, 3); \
185 tmp1 = tmp0 + p1_in + q1_in; \
186 p1_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp1, 3); \
188 tmp1 = q2_in + q1_in + q0_in; \
189 tmp2 = tmp2 + tmp1; \
195 tmp0 = p0_in + tmp1 + tmp0; \
196 tmp1 = q3_in + q3_in; \
197 tmp1 = tmp1 + tmp0;
    [all...]
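The loopfilter_msa.h macro accumulates neighboring pixels into tmp0/tmp1/tmp2 and produces each filter8 output with __msa_srari_h(sum, 3), i.e. a rounded divide by 8. A scalar sketch of one such output, following the standard VP9 filter8 taps for p2 (illustrative; the exact accumulation order in the macro differs):

    #include <stdint.h>

    /* One filter8 output: an 8-tap neighborhood sum, rounded and divided by 8
     * (the scalar equivalent of __msa_srari_h(sum, 3)). */
    static inline int16_t filt8_p2(int16_t p3, int16_t p2, int16_t p1,
                                   int16_t p0, int16_t q0) {
        int32_t sum = 3 * p3 + 2 * p2 + p1 + p0 + q0;  /* weights sum to 8 */
        return (int16_t)((sum + 4) >> 3);
    }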
  /external/llvm/test/CodeGen/ARM/
vabd.ll 6 %tmp1 = load <8 x i8>, <8 x i8>* %A
8 %tmp3 = call <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
15 %tmp1 = load <4 x i16>, <4 x i16>* %A
17 %tmp3 = call <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
24 %tmp1 = load <2 x i32>, <2 x i32>* %A
26 %tmp3 = call <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
33 %tmp1 = load <8 x i8>, <8 x i8>* %A
35 %tmp3 = call <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
42 %tmp1 = load <4 x i16>, <4 x i16>* %A
44 %tmp3 = call <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2
    [all...]
popcnt.ll 7 %tmp1 = load <8 x i8>, <8 x i8>* %A
8 %tmp2 = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> %tmp1)
15 %tmp1 = load <16 x i8>, <16 x i8>* %A
16 %tmp2 = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %tmp1)
27 %tmp1 = load <4 x i16>, <4 x i16>* %A
28 %tmp2 = call <4 x i16> @llvm.ctpop.v4i16(<4 x i16> %tmp1)
39 %tmp1 = load <8 x i16>, <8 x i16>* %A
40 %tmp2 = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %tmp1)
54 %tmp1 = load <2 x i32>, <2 x i32>* %A
55 %tmp2 = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %tmp1)
    [all...]
vaba.ll 6 %tmp1 = load <8 x i8>, <8 x i8>* %A
10 %tmp5 = add <8 x i8> %tmp1, %tmp4
17 %tmp1 = load <4 x i16>, <4 x i16>* %A
21 %tmp5 = add <4 x i16> %tmp1, %tmp4
28 %tmp1 = load <2 x i32>, <2 x i32>* %A
32 %tmp5 = add <2 x i32> %tmp1, %tmp4
39 %tmp1 = load <8 x i8>, <8 x i8>* %A
43 %tmp5 = add <8 x i8> %tmp1, %tmp4
50 %tmp1 = load <4 x i16>, <4 x i16>* %A
54 %tmp5 = add <4 x i16> %tmp1, %tmp
    [all...]
vqshrn.ll 6 %tmp1 = load <8 x i16>, <8 x i16>* %A
7 %tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftns.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
14 %tmp1 = load <4 x i32>, <4 x i32>* %A
15 %tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftns.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
22 %tmp1 = load <2 x i64>, <2 x i64>* %A
23 %tmp2 = call <2 x i32> @llvm.arm.neon.vqshiftns.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
30 %tmp1 = load <8 x i16>, <8 x i16>* %A
31 %tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftnu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
38 %tmp1 = load <4 x i32>, <4 x i32>* %A
39 %tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftnu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >
    [all...]
vcombine.ll 14 %tmp1 = load <8 x i8>, <8 x i8>* %A
16 %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
30 %tmp1 = load <4 x i16>, <4 x i16>* %A
32 %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
47 %tmp1 = load <2 x i32>, <2 x i32>* %A
49 %tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
64 %tmp1 = load <2 x float>, <2 x float>* %A
66 %tmp3 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
80 %tmp1 = load <1 x i64>, <1 x i64>* %A
82 %tmp3 = shufflevector <1 x i64> %tmp1, <1 x i64> %tmp2, <2 x i32> <i32 0, i32 1
    [all...]
vminmax.ll 6 %tmp1 = load <8 x i8>, <8 x i8>* %A
8 %tmp3 = call <8 x i8> @llvm.arm.neon.vmins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
15 %tmp1 = load <4 x i16>, <4 x i16>* %A
17 %tmp3 = call <4 x i16> @llvm.arm.neon.vmins.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
24 %tmp1 = load <2 x i32>, <2 x i32>* %A
26 %tmp3 = call <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
33 %tmp1 = load <8 x i8>, <8 x i8>* %A
35 %tmp3 = call <8 x i8> @llvm.arm.neon.vminu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
42 %tmp1 = load <4 x i16>, <4 x i16>* %A
44 %tmp3 = call <4 x i16> @llvm.arm.neon.vminu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2
    [all...]
  /external/llvm/test/CodeGen/X86/
pr1489.ll 11 %tmp1 = tail call i32 @lrintf( float 0x400FFDF3C0000000 ) ; <i32> [#uses=1]
12 %tmp2 = icmp slt i32 %tmp1, 1 ; <i1> [#uses=1]
21 %tmp1 = tail call i32 @lrint( double 3.999000e+00 ) ; <i32> [#uses=1]
22 %tmp2 = icmp slt i32 %tmp1, 1 ; <i1> [#uses=1]
31 %tmp1 = tail call i32 @lrintf( float 0x400FFDF3C0000000 ) ; <i32> [#uses=1]
32 %tmp2 = icmp slt i32 %tmp1, 1 ; <i1> [#uses=1]
39 %tmp1 = tail call i32 @lrintf( float 0x400FFDF3C0000000 ) ; <i32> [#uses=1]
40 %tmp2 = icmp slt i32 %tmp1, 1 ; <i1> [#uses=1]
48 %tmp1 = tail call i32 @bar( ) ; <i32> [#uses=1]
51 %tmp5 = tail call i32 (i8*, ...) @printf( i8* getelementptr ([13 x i8], [13 x i8]* @.str, i32 0, i32 0), i32 %tmp3, i32 %tmp2, i32 %tmp1, i32 %tmp ) ; <i32> [#uses=0
    [all...]
  /external/skia/src/opts/
SkBitmapProcState_filter_neon.h 30 uint16x8_t tmp1, tmp2; local
41 tmp1 = vmull_u8(vreinterpret_u8_u32(va0), v16_y); // tmp1 = [a01|a00] * (16-y)
48 tmp = vmul_u16(vget_high_u16(tmp1), vx); // tmp = a01 * x
50 tmp = vmla_u16(tmp, vget_low_u16(tmp1), v16_x); // tmp += a00 * (16-x)
65 uint16x8_t tmp1, tmp2; local
76 tmp1 = vmull_u8(vreinterpret_u8_u32(va0), v16_y); // tmp1 = [a01|a00] * (16-y)
83 tmp = vmul_u16(vget_high_u16(tmp1), vx); // tmp = a01 * x
85 tmp = vmla_u16(tmp, vget_low_u16(tmp1), v16_x); // tmp += a00 * (16-x
    [all...]
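The SkBitmapProcState_filter_neon.h hits vectorize Skia's bilinear filter, where x and y are 4-bit subpixel fractions and tmp1 holds the y-weighted pair of source pixels. A scalar, per-channel sketch of the blend described by the comments (the real code works on packed 8888 pixels; names here are illustrative):

    #include <stdint.h>

    /* x, y in [0, 16); the four weights sum to 256, hence the final >> 8. */
    static inline uint32_t bilerp_channel(uint32_t a00, uint32_t a01,
                                          uint32_t a10, uint32_t a11,
                                          unsigned x, unsigned y) {
        return (a00 * (16 - x) * (16 - y) +
                a01 * x        * (16 - y) +
                a10 * (16 - x) * y        +
                a11 * x        * y) >> 8;
    }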
  /system/core/libpixelflinger/include/private/pixelflinger/
ggl_fixed.h 198 GGLfixed result,tmp,tmp1,tmp2; local
214 "addu %[tmp1],%[tmp],%[res] \t\n"
215 "sltu %[tmp1],%[tmp1],%[tmp]\t\n" /*obit*/
219 "addu %[res],%[res],%[tmp1]\t\n"
220 : [res]"=&r"(result),[tmp]"=&r"(tmp),[tmp1]"=&r"(tmp1)
230 "addu %[tmp1],%[tmp],%[res] \t\n"
231 "sltu %[tmp1],%[tmp1],%[tmp] \t\n" /*obit?*
294 GGLfixed result,t,tmp1,tmp2; local
366 GGLfixed result,t,tmp1,tmp2; local
528 GGLfixed result,tmp,tmp1,tmp2; local
617 GGLfixed result,t,tmp1,tmp2; local
683 GGLfixed result,t,tmp1,tmp2; local
    [all...]
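The ggl_fixed.h hits are MIPS inline-assembly versions of pixelflinger's fixed-point multiply; tmp1 collects the rounding carry (the sltu "obit" sequence). A portable sketch of the operation being implemented, assuming the usual 16.16 GGLfixed format and a caller-supplied rescale shift:

    #include <stdint.h>

    typedef int32_t GGLfixed;   /* assumed 16.16 fixed point */

    /* Multiply two fixed-point values, round to nearest, and rescale by
     * `shift`; the assembly does the same with 32-bit halves plus an
     * explicit carry instead of a 64-bit intermediate. */
    static inline GGLfixed fixed_mul(GGLfixed x, GGLfixed y, int shift) {
        int64_t p = (int64_t)x * y;
        return (GGLfixed)((p + ((int64_t)1 << (shift - 1))) >> shift);
    }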
  /external/llvm/test/Assembler/
2007-12-11-AddressSpaces.ll 15 %tmp1 = load i32 addrspace(33)*, i32 addrspace(33)* addrspace(42)* getelementptr (%struct.mystruct, %struct.mystruct addrspace(42)* @input, i32 0, i32 3), align 4 ; <i32 addrspace(33)*> [#uses=1]
16 store i32 addrspace(33)* %tmp1, i32 addrspace(33)* addrspace(66)* getelementptr (%struct.mystruct, %struct.mystruct addrspace(66)* @output, i32 0, i32 1), align 4
22 %tmp1 = load i32 addrspace(11)* addrspace(22)*, i32 addrspace(11)* addrspace(22)* addrspace(33)* @y, align 4 ; <i32 addrspace(11)* addrspace(22)*> [#uses=2]
23 store i32 addrspace(11)* addrspace(22)* %tmp1, i32 addrspace(11)* addrspace(22)* addrspace(33)* %x, align 4
24 %tmp5 = load i32 addrspace(11)*, i32 addrspace(11)* addrspace(22)* %tmp1, align 4 ; <i32 addrspace(11)*> [#uses=1]
  /external/llvm/test/Transforms/InstCombine/
neon-intrinsics.ll 15 %tmp1 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32.p0i8(i8* bitcast ([8 x i32]* @x to i8*), i32 1)
16 %tmp2 = extractvalue %struct.__neon_int32x2x4_t %tmp1, 0
17 %tmp3 = extractvalue %struct.__neon_int32x2x4_t %tmp1, 1
18 %tmp4 = extractvalue %struct.__neon_int32x2x4_t %tmp1, 2
19 %tmp5 = extractvalue %struct.__neon_int32x2x4_t %tmp1, 3
  /bionic/libc/arch-arm64/generic/bionic/
strlen.S 46 #define tmp1 x7 define
61 ands tmp1, srcin, #15
73 sub tmp1, data1, zeroones
77 bic has_nul1, tmp1, tmp2
97 sub tmp1, data2, zeroones
99 bic has_nul2, tmp1, tmp2
108 cmp tmp1, #8
109 neg tmp1, tmp1
111 lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. *
    [all...]
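The strlen.S hits are bionic's AArch64 strlen, which reads 16 bytes per iteration and locates the terminating NUL with a word-at-a-time bit trick; tmp1 holds the data-minus-0x0101... term. A C sketch of the detection step that the sub/bic sequence implements (the orr with 0x7f7f... that would produce tmp2 is not visible in the snippet, so it is assumed here):

    #include <stdint.h>

    /* Non-zero iff some byte of `word` is zero: subtracting 1 from every
     * byte borrows through the high bit only where the byte was 0x00, and
     * masking with ~(word | 0x7f..7f) removes false positives from bytes
     * that already had their high bit set. */
    static inline uint64_t has_zero_byte(uint64_t word) {
        const uint64_t zeroones = 0x0101010101010101ULL;
        const uint64_t sevenf   = 0x7f7f7f7f7f7f7f7fULL;
        return (word - zeroones) & ~(word | sevenf);
    }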
  /external/llvm/test/Instrumentation/ThreadSanitizer/
unaligned.ll 7 %tmp1 = load i16, i16* %a, align 1
8 ret i16 %tmp1
15 ; CHECK-NEXT: %tmp1 = load i16, i16* %a, align 1
21 %tmp1 = load i32, i32* %a, align 2
22 ret i32 %tmp1
29 ; CHECK-NEXT: %tmp1 = load i32, i32* %a, align 2
35 %tmp1 = load i64, i64* %a, align 4
36 ret i64 %tmp1
43 ; CHECK-NEXT: %tmp1 = load i64, i64* %a, align 4
49 %tmp1 = load i128, i128* %a, align
    [all...]
  /external/llvm/test/CodeGen/AArch64/
arm64-vadd.ll 6 %tmp1 = load <8 x i16>, <8 x i16>* %A
8 %tmp3 = call <8 x i8> @llvm.aarch64.neon.addhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
15 %tmp1 = load <4 x i32>, <4 x i32>* %A
17 %tmp3 = call <4 x i16> @llvm.aarch64.neon.addhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2)
24 %tmp1 = load <2 x i64>, <2 x i64>* %A
26 %tmp3 = call <2 x i32> @llvm.aarch64.neon.addhn.v2i32(<2 x i64> %tmp1, <2 x i64> %tmp2)
68 %tmp1 = load <8 x i16>, <8 x i16>* %A
70 %tmp3 = call <8 x i8> @llvm.aarch64.neon.raddhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
77 %tmp1 = load <4 x i32>, <4 x i32>* %A
79 %tmp3 = call <4 x i16> @llvm.aarch64.neon.raddhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2
    [all...]
arm64-fmuladd.ll 7 %tmp1 = load float, float* %A
10 %tmp4 = call float @llvm.fmuladd.f32(float %tmp1, float %tmp2, float %tmp3)
18 %tmp1 = load <2 x float>, <2 x float>* %A
21 %tmp4 = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> %tmp1, <2 x float> %tmp2, <2 x float> %tmp3)
29 %tmp1 = load <4 x float>, <4 x float>* %A
32 %tmp4 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %tmp1, <4 x float> %tmp2, <4 x float> %tmp3)
41 %tmp1 = load <8 x float>, <8 x float>* %A
44 %tmp4 = call <8 x float> @llvm.fmuladd.v8f32(<8 x float> %tmp1, <8 x float> %tmp2, <8 x float> %tmp3)
52 %tmp1 = load double, double* %A
55 %tmp4 = call double @llvm.fmuladd.f64(double %tmp1, double %tmp2, double %tmp3
    [all...]
arm64-vqadd.ll 6 %tmp1 = load <8 x i8>, <8 x i8>* %A
8 %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
15 %tmp1 = load <4 x i16>, <4 x i16>* %A
17 %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
24 %tmp1 = load <2 x i32>, <2 x i32>* %A
26 %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
33 %tmp1 = load <8 x i8>, <8 x i8>* %A
35 %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
42 %tmp1 = load <4 x i16>, <4 x i16>* %A
44 %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2
    [all...]
  /external/webrtc/webrtc/common_audio/signal_processing/
resample_by_2_mips.c 151 int32_t tmp1, tmp2, diff; local
158 tmp1 = MUL_ACCUM_1(kResampleAllpass2[0], diff, state0);
160 diff = tmp1 - state2;
162 state1 = tmp1;
170 tmp1 = MUL_ACCUM_1(kResampleAllpass1[0], diff, state4);
172 diff = tmp1 - state6;
174 state5 = tmp1;
187 tmp1 = MUL_ACCUM_1(kResampleAllpass2[0], diff, state0);
189 diff = tmp1 - state2;
191 state1 = tmp1;
    [all...]
resample_by_2.c 72 int32_t tmp1, tmp2, diff, in32, out32; local
88 tmp1 = MUL_ACCUM_1(kResampleAllpass2[0], diff, state0);
90 diff = tmp1 - state2;
92 state1 = tmp1;
100 tmp1 = MUL_ACCUM_1(kResampleAllpass1[0], diff, state4);
102 diff = tmp1 - state6;
104 state5 = tmp1;
130 int32_t tmp1, tmp2, diff, in32, out32; local
146 tmp1 = MUL_ACCUM_1(kResampleAllpass1[0], diff, state0);
148 diff = tmp1 - state2
    [all...]
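Both resample_by_2 files implement the same half-band decimator as a cascade of first-order all-pass sections; diff is the input minus the previous section output and tmp1/tmp2 are the new section outputs. A floating-point sketch of one section (the fixed-point MUL_ACCUM_* macros fold the coefficient multiply and the state addition into a single step):

    /* First-order all-pass section: y[n] = x[n-1] + c * (x[n] - y[n-1]). */
    typedef struct { double prev_in, prev_out; } allpass1_t;

    static double allpass1_step(allpass1_t *s, double c, double x) {
        double y = s->prev_in + c * (x - s->prev_out);
        s->prev_in  = x;
        s->prev_out = y;
        return y;
    }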
  /external/llvm/test/CodeGen/Thumb2/
thumb2-pack.ll 6 %tmp1 = and i32 %X, 65535 ; <i32> [#uses=1]
8 %tmp5 = or i32 %tmp4, %tmp1 ; <i32> [#uses=1]
24 %tmp1 = and i32 %X, 65535 ; <i32> [#uses=1]
27 %tmp57 = or i32 %tmp4, %tmp1 ; <i32> [#uses=1]
43 %tmp1 = and i32 %X, 65535 ; <i32> [#uses=1]
45 %tmp46 = or i32 %tmp3, %tmp1 ; <i32> [#uses=1]
72 %tmp1 = and i32 %X, -65536 ; <i32> [#uses=1]
76 %tmp59 = or i32 %tmp4, %tmp1 ; <i32> [#uses=1]
83 %tmp1 = and i32 %X, -65536 ; <i32> [#uses=1]
86 %tmp57 = or i32 %tmp4, %tmp1 ; <i32> [#uses=1
    [all...]
  /external/opencv3/3rdparty/libjpeg/
jfdctint.c 160 INT32 tmp0, tmp1, tmp2, tmp3; local
181 tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[6]);
187 tmp11 = tmp1 + tmp2;
188 tmp13 = tmp1 - tmp2;
191 tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[6]);
213 tmp11 = tmp1 + tmp2;
215 tmp13 = tmp1 + tmp3;
221 tmp1 = MULTIPLY(tmp1, FIX_3_072711026); /* c1+c3+c5-c7 */
235 RIGHT_SHIFT(tmp1 + tmp11 + tmp13, CONST_BITS-PASS1_BITS)
330 INT32 tmp0, tmp1, tmp2, tmp3; local
465 INT32 tmp0, tmp1, tmp2; local
578 INT32 tmp0, tmp1, tmp2; local
688 INT32 tmp0, tmp1; local
781 INT32 tmp0, tmp1, tmp2; local
866 INT32 tmp0, tmp1, tmp2, tmp3; local
928 INT32 tmp0, tmp1, tmp2, tmp3, tmp4; local
1078 INT32 tmp0, tmp1, tmp2, tmp3, tmp4; local
1241 INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5; local
1424 INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5; local
1589 INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6; local
1788 INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6; local
1988 INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; local
2167 INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; local
2375 INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; local
2556 INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6; local
2720 INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5; local
2859 INT32 tmp0, tmp1, tmp2, tmp3, tmp4; local
2991 INT32 tmp0, tmp1, tmp2, tmp3; local
3127 INT32 tmp0, tmp1, tmp2; local
3227 INT32 tmp0, tmp1; local
3310 INT32 tmp0, tmp1; local
3343 INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; local
3531 INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6; local
3712 INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5; local
3861 INT32 tmp0, tmp1, tmp2, tmp3, tmp4; local
4007 INT32 tmp0, tmp1, tmp2, tmp3; local
4143 INT32 tmp0, tmp1, tmp2; local
4248 INT32 tmp0, tmp1; local
4327 INT32 tmp0, tmp1; local
    [all...]
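The jfdctint.c hits are libjpeg's slow-but-accurate integer forward DCT, where tmp0..tmp7 hold sums and differences of mirrored samples and DESCALE performs a rounded right shift. A sketch of the even half of one 8-point pass in the classic libjpeg formulation (this file is the newer scaled-DCT variant, so variable names and ordering differ; constants are the standard 13-bit fixed-point values):

    #include <stdint.h>

    #define CONST_BITS 13
    #define PASS1_BITS 2
    #define FIX_0_541196100   4433   /* sqrt(2)*cos(3*pi/8) */
    #define FIX_0_765366865   6270
    #define FIX_1_847759065  15137
    #define DESCALE(x, n) (((x) + ((int32_t)1 << ((n) - 1))) >> (n))

    /* Even-index outputs of an 8-point row: out[0], out[2], out[4], out[6]. */
    static void fdct8_even(const int32_t in[8], int32_t out[8]) {
        int32_t tmp0 = in[0] + in[7], tmp1 = in[1] + in[6];
        int32_t tmp2 = in[2] + in[5], tmp3 = in[3] + in[4];
        int32_t tmp10 = tmp0 + tmp3, tmp13 = tmp0 - tmp3;
        int32_t tmp11 = tmp1 + tmp2, tmp12 = tmp1 - tmp2;
        int32_t z1 = (tmp12 + tmp13) * FIX_0_541196100;

        out[0] = (tmp10 + tmp11) << PASS1_BITS;
        out[4] = (tmp10 - tmp11) << PASS1_BITS;
        out[2] = DESCALE(z1 + tmp13 * FIX_0_765366865, CONST_BITS - PASS1_BITS);
        out[6] = DESCALE(z1 - tmp12 * FIX_1_847759065, CONST_BITS - PASS1_BITS);
    }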
  /external/libopus/silk/float/
burg_modified_FLP.c 49 double C0, invGain, num, nrg_f, nrg_b, rc, Atmp, tmp1, tmp2; local
79 tmp1 = x_ptr[ n ];
85 tmp1 += x_ptr[ n - k - 1 ] * Atmp;
89 CAf[ k ] -= tmp1 * x_ptr[ n - k ];
93 tmp1 = C_first_row[ n ];
97 tmp1 += C_last_row[ n - k - 1 ] * Atmp;
100 CAf[ n + 1 ] = tmp1;
121 tmp1 = invGain * ( 1.0 - rc * rc );
122 if( tmp1 <= minInvGain ) {
132 invGain = tmp1;
    [all...]
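The burg_modified_FLP.c hits are the reflection-coefficient update in SILK's modified Burg recursion: each new coefficient rc shrinks the inverse prediction gain by (1 - rc*rc), and the recursion stops once a minimum gain would be violated. A small sketch of that update, as suggested by the snippet:

    /* Returns 1 and updates *invGain if the new reflection coefficient rc is
     * acceptable; returns 0 when the minimum inverse gain would be violated
     * (the source then limits rc and ends the recursion). */
    static int update_inv_gain(double *invGain, double rc, double minInvGain) {
        double next = *invGain * (1.0 - rc * rc);
        if (next <= minInvGain)
            return 0;
        *invGain = next;
        return 1;
    }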

Completed in 5932 milliseconds
