    Searched full:tmp1 (Results 351 - 375 of 1307) sorted by null

  /external/llvm/test/CodeGen/ARM/
vlddup.ll 7 %tmp1 = load i8, i8* %A, align 8
8 %tmp2 = insertelement <8 x i8> undef, i8 %tmp1, i32 0
17 %tmp1 = load i16, i16* %A, align 8
18 %tmp2 = insertelement <4 x i16> undef, i16 %tmp1, i32 0
27 %tmp1 = load i32, i32* %A, align 8
28 %tmp2 = insertelement <2 x i32> undef, i32 %tmp1, i32 0
37 %tmp1 = insertelement <2 x float> undef, float %tmp0, i32 0
38 %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> zeroinitializer
46 %tmp1 = load i8, i8* %A, align 8
47 %tmp2 = insertelement <16 x i8> undef, i8 %tmp1, i32
    [all...]
vmla.ll 6 %tmp1 = load <8 x i8>, <8 x i8>* %A
10 %tmp5 = add <8 x i8> %tmp1, %tmp4
17 %tmp1 = load <4 x i16>, <4 x i16>* %A
21 %tmp5 = add <4 x i16> %tmp1, %tmp4
28 %tmp1 = load <2 x i32>, <2 x i32>* %A
32 %tmp5 = add <2 x i32> %tmp1, %tmp4
39 %tmp1 = load <2 x float>, <2 x float>* %A
43 %tmp5 = fadd <2 x float> %tmp1, %tmp4
50 %tmp1 = load <16 x i8>, <16 x i8>* %A
54 %tmp5 = add <16 x i8> %tmp1, %tmp
    [all...]
vmls.ll 6 %tmp1 = load <8 x i8>, <8 x i8>* %A
10 %tmp5 = sub <8 x i8> %tmp1, %tmp4
17 %tmp1 = load <4 x i16>, <4 x i16>* %A
21 %tmp5 = sub <4 x i16> %tmp1, %tmp4
28 %tmp1 = load <2 x i32>, <2 x i32>* %A
32 %tmp5 = sub <2 x i32> %tmp1, %tmp4
39 %tmp1 = load <2 x float>, <2 x float>* %A
43 %tmp5 = fsub <2 x float> %tmp1, %tmp4
50 %tmp1 = load <16 x i8>, <16 x i8>* %A
54 %tmp5 = sub <16 x i8> %tmp1, %tmp
    [all...]
vtrn.ll 12 %tmp1 = load <8 x i8>, <8 x i8>* %A
14 %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
15 %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
29 %tmp1 = load <8 x i8>, <8 x i8>* %A
31 %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <16 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14, i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
44 %tmp1 = load <4 x i16>, <4 x i16>* %A
46 %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
47 %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
61 %tmp1 = load <4 x i16>, <4 x i16>* %A
63 %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <8 x i32> <i32 0, i32 4, i32 2, i32 6, i32 1, i32 5, i32 3, i32 7
    [all...]
vmov.ll 194 %tmp1 = load <8 x i8>, <8 x i8>* %A
195 %tmp2 = sext <8 x i8> %tmp1 to <8 x i16>
202 %tmp1 = load <4 x i16>, <4 x i16>* %A
203 %tmp2 = sext <4 x i16> %tmp1 to <4 x i32>
210 %tmp1 = load <2 x i32>, <2 x i32>* %A
211 %tmp2 = sext <2 x i32> %tmp1 to <2 x i64>
218 %tmp1 = load <8 x i8>, <8 x i8>* %A
219 %tmp2 = zext <8 x i8> %tmp1 to <8 x i16>
226 %tmp1 = load <4 x i16>, <4 x i16>* %A
227 %tmp2 = zext <4 x i16> %tmp1 to <4 x i32
    [all...]
mvn.ll 16 %tmp1 = shl i32 %a, 2 ; <i32> [#uses=1]
17 %tmp1not = xor i32 %tmp1, -1 ; <i32> [#uses=1]
31 %tmp1 = lshr i32 %a, 2 ; <i32> [#uses=1]
32 %tmp1not = xor i32 %tmp1, -1 ; <i32> [#uses=1]
46 %tmp1 = ashr i32 %a, 2 ; <i32> [#uses=1]
47 %tmp1not = xor i32 %tmp1, -1 ; <i32> [#uses=1]
vbsl.ll 8 %tmp1 = load <8 x i8>, <8 x i8>* %A
11 %tmp4 = and <8 x i8> %tmp1, %tmp2
12 %tmp5 = xor <8 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
21 %tmp1 = load <4 x i16>, <4 x i16>* %A
24 %tmp4 = and <4 x i16> %tmp1, %tmp2
25 %tmp5 = xor <4 x i16> %tmp1, < i16 -1, i16 -1, i16 -1, i16 -1 >
34 %tmp1 = load <2 x i32>, <2 x i32>* %A
37 %tmp4 = and <2 x i32> %tmp1, %tmp2
38 %tmp5 = xor <2 x i32> %tmp1, < i32 -1, i32 -1 >
47 %tmp1 = load <1 x i64>, <1 x i64>* %
    [all...]
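
The vbsl.ll hits above build the bitwise-select pattern the ARM backend is expected to match to a single VBSL: and the mask with one operand, xor the mask with all-ones to invert it, then (in the lines the excerpt cuts off) and the inverted mask with the other operand and or the halves together. A minimal C sketch of the same identity (names are illustrative, not taken from the test):

#include <stdint.h>

/* Bitwise select: take bits of x where mask is 1 and bits of y where mask
 * is 0 -- the scalar form of the and / xor -1 / and / or sequence in the
 * test, which NEON can perform with one VBSL instruction. */
static inline uint8_t bit_select(uint8_t mask, uint8_t x, uint8_t y)
{
    return (uint8_t)((mask & x) | (~mask & y));
}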
  /external/wpa_supplicant_8/src/crypto/
milenage.c 39 u8 tmp1[16], tmp2[16], tmp3[16]; local
42 /* tmp1 = TEMP = E_K(RAND XOR OP_C) */
44 tmp1[i] = _rand[i] ^ opc[i];
45 if (aes_128_encrypt_block(k, tmp1, tmp1))
60 tmp3[i] ^= tmp1[i];
64 if (aes_128_encrypt_block(k, tmp3, tmp1))
67 tmp1[i] ^= opc[i];
69 os_memcpy(mac_a, tmp1, 8); /* f1 */
71 os_memcpy(mac_s, tmp1 + 8, 8); /* f1* *
91 u8 tmp1[16], tmp2[16], tmp3[16]; local
    [all...]
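
The milenage.c lines sketch the front of the 3GPP Milenage f1/f1* computation: TEMP = E_K(RAND XOR OP_C), further AES passes mix in OP_C again, and the first and second 8-byte halves of the final block become MAC-A and MAC-S. A rough C sketch of just that first step, with the block cipher passed in as a callback rather than wpa_supplicant's own AES wrapper so the sketch stands alone (the full f1 also mixes in SQN/AMF with a rotation before the second encryption, omitted here):

#include <stdint.h>
#include <string.h>

/* One-block AES-128 encrypt callback: out = E_key(in). Stands in for the
 * library's AES primitive so this sketch is self-contained. */
typedef void (*block_encrypt_fn)(const uint8_t key[16], const uint8_t in[16],
                                 uint8_t out[16]);

/* First step of Milenage f1/f1*: TEMP = E_K(RAND XOR OP_C). The real f1
 * continues with SQN/AMF mixing, a rotation, a second encryption and a
 * final XOR with OP_C before splitting the block into MAC-A and MAC-S. */
static void milenage_temp(block_encrypt_fn enc, const uint8_t k[16],
                          const uint8_t opc[16], const uint8_t rand_[16],
                          uint8_t temp[16])
{
    uint8_t tmp1[16];
    for (int i = 0; i < 16; i++)
        tmp1[i] = (uint8_t)(rand_[i] ^ opc[i]);  /* RAND XOR OP_C */
    enc(k, tmp1, tmp1);                          /* TEMP = E_K(tmp1) */
    memcpy(temp, tmp1, 16);
}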
  /bionic/libc/arch-arm64/generic/bionic/
memmove.S 41 #define tmp1 x3 define
66 add tmp1, src, count
67 cmp dstin, tmp1
83 ands tmp1, count, #0x30
85 sub dst, dst, tmp1
86 sub src, src, tmp1
102 ldr tmp1, [src, #-8]!
103 str tmp1, [dst, #-8]!
129 ldr tmp1, [src, #-8]!
130 str tmp1, [dst, #-8]
    [all...]
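
In memmove.S, tmp1 holds src + count so the routine can tell whether the destination starts inside the source range and, if so, copy backwards with pre-decremented loads and stores. The same direction check in portable C, stripped of the alignment handling and unrolling (sketch only):

#include <stddef.h>
#include <stdint.h>

/* Simplified memmove: the "add tmp1, src, count; cmp dstin, tmp1" test
 * decides whether dst overlaps the tail of src; if it does, copying from
 * high addresses to low keeps not-yet-copied source bytes intact. */
void *memmove_sketch(void *dst, const void *src, size_t n)
{
    uint8_t *d = (uint8_t *)dst;
    const uint8_t *s = (const uint8_t *)src;
    if (d > s && d < s + n) {
        while (n--)                       /* overlap: copy backwards */
            d[n] = s[n];
    } else {
        for (size_t i = 0; i < n; i++)    /* no overlap: copy forwards */
            d[i] = s[i];
    }
    return dst;
}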
strncmp.S 53 #define tmp1 x8 define
69 eor tmp1, src1, src2
71 tst tmp1, #7
73 ands tmp1, src1, #7
88 sub tmp1, data1, zeroones
92 bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
151 sub tmp1, tmp3, zeroones
153 bic has_nul, tmp1, tmp2
179 neg tmp3, tmp1, lsl #3 /* 64 - bits(bytes beyond align). */
185 lsl tmp2, tmp2, tmp3 /* Shift (tmp1 & 63). *
    [all...]
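
The strncmp.S lines are the usual word-at-a-time NUL scan: zeroones holds 0x0101...01, tmp2 (computed outside the quoted lines) is presumably data1 with 0x7f ORed into every byte, and has_nul ends up nonzero exactly when the 8-byte word contains a zero byte. The same trick in C (a sketch of the test only, not the whole comparison loop):

#include <stdint.h>

/* Nonzero iff the 64-bit word contains a zero byte. Mirrors
 * "sub tmp1, data1, zeroones; bics has_nul, tmp1, tmp2": subtracting 1
 * from every byte turns 0x00 into 0xff, and masking with ~word & 0x80..80
 * discards bytes whose top bit was already set, so only genuine zero
 * bytes leave a bit standing. */
static inline uint64_t has_zero_byte(uint64_t word)
{
    const uint64_t zeroones = 0x0101010101010101ULL;
    const uint64_t highbits = 0x8080808080808080ULL;
    return (word - zeroones) & ~word & highbits;
}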
  /external/webrtc/webrtc/common_audio/signal_processing/
min_max_operations_mips.c 231 int tmp1 = 0, max_value = 0x7fffffff; local
243 "slt %[tmp1], %[maximum], %[absolute] \n\t"
244 "movn %[maximum], %[absolute], %[tmp1] \n\t"
247 "slt %[tmp1], %[max_value], %[maximum] \n\t"
248 "movn %[maximum], %[max_value], %[tmp1] \n\t"
252 : [tmp1] "=&r" (tmp1), [maximum] "+r" (maximum), [absolute] "+r" (absolute)
264 int tmp1; local
276 "slt %[tmp1], %[maximum], %[value] \n\t"
277 "movn %[maximum], %[value], %[tmp1] \n\t
293 int tmp1, value; local
322 int tmp1; local
352 int tmp1, value; local
    [all...]
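
The min_max_operations_mips.c hits use slt to form a 0/1 flag and movn to conditionally overwrite the running maximum, i.e. a branch-free max update, with a second slt/movn pair clamping the result to 0x7fffffff so the absolute value of INT32_MIN saturates. The same logic in portable C (sketch; the real routine also computes the absolute value in assembly):

#include <stddef.h>
#include <stdint.h>

/* Running maximum of |v[i]| over an int32_t vector, saturated to INT32_MAX.
 * Each "if" corresponds to one slt/movn pair in the MIPS assembly. */
int32_t max_abs_value_w32(const int32_t *v, size_t length)
{
    int64_t maximum = 0;
    for (size_t i = 0; i < length; i++) {
        int64_t absolute = v[i] < 0 ? -(int64_t)v[i] : (int64_t)v[i];
        if (maximum < absolute)
            maximum = absolute;      /* slt tmp1, maximum, absolute; movn */
        if (maximum > INT32_MAX)
            maximum = INT32_MAX;     /* clamp so |INT32_MIN| saturates */
    }
    return (int32_t)maximum;
}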
resample.c 316 int32_t tmp1 = 16384; local
321 tmp1 += coef * in1[0];
325 tmp1 += coef * in1[1];
329 tmp1 += coef * in1[2];
333 tmp1 += coef * in1[3];
337 tmp1 += coef * in1[4];
341 tmp1 += coef * in1[5];
345 tmp1 += coef * in1[6];
349 tmp1 += coef * in1[7];
353 *out1 = tmp1 + coef * in1[8]
362 int32_t tmp1 = 16384; local
    [all...]
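
The resample.c lines are one unrolled 9-tap FIR output of a polyphase resampler: the accumulator starts at 16384, which reads as a round-to-nearest term for a 15-bit downshift applied after the quoted lines (the Q15 coefficient scale and the final shift are assumptions here, not visible in the excerpt). A compact C sketch of that pattern:

#include <stdint.h>

/* One FIR output in the style of the excerpt: start the accumulator at
 * 16384 (0.5 in Q15) so the final ">> 15" rounds to nearest, then add the
 * nine coefficient/input products. Coefficients are placeholders, not
 * WebRTC's actual filter. */
static int16_t fir9_q15(const int16_t in[9], const int16_t coef[9])
{
    int32_t acc = 16384;                    /* tmp1 = 16384 */
    for (int k = 0; k < 9; k++)
        acc += (int32_t)coef[k] * in[k];    /* tmp1 += coef * in1[k] */
    return (int16_t)(acc >> 15);            /* assumed Q15 downscale */
}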
  /external/libvpx/libvpx/vpx_dsp/mips/
vpx_convolve8_avg_msa.c 24 v16u8 dst0, dst1, dst2, dst3, mask0, mask1, mask2, mask3, tmp0, tmp1; local
84 PCKEV_B2_UB(res0, res0, res1, res1, tmp0, tmp1);
85 XORI_B2_128_UB(tmp0, tmp1);
86 AVER_UB2_UB(tmp0, dst0, tmp1, dst2, tmp0, tmp1);
87 ST4x4_UB(tmp0, tmp1, 0, 1, 0, 1, dst, dst_stride);
110 v8i16 hz_out7, hz_out8, hz_out9, hz_out10, tmp0, tmp1, tmp2, tmp3; local
166 tmp1 = FILT_8TAP_DPADD_S_H(out4, out5, out6, out7, filt_vt0, filt_vt1,
181 SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
182 SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7)
254 v8u16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, tmp0, tmp1, filt; local
293 v8u16 hz_out7, hz_out8, tmp0, tmp1, tmp2, tmp3; local
359 v8u16 hz_out0, hz_out1, tmp0, tmp1, tmp2, tmp3; local
407 v8u16 hz_out0, hz_out1, tmp0, tmp1, tmp2, tmp3; local
481 v8u16 hz_out0, hz_out1, hz_out2, hz_out3, tmp0, tmp1; local
    [all...]
  /external/llvm/test/CodeGen/AArch64/
arm64-vcmp.ll 19 %tmp1 = load <2 x float>, <2 x float>* %A
21 %tmp3 = call <2 x i32> @llvm.aarch64.neon.facge.v2i32.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
28 %tmp1 = load <4 x float>, <4 x float>* %A
30 %tmp3 = call <4 x i32> @llvm.aarch64.neon.facge.v4i32.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
37 %tmp1 = load <2 x double>, <2 x double>* %A
39 %tmp3 = call <2 x i64> @llvm.aarch64.neon.facge.v2i64.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
50 %tmp1 = load <2 x float>, <2 x float>* %A
52 %tmp3 = call <2 x i32> @llvm.aarch64.neon.facgt.v2i32.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
59 %tmp1 = load <4 x float>, <4 x float>* %A
61 %tmp3 = call <4 x i32> @llvm.aarch64.neon.facgt.v4i32.v4f32(<4 x float> %tmp1, <4 x float> %tmp2
    [all...]
neon-fma.ll 5 %tmp1 = fmul <2 x float> %A, %B;
6 %tmp2 = fadd <2 x float> %C, %tmp1;
12 %tmp1 = fmul <4 x float> %A, %B;
13 %tmp2 = fadd <4 x float> %C, %tmp1;
19 %tmp1 = fmul <2 x double> %A, %B;
20 %tmp2 = fadd <2 x double> %C, %tmp1;
27 %tmp1 = fmul <2 x float> %A, %B;
28 %tmp2 = fsub <2 x float> %C, %tmp1;
34 %tmp1 = fmul <4 x float> %A, %B;
35 %tmp2 = fsub <4 x float> %C, %tmp1;
    [all...]
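
neon-fma.ll builds a separate fmul and fadd/fsub and checks how the AArch64 backend contracts them into fused multiply-add and multiply-subtract instructions. In C the corresponding distinction is between a * b + c, which the compiler may or may not contract depending on FP_CONTRACT, and an explicit fma call:

#include <math.h>

/* Explicit fused multiply-add: a*b + c with a single rounding, which is
 * what an fmla instruction computes; the separate multiply and add built
 * by the test round twice unless the backend is allowed to contract them. */
float fused_madd(float a, float b, float c)
{
    return fmaf(a, b, c);
}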
arm64-stur.ll 8 %tmp1 = trunc i64 %val to i32
10 store i32 %tmp1, i32* %ptr, align 4
17 %tmp1 = trunc i64 %val to i16
19 store i16 %tmp1, i16* %ptr, align 2
26 %tmp1 = trunc i64 %val to i8
28 store i8 %tmp1, i8* %ptr, align 1
35 %tmp1 = trunc i32 %val to i16
37 store i16 %tmp1, i16* %ptr, align 2
44 %tmp1 = trunc i32 %val to i8
46 store i8 %tmp1, i8* %ptr, align
    [all...]
nontemporal.ll 190 %tmp1 = bitcast i8* %tmp0 to <4 x float>*
191 store <4 x float> %v, <4 x float>* %tmp1, align 1, !nontemporal !0
202 %tmp1 = bitcast i8* %tmp0 to <4 x float>*
203 store <4 x float> %v, <4 x float>* %tmp1, align 1, !nontemporal !0
214 %tmp1 = bitcast i8* %tmp0 to <4 x float>*
215 store <4 x float> %v, <4 x float>* %tmp1, align 1, !nontemporal !0
225 %tmp1 = bitcast i8* %tmp0 to <4 x float>*
226 store <4 x float> %v, <4 x float>* %tmp1, align 1, !nontemporal !0
237 %tmp1 = bitcast i8* %tmp0 to <4 x float>*
238 store <4 x float> %v, <4 x float>* %tmp1, align 1, !nontemporal !
    [all...]
  /frameworks/av/media/libstagefright/codecs/on2/h264dec/source/
h264bsd_reconstruct.c 124 u32 x, y, tmp1, tmp2, tmp3, tmp4, c, val; local
168 tmp1 = ptrA[width];
172 c = ((val * tmp1 + xFrac * tmp3) << 3) + 32;
178 tmp1 = ptrA[width];
180 c = ((val * tmp3 + xFrac * tmp1) << 3) + 32;
220 u32 x, y, tmp1, tmp2, tmp3, c, val; local
266 tmp1 = *ptrA++;
270 c = ((val * tmp1 + yFrac * tmp2) << 3) + 32;
275 tmp1 = *ptrA++;
279 c = ((val * tmp1 + yFrac * tmp2) << 3) + 32
315 u32 x, y, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, valX, valY, plus32 = 32; local
503 i32 tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; local
622 i32 tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; local
756 i32 tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; local
880 i32 tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; local
1019 i32 tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; local
1222 i32 tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; local
1409 i32 tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; local
1613 i32 tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; local
    [all...]
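
The h264bsd_reconstruct.c hits are H.264 chroma fractional-sample interpolation: with val = 8 - xFrac, the quoted expression is the weighted average (8-dx)*A + dx*B, pre-scaled by 8 and biased by 32 so that a later right shift (outside the quoted lines) gives the standard rounding ((8-dx)*A + dx*B + 4) >> 3. The one-dimensional case in plain C (sketch; the two-dimensional case weights four neighbouring samples the same way):

#include <stdint.h>

/* One-dimensional H.264 chroma interpolation between neighbouring samples
 * a and b at fractional offset dx (in eighths of a sample):
 * ((8 - dx) * a + dx * b + 4) >> 3, i.e. a rounded linear blend. */
static inline uint8_t chroma_interp_1d(uint8_t a, uint8_t b, uint32_t dx)
{
    uint32_t val = 8 - dx;               /* weight of the left sample */
    return (uint8_t)((val * a + dx * b + 4) >> 3);
}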
  /external/aac/libFDK/include/arm/
cplx_mul.h 110 LONG tmp1,tmp2; local
118 : "=&r"(tmp1), "=&r"(tmp2)
122 *c_Re = tmp1;
135 LONG tmp1, tmp2; local
139 "smlawb %0, %2, %4, %1;\n" /* tmp1 = a_Re * b_Re - a_Im * b_Im */
142 : "=&r"(tmp1), "=&r"(tmp2)
146 *c_Re = tmp1;
159 LONG tmp1, tmp2; local
166 : "=&r"(tmp1), "=&r"(tmp2)
170 *c_Re += tmp1;
184 LONG tmp1, tmp2; local
    [all...]
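
cplx_mul.h wraps ARM smlawb/smlawt sequences for fixed-point complex multiplication; the inline comment names the real part, tmp1 = a_Re*b_Re - a_Im*b_Im, with tmp2 as the matching imaginary part. The arithmetic in plain C, with a Q31 downscale standing in for the fractional multiplies (libFDK's exact scaling and rounding differ):

#include <stdint.h>

/* (c_re + j*c_im) = (a_re + j*a_im) * (b_re + j*b_im) in fixed point.
 * The 64-bit products and the >> 31 stand in for the smlawb/smlawt
 * fractional multiply-accumulates used by the header. */
static void cplx_mul_q31(int32_t a_re, int32_t a_im,
                         int32_t b_re, int32_t b_im,
                         int32_t *c_re, int32_t *c_im)
{
    int64_t re = (int64_t)a_re * b_re - (int64_t)a_im * b_im;  /* tmp1 */
    int64_t im = (int64_t)a_re * b_im + (int64_t)a_im * b_re;  /* tmp2 */
    *c_re = (int32_t)(re >> 31);
    *c_im = (int32_t)(im >> 31);
}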
  /external/boringssl/src/crypto/ec/
oct.c 275 BIGNUM *tmp1, *tmp2, *x, *y; local
290 tmp1 = BN_CTX_get(ctx);
302 /* tmp1 := x^3 */
310 !group->meth->field_mul(group, tmp1, tmp2, x_, ctx)) {
315 !BN_mod_mul(tmp1, tmp2, x_, &group->field, ctx)) {
320 /* tmp1 := tmp1 + a*x */
324 !BN_mod_sub_quick(tmp1, tmp1, tmp2, &group->field)) {
340 if (!BN_mod_add_quick(tmp1, tmp1, tmp2, &group->field))
    [all...]
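
The oct.c lines come from decoding a compressed EC point: before taking a modular square root for y, the code accumulates the curve's right-hand side, tmp1 = x^3 + a*x + b, with BIGNUM modular arithmetic. A toy sketch of the same accumulation over a small prime, using 64-bit integers in place of BIGNUMs (illustrative only; assumes p < 2^32 so the intermediate products cannot overflow):

#include <stdint.h>

/* Right-hand side of y^2 = x^3 + a*x + b (mod p), accumulated the way the
 * excerpt does: tmp1 = x^3, then tmp1 += a*x, then tmp1 += b. The caller
 * would next look for a modular square root of the result. */
static uint64_t curve_rhs(uint64_t x, uint64_t a, uint64_t b, uint64_t p)
{
    uint64_t x2 = (x % p) * (x % p) % p;      /* tmp2 = x^2  */
    uint64_t t  = x2 * (x % p) % p;           /* tmp1 = x^3  */
    t = (t + (a % p) * (x % p) % p) % p;      /* tmp1 += a*x */
    t = (t + b % p) % p;                      /* tmp1 += b   */
    return t;
}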
  /bionic/libc/arch-arm64/denver64/bionic/
memset.S 51 #define tmp1 x3 define
83 ands tmp1, count, #0xC0
98 ands tmp1, count, #0x30
100 add dst, dst, tmp1
207 mrs tmp1, dczid_el0
208 tbz tmp1, #4, 1f
219 mrs tmp1, dczid_el0
220 tbnz tmp1, #4, .Lnot_short
236 sub tmp1, count, tmp2
237 cmp tmp1, #6
    [all...]
  /external/llvm/test/Analysis/BasicAA/
aligned-overread.ll 13 %tmp1 = or i8 %tmp, -128
14 store i8 %tmp1, i8* getelementptr inbounds ({ i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8 }* @a, i64 0, i32 4), align 4
22 ; CHECK: store i8 %tmp1
  /external/llvm/test/CodeGen/PowerPC/
inlineasm-copy.ll 12 %tmp1 = tail call i32 asm "foo $0, $1", "=r,r"( i32 %X ) ; <i32> [#uses=1]
13 ret i32 %tmp1
19 %tmp1 = tail call { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } asm sideeffect "foo $0, $1", "=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19"( i32 %X, i32 %Y, i32 %X, i32 %Y, i32 %X, i32 %Y, i32 %X, i32 %Y, i32 %X, i32 %Y, i32 %X, i32 %Y, i32 %X, i32 %Y, i32 %X, i32 %Y, i32 %X, i32 %Y, i32 %X, i32 %Y ) ; <i32> [#uses=1]
  /external/llvm/test/CodeGen/X86/
copy-propagation.ll 40 %tmp1 = sitofp i64 %arg to double
41 call void inttoptr (i64 339772768 to void (double, double)*)(double %tmp, double %tmp1)
42 %tmp3 = fadd double %tmp1, %tmp
lsr-wrap.ll 23 %tmp1 = load i32, i32* @g_19, align 4 ; <i32> [#uses=2]
24 %tmp2 = add i32 %tmp1, 1 ; <i32> [#uses=1]
26 %tmp3 = trunc i32 %tmp1 to i8 ; <i8> [#uses=1]
