/external/llvm/test/CodeGen/ARM/

  2010-04-14-SplitVector.ll
      10  %tmp64 = trunc i128 %tmp63 to i32
      14  %0 = phi i32 [ %tmp64, %bb9 ], [ undef, %bb ]

  2007-04-30-CombinerCrash.ll
      21  %tmp64 = or i64 %tmp63, 0 ; <i64> [#uses=1]
      23  %tmp66 = and i64 %tmp65, %tmp64 ; <i64> [#uses=1]

  2007-03-13-InstrSched.ll
      24  %d2.1 = phi i32 [ %tmp64, %bb26 ], [ 8192, %newFuncRoot ] ; <i32> [#uses=2]
      46  %tmp64 = add i32 %tmp62, %d2.1 ; <i32> [#uses=1]

  2012-01-23-PostRA-LICM.ll
      81  %tmp64 = lshr i128 %tmp63, 64
      82  %tmp65 = trunc i128 %tmp64 to i64

  2012-01-26-CopyPropKills.ll
      82  %tmp64 = bitcast <4 x float> %tmp54 to i128
      88  %tmp70 = lshr i128 %tmp64, 64

/external/llvm/test/Transforms/InstCombine/

  2006-12-01-BadFPVectorXform.ll
       6  %tmp64 = fadd <4 x float> %tmp26, %tmp53 ; <<4 x float>> [#uses=1]
       7  %tmp75 = fsub <4 x float> %tmp64, %tmp53 ; <<4 x float>> [#uses=1]

  2012-06-06-LoadOfPHIs.ll
     131  %tmp64 = fdiv double %tmp60, %tmp63
     132  %tmp65 = fadd double 2.000000e+00, %tmp64

/external/llvm/test/CodeGen/X86/

  2006-05-25-CycleInDAG.ll
      14  %tmp64.i = fadd double %tmp62.i.upgrd.2, %tmp44.i ; <double> [#uses=1]
      15  %tmp68.i = call double @foo( double %tmp64.i, i32 0 ) ; <double> [#uses=0]

  vec_splat-2.ll
      19  %tmp64 = insertelement <16 x i8> %tmp62, i8 %x, i32 15 ; <<16 x i8>> [#uses=1]
      22  %tmp73 = add <16 x i8> %tmp71, %tmp64 ; <<16 x i8>> [#uses=1]

  mmx-arith.ll
      46  %tmp64 = or <8 x i8> %tmp58, %tmp63a ; <<8 x i8>> [#uses=2]
      47  %tmp64a = bitcast <8 x i8> %tmp64 to x86_mmx
     137  %tmp64 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
     138  %tmp69 = tail call x86_mmx @llvm.x86.mmx.pmadd.wd( x86_mmx %tmp60, x86_mmx %tmp64 ) ; <x86_mmx> [#uses=1]
     198  %tmp64 = tail call x86_mmx @llvm.x86.mmx.por( x86_mmx %tmp58, x86_mmx %tmp63 ) ; <x86_mmx> [#uses=2]
     199  store x86_mmx %tmp64, x86_mmx* %A
     201  %tmp70 = tail call x86_mmx @llvm.x86.mmx.pxor( x86_mmx %tmp64, x86_mmx %tmp69 ) ; <x86_mmx> [#uses=2]
     262  %tmp64 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
     263  %tmp69 = tail call x86_mmx @llvm.x86.mmx.pmadd.wd( x86_mmx %tmp60, x86_mmx %tmp64 ) ; <x86_mmx> [#uses=1]

  2012-01-10-UndefExceptionEdge.ll
     129  %tmp64 = getelementptr i32* %tmp13, i32 %tmp63
     130  store i32 0, i32* %tmp64, align 4

  misched-balance.ll
      70  %tmp64 = load i32* %arrayidx12.us.i61.3, align 4
      71  %mul.us.i.3 = mul nsw i32 %tmp64, %tmp63
     176  %tmp64 = load i32* %arrayidx12.us.i61.3, align 4
     206  %mul.us.i.3 = mul nsw i32 %tmp64, %tmp63

  nancvt.ll
     120  %tmp64 = load i64* %tmp6263, align 8 ; <i64> [#uses=1]
     121  %tmp6465 = trunc i64 %tmp64 to i32 ; <i32> [#uses=1]

  2007-08-09-IllegalX86-64Asm.ll
     134  %tmp64 = load i8** %tmp63 ; <i8*> [#uses=1]
     135  %tmp6465 = bitcast i8* %tmp64 to i32 (i8*, i32*, i32*, %struct.PyObject**)* ; <i32 (i8*, i32*, i32*, %struct.PyObject**)*> [#uses=1]

  2009-03-23-MultiUseSched.ll
      77  %tmp64 = add i64 %tmp47, %tmp34 ; <i64> [#uses=1]
      78  %tmp65 = add i64 %tmp64, %tmp60 ; <i64> [#uses=1]

/external/llvm/test/Object/Inputs/

  shared.ll
      10  ; llc -mtriple=x86_64-linux-gnu shared.ll -filetype=obj -o tmp64.o -relocation-model=pic
      11  ; ld -melf_x86_64 -shared tmp64.o -o shared-object-test.elf-x86-64 $LDARGS

/external/qemu/target-arm/

  op_helper.c
     521  uint64_t tmp64;  local
     540  tmp64 = (uint32_t)LDL(addr);
     542  tmp64 |= (uint64_t)LDL(addr) << 32;
     544  env->vfp.regs[rd] = make_float64(tmp64);
     546  tmp64 = float64_val(env->vfp.regs[rd]);
     547  STL(addr, tmp64);
     549  STL(addr, tmp64 >> 32);
     555  tmp64 = 0ull;
     557  tmp64 |= (uint64_t)LDW(addr) << (i * 16);
     559  env->vfp.regs[rd] = make_float64(tmp64);
     [all...]

  translate.c
     292  TCGv_i64 tmp64 = tcg_temp_new_i64();  local
     294  tcg_gen_extu_i32_i64(tmp64, b);
     296  tcg_gen_shli_i64(tmp64, tmp64, 32);
     297  tcg_gen_add_i64(a, tmp64, a);
     299  tcg_temp_free_i64(tmp64);
     306  TCGv_i64 tmp64 = tcg_temp_new_i64();  local
     308  tcg_gen_extu_i32_i64(tmp64, b);
     310  tcg_gen_shli_i64(tmp64, tmp64, 32)
    4429  TCGv_i64 tmp64;  local
    6423  TCGv_i64 tmp64;  local
    7787  TCGv_i64 tmp64;  local
     [all...]

/external/llvm/test/Transforms/LoopStrengthReduce/

  2013-01-14-ReuseCast.ll
      75  %tmp64 = getelementptr inbounds i8* %tmp3, i64 %i.0.i
      76  %tmp65 = load i8* %tmp64, align 1

/external/chromium_org/third_party/openssl/openssl/crypto/sha/asm/

  sha1-sparcv9.pl
      31  $tmp64="%g3";
     230  ldx [$tmp0+64],$tmp64
     240  srlx $tmp64,$tmp2,$tmp64
     241  or $tmp64,@X[7],@X[7]

/external/openssl/crypto/sha/asm/

  sha1-sparcv9.pl
      31  $tmp64="%g3";
     230  ldx [$tmp0+64],$tmp64
     240  srlx $tmp64,$tmp2,$tmp64
     241  or $tmp64,@X[7],@X[7]

/hardware/invensense/60xx/mlsdk/mllite/

  mlsupervisor.c
     379  long long tmp64 = 0;  local
     421  tmp64 = 0;
     423  tmp64 += (long long) tmp[j] *
     427  (long) (tmp64 / inv_obj.compass_sens);

/external/llvm/test/Transforms/ObjCARC/

  move-and-merge-autorelease.ll
      80  %tmp64 = icmp eq i8 %tmp62, 0
      81  br i1 %tmp64, label %bb76, label %bb65

  move-and-form-retain-autorelease.ll
     162  %tmp64 = tail call i8* %tmp63(i8* %tmp61, %1* bitcast (%0* @"\01l_objc_msgSend_fixup_objectAtIndex_" to %1*), i64 0)
     164  %tmp66 = tail call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %tmp64, i8* %tmp65)

/external/zlib/src/

  Makefile.in
     108  @TMP64=tmp64_$$; \
     109  if echo hello world | ./minigzip64 | ./minigzip64 -d && ./example64 $$TMP64; then \
     114  rm -f $$TMP64