/external/llvm/test/CodeGen/X86/
mmx-copy-gprs.ll
    14: %tmp1 = load <1 x i64>, <1 x i64>* %y, align 8 ; <<1 x i64>> [#uses=1]
    15: store <1 x i64> %tmp1, <1 x i64>* %x, align 8
2007-03-24-InlineAsmPModifier.ll
    6: %tmp1 = tail call i32* asm sideeffect "mov %gs:${1:P}, $0", "=r,i,~{dirflag},~{fpsr},~{flags}"( i32 72 ) ; <%struct._pthread*> [#uses=1]
2008-02-20-InlineAsmClobber.ll
    12: %tmp1 = tail call i32 asm sideeffect "a: $0 $1", "=r,0,~{dirflag},~{fpsr},~{flags},~{ax}"( i32 %tmp ) nounwind ; <i32> [#uses=1]
    13: store i32 %tmp1, i32* @pixels, align 4
    23: %tmp1 = getelementptr i16, i16* %block, i32 64 ; <i16*> [#uses=1]
    24: %tmp3 = tail call i8* asm sideeffect "b: $0 $1 $2", "=r,r,0,~{dirflag},~{fpsr},~{flags},~{ax}"( i16* %tmp1, i8* %pixels ) nounwind ; <i8*> [#uses=0]
or-branch.ll
    15: %tmp1 = icmp eq i32 %X, 0
    17: %tmp4 = or i1 %tmp3, %tmp1
    38: %tmp1 = icmp eq i32 %X, 0
    40: %tmp4 = or i1 %tmp3, %tmp1
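For context, the or-branch.ll hits are two i1 comparisons ORed into a single branch condition. A minimal C sketch of that shape follows; only the X == 0 compare is visible in the match above, so the second compare and the callee name are made up for illustration.

    extern void bar(void);

    /* Hypothetical C shape of the or-branch.ll pattern: two comparisons
     * folded into one conditional branch via logical OR. */
    void foo(int X, int Y) {
        if (X == 0 || Y == 5)   /* icmp eq i32 %X, 0 ; or i1 %tmp3, %tmp1 ; br */
            bar();
    }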
/external/llvm/test/ExecutionEngine/MCJIT/ |
2007-12-10-APIntLoadStore.ll
    12: %tmp1 = load i32, i32* %tmp, align 4 ; <i32> [#uses=1]
    13: store i32 %tmp1, i32* %retval, align 4
/external/llvm/test/ExecutionEngine/OrcMCJIT/ |
2007-12-10-APIntLoadStore.ll
    12: %tmp1 = load i32, i32* %tmp, align 4 ; <i32> [#uses=1]
    13: store i32 %tmp1, i32* %retval, align 4
/external/llvm/test/Transforms/GlobalOpt/ |
invoke.ll
    16: %tmp1 = invoke i32 @one()
    20: store i32 %tmp1, i32* @tmp
/external/llvm/test/Transforms/InstCombine/ |
2006-09-15-CastToBool.ll
    4: define i32 @test(i32* %tmp1) {
    5: %tmp.i = load i32, i32* %tmp1 ; <i32> [#uses=1]
2008-06-13-InfiniteLoopStore.ll
    8: %tmp1 = icmp ne i32 %p_60, 0 ; <i1> [#uses=1]
    9: %tmp12 = zext i1 %tmp1 to i8 ; <i8> [#uses=1]
2011-05-02-VectorBoolean.ll
    11: %tmp1 = load <2 x i16>, <2 x i16>* %.compoundliteral
    12: %cmp = icmp uge <2 x i16> %tmp, %tmp1
2011-05-13-InBoundsGEP.ll
    8: %tmp1 = add i32 %argc, -2
    12: %p1 = getelementptr i8, i8* %p, i32 %tmp1
zext-bool-add-sub.ll
    7: ; CHECK: [[TMP1:%.*]] = sext i1 %y to i32
    9: ; CHECK-NEXT: add nsw i32 [[TMP2]], [[TMP1]]
/external/llvm/test/Transforms/ObjCARC/ |
tail-call-invariant-enforcement.ll
    14: ; CHECK: %tmp1 = call i8* @objc_autorelease(i8* %x) [[NUW]]
    19: %tmp1 = tail call i8* @objc_autorelease(i8* %x)
    28: ; CHECK: %tmp1 = tail call i8* @objc_autoreleaseReturnValue(i8* %x) [[NUW]]
    33: %tmp1 = tail call i8* @objc_autoreleaseReturnValue(i8* %x)
    41: ; CHECK: %tmp1 = tail call i8* @objc_retain(i8* %x) [[NUW]]
    46: %tmp1 = tail call i8* @objc_retain(i8* %x)
    53: ; CHECK: %tmp1 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %z) [[NUW]]
    60: %tmp1 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %z)
/external/llvm/test/Transforms/Reassociate/ |
shift-factor.ll
    7: ; CHECK-NEXT: %tmp1 = shl i32 %tmp, 1
    8: ; CHECK-NEXT: ret i32 %tmp1
/external/webrtc/webrtc/modules/audio_coding/codecs/isac/fix/source/ |
filterbanks_neon.c
    45: int16x4_t tmp1, tmp2;
    52: tmp1 = vshrn_n_s32(a, 16);
    55: statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp1, factorv);
    62: tmp1 = vld1_lane_s16(data_ch1 + 1, tmp1, 1);
    63: tmp1 = vld1_lane_s16(data_ch2 + 1, tmp1, 3);
    64: datav = vrev32_s16(tmp1);
    69: tmp1 = vshrn_n_s32(a, 16);
    71: vst1_lane_s16(data_ch1 + n, tmp1, 1) [all...]
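The filterbanks_neon.c hits combine a widening shift, a saturating doubling multiply-subtract, and a narrowing shift. A minimal sketch of that intrinsic pattern is below; it is not the WebRTC routine itself, and the function and variable names are illustrative.

    #include <arm_neon.h>

    /* Sketch of one fixed-point all-pass update step:
     *   state = (data << 16) - saturate(2 * prev * factor),
     * then narrowed back to 16 bits by keeping the high half of each lane. */
    static inline int16x4_t allpass_step(int16x4_t datav, int16x4_t prev,
                                         int16x4_t factorv) {
        int32x4_t state = vqdmlsl_s16(vshll_n_s16(datav, 16), prev, factorv);
        return vshrn_n_s32(state, 16);   /* like "tmp1 = vshrn_n_s32(a, 16)" above */
    }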
/bionic/libc/arch-arm64/generic/bionic/ |
string_copy.S
    87: #define tmp1 x7
    108: ands tmp1, src, #15
    119: sub tmp1, data1, zeroones
    121: bic has_nul1, tmp1, tmp2
    134: add tmp1, pos, #0x8
    136: tbz tmp1, #6, 1f
    144: tbz tmp1, #5, 1f
    148: tbz tmp1, #4, 1f
    152: tbz tmp1, #3, 1f
    168: add tmp1, pos, #0x [all...]
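The sub/bic pair in the string_copy.S hits (data1 minus the 'zeroones' constant, then bic against a mask derived from data1) is the usual branch-free test for a NUL byte inside an 8-byte word. A C rendering of that test, as an illustrative sketch with assumed names rather than bionic's actual code:

    #include <stdint.h>

    /* Classic zero-byte test: the result is nonzero exactly when some byte
     * of x is 0x00.  Subtracting 0x01 from every byte sets a byte's high bit
     * when that byte underflows, and '& ~x' discards bytes whose high bit
     * was already set in x. */
    static inline uint64_t has_zero_byte(uint64_t x) {
        const uint64_t zeroones = 0x0101010101010101ULL;  /* the 'zeroones' register */
        const uint64_t highbits = 0x8080808080808080ULL;
        return (x - zeroones) & ~x & highbits;
    }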
/external/llvm/test/CodeGen/AArch64/ |
neon-or-combine.ll
    11: %tmp1 = and <8 x i8> %a, < i8 -1, i8 -1, i8 0, i8 0, i8 -1, i8 -1, i8 0, i8 0 >
    13: %tmp3 = or <8 x i8> %tmp1, %tmp2
    21: %tmp1 = and <16 x i8> %a, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
    23: %tmp3 = or <16 x i8> %tmp1, %tmp2
/external/llvm/test/CodeGen/AMDGPU/ |
private-memory-atomics.ll
    10: %tmp1 = getelementptr [2 x i32], [2 x i32]* %tmp, i32 0, i32 0
    12: store i32 0, i32* %tmp1
    23: %tmp1 = getelementptr [2 x i32], [2 x i32]* %tmp, i32 0, i32 0
    25: store i32 0, i32* %tmp1
v_mac.ll
    20: %tmp1 = fadd float %tmp0, %c
    21: store float %tmp1, float addrspace(1)* %out
    31: %tmp1 = fadd float %tmp0, 0.5
    32: store float %tmp1, float addrspace(1)* %out
    47: %tmp1 = fadd float %tmp0, %c
    48: store float %tmp1, float addrspace(1)* %out
    59: %tmp1 = fadd float %tmp0, %c
    60: store float %tmp1, float addrspace(1)* %out
    81: %tmp1 = fadd float %tmp0, %c
    87: store float %tmp1, float addrspace(1)* %ou [all...]
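The v_mac.ll hits are all fadd instructions fed by an fmul; the test name suggests it exercises folding such chains into AMDGPU's v_mac_f32 multiply-accumulate. A scalar C sketch of the same shape, with made-up names:

    /* A multiply feeding an add: the pattern a backend may select as a
     * single multiply-accumulate instruction, subject to the target's
     * precision rules. */
    float mul_add(float a, float b, float c) {
        float tmp0 = a * b;   /* %tmp0 = fmul float %a, %b   */
        return tmp0 + c;      /* %tmp1 = fadd float %tmp0, %c */
    }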
/external/llvm/test/CodeGen/ARM/ |
select.ll
    14: %tmp1.s = select i1 %tmp, i32 2, i32 3
    15: ret i32 %tmp1.s
    23: %tmp1.s = select i1 %tmp, i32 2, i32 3
    24: ret i32 %tmp1.s
    32: %tmp1.s = select i1 %tmp, i32 2, i32 3
    33: ret i32 %tmp1.s
    41: %tmp1.s = select i1 %tmp, i32 2, i32 3
    42: ret i32 %tmp1.s
    50: %tmp1.s = select i1 %tmp, i32 2, i32 3
    51: ret i32 %tmp1. [all...]
smul.ll
    11: %tmp1 = add i16 %tmp, 2 ; <i16> [#uses=1]
    12: %tmp2 = sext i16 %tmp1 to i32 ; <i32> [#uses=1]
    21: %tmp1 = ashr i32 %x, 16 ; <i32> [#uses=1]
    23: %tmp4 = mul i32 %tmp3, %tmp1 ; <i32> [#uses=1]
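smul.ll builds one multiply operand by sign-extending an i16 and the other by an arithmetic shift right of 16, the shape ARM's signed halfword multiplies handle directly. Roughly the following C, with hypothetical names:

    /* Multiplying a sign-extended 16-bit value by the top half of a 32-bit
     * value; the ARM backend can select a 16x16 halfword multiply
     * (smulbb/smulbt/smultt) for products of this shape. */
    int mul_halves(short lo, int x) {
        int hi = x >> 16;              /* ashr i32 %x, 16          */
        return (short)(lo + 2) * hi;   /* add i16, sext i16 to i32, mul */
    }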
vbsl-constant.ll
    8: %tmp1 = load <8 x i8>, <8 x i8>* %A
    11: %tmp4 = and <8 x i8> %tmp1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
    22: %tmp1 = load <4 x i16>, <4 x i16>* %A
    25: %tmp4 = and <4 x i16> %tmp1, <i16 3, i16 3, i16 3, i16 3>
    36: %tmp1 = load <2 x i32>, <2 x i32>* %A
    39: %tmp4 = and <2 x i32> %tmp1, <i32 3, i32 3>
    51: %tmp1 = load <1 x i64>, <1 x i64>* %A
    54: %tmp4 = and <1 x i64> %tmp1, <i64 3>
    65: %tmp1 = load <16 x i8>, <16 x i8>* %A
    68: %tmp4 = and <16 x i8> %tmp1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3 [all...]
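The and-with-constant lines in vbsl-constant.ll are one half of the (a & mask) | (b & ~mask) idiom that the ARM backend can turn into a vbsl. The same selection written directly with NEON intrinsics, as an illustrative sketch:

    #include <arm_neon.h>

    /* Bitwise select: take the low two bits of each lane from a and the
     * remaining bits from b.  vorr/vand/vbic spell out
     * (a & mask) | (b & ~mask); vbsl_u8 computes the same result. */
    static inline uint8x8_t select_low_bits(uint8x8_t a, uint8x8_t b) {
        uint8x8_t mask = vdup_n_u8(3);                       /* <i8 3, i8 3, ...> */
        return vorr_u8(vand_u8(a, mask), vbic_u8(b, mask));  /* == vbsl_u8(mask, a, b) */
    }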
vtbl.ll
    10: %tmp1 = load <8 x i8>, <8 x i8>* %A
    12: %tmp3 = call <8 x i8> @llvm.arm.neon.vtbl1(<8 x i8> %tmp1, <8 x i8> %tmp2)
    19: %tmp1 = load <8 x i8>, <8 x i8>* %A
    23: %tmp5 = call <8 x i8> @llvm.arm.neon.vtbl2(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4)
    30: %tmp1 = load <8 x i8>, <8 x i8>* %A
    35: %tmp6 = call <8 x i8> @llvm.arm.neon.vtbl3(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5)
    42: %tmp1 = load <8 x i8>, <8 x i8>* %A
    48: %tmp7 = call <8 x i8> @llvm.arm.neon.vtbl4(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5, <8 x i8> %tmp6)
    55: %tmp1 = load <8 x i8>, <8 x i8>* %A
    58: %tmp4 = call <8 x i8> @llvm.arm.neon.vtbx1(<8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3 [all...]
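vtbl.ll drives the @llvm.arm.neon.vtbl1..4 intrinsics: a byte-wise table lookup where each index lane picks a byte of the table and out-of-range indices yield zero. A one-liner using the corresponding C intrinsic, with illustrative names:

    #include <arm_neon.h>

    /* Each lane of idx selects a byte of tbl; lanes with idx >= 8 become 0. */
    static inline uint8x8_t lookup8(uint8x8_t tbl, uint8x8_t idx) {
        return vtbl1_u8(tbl, idx);
    }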
zextload_demandedbits.ll
    17: %tmp1 = getelementptr inbounds %struct.eggs, %struct.eggs* %arg, i32 0, i32 1
    18: %0 = load i16, i16* %tmp1, align 2
    23: %tmp4 = bitcast i16* %tmp1 to i8*
    24: %tmp5 = ptrtoint i16* %tmp1 to i32
/external/llvm/test/CodeGen/Thumb/ |
fpconv.ll
    5: %tmp1 = fptrunc double %x to float ; <float> [#uses=1]
    6: ret float %tmp1
    11: %tmp1 = fpext float %x to double ; <double> [#uses=1]
    12: ret double %tmp1