Searched full:tmp4 (Results 576 - 600 of 1043)

  /external/llvm/test/Bitcode/
metadata-2.ll 33 %tmp4 = sub i32 %x_arg, %tmp2 ; <i32> [#uses=2]
34 %tmp6 = lshr i32 %tmp4, 2 ; <i32> [#uses=1]
36 %tmp9 = and i32 %tmp4, 858993459 ; <i32> [#uses=1]
54 %tmp4 = shl i32 %x_arg, 1 ; <i32> [#uses=1]
55 %tmp5 = and i32 %tmp4, -1431655766 ; <i32> [#uses=1]
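Note: 858993459 and -1431655766 in the metadata-2.ll hits above are the 0x33333333 and 0xAAAAAAAA masks of the classic parallel bit-count trick. A minimal, self-contained sketch of one counting step (the function name and layout are ours, not the indexed file's):

    define i32 @popcount_pair_step(i32 %x) {
      ; (x & 0xAAAAAAAA) >> 1 isolates the odd bits and moves them down
      %odd  = and i32 %x, -1431655766
      %shr  = lshr i32 %odd, 1
      ; x & 0x55555555 keeps the even bits in place
      %even = and i32 %x, 1431655765
      ; each 2-bit field now holds the population count of its two source bits
      %pair = add i32 %shr, %even
      ret i32 %pair
    }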
  /external/llvm/test/CodeGen/ARM/
vtrn.ll 15 %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
16 %tmp5 = add <8 x i8> %tmp3, %tmp4
47 %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
48 %tmp5 = add <4 x i16> %tmp3, %tmp4
79 %tmp4 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 3>
80 %tmp5 = add <2 x i32> %tmp3, %tmp4
111 %tmp4 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <2 x i32> <i32 1, i32 3>
112 %tmp5 = fadd <2 x float> %tmp3, %tmp4
144 %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
145 %tmp5 = add <16 x i8> %tmp3, %tmp4
    [all...]
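The shuffle masks in the vtrn.ll hits (<1, 9, 3, 11, ...>) pick alternating odd lanes from the two inputs, i.e. the second output row of an ARM VTRN; mask indices 0-7 address the first operand and 8-15 the second. A standalone sketch of the same pattern, illustrative only and not one of the indexed functions:

    define <8 x i8> @vtrn_odd_lanes(<8 x i8> %a, <8 x i8> %b) {
      ; odd lanes of %a interleaved with odd lanes of %b
      %t = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
      ret <8 x i8> %t
    }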
2011-04-26-SchedTweak.ll 35 %tmp4 = icmp eq i32 %tmp2, 0
36 br i1 %tmp4, label %bb1, label %bb8
2011-11-28-DAGCombineBug.ll 24 %tmp4 = load i32, i32* getelementptr inbounds (%struct.InformationBlock, %struct.InformationBlock* @infoBlock, i32 0, i32 2, i32 0, i32 2), align 1
32 %insert9 = insertvalue [4 x i32] %insert7, i32 %tmp4, 2
2014-01-09-pseudo_expand_implicit_reg.ll 44 %tmp4 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 1
48 %tmp8 = call <8 x i8> @llvm.arm.neon.vtbx4(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5, <8 x i8> %tmp6, <8 x i8> %tmp7)
dyn-stackalloc.ll 48 %tmp4 = add i32 %tmp1, 2
49 %tmp5 = add i32 %tmp4, %tmp3
global-merge.ll 27 %tmp4 = tail call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*))
28 %tmp5 = icmp eq i32 %tmp3, %tmp4
ldaex-stlex.ll 24 %tmp4 = trunc i64 %val to i32
27 %stlexd = tail call i32 @llvm.arm.stlexd(i32 %tmp4, i32 %tmp7, i8* %ptr)
lsr-code-insertion.ll 26 %tmp4 = load i32, i32* %tmp3 ; <i32> [#uses=1]
29 %tmp10 = add i32 %tmp9, %tmp4 ; <i32> [#uses=2]
machine-cse-cmp.ll 17 %tmp4 = add i32 %tmp2, %tmp3
18 ret i32 %tmp4
  /external/swiftshader/third_party/LLVM/test/Bitcode/
metadata-2.ll 32 %tmp4 = sub i32 %x_arg, %tmp2 ; <i32> [#uses=2]
33 %tmp6 = lshr i32 %tmp4, 2 ; <i32> [#uses=1]
35 %tmp9 = and i32 %tmp4, 858993459 ; <i32> [#uses=1]
53 %tmp4 = shl i32 %x_arg, 1 ; <i32> [#uses=1]
54 %tmp5 = and i32 %tmp4, -1431655766 ; <i32> [#uses=1]
  /external/swiftshader/third_party/LLVM/test/Transforms/GVN/
crash.ll 77 %tmp4 = getelementptr inbounds [4 x %struct.attribute_spec*]* @attribute_tables, i32 0, i32 undef ; <%struct.attribute_spec**> [#uses=1]
78 %tmp3 = load %struct.attribute_spec** %tmp4, align 4 ; <%struct.attribute_spec*> [#uses=1]
85 %tmp = load %struct.attribute_spec** %tmp4, align 4 ; <%struct.attribute_spec*> [#uses=1]
117 %tmp4 = bitcast i8* %tmp40.i to i64*
118 %tmp41.i = load i64* %tmp4
  /external/clang/test/CodeGen/
aarch64-neon-perm.c     [all...]
  /external/libyuv/files/source/
scale_msa.cc 389 v4u32 tmp0, tmp1, tmp2, tmp3, tmp4; local
423 tmp4 = __msa_hadd_u_w(vec0, vec0);
430 tmp4 *= const_0x4000;
433 tmp4 = (v4u32)__msa_srai_w((v4i32)tmp4, 16);
435 vec1 = (v8u16)__msa_pckev_h((v8i16)tmp4, (v8i16)tmp4);
459 v4u32 tmp0, tmp1, tmp2, tmp3, tmp4; local
503 tmp4 = __msa_hadd_u_w(vec0, vec0);
510 tmp4 *= const_0x2AAA;
    [all...]
  /external/llvm/test/CodeGen/Generic/
vector.ll 119 %tmp4 = add %i4 %tmp3, < i32 1, i32 2, i32 3, i32 4 > ; <%i4> [#uses=1]
120 store %i4 %tmp4, %i4* %a
136 %tmp4 = insertelement %f4 %tmp2, float %X, i32 2 ; <%f4> [#uses=1]
137 %tmp6 = insertelement %f4 %tmp4, float %X, i32 3 ; <%f4> [#uses=1]
147 %tmp4 = insertelement %i4 %tmp2, i32 %X, i32 2 ; <%i4> [#uses=1]
148 %tmp6 = insertelement %i4 %tmp4, i32 %X, i32 3 ; <%i4> [#uses=1]
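The vector.ll hits build vectors by inserting the same scalar %X into successive lanes, i.e. a hand-written splat (%f4 and %i4 are presumably the test's own aliases for <4 x float> and <4 x i32>). For comparison, a sketch of the same splat written as one insertelement plus a zero-mask shufflevector (hypothetical function, not from the file):

    define <4 x float> @splat4(float %x) {
      %ins   = insertelement <4 x float> undef, float %x, i32 0
      ; an all-zero shuffle mask broadcasts lane 0 into every result lane
      %splat = shufflevector <4 x float> %ins, <4 x float> undef, <4 x i32> zeroinitializer
      ret <4 x float> %splat
    }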
  /external/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/
split-gep-and-gvn-addrspace-addressing-modes.ll 16 %tmp4 = load float, float addrspace(2)* %tmp2, align 4
17 %tmp5 = fadd float %tmp4, 0.000000e+00
49 %tmp4 = load float, float addrspace(2)* %tmp2, align 4
50 %tmp5 = fadd float %tmp4, 0.000000e+00
79 %tmp4 = load float, float addrspace(3)* %tmp2, align 4
80 %tmp5 = fadd float %tmp4, 0.000000e+00
  /external/llvm/test/CodeGen/PowerPC/
vec_shuffle.ll 18 %tmp4 = extractelement <16 x i8> %tmp.upgrd.1, i32 7 ; <i8> [#uses=1]
34 %tmp20 = insertelement <16 x i8> %tmp19, i8 %tmp4, i32 2 ; <<16 x i8>> [#uses=1]
60 %tmp4 = extractelement <16 x i8> %tmp.upgrd.5, i32 7 ; <i8> [#uses=1]
76 %tmp20 = insertelement <16 x i8> %tmp19, i8 %tmp4, i32 2 ; <<16 x i8>> [#uses=1]
115 %tmp4 = extractelement <16 x i8> %tmp, i32 9 ; <i8> [#uses=1]
131 %tmp20 = insertelement <16 x i8> %tmp19, i8 %tmp4, i32 2 ; <<16 x i8>> [#uses=1]
155 %tmp4 = extractelement <8 x i16> %tmp, i32 5 ; <i16> [#uses=1]
163 %tmp12 = insertelement <8 x i16> %tmp11, i16 %tmp4, i32 2 ; <<8 x i16>> [#uses=1]
179 %tmp4 = extractelement <4 x i32> %tmp, i32 3 ; <i32> [#uses=1]
183 %tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 2 ; <<4 x i32>> [#uses=1]
    [all...]
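The vec_shuffle.ll hits move single elements with extractelement/insertelement pairs, presumably so the PowerPC backend can fold the whole sequence into a vector permute. A minimal sketch of one such lane move (made-up function name, not taken from the file):

    define <16 x i8> @move_byte_7_to_2(<16 x i8> %v) {
      ; read lane 7, write it back into lane 2, leave the other lanes unchanged
      %elt = extractelement <16 x i8> %v, i32 7
      %res = insertelement <16 x i8> %v, i8 %elt, i32 2
      ret <16 x i8> %res
    }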
2007-11-16-landingpad-split.ll 25 %tmp4 = call i8* @llvm.stacksave() ; <i8*> [#uses=1]
38 call void @llvm.stackrestore(i8* %tmp4)
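2007-11-16-landingpad-split.ll brackets its dynamic stack usage with llvm.stacksave/llvm.stackrestore. A self-contained sketch of that bracket, stripped of the test's landing-pad machinery (the surrounding function is invented):

    declare i8* @llvm.stacksave()
    declare void @llvm.stackrestore(i8*)

    define void @scoped_scratch(i32 %n) {
      %sp  = call i8* @llvm.stacksave()
      ; variable-length scratch buffer, released when the stack pointer is restored
      %buf = alloca i8, i32 %n
      call void @llvm.stackrestore(i8* %sp)
      ret void
    }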
  /external/swiftshader/third_party/LLVM/test/CodeGen/PowerPC/
vec_shuffle.ll 18 %tmp4 = extractelement <16 x i8> %tmp.upgrd.1, i32 7 ; <i8> [#uses=1]
34 %tmp20 = insertelement <16 x i8> %tmp19, i8 %tmp4, i32 2 ; <<16 x i8>> [#uses=1]
60 %tmp4 = extractelement <16 x i8> %tmp.upgrd.5, i32 7 ; <i8> [#uses=1]
76 %tmp20 = insertelement <16 x i8> %tmp19, i8 %tmp4, i32 2 ; <<16 x i8>> [#uses=1]
115 %tmp4 = extractelement <16 x i8> %tmp, i32 9 ; <i8> [#uses=1]
131 %tmp20 = insertelement <16 x i8> %tmp19, i8 %tmp4, i32 2 ; <<16 x i8>> [#uses=1]
155 %tmp4 = extractelement <8 x i16> %tmp, i32 5 ; <i16> [#uses=1]
163 %tmp12 = insertelement <8 x i16> %tmp11, i16 %tmp4, i32 2 ; <<8 x i16>> [#uses=1]
179 %tmp4 = extractelement <4 x i32> %tmp, i32 3 ; <i32> [#uses=1]
183 %tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 2 ; <<4 x i32>> [#uses=1]
    [all...]
  /frameworks/av/media/libstagefright/codecs/amrwb/src/
oversamp_12k8_to_16k.cpp 284 int16 tmp1, tmp2, tmp3, tmp4; local
291 tmp4 = *(pt_x++);
295 L_sum = fxp_mac_16by16(tmp4, *(pt_fir++), L_sum);
299 tmp4 = *(pt_x++);
303 L_sum = fxp_mac_16by16(tmp4, *(pt_fir++), L_sum);
307 tmp4 = *(pt_x++);
311 L_sum = fxp_mac_16by16(tmp4, *(pt_fir++), L_sum);
315 tmp4 = *(pt_x++);
319 L_sum = fxp_mac_16by16(tmp4, *(pt_fir++), L_sum);
323 tmp4 = *(pt_x++);
    [all...]
  /external/llvm/test/Analysis/BasicAA/
pure-const-dce.ll 31 %tmp4 = call i32 @TestConst( i32 5 ) readnone ; <i32> [#uses=1]
39 %sum3 = add i32 %sum2, %tmp4 ; <i32> [#uses=1]
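pure-const-dce.ll calls @TestConst with the readnone attribute, which lets the call be treated as a pure function of its arguments. A tiny sketch of that effect (the declaration and caller here are ours; only the attribute usage mirrors the test):

    declare i32 @TestConst(i32) readnone

    define i32 @double_call(i32 %v) {
      ; identical readnone calls have no side effects, so one of them can be merged away
      %a = call i32 @TestConst(i32 %v) readnone
      %b = call i32 @TestConst(i32 %v) readnone
      %sum = add i32 %a, %b
      ret i32 %sum
    }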
  /external/llvm/test/Analysis/ScalarEvolution/
nsw.ll 23 %tmp4 = load double, double* %tmp3, align 8 ; <double> [#uses=1]
24 %tmp5 = fmul double %tmp4, 9.200000e+00 ; <double> [#uses=1]
137 %tmp3 = phi i32 [ 0, %bb ], [ %tmp4, %bb1 ]
138 %tmp4 = add nsw i32 %tmp3, 1
144 ret i32 %tmp4
154 %tmp = phi i32* [ %arg, %bb ], [ %tmp4, %bb2 ]
155 %tmp4 = getelementptr inbounds i32, i32* %tmp, i64 1
156 %tmp3 = icmp ult i32* %tmp4, %arg1
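The nsw.ll hits are canonical induction variables: an integer phi advanced by add nsw 1, and a pointer phi advanced by a one-element getelementptr. A minimal loop of the same shape, for illustration only (assumes %n >= 1):

    define i32 @count_up(i32 %n) {
    entry:
      br label %loop

    loop:
      %i      = phi i32 [ 0, %entry ], [ %i.next, %loop ]
      %i.next = add nsw i32 %i, 1
      %done   = icmp eq i32 %i.next, %n
      br i1 %done, label %exit, label %loop

    exit:
      ret i32 %i.next
    }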
  /external/llvm/test/CodeGen/AMDGPU/
split-smrd.ll 19 %tmp4 = bitcast float %tmp to i32
20 %tmp5 = add i32 %tmp4, 4
  /external/llvm/test/CodeGen/Hexagon/
adde.ll 21 %tmp4 = shl i128 %tmp23, 64
22 %tmp5 = or i128 %tmp4, %tmp1
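The adde.ll hits shift one 128-bit value up by 64 bits and or in another, i.e. they glue two 64-bit halves into a single i128, presumably as operands for the wide add the test is named after. The same pattern as a standalone sketch (function and operand names invented):

    define i128 @make_i128(i64 %hi, i64 %lo) {
      %hi.w  = zext i64 %hi to i128
      %lo.w  = zext i64 %lo to i128
      ; place the high word in bits 127:64 and combine it with the low word
      %hi.sh = shl i128 %hi.w, 64
      %full  = or i128 %hi.sh, %lo.w
      ret i128 %full
    }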
  /external/llvm/test/CodeGen/MIR/X86/
frame-info-save-restore-points.mir 15 %tmp4 = call i32 @doSomething(i32 0, i32* %tmp)
19 %tmp.0 = phi i32 [ %tmp4, %true ], [ %a, %entry ]
