    Searched full:tmp4 (Results 1 - 25 of 569)


  /external/llvm/test/CodeGen/X86/
2011-06-06-fgetsign80bit.ll 4 %tmp4 = bitcast x86_fp80 %x to i80
5 %tmp4.lobit = lshr i80 %tmp4, 79
6 %tmp = trunc i80 %tmp4.lobit to i32
2007-08-01-LiveVariablesBug.ll 7 %tmp4 = add i8 %y, -2
8 %tmp5 = mul i8 %tmp4, %tmp2
lea-2.ll 6 %tmp4 = add i32 %tmp3, %tmp1
7 ; The above computation of %tmp4 should match a single lea, without using
12 ret i32 %tmp4
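Note: the lea-2.ll hit above checks that this kind of address-style arithmetic is matched by a single x86 LEA (base + scaled index + displacement in one instruction). A minimal C analogue of the pattern, with illustrative names mirroring %tmp1/%tmp3/%tmp4; this is a sketch, not the test file's contents:

    /* x86 can encode base + index*4 + displacement in one LEA, so no      */
    /* separate shift/add instructions should appear in the generated code. */
    int lea_pattern(int a, int b) {
      int tmp1 = a << 2;        /* scaled index: a * 4          */
      int tmp3 = b - 5;         /* base plus a small displacement */
      int tmp4 = tmp3 + tmp1;   /* the %tmp4 the test greps for   */
      return tmp4;
    }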
bc-extract.ll 15 %tmp4 = bitcast <1 x double> <double 0x000000003F800000> to <2 x float>
16 %tmp5 = extractelement <2 x float> %tmp4, i32 1
23 %tmp4 = bitcast <1 x i64> <i64 256> to <2 x i32>
24 %tmp5 = extractelement <2 x i32> %tmp4, i32 1
rotate2.ll 6 %tmp4 = shl i64 %x, 9 ; <i64> [#uses=1]
7 %tmp5 = or i64 %tmp2, %tmp4 ; <i64> [#uses=1]
14 %tmp4 = shl i32 %x, 10 ; <i32> [#uses=1]
15 %tmp5 = or i32 %tmp2, %tmp4 ; <i32> [#uses=1]
insertelement-legalize.ll 6 %tmp4 = insertelement <2 x i64> %val, i64 %x, i32 0 ; <<2 x i64>> [#uses=1]
7 %add = add <2 x i64> %tmp4, %val ; <<2 x i64>> [#uses=1]
mmx-insert-element.ll 7 %tmp4 = bitcast <2 x i32> %tmp3 to x86_mmx
8 ret x86_mmx %tmp4
nobt.ll 11 %tmp4 = icmp eq i32 %tmp3, %tmp2 ; <i1> [#uses=1]
12 br i1 %tmp4, label %bb, label %UnifiedReturnBlock
27 %tmp4 = icmp eq i32 %tmp2, %tmp3 ; <i1> [#uses=1]
28 br i1 %tmp4, label %bb, label %UnifiedReturnBlock
43 %tmp4 = icmp ne i32 %tmp2, %tmp3 ; <i1> [#uses=1]
44 br i1 %tmp4, label %bb, label %UnifiedReturnBlock
59 %tmp4 = icmp ne i32 %tmp2, %tmp3 ; <i1> [#uses=1]
60 br i1 %tmp4, label %bb, label %UnifiedReturnBlock
  /frameworks/av/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/
h264bsd_interpolate_hor_quarter.s 60 tmp4 RN 10 label
96 ADD tmp4, x0, partW ;// (x0+partWidth)
97 ADD tmp4, tmp4, #5 ;// (y0+partW+5)
99 CMP tmp4, width
115 ADD tmp4, partW, #5 ;// tmp4 = partW + 5;
116 STMIB sp, {height, tmp4} ;// sp+4 = height, sp+8 = partWidth+5
118 STR tmp4, [sp,#0x10] ;// sp+10 = partWidth+5
128 STR tmp4, [sp,#0x218] ;// width = partWidth+
    [all...]
h264bsd_interpolate_hor_half.s 60 tmp4 RN 10 label
94 ADD tmp4, x0, partW ;// (x0+partWidth)
95 ADD tmp4, tmp4, #5 ;// (y0+partW+5)
97 CMP tmp4, width
113 ADD tmp4, partW, #5 ;// tmp4 = partW + 5;
114 STMIB sp, {height, tmp4} ;// sp+4 = height, sp+8 = partWidth+5
116 STR tmp4, [sp,#0x10] ;// sp+10 = partWidth+5
126 STR tmp4, [sp,#0x218] ;// width = partWidth+
    [all...]
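Note: both interpolation routines above perform the same bounds check before filtering: they form x0 + partWidth + 5 and compare it against the frame width, since the horizontal filter reads partWidth + 5 source samples per row. A hedged C restatement of just that visible check (function and parameter names are illustrative, not from the decoder):

    /* Nonzero when the (partW + 5)-sample window starting at x0 runs past */
    /* the frame, i.e. the slower edge-filling path must be taken.          */
    int window_needs_fill(int x0, int partW, int width) {
      /* mirrors: tmp4 = (x0 + partW) + 5; CMP tmp4, width */
      return (x0 + partW + 5) > width;
    }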
  /external/llvm/test/Transforms/InstCombine/
2009-01-31-InfIterate.ll 6 %tmp4 = trunc i128 %Y to i64
8 store i64 %tmp4, i64* %Q
9 %x = sub i64 %tmp2, %tmp4
10 %c = sub i64 %tmp2, %tmp4
  /external/llvm/test/CodeGen/Generic/
i128-arith.ll 9 %tmp4 = trunc i128 %tmp3 to i64
10 ret i64 %tmp4
  /external/llvm/test/Transforms/GlobalOpt/
2008-01-29-VolatileGlobal.ll 7 %tmp4 = fmul double %tmp1, %tmp1 ; <double> [#uses=1]
8 ret double %tmp4
  /external/llvm/test/CodeGen/AArch64/
arm64-vsra.ll 9 %tmp4 = add <8 x i8> %tmp1, %tmp3
10 ret <8 x i8> %tmp4
19 %tmp4 = add <4 x i16> %tmp1, %tmp3
20 ret <4 x i16> %tmp4
29 %tmp4 = add <2 x i32> %tmp1, %tmp3
30 ret <2 x i32> %tmp4
39 %tmp4 = add <16 x i8> %tmp1, %tmp3
40 ret <16 x i8> %tmp4
49 %tmp4 = add <8 x i16> %tmp1, %tmp3
50 ret <8 x i16> %tmp4
    [all...]
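Note: the arm64-vsra.ll hits show only the accumulate half of the idiom; the file name suggests the lines not shown shift the other operand right first, so that the shift-plus-add pair can be merged into a single USRA/SSRA. A scalar C sketch of that shift-right-and-accumulate idea (illustrative, not the test's IR):

    #include <stdint.h>
    /* Per-lane unsigned shift-right-accumulate: acc[i] += src[i] >> shift. */
    void usra8(uint8_t acc[8], const uint8_t src[8], unsigned shift) {
      for (int i = 0; i < 8; ++i)
        acc[i] = (uint8_t)(acc[i] + (src[i] >> shift));
    }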
  /external/llvm/test/CodeGen/ARM/
vicmp.ll 16 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
17 ret <8 x i8> %tmp4
27 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
28 ret <4 x i16> %tmp4
38 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
39 ret <2 x i32> %tmp4
49 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
50 ret <16 x i8> %tmp4
60 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
61 ret <8 x i16> %tmp4
    [all...]
vcgt.ll 10 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
11 ret <8 x i8> %tmp4
20 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
21 ret <4 x i16> %tmp4
30 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
31 ret <2 x i32> %tmp4
40 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
41 ret <8 x i8> %tmp4
50 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
51 ret <4 x i16> %tmp4
    [all...]
vceq.ll 9 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
10 ret <8 x i8> %tmp4
19 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
20 ret <4 x i16> %tmp4
29 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
30 ret <2 x i32> %tmp4
39 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
40 ret <2 x i32> %tmp4
49 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
50 ret <16 x i8> %tmp4
    [all...]
vcge.ll 9 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
10 ret <8 x i8> %tmp4
19 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
20 ret <4 x i16> %tmp4
29 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
30 ret <2 x i32> %tmp4
39 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
40 ret <8 x i8> %tmp4
49 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
50 ret <4 x i16> %tmp4
    [all...]
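Note: the vicmp/vcgt/vceq/vcge hits all share one shape: a lane-wise compare yields an <N x i1> vector, and the sext widens each i1 to an all-ones or all-zeros lane, which is exactly the mask NEON compare instructions produce. A scalar C analogue of that compare-then-sign-extend pattern (names are illustrative):

    #include <stdint.h>
    /* out[i] is 0xFF when a[i] > b[i], else 0x00 -- the sext of the i1 result. */
    void vcgt_s8(const int8_t a[8], const int8_t b[8], int8_t out[8]) {
      for (int i = 0; i < 8; ++i)
        out[i] = (a[i] > b[i]) ? (int8_t)-1 : 0;
    }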
  /external/chromium_org/third_party/webrtc/common_audio/signal_processing/
spl_sqrt_floor_mips.c 51 int32_t root = 0, tmp1, tmp2, tmp3, tmp4; local
61 "or %[tmp4], %[root], %[tmp1] \n\t"
63 "movz %[root], %[tmp4], %[tmp2] \n\t"
70 "ori %[tmp4], %[root], 0x8000 \n\t"
72 "movz %[root], %[tmp4], %[tmp2] \n\t"
79 "ori %[tmp4], %[root], 0x4000 \n\t"
81 "movz %[root], %[tmp4], %[tmp2] \n\t"
88 "ori %[tmp4], %[root], 0x2000 \n\t"
90 "movz %[root], %[tmp4], %[tmp2] \n\t"
97 "ori %[tmp4], %[root], 0x1000 \n\t
    [all...]
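Note: the MIPS assembly above (ori of 0x8000, 0x4000, 0x2000, ... into the root, each followed by a conditional movz) is the classic bit-by-bit integer square root: a candidate result bit is OR-ed in and kept only if a comparison against the input still allows it. A plain-C sketch of the same idea, written with an explicit trial-square compare rather than whatever remainder bookkeeping the assembly uses; the name sqrt_floor is hypothetical and this is not WebRTC's reference implementation:

    #include <stdint.h>
    /* floor(sqrt(x)) for x < 2^31: try each result bit from high to low. */
    uint32_t sqrt_floor(uint32_t x) {
      uint32_t root = 0;
      for (int bit = 15; bit >= 0; --bit) {
        uint32_t trial = root | (1u << bit);   /* "ori tmp4, root, 0x8000" ... */
        if ((uint64_t)trial * trial <= x)      /* keep the bit if trial^2 <= x */
          root = trial;                        /* the conditional "movz"       */
      }
      return root;
    }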
  /external/llvm/test/Transforms/IndVarSimplify/
2006-03-31-NegativeStride.ll 12 %a.0.0 = phi i32 [ 10, %entry ], [ %tmp4, %cond_true ] ; <i32> [#uses=2]
15 %tmp4 = add i32 %a.0.0, -1 ; <i32> [#uses=2]
16 %tmp = icmp sgt i32 %tmp4, 7 ; <i1> [#uses=1]
loop_evaluate_3.ll 10 %x.03.0 = phi i32 [ 0, %entry ], [ %tmp4, %bb5 ] ; <i32> [#uses=1]
12 %tmp4 = add i32 %x.03.0, 1 ; <i32> [#uses=2]
13 icmp slt i32 %tmp4, 200000 ; <i1>:0 [#uses=1]
loop_evaluate_4.ll 9 %v.01.0 = phi i32 [ 0, %entry ], [ %tmp4, %bb7 ] ; <i32> [#uses=1]
12 %tmp4 = add i32 %tmp2, %v.01.0 ; <i32> [#uses=2]
18 ret i32 %tmp4
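Note: the IndVarSimplify hits above are counting loops whose induction variable (%tmp4, the value fed back into the phi) lets scalar evolution compute the exit value without executing the loop. A C source sketch of the loop_evaluate_3.ll shape, grounded only in the bound of 200000 visible in the hit; illustrative, not the test's actual origin:

    /* IndVarSimplify/SCEV can fold this whole loop to "return 200000". */
    int count_up(void) {
      int x = 0;
      do {
        x = x + 1;            /* %tmp4 = add i32 %x.03.0, 1    */
      } while (x < 200000);   /* icmp slt i32 %tmp4, 200000    */
      return x;
    }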
  /external/chromium_org/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/
pitch_estimator_mips.c 35 int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8; local
45 "lh %[tmp4], 6(%[tmp_in]) \n\t"
56 "mul %[tmp8], %[tmp4], %[tmp8] \n\t"
57 "mul %[tmp4], %[tmp4], %[tmp4] \n\t"
66 "srav %[tmp4], %[tmp4], %[scaling] \n\t"
73 "addu %[ysum32], %[ysum32], %[tmp4] \n\t"
80 [tmp4] "=&r" (tmp4), [tmp5] "=&r" (tmp5), [tmp6] "=&r" (tmp6)
107 int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8; local
    [all...]
  /external/llvm/test/CodeGen/PowerPC/
fneg.ll 6 %tmp4 = fmul double %tmp2, %d ; <double> [#uses=1]
8 %tmp9 = fsub double %tmp7, %tmp4 ; <double> [#uses=1]
  /external/llvm/test/CodeGen/Thumb2/
thumb2-ldr_post.ll 7 %tmp4 = sub i32 %tmp1, 8 ; <i32> [#uses=1]
8 %tmp5 = mul i32 %tmp4, %tmp3 ; <i32> [#uses=1]

