/external/webrtc/webrtc/common_audio/signal_processing/
  spl_sqrt_floor_mips.c
     51  int32_t root = 0, tmp1, tmp2, tmp3, tmp4;  (local)
     57  "lui %[tmp1], 0x4000 \n\t"
     58  "slt %[tmp2], %[value], %[tmp1] \n\t"
     59  "sub %[tmp3], %[value], %[tmp1] \n\t"
     60  "lui %[tmp1], 0x1 \n\t"
     61  "or %[tmp4], %[root], %[tmp1] \n\t"
     65  "addiu %[tmp1], $0, 0x4000 \n\t"
     66  "addu %[tmp1], %[tmp1], %[root] \n\t"
     67  "sll %[tmp1], 14 \n\t
     [all...]
/external/llvm/test/CodeGen/ARM/
  fptoint.ll
      7  %tmp1 = load float, float* %x
      8  %tmp2 = bitcast float %tmp1 to i32
     13  %tmp1 = load double, double* %x
     14  %tmp2 = bitcast double %tmp1 to i64
     19  %tmp1 = fptosi float %x to i32
     20  store i32 %tmp1, i32* @i
     25  %tmp1 = fptoui float %x to i32
     26  store i32 %tmp1, i32* @u
     31  %tmp1 = fptosi double %x to i32
     32  store i32 %tmp1, i32* @
     [all...]
  ldr_ext.ll
      6  %tmp1.s = zext i8 %tmp.u to i32
      7  ret i32 %tmp1.s
     13  %tmp1.s = zext i16 %tmp.u to i32
     14  ret i32 %tmp1.s
     20  %tmp1.s = sext i8 %tmp.s to i32
     21  ret i32 %tmp1.s
     27  %tmp1.s = sext i16 %tmp.s to i32
     28  ret i32 %tmp1.s
     35  %tmp1.s = sext i16 %tmp.s to i32
     36  ret i32 %tmp1.
     [all...]
  bic.ll
      5  %tmp1 = and i32 %a, %tmp
      6  ret i32 %tmp1
     13  %tmp1 = and i32 %tmp, %a
     14  ret i32 %tmp1
  vst4.ll
      7  %tmp1 = load <8 x i8>, <8 x i8>* %B
      8  call void @llvm.arm.neon.vst4.p0i8.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 8)
     17  %tmp1 = load <8 x i8>, <8 x i8>* %B
     18  call void @llvm.arm.neon.vst4.p0i8.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 16
     [all...]
  ldr_frame.ll
      6  %tmp1 = load i32, i32* %tmp
      7  ret i32 %tmp1
     13  %tmp1 = load i8, i8* %tmp
     14  %tmp2 = zext i8 %tmp1 to i32
     21  %tmp1 = load i32, i32* %tmp
     22  ret i32 %tmp1
     28  %tmp1 = load i8, i8* %tmp
     29  %tmp2 = zext i8 %tmp1 to i32
  thread_pointer.ll
      6  %tmp1 = call i8* @llvm.arm.thread.pointer( ) ; <i8*> [#uses=0]
      7  ret i8* %tmp1
/external/llvm/test/CodeGen/Generic/
  2008-02-20-MatchingMem.ll
      5  %tmp1 = getelementptr i32, i32* %X, i32 10 ; <i32*> [#uses=2]
      6  tail call void asm sideeffect " $0 $1 ", "=*im,*im,~{memory}"( i32* %tmp1, i32* %tmp1 ) nounwind
/external/llvm/test/CodeGen/PowerPC/
  bswap-load-store.ll
      8  %tmp1 = getelementptr i8, i8* %ptr, i32 %off ; <i8*> [#uses=1]
      9  %tmp1.upgrd.1 = bitcast i8* %tmp1 to i32* ; <i32*> [#uses=1]
     11  store i32 %tmp13, i32* %tmp1.upgrd.1
     16  %tmp1 = getelementptr i8, i8* %ptr, i32 %off ; <i8*> [#uses=1]
     17  %tmp1.upgrd.2 = bitcast i8* %tmp1 to i32* ; <i32*> [#uses=1]
     18  %tmp = load i32, i32* %tmp1.upgrd.2 ; <i32> [#uses=1]
     24  %tmp1 = getelementptr i8, i8* %ptr, i32 %off ; <i8*> [#uses=1]
     25  %tmp1.upgrd.3 = bitcast i8* %tmp1 to i16* ; <i16*> [#uses=1
     [all...]
/external/llvm/test/CodeGen/Thumb2/
  thumb2-teq.ll
      9  %tmp1 = icmp eq i32 0, %tmp
     10  ret i1 %tmp1
     18  %tmp1 = icmp eq i32 %tmp, 0
     19  ret i1 %tmp1
     27  %tmp1 = icmp eq i32 0, %tmp
     28  ret i1 %tmp1
     36  %tmp1 = icmp eq i32 %tmp, 0
     37  ret i1 %tmp1
     45  %tmp1 = icmp ne i32 0, %tmp
     46  ret i1 %tmp1
     [all...]
  thumb2-orn.ll
      5  %tmp1 = or i32 %a, %tmp
      6  ret i32 %tmp1
     13  %tmp1 = or i32 %tmp, %a
     14  ret i32 %tmp1
     21  %tmp1 = or i32 %a, %tmp
     22  ret i32 %tmp1
     29  %tmp1 = or i32 %tmp, %a
     30  ret i32 %tmp1
     37  %tmp1 = xor i32 4294967295, %tmp
     38  %tmp2 = or i32 %a, %tmp1
     [all...]
  thumb2-ldr_ext.ll
      5  %tmp1.s = zext i8 %tmp.u to i32
      6  ret i32 %tmp1.s
     11  %tmp1.s = zext i16 %tmp.u to i32
     12  ret i32 %tmp1.s
     17  %tmp1.s = sext i8 %tmp.s to i32
     18  ret i32 %tmp1.s
     23  %tmp1.s = sext i16 %tmp.s to i32
     24  ret i32 %tmp1.s
  thumb2-orn2.ll
      5  %tmp1 = xor i32 4294967295, 187
      6  %tmp2 = or i32 %a, %tmp1
     14  %tmp1 = xor i32 4294967295, 11141290
     15  %tmp2 = or i32 %a, %tmp1
     23  %tmp1 = xor i32 4294967295, 3422604288
     24  %tmp2 = or i32 %a, %tmp1
     32  %tmp1 = xor i32 4294967295, 1114112
     33  %tmp2 = or i32 %a, %tmp1
  thumb2-rsb.ll
      5  %tmp1 = sub i32 %tmp, %a
      6  ret i32 %tmp1
     13  %tmp1 = sub i32 %tmp, %a
     14  ret i32 %tmp1
     21  %tmp1 = sub i32 %tmp, %a
     22  ret i32 %tmp1
     31  %tmp1 = sub i32 %tmp, %a
     32  ret i32 %tmp1
/external/llvm/test/Transforms/GlobalOpt/
  2008-01-29-VolatileGlobal.ll
      6  %tmp1 = load volatile double, double* @t0.1441, align 8 ; <double> [#uses=2]
      7  %tmp4 = fmul double %tmp1, %tmp1 ; <double> [#uses=1]
/external/llvm/test/Transforms/InstCombine/
  apint-and-compare.ll
      5  %tmp1 = and i33 %a, 65280
      7  %tmp = icmp ne i33 %tmp1, %tmp3
     12  %tmp1 = and i999 %a, 65280
     14  %tmp = icmp ne i999 %tmp1, %tmp3
/external/llvm/test/Transforms/Reassociate/
  2006-04-27-ReassociateVector.ll
      5  ; CHECK-NEXT: %tmp1 = fsub <4 x float> zeroinitializer, zeroinitializer
      6  ; CHECK-NEXT: %tmp2 = fmul <4 x float> %tmp1, zeroinitializer
      9  %tmp1 = fsub <4 x float> zeroinitializer, zeroinitializer
     10  %tmp2 = fmul <4 x float> zeroinitializer, %tmp1
/frameworks/av/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/
  h264bsd_interpolate_hor_quarter.s
     54  tmp1 RN 7  (label)
    156  AND tmp1, count, #0x000F0000 ;// partHeight-1
    158  ADD count, count, tmp1, LSL #8
    170  SMLAD tmp1, x_2_0, mult_20_01, plus16
    175  SMLAD tmp1, x_3_1, mult_20_m5, tmp1
    181  SMLABB tmp1, x_6_4, mult_20_m5, tmp1
    186  SMLABB tmp1, x_7_5, mult_20_01, tmp1
    [all...]
/external/llvm/test/CodeGen/AArch64/
  arm64-return-vector.ll
      9  %tmp1 = load <2 x double>, <2 x double>* %p, align 16
     10  ret <2 x double> %tmp1
/external/llvm/test/CodeGen/Mips/
  2010-11-09-CountLeading.ll
      6  %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %X, i1 true)
      7  ret i32 %tmp1
     15  %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %X, i1 true)
     16  ret i32 %tmp1
     23  %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %neg, i1 true)
     24  ret i32 %tmp1
     31  %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %neg, i1 true)
     32  ret i32 %tmp1
/external/llvm/test/CodeGen/Thumb/
  ldr_frame.ll
      8  %tmp1 = load i32, i32* %tmp
      9  ret i32 %tmp1
     18  %tmp1 = load i8, i8* %tmp
     19  %tmp2 = zext i8 %tmp1 to i32
     28  %tmp1 = load i32, i32* %tmp
     29  ret i32 %tmp1
     38  %tmp1 = load i8, i8* %tmp
     39  %tmp2 = zext i8 %tmp1 to i32
/external/llvm/test/CodeGen/X86/
  movfs.ll
      6  %tmp1 = load i32, i32* %tmp ; <i32> [#uses=1]
      7  ret i32 %tmp1
  rem.ll
      6  %tmp1 = srem i32 %X, 255 ; <i32> [#uses=1]
      7  ret i32 %tmp1
     13  %tmp1 = srem i32 %X, 256 ; <i32> [#uses=1]
     14  ret i32 %tmp1
     20  %tmp1 = urem i32 %X, 255 ; <i32> [#uses=1]
     21  ret i32 %tmp1
     27  %tmp1 = urem i32 %X, 256 ; <i32> [#uses=1]
     28  ret i32 %tmp1
  x86-64-pic-4.ll
      8  %tmp1 = load i32, i32* @a, align 4
      9  ret i32 %tmp1
  x86_64-mul-by-const.ll
      7  %tmp1 = udiv i32 %A, 1577682821 ; <i32> [#uses=1]
      8  ret i32 %tmp1