/external/llvm/test/CodeGen/AArch64/ |
neon-scalar-copy.ll | 7 %tmp1 = extractelement <2 x float> %v, i32 1 8 ret float %tmp1 16 %tmp1 = extractelement <2 x float> %v, i32 0 17 ret float %tmp1 24 %tmp1 = extractelement <4 x float> %v, i32 1 25 ret float %tmp1 33 %tmp1 = extractelement <4 x float> %v, i32 0 34 ret float %tmp1 42 %tmp1 = extractelement <1 x double> %v, i32 0 43 ret double %tmp1 [all...] |
arm64-rev.ll | 26 %tmp1 = lshr i32 %X, 8 29 %tmp2 = and i32 %tmp1, 16711680 31 %tmp9 = and i32 %tmp1, 255 67 %tmp1 = load <8 x i8>, <8 x i8>* %A 68 %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0> 75 %tmp1 = load <4 x i16>, <4 x i16>* %A 76 %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0> 83 %tmp1 = load <2 x i32>, <2 x i32>* %A 84 %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> <i32 1, i32 0> 91 %tmp1 = load <2 x float>, <2 x float>* % [all...] |
arm64-vabs.ll | 7 %tmp1 = load <8 x i8>, <8 x i8>* %A 9 %tmp3 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2) 17 %tmp1 = load <4 x i16>, <4 x i16>* %A 19 %tmp3 = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2) 27 %tmp1 = load <2 x i32>, <2 x i32>* %A 29 %tmp3 = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2) 39 %tmp1 = shufflevector <16 x i8> %load1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> 41 %tmp3 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2) 51 %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> 53 %tmp3 = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2 [all...] |
arm64-convert-v4f64.ll | 11 %tmp1 = load <4 x double>, <4 x double>* %ptr 12 %tmp2 = fptosi <4 x double> %tmp1 to <4 x i16> 26 ; CHECK-DAG: xtn v[[TMP1:[0-9]+]].4h, v[[NA0]].4s 27 ; CHECK-DAG: xtn2 v[[TMP1]].8h, v[[NA2]].4s 28 ; CHECK: xtn v0.8b, v[[TMP1]].8h 29 %tmp1 = load <8 x double>, <8 x double>* %ptr 30 %tmp2 = fptosi <8 x double> %tmp1 to <8 x i8> 41 %tmp1 = load <4 x i64>, <4 x i64>* %ptr 42 %tmp2 = uitofp <4 x i64> %tmp1 to <4 x half> 51 %tmp1 = load <4 x i64>, <4 x i64>* %pt [all...] |
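The scalar hits in arm64-rev.ll above are fragments of shift-and-mask byte reversals (the 16711680 constant is 0xFF0000), while the vector hits are lane-reversing shufflevectors; the AArch64 backend is expected to match both as REV-family instructions. A minimal C sketch of the scalar idioms, with my own function names:

    #include <stdint.h>

    /* Sketch of the byte-reversal idioms these tests exercise (names are
     * mine, not the test's).  Backends recognise the patterns and emit
     * REV16 / REV instead of the individual shifts and masks. */
    uint32_t rev16_sketch(uint32_t x) {
        /* swap the bytes inside each 16-bit half: A B C D -> B A D C */
        return ((x >> 8) & 0x00FF00FFu) | ((x << 8) & 0xFF00FF00u);
    }

    uint32_t rev32_sketch(uint32_t x) {
        /* full byte reverse: A B C D -> D C B A */
        return (x >> 24) | ((x >> 8) & 0x0000FF00u) |
               ((x << 8) & 0x00FF0000u) | (x << 24);
    }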
/external/llvm/test/CodeGen/ARM/ |
emutls.ll | 42 %tmp1 = load i32, i32* @i1 43 ret i32 %tmp1 67 %tmp1 = load i32, i32* @i2 68 ret i32 %tmp1 92 %tmp1 = load i32, i32* @i3 93 ret i32 %tmp1 117 %tmp1 = load i32, i32* @i4 118 ret i32 %tmp1 141 %tmp1 = load i32, i32* @i5 142 ret i32 %tmp1 [all...] |
long.ll | 42 %tmp1 = add i64 %y, 1 ; <i64> [#uses=1] 43 ret i64 %tmp1 69 %tmp1 = sext i32 %b to i64 ; <i64> [#uses=1] 70 %tmp2 = mul i64 %tmp1, %tmp ; <i64> [#uses=1] 79 %tmp1 = zext i32 %b to i64 ; <i64> [#uses=1] 80 %tmp2 = mul i64 %tmp1, %tmp ; <i64> [#uses=1] |
mulhi.ll | 15 %tmp1 = sext i32 %y to i64 ; <i64> [#uses=1] 16 %tmp2 = mul i64 %tmp1, %tmp ; <i64> [#uses=1] 32 %tmp1 = zext i32 %y to i64 ; <i64> [#uses=1] 33 %tmp2 = mul i64 %tmp1, %tmp ; <i64> [#uses=1] 51 %tmp1 = mul nsw i32 %a, 3 52 %tmp2 = sdiv i32 %tmp1, 23 |
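The mulhi.ll patterns widen both 32-bit operands to 64 bits, multiply, and keep the upper half (the shift-and-truncate steps are elided by the excerpt); the final pair of hits is a divide-by-constant that is typically lowered to the same multiply-high idiom. In C, with my own names:

    #include <stdint.h>

    /* "Multiply high" as the mulhi.ll tests express it: widen, multiply,
     * keep the top 32 bits.  Targets select multiply-long / multiply-high
     * instructions (SMULL/UMULL-style) for these patterns. */
    int32_t smulhi(int32_t x, int32_t y) {
        return (int32_t)(((int64_t)x * (int64_t)y) >> 32);
    }

    uint32_t umulhi(uint32_t x, uint32_t y) {
        return (uint32_t)(((uint64_t)x * (uint64_t)y) >> 32);
    }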
vpadal.ll | 6 %tmp1 = load <4 x i16>, <4 x i16>* %A 8 %tmp3 = call <4 x i16> @llvm.arm.neon.vpadals.v4i16.v8i8(<4 x i16> %tmp1, <8 x i8> %tmp2) 15 %tmp1 = load <2 x i32>, <2 x i32>* %A 17 %tmp3 = call <2 x i32> @llvm.arm.neon.vpadals.v2i32.v4i16(<2 x i32> %tmp1, <4 x i16> %tmp2) 24 %tmp1 = load <1 x i64>, <1 x i64>* %A 26 %tmp3 = call <1 x i64> @llvm.arm.neon.vpadals.v1i64.v2i32(<1 x i64> %tmp1, <2 x i32> %tmp2) 33 %tmp1 = load <4 x i16>, <4 x i16>* %A 35 %tmp3 = call <4 x i16> @llvm.arm.neon.vpadalu.v4i16.v8i8(<4 x i16> %tmp1, <8 x i8> %tmp2) 42 %tmp1 = load <2 x i32>, <2 x i32>* %A 44 %tmp3 = call <2 x i32> @llvm.arm.neon.vpadalu.v2i32.v4i16(<2 x i32> %tmp1, <4 x i16> %tmp2 [all...] |
vzip.ll | 12 %tmp1 = load <8 x i8>, <8 x i8>* %A 14 %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11> 15 %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15> 29 %tmp1 = load <8 x i8>, <8 x i8>* %A 31 %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15> 44 %tmp1 = load <4 x i16>, <4 x i16>* %A 46 %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5> 47 %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7> 61 %tmp1 = load <4 x i16>, <4 x i16>* %A 63 %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7 [all...] |
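The shuffle masks in vzip.ll interleave two 8-lane inputs (indices 0..7 pick from the first operand, 8..15 from the second). A scalar C restatement of the two d-register shuffles, with my own naming:

    #include <stdint.h>

    /* vzip on two 8-byte vectors: lo gets A0 B0 A1 B1 A2 B2 A3 B3 and
     * hi gets A4 B4 ... A7 B7 -- exactly the <0,8,1,9,...> and
     * <4,12,5,13,...> masks in the test. */
    void zip8(const uint8_t a[8], const uint8_t b[8],
              uint8_t lo[8], uint8_t hi[8]) {
        for (int i = 0; i < 4; i++) {
            lo[2 * i]     = a[i];
            lo[2 * i + 1] = b[i];
            hi[2 * i]     = a[i + 4];
            hi[2 * i + 1] = b[i + 4];
        }
    }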
/prebuilts/go/darwin-x86/src/image/jpeg/ |
fdct.go | 99 tmp1 := x1 + x6 105 tmp11 := tmp1 + tmp2 106 tmp13 := tmp1 - tmp2 109 tmp1 = x1 - x6 121 tmp11 = tmp1 + tmp2 123 tmp13 = tmp1 + tmp3 127 tmp1 = tmp1 * fix_3_072711026 138 b[y*8+3] = (tmp1 + tmp11 + tmp13) >> (constBits - pass1Bits) 146 tmp1 := b[1*8+x] + b[6*8+x [all...] |
/prebuilts/go/linux-x86/src/image/jpeg/ |
fdct.go | 99 tmp1 := x1 + x6 105 tmp11 := tmp1 + tmp2 106 tmp13 := tmp1 - tmp2 109 tmp1 = x1 - x6 121 tmp11 = tmp1 + tmp2 123 tmp13 = tmp1 + tmp3 127 tmp1 = tmp1 * fix_3_072711026 138 b[y*8+3] = (tmp1 + tmp11 + tmp13) >> (constBits - pass1Bits) 146 tmp1 := b[1*8+x] + b[6*8+x [all...] |
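Both fdct.go entries are identical copies of image/jpeg's forward DCT, where tmp1 is one term of the classic 8-point butterfly: each sample is paired with its mirror, sums feeding the even half and differences the odd half, before the fixed-point rotations (fix_3_072711026 and friends). A C sketch of just that butterfly stage:

    #include <stdint.h>

    /* First stage of an 8-point forward DCT row pass: sums feed the even
     * outputs, differences feed the odd outputs.  The scaling/rotation
     * steps that follow in fdct.go are omitted here. */
    void fdct_butterfly(const int32_t x[8], int32_t sum[4], int32_t diff[4]) {
        for (int i = 0; i < 4; i++) {
            sum[i]  = x[i] + x[7 - i];   /* tmp0..tmp3 in fdct.go */
            diff[i] = x[i] - x[7 - i];   /* the reused tmp0..tmp3 */
        }
    }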
/external/llvm/test/CodeGen/AMDGPU/ |
rotr.ll | 12 %tmp1 = shl i32 %x, %tmp0 14 %tmp3 = or i32 %tmp1, %tmp2 28 %tmp1 = shl <2 x i32> %x, %tmp0 30 %tmp3 = or <2 x i32> %tmp1, %tmp2 48 %tmp1 = shl <4 x i32> %x, %tmp0 50 %tmp3 = or <4 x i32> %tmp1, %tmp2 |
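The rotr.ll hits build a rotate-right out of a left shift, a right shift, and an OR, in scalar and vector form alike. The same thing in C, with the count masked so the shift amount stays defined:

    #include <stdint.h>

    /* Rotate right by n; masking keeps the left-shift amount in 0..31,
     * where the IR version instead computes it as a subtraction from 32. */
    uint32_t rotr32(uint32_t x, uint32_t n) {
        n &= 31u;
        return (x >> n) | (x << ((32u - n) & 31u));
    }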
/external/llvm/test/CodeGen/SystemZ/ |
fp-sincos-01.ll | 15 %tmp1 = call float @sinf(float %x) 17 %add = fadd float %tmp1, %tmp2 30 %tmp1 = call double @sin(double %x) 32 %add = fadd double %tmp1, %tmp2 44 %tmp1 = call fp128 @sinl(fp128 %x) 46 %add = fadd fp128 %tmp1, %tmp2 |
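fp-sincos-01.ll pairs a sin call with a cos call on the same argument and adds the results, checking that the SystemZ backend can merge the two libcalls into a single sincos-style call. Roughly the C source shape, shown here for the float and double flavors:

    #include <math.h>

    /* sin(x) + cos(x) on one argument: when the calls cannot set errno
     * (e.g. built with -fno-math-errno), LLVM may combine them into one
     * sincos call, which is what the test verifies. */
    float  sum_sincos_f(float x)  { return sinf(x) + cosf(x); }
    double sum_sincos_d(double x) { return sin(x) + cos(x); }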
/external/llvm/test/CodeGen/Thumb/ |
long.ll | 31 %tmp1 = add i64 %y, 1 ; <i64> [#uses=1] 32 ret i64 %tmp1 52 %tmp1 = sext i32 %b to i64 ; <i64> [#uses=1] 53 %tmp2 = mul i64 %tmp1, %tmp ; <i64> [#uses=1] 60 %tmp1 = zext i32 %b to i64 ; <i64> [#uses=1] 61 %tmp2 = mul i64 %tmp1, %tmp ; <i64> [#uses=1] |
/external/llvm/test/Transforms/InstCombine/ |
2011-05-28-swapmulsub.ll | 17 %tmp1 = load i32, i32* %a, align 4 18 %conv = trunc i32 %tmp1 to i16 30 %tmp1 = load i32, i32* %on_off.addr, align 4 31 %sub = sub i32 %tmp, %tmp1 54 %tmp1 = load i32, i32* %a, align 4 55 %conv = trunc i32 %tmp1 to i16 |
/external/valgrind/none/tests/s390x/ |
div.h | 5 unsigned long tmp1 = d1_1; \ 12 : "+d" (tmp1), "+d" (tmp2) \ 15 printf(#insn " %16.16lX%16.16lX / %16.16lX = %16.16lX (rem %16.16lX)\n", d1_1, d1_2, d2, tmp2, tmp1); \ 20 unsigned long tmp1 = d1_1; \ 27 : "+d" (tmp1), "+d" (tmp2) \ 30 printf(#insn " %16.16lX%16.16lX / %16.16lX = %16.16lX (rem %16.16lX)\n", d1_1, d1_2, d2, tmp2, tmp1); \ |
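The div.h macro wraps an s390x divide instruction in inline asm: tmp1/tmp2 apparently start as the two 64-bit halves of the dividend and come back as remainder and quotient, which the printf reports. A plain-C sketch of the same computation, assuming the 64-bit-register variants and the compiler's 128-bit integer extension:

    #include <stdio.h>

    /* 128-by-64 unsigned division: quotient and remainder, printed in the
     * same format as the test macro.  unsigned __int128 is a GCC/Clang
     * extension; the real test exercises the hardware instruction instead. */
    static void div128_sketch(unsigned long hi, unsigned long lo,
                              unsigned long d) {
        unsigned __int128 n = ((unsigned __int128)hi << 64) | lo;
        unsigned long q = (unsigned long)(n / d);
        unsigned long r = (unsigned long)(n % d);
        printf("%16.16lX%16.16lX / %16.16lX = %16.16lX (rem %16.16lX)\n",
               hi, lo, d, q, r);
    }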
/frameworks/av/media/libstagefright/codecs/on2/h264dec/source/ |
h264bsd_image.c | 89 u32 tmp1, tmp2; local 113 tmp1 = *ptr++; 115 *lum++ = tmp1; 117 tmp1 = *ptr++; 119 *lum++ = tmp1; 127 tmp1 = *ptr++; 129 *cb++ = tmp1; 136 tmp1 = *ptr++; 138 *cr++ = tmp1; 186 i32 tmp1, tmp2, tmp3, tmp4 local [all...] |
/external/webrtc/webrtc/common_audio/signal_processing/ |
complex_bit_reverse_mips.c | 67 int32_t tmp1, tmp2, tmp3, tmp4; local 88 "ulw %[tmp1], 0(%[ptr_i]) \n\t" 92 "usw %[tmp1], 0(%[ptr_j]) \n\t" 96 "lh %[tmp1], 8(%[pcoeftable_8]) \n\t" 100 "addu %[ptr_i], %[frfi], %[tmp1] \n\t" 104 "ulw %[tmp1], 0(%[ptr_i]) \n\t" 108 "usw %[tmp1], 0(%[ptr_j]) \n\t" 116 : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [ptr_i] "=&r" (ptr_i), 140 "ulw %[tmp1], 0(%[ptr_i]) \n\t [all...] |
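complex_bit_reverse_mips.c is the MIPS assembly version of WebRTC's FFT input reordering: each interleaved complex element is swapped with the one at its bit-reversed index, driven here by a precomputed coefficient table. A table-free C sketch of the same permutation:

    #include <stdint.h>

    /* Reverse the low `stages` bits of i. */
    static unsigned bit_reverse(unsigned i, int stages) {
        unsigned r = 0;
        for (int s = 0; s < stages; s++) {
            r = (r << 1) | (i & 1u);
            i >>= 1;
        }
        return r;
    }

    /* frfi holds interleaved re/im int16 pairs, 2^stages complex values.
     * Swap each element with its bit-reversed partner exactly once. */
    void complex_bit_reverse_sketch(int16_t *frfi, int stages) {
        unsigned n = 1u << stages;
        for (unsigned i = 0; i < n; i++) {
            unsigned j = bit_reverse(i, stages);
            if (j > i) {
                int16_t re = frfi[2 * i], im = frfi[2 * i + 1];
                frfi[2 * i]     = frfi[2 * j];
                frfi[2 * i + 1] = frfi[2 * j + 1];
                frfi[2 * j]     = re;
                frfi[2 * j + 1] = im;
            }
        }
    }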
/frameworks/av/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/ |
h264bsd_interpolate_chroma_hor.s | 54 tmp1 RN 7 label 157 SUB tmp1, chrPW, #1 ;// chromaPartWidth-1 160 ADD count, count, tmp1, LSL #20 ;// chromaPartWidth-1 176 LDRB tmp1, [ptrA, width] 183 PKHBT tmp5, tmp1, tmp3, LSL #16 186 LDRB tmp1, [ptrA, width] 192 PKHBT tmp7, tmp3, tmp1, LSL #16 229 LDR tmp1, [sp, #0xd0] ;// y0 233 ADD tmp1, height, tmp1 [all...] |
/bionic/libc/arch-arm64/generic/bionic/ |
memcmp.S | 49 #define tmp1 x8 define 58 eor tmp1, src1, src2 59 tst tmp1, #7 61 ands tmp1, src1, #7 120 add limit, limit, tmp1 /* Adjust the limit for the extra. */ 121 lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. */ 123 neg tmp1, tmp1 /* Bits to alignment -64. */ 128 lsl tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). * [all...] |
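The opening of bionic's AArch64 memcmp above checks whether the two pointers are mutually 8-byte aligned by XORing them and testing the low three bits; if they are, the loop can compare a doubleword at a time, otherwise the tmp1 shift arithmetic shown adjusts for the misalignment. The alignment test in C:

    #include <stdint.h>

    /* Equivalent of `eor tmp1, src1, src2` / `tst tmp1, #7`: the buffers
     * are mutually aligned iff their addresses differ only in bits >= 3. */
    int mutually_aligned8(const void *src1, const void *src2) {
        uintptr_t tmp1 = (uintptr_t)src1 ^ (uintptr_t)src2;
        return (tmp1 & 7u) == 0;
    }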
/external/libvpx/libvpx/vp8/common/mips/msa/ |
bilinear_filter_msa.c | 274 v8u16 tmp0, tmp1; local 285 DOTP_UB2_UH(src2110, src4332, filt0, filt0, tmp0, tmp1); 286 SRARI_H2_UH(tmp0, tmp1, VP8_FILTER_SHIFT); 287 src2110 = __msa_pckev_b((v16i8)tmp1, (v16i8)tmp0); 298 v8u16 tmp0, tmp1, tmp2, tmp3; local 318 tmp0, tmp1, tmp2, tmp3); 319 SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, VP8_FILTER_SHIFT); 320 PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, src2110, src4332); 345 v8u16 tmp0, tmp1, tmp2, tmp3; local 355 tmp0, tmp1, tmp2, tmp3) 369 v8u16 tmp0, tmp1, tmp2, tmp3; local 427 v8u16 tmp0, tmp1, tmp2, tmp3; local 476 v8u16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, filt, tmp0, tmp1; local 567 v8u16 hz_out0, hz_out1, tmp0, tmp1, tmp2, tmp3; local 612 v8u16 hz_out0, hz_out1, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8; local 715 v8u16 tmp1, tmp2, hz_out0, hz_out1, hz_out2, hz_out3; local [all...] |
/external/llvm/test/CodeGen/Mips/ |
alloca.ll | 11 %tmp1 = alloca i8, i32 %size, align 4 12 %add.ptr = getelementptr inbounds i8, i8* %tmp1, i32 5 16 %call = call i32 @foo(i8* %tmp1) nounwind 34 %tmp1 = alloca i8, i32 %size, align 4 35 %0 = bitcast i8* %tmp1 to i32* 42 %add.ptr = getelementptr inbounds i8, i8* %tmp1, i32 40 45 %arrayidx15.pre = getelementptr inbounds i8, i8* %tmp1, i32 12 52 %add.ptr5 = getelementptr inbounds i8, i8* %tmp1, i32 12 63 %arrayidx9 = getelementptr inbounds i8, i8* %tmp1, i32 4 66 %arrayidx12 = getelementptr inbounds i8, i8* %tmp1, i32 [all...] |
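The Mips alloca.ll functions allocate a runtime-sized block on the stack and then index into it with getelementptr before calling an external foo. Roughly the C shape such IR comes from (illustrative only; foo is the callee the test declares):

    #include <alloca.h>

    int foo(char *p);   /* external callee, as in the test */

    int alloca_sketch(int size) {
        char *tmp1 = alloca(size);   /* alloca i8, i32 %size, align 4 */
        char *p = tmp1 + 5;          /* the add.ptr GEP in the excerpt */
        return foo(tmp1) + foo(p);   /* keep both pointers live */
    }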
/external/libvpx/libvpx/vpx_dsp/mips/ |
vpx_convolve8_avg_vert_msa.c | 148 v16u8 dst0, dst1, dst2, dst3, tmp0, tmp1, tmp2, tmp3; local 202 out3_r, tmp0, tmp1, tmp2, tmp3); 203 XORI_B4_128_UB(tmp0, tmp1, tmp2, tmp3); 204 AVER_UB4_UB(tmp0, dst0, tmp1, dst1, tmp2, dst2, tmp3, dst3, dst0, dst1, 268 v8u16 tmp0, tmp1; local 285 DOTP_UB2_UH(src2110, src4332, filt0, filt0, tmp0, tmp1); 286 SRARI_H2_UH(tmp0, tmp1, FILTER_BITS); 288 out = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0); 303 v8u16 tmp0, tmp1, tmp2, tmp3; local 324 tmp0, tmp1, tmp2, tmp3) 353 v8u16 tmp0, tmp1, tmp2, tmp3; local 381 v8u16 tmp0, tmp1, tmp2, tmp3; local 441 v8u16 tmp0, tmp1, tmp2, tmp3, filt; local 493 v8u16 tmp0, tmp1, tmp2, tmp3, filt; local 567 v8u16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; local [all...] |
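The MSA routines in this file and in the vp8 bilinear filter listed earlier follow the same per-pixel recipe: a short dot product of unsigned bytes against the filter taps (DOTP_UB*), a rounding arithmetic shift right (SRARI_H*), a pack back to bytes (PCKEV_B*), and, in the _avg_ variants, an average with the existing destination pixel (AVER_UB*). One pixel of the 2-tap vertical case in scalar C, assuming the usual FILTER_BITS = 7 convention:

    #include <stdint.h>

    enum { FILTER_BITS = 7 };   /* assumed; matches libvpx's convention */

    static uint8_t clip_u8(int v) {
        return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
    }

    /* 2-tap vertical filter of two source rows, rounded shift, then the
     * "avg" step that blends with the current destination pixel. */
    uint8_t convolve2_avg_pixel(uint8_t above, uint8_t below,
                                int f0, int f1, uint8_t dst) {
        int sum = above * f0 + below * f1;
        int filtered = clip_u8((sum + (1 << (FILTER_BITS - 1))) >> FILTER_BITS);
        return (uint8_t)((filtered + dst + 1) >> 1);
    }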
/external/libopus/silk/fixed/ |
burg_modified_FIX.c | 58 opus_int32 C0, num, nrg, rc_Q31, invGain_Q30, Atmp_QA, Atmp1, tmp1, tmp2, x1, x2; local 130 tmp1 = silk_LSHIFT32( (opus_int32)x_ptr[ n ], QA - 16 ); /* Q(QA-16) */ 136 tmp1 = silk_SMLAWB( tmp1, Atmp_QA, x_ptr[ n - k - 1 ] ); /* Q(QA-16) */ 139 tmp1 = silk_LSHIFT32( -tmp1, 32 - QA - rshifts ); /* Q(16-rshifts) */ 142 CAf[ k ] = silk_SMLAWB( CAf[ k ], tmp1, x_ptr[ n - k ] ); /* Q( -rshift ) */ 151 tmp1 = silk_LSHIFT32( (opus_int32)x_ptr[ n ], 17 ); /* Q17 */ 157 tmp1 = silk_MLA( tmp1, x_ptr[ n - k - 1 ], Atmp1 ); /* Q17 * [all...] |
/external/libopus/silk/float/ |
warped_autocorrelation_FLP.c | 44 double tmp1, tmp2; local 53 tmp1 = input[ n ]; 57 tmp2 = state[ i ] + warping * ( state[ i + 1 ] - tmp1 ); 58 state[ i ] = tmp1; 59 C[ i ] += state[ 0 ] * tmp1; 61 tmp1 = state[ i + 1 ] + warping * ( state[ i + 2 ] - tmp2 ); 65 state[ order ] = tmp1; 66 C[ order ] += state[ 0 ] * tmp1; |
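warped_autocorrelation_FLP.c feeds the input through a cascade of first-order allpass sections (warping factor `warping`) and accumulates the correlation terms C[i] against the current sample as it goes; the real SILK code unrolls the section loop by two. A one-section-per-iteration C sketch that computes the same values:

    #include <string.h>

    static void warped_autocorr_sketch(double *C, const float *input,
                                       double warping, int length, int order) {
        double state[order + 1];             /* C99 VLA; order is small */
        double tmp1, tmp2;
        memset(state, 0, sizeof(state));
        memset(C, 0, (size_t)(order + 1) * sizeof(*C));
        for (int n = 0; n < length; n++) {
            tmp1 = input[n];
            for (int i = 0; i < order; i++) {
                /* one allpass section, as in the excerpt's tmp1/tmp2 dance */
                tmp2 = state[i] + warping * (state[i + 1] - tmp1);
                state[i] = tmp1;
                C[i] += state[0] * tmp1;
                tmp1 = tmp2;                 /* feeds the next section */
            }
            state[order] = tmp1;
            C[order] += state[0] * tmp1;
        }
    }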