/frameworks/av/media/libstagefright/codecs/mp3dec/src/

pvmp3_dct_6.cpp
  121  Int32 tmp4;  (local)
  130  tmp4 = vec[4] - vec[1];
  140  tmp0 = fxp_mac32_Q30(tmp4, -cos_3_pi_12, tmp0);
  143  vec[3] = fxp_mul32_Q30((tmp3 + tmp4 - tmp5), cos_3_pi_12);
  145  tmp0 = fxp_mac32_Q30(tmp4, cos_3_pi_12, tmp0);
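For context, the fxp_* helpers in these matches are Q30 fixed-point primitives. A minimal C sketch of what they compute (assumed behavior; the real implementations live elsewhere in the mp3dec sources and may be platform-tuned):

    #include <stdint.h>

    /* Multiply two Q30 fixed-point values, renormalizing back to Q30. */
    static inline int32_t fxp_mul32_Q30(int32_t a, int32_t b) {
        return (int32_t)(((int64_t)a * (int64_t)b) >> 30);
    }

    /* Multiply-accumulate: acc + a*b, with the product in Q30. */
    static inline int32_t fxp_mac32_Q30(int32_t a, int32_t b, int32_t acc) {
        return acc + (int32_t)(((int64_t)a * (int64_t)b) >> 30);
    }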

/external/llvm/test/CodeGen/ARM/

reg_sequence.ll
   87  %tmp4 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 1 ; <<8 x i8>> [#uses=1]
   88  %tmp5 = sub <8 x i8> %tmp3, %tmp4
   90  %tmp7 = mul <8 x i8> %tmp4, %tmp2
   92  ret <8 x i8> %tmp4
  106  %tmp4 = bitcast i32* %tmp3 to i8* ; <i8*> [#uses=1]
  107  %tmp5 = tail call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32.p0i8(i8* %tmp4, i32 1) ; <%struct.__neon_int32x4x2_t> [#uses=2]
  152  %tmp4 = extractvalue %struct.__neon_int16x8x2_t %tmp2, 1 ; <<8 x i16>> [#uses=1]
  153  %tmp5 = add <8 x i16> %tmp3, %tmp4 ; <<8 x i16>> [#uses=1]
  165  %tmp4 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 1 ; <<8 x i8>> [#uses=1]
  166  %tmp5 = add <8 x i8> %tmp3, %tmp4 ; <<8 x i8>> [#uses=1]
  [all...]
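A C-intrinsics sketch of the vld2/extractvalue pattern this test exercises (the function name and shape are mine; the test feeds llvm.arm.neon.vld2 directly and checks that the register allocator forms proper register sequences):

    #include <arm_neon.h>

    /* De-interleaving load of two int32x4 vectors, then a combine;
       lowers to VLD2 plus a vector add on ARM NEON. */
    int32x4_t sum_deinterleaved(const int32_t *p) {
        int32x4x2_t v = vld2q_s32(p);        /* -> llvm.arm.neon.vld2 */
        return vaddq_s32(v.val[0], v.val[1]);
    }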
2007-05-07-tailmerge-1.ll
   28  %tmp4 = call i32 (...) @baz( i32 5, i32 6 ) ; <i32> [#uses=0]

2007-05-09-tailmerge-2.ll
   30  %tmp4 = call i32 (...) @baz( i32 5, i32 6 ) ; <i32> [#uses=0]
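These tailmerge tests (and the PowerPC and older-LLVM copies further down) branch to arms that end in identical call sequences, which branch folding merges into one block. A C shape that gives the optimizer the same opportunity (illustrative only; bar/baz/quux mirror the callees the tests declare):

    int bar(int, int);
    int baz(int, int);
    int quux(int, int);

    /* Both arms fall through to the same trailing code, so the
       compiler can merge the common tail. */
    int f(int q) {
        if (q)
            bar(1, 2);
        else
            baz(5, 6);
        return quux(3, 4);
    }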

/external/libvpx/libvpx/vp9/encoder/mips/msa/

vp9_fdct16x16_msa.c
  367  v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  (local)
  387  in12, in13, in14, in15, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6,
  391  FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp0, tmp1,
  392  tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
  400  TRANSPOSE8x8_SH_SH(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, tmp4, in4,
  403  ST_SH8(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, out, 16);

/external/libvpx/libvpx/vpx_dsp/mips/

vpx_convolve8_avg_vert_msa.c
  544  v8u16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  (local)
  577  DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp4, tmp5);
  578  SRARI_H2_UH(tmp4, tmp5, FILTER_BITS);
  579  PCKEV_AVG_ST_UB(tmp5, tmp4, dst2, dst + 16);
  597  DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp4, tmp5);
  598  SRARI_H2_UH(tmp4, tmp5, FILTER_BITS);
  599  PCKEV_AVG_ST_UB(tmp5, tmp4, dst6, (dst + 48));

vpx_convolve8_vert_msa.c
  564  v8u16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  (local)
  593  DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp4, tmp5);
  594  SRARI_H2_UH(tmp4, tmp5, FILTER_BITS);
  595  PCKEV_ST_SB(tmp4, tmp5, dst + 16);
  613  DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp4, tmp5);
  614  SRARI_H2_UH(tmp4, tmp5, FILTER_BITS);
  615  PCKEV_ST_SB(tmp4, tmp5, dst + 48);
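For readers without the MSA macro headers at hand, a scalar C sketch of what one output pixel of the DOTP_UB2_UH / SRARI_H2_UH / PCKEV_AVG_ST_UB sequence computes in the 2-tap (bilinear) vertical path (assumed correspondence; FILTER_BITS is 7 in libvpx):

    #include <stdint.h>

    #define FILTER_BITS 7

    /* Dot product of two rows against the 2-tap filter, rounded
       arithmetic shift (SRARI), clamp to a byte (pack), then average
       with the existing dst pixel for the _avg_ variant. */
    static uint8_t convolve2_avg_px(uint8_t above, uint8_t below,
                                    const int16_t filt[2], uint8_t dst) {
        int sum = above * filt[0] + below * filt[1];               /* DOTP  */
        int px  = (sum + (1 << (FILTER_BITS - 1))) >> FILTER_BITS; /* SRARI */
        if (px < 0) px = 0; else if (px > 255) px = 255;           /* pack  */
        return (uint8_t)((px + dst + 1) >> 1);                     /* AVG   */
    }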

/external/llvm/test/Transforms/ObjCARC/

move-and-form-retain-autorelease.ll
   84  %tmp4 = bitcast %15* %arg to i8*
   85  %tmp5 = tail call %18* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %18* (i8*, i8*)*)(i8* %tmp4, i8* %tmp)
  142  %tmp49 = tail call %22* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %22* (i8*, i8*)*)(i8* %tmp4, i8* %tmp48)
  152  tail call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*)*)(i8* %tmp4, i8* %tmp56)
  158  %tmp60 = tail call %22* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %22* (i8*, i8*)*)(i8* %tmp4, i8* %tmp59)
  178  %tmp76 = tail call %22* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %22* (i8*, i8*)*)(i8* %tmp4, i8* %tmp75)
  202  %tmp97 = tail call signext i8 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8 (i8*, i8*)*)(i8* %tmp4, i8* %tmp96)

/external/llvm/test/Transforms/InstCombine/

vec_shuffle.ll
   67  %tmp4 = extractelement <4 x float> %tmp, i32 1
   70  %tmp128 = insertelement <4 x float> undef, float %tmp4, i32 0
  197  %tmp4 = load <4 x i16>, <4 x i16>* %tmp
  198  %tmp5 = shufflevector <4 x i16> %tmp4, <4 x i16> undef, <2 x i32> <i32 2, i32 0>
  210  ; CHECK-NEXT: ret <4 x float> %tmp4
  214  %tmp4 = insertelement <4 x float> %tmp2, float %tmp3, i32 3
  215  ret <4 x float> %tmp4
  226  %tmp4 = extractelement <4 x float> %RHS, i32 2
  227  %tmp5 = insertelement <4 x float> %tmp3, float %tmp4, i32 3
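The extract/insert chains above are what InstCombine folds into single shufflevector operations. A C analogue using clang's vector extensions (a sketch, not the test itself):

    typedef float float4 __attribute__((vector_size(16)));

    /* Lane-by-lane copies like this become extractelement/insertelement
       in IR, which InstCombine can collapse into one shuffle. */
    float4 mix_lanes(float4 lhs, float4 rhs) {
        float4 r = lhs;
        r[3] = rhs[2];   /* extract lane 2 of rhs, insert at lane 3 */
        return r;
    }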

/external/clang/test/CodeGen/

aarch64-neon-tbl.c
  136  // CHECK: [[TMP4:%.*]] = load <8 x i8>, <8 x i8>* [[ARRAYIDX6_I]], align 8
  138  // CHECK: [[VTBL27_I:%.*]] = shufflevector <8 x i8> [[TMP3]], <8 x i8> [[TMP4]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  165  // CHECK: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX6_I]], align 16
  166  // CHECK: [[VTBL4_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbl4.v8i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <8 x i8> %b) #2
  244  // CHECK: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX6_I]], align 16
  245  // CHECK: [[VTBL4_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbl4.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <16 x i8> %b) #2
  258  // CHECK: [[TMP4:%.*]] = and <8 x i8> [[TMP3]], [[VTBL11_I]]
  259  // CHECK: [[VTBX_I:%.*]] = or <8 x i8> [[TMP2]], [[TMP4]]
  308  // CHECK: [[TMP4:%.*]] = icmp uge <8 x i8> %c, <i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24>
  309  // CHECK: [[TMP5:%.*]] = sext <8 x i1> [[TMP4]] to <8 x i8>
  [all...]
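The tbl4 CHECK lines correspond to the four-table byte-lookup intrinsics. A C sketch of the source side that produces them (the function name is mine; the intrinsic is standard arm_neon.h on AArch64):

    #include <arm_neon.h>

    /* Four-table byte lookup; indices >= 64 yield 0 under TBL
       semantics. Lowers to llvm.aarch64.neon.tbl4. */
    uint8x16_t lookup4(uint8x16x4_t table, uint8x16_t idx) {
        return vqtbl4q_u8(table, idx);
    }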

/external/libjpeg-turbo/simd/

jsimd_arm64_neon.S
  207  TMP4 .req x10
  403  ldp TMP3, TMP4, [OUTPUT_BUF], 16
  411  add TMP4, TMP4, OUTPUT_COL
  446  st1 {v29.d}[1], [TMP4]
  460  add TMP4, xzr, TMP2, LSL #32
  544  cbnz TMP4, 4f
  [all...]

/external/llvm/test/CodeGen/AMDGPU/

valu-i1.ll
  157  %tmp4 = sext i32 %tmp to i64
  158  %tmp5 = getelementptr inbounds i32, i32 addrspace(1)* %arg3, i64 %tmp4
  166  %tmp12 = add nsw i64 %tmp11, %tmp4

mubuf.ll
   64  %tmp4 = add i32 %6, 16
   65  call void @llvm.SI.tbuffer.store.i32(<16 x i8> %tmp1, i32 %tmp3, i32 1, i32 %tmp4, i32 %4, i32 0, i32 4, i32 4, i32 1, i32 0, i32 1, i32 1, i32 0)
   82  %tmp4 = add i32 %6, 16
   83  call void @llvm.SI.tbuffer.store.i32(<16 x i8> %tmp1, i32 %tmp3, i32 1, i32 %tmp4, i32 %4, i32 0, i32 4, i32 4, i32 1, i32 0, i32 1, i32 1, i32 0)

/external/llvm/test/CodeGen/X86/

interval-update-remat.ll
   59  %tmp4 = phi i32 [ undef, %lor.end7 ], [ %conv42.us.lcssa, %LABEL_mSmSDb.loopexit ]
   72  %.ph = phi i32 [ 0, %if.then47 ], [ %tmp4, %LABEL_BRBRN.preheader ]
   86  %tmp7 = phi i32 [ %conv42.us, %lor.end32.us ], [ %tmp4, %LABEL_BRBRN.preheader ]

/external/swiftshader/third_party/LLVM/test/Transforms/DeadStoreElimination/

simple.ll
  135  %tmp4 = getelementptr { i32, i32 }* %x, i32 0, i32 0
  136  %tmp5 = load i32* %tmp4, align 4
  140  store i32 %tmp5, i32* %tmp4, align 4
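These three lines form DSE's classic no-op store shape: the value loaded through %tmp4 is stored straight back to the same address, so the store can be deleted. A C sketch of the same pattern (illustrative names):

    struct pair { int a, b; };

    void noop_store(struct pair *x) {
        int v = x->a;   /* load through the field's GEP */
        x->a = v;       /* store of the just-loaded value: removable */
    }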

/external/webp/src/dsp/

enc_msa.c
   84  v4i32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;  (local)
  120  UNPCK_R_SH_SW(t1, tmp4);
  121  tmp5 = __msa_ceqi_w(tmp4, 0);
  122  tmp4 = (v4i32)__msa_nor_v((v16u8)tmp5, (v16u8)tmp5);
  124  tmp5 = (v4i32)__msa_and_v((v16u8)tmp5, (v16u8)tmp4);
  807  v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, sign0, sign1;  (local)
  816  tmp4 = __msa_add_a_h(in0, zero);
  818  ILVRL_H2_SH(sh0, tmp4, tmp0, tmp1);
  844  tmp4 = (v8i16)__msa_bmnz_v((v16u8)zero, (v16u8)tmp2, (v16u8)tmp0);
  847  MUL2(tmp4, tmp0, tmp5, tmp1, in0, in1)
  [all...]

/external/boringssl/src/crypto/cipher_extra/asm/

aes128gcmsiv-x86_64.pl
   80  # uses also TMP1,TMP2,TMP3,TMP4
   88  my $TMP4 = "%xmm5";
   96  vpclmulqdq \$0x11, $TMP0, $T, $TMP4
  103  vpxor $TMP2, $TMP4, $TMP4
  113  vpxor $TMP4, $TMP1, $T
  213  my $TMP4 = "%xmm7";
  332  vpclmulqdq \$0x10, poly(%rip), $T, $TMP4  # reduction stage 1a
  337  vpxor $TMP4, $T, $T  # reduction stage 1b
  345  vpclmulqdq \$0x10, poly(%rip), $T, $TMP4  # reduction stage 2
  [all...]
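The vpclmulqdq lines implement GF(2^128) multiplication for GCM-SIV's POLYVAL hash. A C-intrinsics sketch of the schoolbook 128x128 carry-less multiply that the four immediates (0x00/0x01/0x10/0x11) build up; the polynomial reduction the perlasm then performs is omitted, and the helper name is mine:

    #include <wmmintrin.h>   /* PCLMUL intrinsics; build with -mpclmul */

    /* 128x128 -> 256-bit carry-less multiply from four 64x64 partials. */
    static void clmul128(__m128i a, __m128i b, __m128i *lo, __m128i *hi) {
        __m128i t0  = _mm_clmulepi64_si128(a, b, 0x00); /* a.lo * b.lo */
        __m128i t1  = _mm_clmulepi64_si128(a, b, 0x10); /* a.lo * b.hi */
        __m128i t2  = _mm_clmulepi64_si128(a, b, 0x01); /* a.hi * b.lo */
        __m128i t3  = _mm_clmulepi64_si128(a, b, 0x11); /* a.hi * b.hi */
        __m128i mid = _mm_xor_si128(t1, t2);            /* cross terms */
        *lo = _mm_xor_si128(t0, _mm_slli_si128(mid, 8));
        *hi = _mm_xor_si128(t3, _mm_srli_si128(mid, 8));
    }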

/external/llvm/test/Analysis/BasicAA/

gep-alias.ll
   69  %tmp4 = load i32, i32* %tmp2, align 8
   70  ret i32 %tmp4
  269  %tmp4 = load i8, i8* %arrayidx5, align 1
  270  %conv = zext i8 %tmp4 to i32
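gep-alias.ll checks that BasicAA can distinguish getelementptr results by their constant offsets. A C sketch of the payoff (illustrative):

    struct S { int a; int b; };

    int f(struct S *s) {
        s->a = 1;
        s->b = 2;      /* distinct GEP offset: cannot clobber s->a */
        return s->a;   /* BasicAA lets this fold to 1 */
    }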

/external/llvm/test/CodeGen/AArch64/

arm64-ldxr-stxr.ll
   23  %tmp4 = trunc i128 %val to i64
   26  %strexd = tail call i32 @llvm.aarch64.stxp(i64 %tmp4, i64 %tmp7, i8* %ptr)
  162  %tmp4 = trunc i128 %val to i64
  165  %strexd = tail call i32 @llvm.aarch64.stlxp(i64 %tmp4, i64 %tmp7, i8* %ptr)
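Here the i128 store value is split into two i64 halves for the store-exclusive-pair intrinsics (stxp and its release-ordered stlxp variant). For the single-register case, clang exposes the same exclusives through ACLE-style builtins; a retry-loop sketch (assuming a clang AArch64 target where these builtins are available):

    #include <stdint.h>

    /* Atomically add to *p with an LDXR/STXR retry loop;
       __builtin_arm_strex returns 0 on success. */
    uint64_t fetch_add_exclusive(volatile uint64_t *p, uint64_t inc) {
        uint64_t old;
        do {
            old = __builtin_arm_ldrex(p);              /* LDXR */
        } while (__builtin_arm_strex(old + inc, p));   /* STXR */
        return old;
    }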

/external/llvm/test/CodeGen/PowerPC/

2007-05-22-tailmerge-3.ll
   26  %tmp4 = call i32 (...) @baz( i32 5, i32 6 ) ; <i32> [#uses=0]

/external/llvm/test/Transforms/GlobalOpt/

crash.ll
   75  %tmp4 = load i32**, i32*** @g_52, align 8

/external/llvm/test/Transforms/Inline/

inline_minisize.ll
   19  %tmp4 = load i32*, i32** @data, align 8
   20  %arrayidx2 = getelementptr inbounds i32, i32* %tmp4, i64 %idxprom1
  119  %tmp4 = load i32*, i32** @data, align 8
  120  %arrayidx3 = getelementptr inbounds i32, i32* %tmp4, i64 %idxprom2

/external/swiftshader/third_party/LLVM/test/CodeGen/ARM/

2007-05-07-tailmerge-1.ll
   25  %tmp4 = call i32 (...)* @baz( i32 5, i32 6 ) ; <i32> [#uses=0]

2007-05-09-tailmerge-2.ll
   25  %tmp4 = call i32 (...)* @baz( i32 5, i32 6 ) ; <i32> [#uses=0]

2007-05-22-tailmerge-3.ll
   27  %tmp4 = call i32 (...)* @baz( i32 5, i32 6 ) ; <i32> [#uses=0]