/external/libhevc/common/arm64/
ihevc_weighted_pred_uni.s
  175  smull v4.4s, v1.4h, v0.4h[0]    //vmull_n_s16(pi2_src_val1, (int16_t) wgt0)
  180  smull v6.4s, v2.4h, v0.4h[0]    //vmull_n_s16(pi2_src_val2, (int16_t) wgt0) ii iteration
  187  smull v7.4s, v3.4h, v0.4h[0]    //vmull_n_s16(pi2_src_val1, (int16_t) wgt0) iii iteration
  196  smull v16.4s, v5.4h, v0.4h[0]   //vmull_n_s16(pi2_src_val2, (int16_t) wgt0) iv iteration
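The vmull_n_s16 named in these comments is the NEON intrinsic the assembly hand-codes; a minimal C sketch of the per-vector step, with the surrounding load/round/narrow logic omitted and the function name invented for illustration:

    #include <arm_neon.h>

    /* One iteration of the weighted-prediction core above: multiply four
       16-bit source samples by the scalar weight wgt0, widening to 32 bits
       (smull vD.4s, vN.4h, vM.4h[0]) so the product cannot wrap before the
       later rounding/shift stage. */
    static inline int32x4_t weight_samples(int16x4_t pi2_src_val, int16_t wgt0)
    {
        return vmull_n_s16(pi2_src_val, wgt0);
    }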
ihevc_inter_pred_filters_luma_vert_w16inp.s
  155  smull v19.4s, v1.4h, v23.4h    //mul_res1 = vmull_u8(src_tmp2, coeffabs_1)//
  171  smull v20.4s, v2.4h, v23.4h    //mul_res2 = vmull_u8(src_tmp3, coeffabs_1)//
  188  smull v21.4s, v3.4h, v23.4h
  202  smull v30.4s, v4.4h, v23.4h
  229  smull v19.4s, v1.4h, v23.4h    //mul_res1 = vmull_u8(src_tmp2, coeffabs_1)//
  246  smull v20.4s, v2.4h, v23.4h    //mul_res2 = vmull_u8(src_tmp3, coeffabs_1)//
  262  smull v21.4s, v3.4h, v23.4h
  289  smull v30.4s, v4.4h, v23.4h
  316  smull v19.4s, v1.4h, v23.4h    //mul_res1 = vmull_u8(src_tmp2, coeffabs_1)//
  330  smull v20.4s, v2.4h, v23.4h    //mul_res2 = vmull_u8(src_tmp3, coeffabs_1)/
  [all...]
ihevc_inter_pred_luma_vert_w16inp_w16out.s
  166  smull v19.4s,v1.4h,v23.4h    //mul_res1 = smull_u8(src_tmp2, coeffabs_1)//
  182  smull v20.4s,v2.4h,v23.4h    //mul_res2 = smull_u8(src_tmp3, coeffabs_1)//
  199  smull v21.4s,v3.4h,v23.4h
  214  smull v31.4s,v4.4h,v23.4h
  243  smull v19.4s,v1.4h,v23.4h    //mul_res1 = smull_u8(src_tmp2, coeffabs_1)//
  261  smull v20.4s,v2.4h,v23.4h    //mul_res2 = smull_u8(src_tmp3, coeffabs_1)//
  278  smull v21.4s,v3.4h,v23.4h
  306  smull v31.4s,v4.4h,v23.4h
  334  smull v19.4s,v1.4h,v23.4h    //mul_res1 = smull_u8(src_tmp2, coeffabs_1)//
  349  smull v20.4s,v2.4h,v23.4h    //mul_res2 = smull_u8(src_tmp3, coeffabs_1)/
  [all...]
ihevc_itrans_recon_32x32.s
  216  smull v24.4s, v8.4h, v0.4h[1]    //// y1 * cos1(part of b0)
  217  smull v26.4s, v8.4h, v0.4h[3]    //// y1 * cos3(part of b1)
  218  smull v28.4s, v8.4h, v1.4h[1]    //// y1 * sin3(part of b2)
  219  smull v30.4s, v8.4h, v1.4h[3]    //// y1 * sin1(part of b3)
  230  smull v20.4s, v10.4h, v0.4h[0]
  234  smull v22.4s, v10.4h, v0.4h[0]
  237  smull v16.4s, v10.4h, v0.4h[0]
  240  smull v18.4s, v10.4h, v0.4h[0]
  577  smull v24.4s, v8.4h, v2.4h[1]    //// y1 * cos1(part of b0)
  578  smull v26.4s, v8.4h, v2.4h[3]    //// y1 * cos3(part of b1
  [all...]
ihevc_itrans_recon_4x4.s
  143  smull v6.4s, v1.4h, v4.4h[1]    //83 * pi2_src[1]
  145  smull v5.4s, v1.4h, v4.4h[3]    //36 * pi2_src[1]
  176  smull v6.4s, v1.4h, v4.4h[1]    //83 * pi2_src[1]
  179  smull v5.4s, v1.4h, v4.4h[3]    //36 * pi2_src[1]
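Reading the 83/36 comments together, the smull pairs in ihevc_itrans_recon_4x4.s compute the odd part of the standard HEVC 4x4 inverse-transform butterfly; a hedged scalar sketch of that butterfly (the even part and the final round/shift are not in the listed lines and are omitted):

    #include <stdint.h>

    /* Odd-part butterfly of the HEVC 4x4 inverse transform, as the 83/36
       comments suggest; constants are the HEVC transform coefficients. */
    static inline void itrans4_odd(const int16_t *pi2_src, int32_t *o0, int32_t *o1)
    {
        *o0 = 83 * pi2_src[1] + 36 * pi2_src[3];
        *o1 = 36 * pi2_src[1] - 83 * pi2_src[3];
    }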
/frameworks/av/media/libstagefright/codecs/aacenc/src/asm/ARMV5E/
R4R8First_v5.s
  210  smull r8, r6, r4, r11
  211  smull r9, r7, r5, r11
  216  smull r8, r4, r0, r11
  217  smull r9, r5, r1, r11
CalcWindowEnergy_v5.s
  61   smull r0, r8, r12, r11    @ accu2 = fixmul( Coeff0, states1 );
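The fixmul() in this comment is the encoder's fractional multiply, implemented on ARMv5E as a single smull. A hedged portable equivalent, assuming the usual Q31 convention (the exact shift is defined by the encoder's basic ops, not shown in the listing):

    #include <stdint.h>

    /* smull r0, r8, r12, r11 leaves the 64-bit product in r8:r0; fixmul()
       then keeps the high, fraction-scaled part. Shift amount assumed. */
    static inline int32_t fixmul_sketch(int32_t a, int32_t b)
    {
        return (int32_t)(((int64_t)a * b) >> 31);
    }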
/frameworks/av/media/libstagefright/codecs/mp3dec/src/asm/
pvmp3_dct_9_gcc.s
  122  smull r2,r6,r9,r1
  148  smull r5,lr,r4,lr
  154  smull r5,lr,r7,r1
  166  smull r5,r1,lr,r1
/frameworks/native/opengl/libagl/
matrix.h
  66   "smull %0, %1, %2, %2 \n"
  119  "smull %0, %1, %2, %3 \n"
  149  "smull %0, %1, %2, %3 \n"
  325  "smull %0, %1, %2, %3 \n"
  357  "smull %0, %1, %2, %3 \n"
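All five matrix.h sites follow the same GLfixed (16.16) pattern: smull leaves the 64-bit product split across two registers, and shifts stitch the middle 32 bits back together (line 66 squares its operand, hence %2, %2). A portable sketch of what the asm computes, name invented:

    #include <stdint.h>

    /* 16.16 fixed-point multiply: the asm recombines the halves as
       (hi << 16) | (lo >> 16); in portable C that is one 64-bit shift. */
    static inline int32_t mulx_sketch(int32_t x, int32_t y)
    {
        return (int32_t)(((int64_t)x * y) >> 16);
    }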
/external/llvm/test/CodeGen/ARM/
long.ll
  66   ; CHECK: smull
2007-05-14-RegScavengerAssert.ll
  24   %tmp81 = call i32 asm "smull $0, $1, $2, $3 \0A\09mov $0, $0, lsr $4\0A\09add $1, $0, $1, lsl $5\0A\09", "=&r,=*&r,r,r,i,i"( i32* null, i32 %tmp6869, i32 13316085, i32 23, i32 9 )    ; <i32> [#uses=0]
  25   %tmp90 = call i32 asm "smull $0, $1, $2, $3 \0A\09mov $0, $0, lsr $4\0A\09add $1, $0, $1, lsl $5\0A\09", "=&r,=*&r,r,r,i,i"( i32* null, i32 %tmp6869, i32 10568984, i32 23, i32 9 )    ; <i32> [#uses=0]
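The asm string under test is the classic multiply-by-constant-then-shift idiom: smull forms the 64-bit product with a magic constant, lsr #23 drops the fraction bits of the low word, and lsl #9 (= 32 - 23) slots the high word back in above them. The whole sequence is equivalent to this C, with the function name invented:

    #include <stdint.h>

    /* (x * magic) >> 23, which the asm reassembles from the smull halves
       as (lo >> 23) + (hi << 9). */
    static inline int32_t mul_magic_shift23(int32_t x, int32_t magic)
    {
        return (int32_t)(((int64_t)x * magic) >> 23);
    }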
/external/vixl/doc/
changelog.md
  34   + Fixed `smull`, `fmsub` and `sdiv` simulation.
/external/chromium_org/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/
lattice_neon.S
  29   @ instructions, smulwb, and smull. Speech quality was not degraded by
  131  smull r5, r6, r8, r2    @ tmp32b * input2, in 64 bits
/external/speex/libspeex/
fixed_arm4.h
  43   "smull %0,%1,%2,%3 \n\t"
  56   "smull %0,%1,%2,%3 \n\t"
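Both asm fragments in fixed_arm4.h back Speex's fixed-point multiply macros. Assuming the common MULT16_32_Q15 semantics (the header defines which operand widths and Q formats each fragment serves; other variants may sit alongside it), the computation is:

    #include <stdint.h>

    /* Assumed semantics: 16x32 multiply keeping a Q15-scaled result. */
    static inline int32_t mult16_32_q15_sketch(int16_t x, int32_t y)
    {
        return (int32_t)(((int64_t)x * y) >> 15);
    }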
/external/webrtc/src/modules/audio_coding/codecs/isac/fix/source/
lattice_neon.S
  29   @ instructions, smulwb, and smull. Speech quality was not degraded by
  138  smull r5, r6, r8, r2    @ tmp32b * input2, in 64 bits
/system/core/include/private/pixelflinger/
ggl_fixed.h
  114  asm("smull %[lo], %[hi], %[x], %[y] \n"
  122  asm("smull %[lo], %[hi], %[x], %[y] \n"
  137  asm("smull %[lo], %[hi], %[x], %[y] \n"
  144  asm("smull %[lo], %[hi], %[x], %[y] \n"
  158  asm("smull %[lo], %[hi], %[x], %[y] \n"
  165  asm("smull %[lo], %[hi], %[x], %[y] \n"
  186  asm("smull %0, %1, %2, %3 \n"
  485  asm("smull %x[result], %w[x], %w[y] \n"
  502  asm("smull %x[result], %w[x], %w[y] \n"
  515  asm("smull %x0, %w1, %w2 \n
  [all...]
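This file shows both encodings side by side: the ARM32 form (lines 114-186) needs a register pair for the 64-bit product, while the AArch64 form at 485-515 (smull %x[result], %w[x], %w[y]) writes it to one X register, so the hi/lo recombination disappears. A sketch of the AArch64 pattern; the rounding that the header's gglMulx-style helpers add is left out, and the name is invented:

    #include <stdint.h>

    /* AArch64: one smull yields the full 64-bit product; shift to the
       fixed-point format the caller wants. Rounding omitted. */
    static inline int32_t fixed_mul_sketch(int32_t x, int32_t y, int shift)
    {
        int64_t result = (int64_t)x * y;   /* smull x0, w1, w2 */
        return (int32_t)(result >> shift);
    }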
/external/chromium_org/third_party/openmax_dl/dl/sp/src/arm/arm64/
armSP_FFT_CToC_FC32_Radix2_s.S
  109  smull outPointStep, grpCount32, pointStep32
/external/chromium_org/third_party/opus/src/silk/fixed/
schur64_FIX.c
  35   /* Uses SMULL(), available on armv4 */
/external/libopus/silk/fixed/
schur64_FIX.c
  35   /* Uses SMULL(), available on armv4 */
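The SMULL() that both copies of schur64_FIX.c refer to is SILK's full-precision 32x32-to-64 multiply macro; to the best of my reading it reduces to the plain C below, which armv4's smull instruction implements in a single instruction (macro name altered to mark this as a sketch):

    #include <stdint.h>

    /* Full 64-bit product of two 32-bit values, SILK-style. */
    #define SMULL_SKETCH(a32, b32)  ((int64_t)(a32) * (int64_t)(b32))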
/external/llvm/test/CodeGen/AArch64/
arm64-mul.ll
  41   ; CHECK: smull {{x[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
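The CHECK line asserts that a sign-extending 32x32-to-64 multiply is selected as a single smull rather than two sign extensions plus a 64-bit mul; C source that produces this pattern looks like:

    #include <stdint.h>

    /* Both operands sign-extend from 32 bits, so the backend can emit
       smull x0, w0, w1 directly. */
    int64_t widening_mul(int32_t a, int32_t b)
    {
        return (int64_t)a * (int64_t)b;
    }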
arm64-neon-2velem-high.ll
  23   declare <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32>, <2 x i32>)
  25   declare <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16>, <4 x i16>)
  37   %vmull15.i.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
  49   %vmull9.i.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
  115  %vmull2.i.i.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
  128  %vmull2.i.i.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
  196  %vmull2.i.i.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
  208  %vmull2.i.i.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
arm64-vmul.ll
  6    ;CHECK: smull.8h
  9    %tmp3 = call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
  15   ;CHECK: smull.4s
  18   %tmp3 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
  24   ;CHECK: smull.2d
  27   %tmp3 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
  31   declare <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
  32   declare <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
  33   declare <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
  260  %tmp4 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2
  [all...]
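The three CHECK'd forms (smull.8h, smull.4s, smull.2d) correspond one-to-one to the NEON widening-multiply intrinsics; each multiplies narrow lanes and produces double-width lanes:

    #include <arm_neon.h>

    /* C intrinsics that lower to the llvm.aarch64.neon.smull.* calls. */
    int16x8_t mul_8h(int8x8_t a, int8x8_t b)   { return vmull_s8(a, b);  } /* smull.8h */
    int32x4_t mul_4s(int16x4_t a, int16x4_t b) { return vmull_s16(a, b); } /* smull.4s */
    int64x2_t mul_2d(int32x2_t a, int32x2_t b) { return vmull_s32(a, b); } /* smull.2d */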
arm64-neon-2velem.ll
  41   declare <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32>, <2 x i32>)
  43   declare <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16>, <4 x i16>)
  566  %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
  577  %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
  588  %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
  599  %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
  611  %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
  623  %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
  635  %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
  647  %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle
  [all...]
/external/tremolo/Tremolo/
floor1ARM.s
  57   SMULL r6, r5, r4, r5    @ (r6,r5) = *d * *floor
/external/libhevc/decoder/arm64/
ihevcd_fmt_conv_420sp_to_rgba8888.s
  200  sMULL v5.4s, v4.4h, v0.4h[3]     ////(U-128)*C4 FOR B
  203  sMULL v20.4s, v6.4h, v0.4h[0]    ////(V-128)*C1 FOR R
  206  sMULL v12.4s, v4.4h, v0.4h[1]    ////(U-128)*C2 FOR G
  363  sMULL v5.4s, v4.4h, v0.4h[3]     ////(U-128)*C4 FOR B
  366  sMULL v20.4s, v6.4h, v0.4h[0]    ////(V-128)*C1 FOR R
  369  sMULL v12.4s, v4.4h, v0.4h[1]    ////(U-128)*C2 FOR G
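Following the comments, these products are the chroma terms of a fixed-point YUV-to-RGB conversion; C1, C2 and C4 stand for the coefficients the file loads into v0 (their values, the later shifts, and G's V-term fall outside the listed lines). A hedged scalar sketch with those names carried over as parameters:

    #include <stdint.h>

    /* Per-pixel chroma contributions; c1, c2, c4 are the fixed-point
       coefficients held in v0 (values assumed, not shown in the listing). */
    static inline void chroma_terms(int16_t u, int16_t v,
                                    int16_t c1, int16_t c2, int16_t c4,
                                    int32_t *r_term, int32_t *g_u_term,
                                    int32_t *b_term)
    {
        *b_term   = (u - 128) * c4;  /* (U-128)*C4 for B */
        *r_term   = (v - 128) * c1;  /* (V-128)*C1 for R */
        *g_u_term = (u - 128) * c2;  /* (U-128)*C2 for G */
    }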