HomeSort by relevance Sort by last modified time
    Searched refs:d17 (Results 101 - 125 of 203) sorted by null

1 2 3 4 5 6 7 8 9

  /external/libavc/common/arm/
ih264_inter_pred_luma_horz_qpel_vert_qpel_a9q.s 254 vext.8 d17, d12, d13, #5
261 vaddl.u8 q5, d12, d17
273 vext.8 d17, d12, d13, #5
279 vaddl.u8 q5, d12, d17
308 vext.8 d17, d12, d13, #5
315 vaddl.u8 q5, d12, d17
327 vext.8 d17, d12, d13, #5
333 vaddl.u8 q5, d12, d17
  /external/libhevc/common/arm/
ihevc_inter_pred_filters_luma_horz.s 211 @ vext.u8 d17,d12,d13,#5 @vector extract of src[0_5]
234 vld1.u32 {d17},[r4],r11
239 vmlsl.u8 q5,d17,d29 @mul_res = vmlsl_u8(src[0_5], coeffabs_5)@
328 vmlal.u8 q10,d17,d30
381 vmlal.u8 q11,d17,d30
483 vld1.u32 {d17},[r4],r11
486 vzip.32 d5,d17
493 @ vext.u8 d17,d12,d13,#5 @vector extract of src[0_5]
ihevc_intra_pred_chroma_mode_27_to_33.s 192 vld1.8 {d17},[r10] @(iii)ref_main_idx_1
204 vmlal.u8 q9,d17,d27 @(iii)vmull_u8(ref_main_idx_1, dup_const_fract)
251 vld1.8 {d17},[r10] @(vii)ref_main_idx_1
268 vmlal.u8 q9,d17,d27 @(vii)vmull_u8(ref_main_idx_1, dup_const_fract)
334 vld1.8 {d17},[r10] @(iii)ref_main_idx_1
348 vmlal.u8 q9,d17,d27 @(iii)vmull_u8(ref_main_idx_1, dup_const_fract)
402 vld1.8 {d17},[r10] @(vii)ref_main_idx_1
418 vmlal.u8 q9,d17,d27 @(vii)vmull_u8(ref_main_idx_1, dup_const_fract)
ihevc_intra_pred_chroma_mode_3_to_9.s 210 vtbl.8 d17, {d0,d1,d2,d3}, d5 @load from ref_main_idx + 1 (row 1)
218 vmlal.u8 q11, d17, d6 @mul (row 1)
253 vtbl.8 d17, {d0,d1,d2,d3}, d5 @load from ref_main_idx + 1 (row 5)
264 vmlal.u8 q11, d17, d6 @mul (row 5)
380 vtbl.8 d17, {d0,d1,d2,d3}, d5 @load from ref_main_idx + 1 (row 1)
392 vmlal.u8 q11, d17, d6 @mul (row 1)
431 vtbl.8 d17, {d0,d1,d2,d3}, d5 @load from ref_main_idx + 1 (row 5)
450 vmlal.u8 q11, d17, d6 @mul (row 5)
ihevc_intra_pred_filters_chroma_mode_19_to_25.s 300 vld1.8 {d17},[r10] @(iii)ref_main_idx_1
312 vmlal.u8 q9,d17,d27 @(iii)vmull_u8(ref_main_idx_1, dup_const_fract)
356 vld1.8 {d17},[r10] @(vii)ref_main_idx_1
376 vmlal.u8 q9,d17,d27 @(vii)vmull_u8(ref_main_idx_1, dup_const_fract)
444 vld1.8 {d17},[r10] @(iii)ref_main_idx_1
458 vmlal.u8 q9,d17,d27 @(iii)vmull_u8(ref_main_idx_1, dup_const_fract)
516 vld1.8 {d17},[r10] @(vii)ref_main_idx_1
533 vmlal.u8 q9,d17,d27 @(vii)vmull_u8(ref_main_idx_1, dup_const_fract)
ihevc_intra_pred_filters_luma_mode_19_to_25.s 303 vld1.8 {d17},[r10] @(iii)ref_main_idx_1
315 vmlal.u8 q9,d17,d27 @(iii)vmull_u8(ref_main_idx_1, dup_const_fract)
358 vld1.8 {d17},[r10] @(vii)ref_main_idx_1
375 vmlal.u8 q9,d17,d27 @(vii)vmull_u8(ref_main_idx_1, dup_const_fract)
441 vld1.8 {d17},[r10] @(iii)ref_main_idx_1
454 vmlal.u8 q9,d17,d27 @(iii)vmull_u8(ref_main_idx_1, dup_const_fract)
510 vld1.8 {d17},[r10] @(vii)ref_main_idx_1
527 vmlal.u8 q9,d17,d27 @(vii)vmull_u8(ref_main_idx_1, dup_const_fract)
ihevc_intra_pred_luma_mode_27_to_33.s 195 vld1.8 {d17},[r10] @(iii)ref_main_idx_1
207 vmlal.u8 q9,d17,d27 @(iii)vmull_u8(ref_main_idx_1, dup_const_fract)
253 vld1.8 {d17},[r10] @(vii)ref_main_idx_1
270 vmlal.u8 q9,d17,d27 @(vii)vmull_u8(ref_main_idx_1, dup_const_fract)
335 vld1.8 {d17},[r10] @(iii)ref_main_idx_1
349 vmlal.u8 q9,d17,d27 @(iii)vmull_u8(ref_main_idx_1, dup_const_fract)
402 vld1.8 {d17},[r10] @(vii)ref_main_idx_1
418 vmlal.u8 q9,d17,d27 @(vii)vmull_u8(ref_main_idx_1, dup_const_fract)
ihevc_inter_pred_filters_luma_vert.s 171 vld1.u8 {d17},[r3],r2 @src_tmp2 = vld1_u8(pu1_src_tmp)@
210 vmlsl.u8 q6,d17,d29
226 vmlal.u8 q7,d17,d28
252 vld1.u8 {d17},[r3],r2 @src_tmp2 = vld1_u8(pu1_src_tmp)@
313 vmlsl.u8 q6,d17,d29
333 vmlal.u8 q7,d17,d28
372 vld1.u8 {d17},[r3],r2 @src_tmp2 = vld1_u8(pu1_src_tmp)@
380 vmlsl.u8 q6,d17,d29
392 vmlal.u8 q7,d17,d28
621 vld1.u8 {d17},[r3],r2 @src_tmp2 = vld1_u8(pu1_src_tmp)
    [all...]
ihevc_inter_pred_luma_horz_w16out.s 229 vld1.u32 {d17},[r4],r11
232 vzip.32 d5,d17
239 @ vext.u8 d17,d12,d13,#5 @vector extract of src[0_5]
376 @ vext.u8 d17,d12,d13,#5 @vector extract of src[0_5]
399 vld1.u32 {d17},[r4],r11
404 vmlsl.u8 q5,d17,d29 @mul_res = vmlsl_u8(src[0_5], coeffabs_5)@
495 vmlal.u8 q10,d17,d30
544 vmlal.u8 q11,d17,d30
ihevc_intra_pred_filters_chroma_mode_11_to_17.s 324 vtbl.8 d17, {d0,d1,d2,d3}, d5 @load from ref_main_idx + 1 (row 1)
332 vmlal.u8 q11, d17, d6 @mul (row 1)
367 vtbl.8 d17, {d0,d1,d2,d3}, d5 @load from ref_main_idx + 1 (row 5)
378 vmlal.u8 q11, d17, d6 @mul (row 5)
500 vtbl.8 d17, {d0,d1,d2,d3}, d5 @load from ref_main_idx + 1 (row 1)
512 vmlal.u8 q11, d17, d6 @mul (row 1)
552 vtbl.8 d17, {d0,d1,d2,d3}, d5 @load from ref_main_idx + 1 (row 5)
569 vmlal.u8 q11, d17, d6 @mul (row 5)
  /external/libhevc/decoder/arm/
ihevcd_itrans_recon_dc_chroma.s 100 vld2.8 {d16,d17},[r7]
135 vst2.8 {d16,d17},[r11]
  /external/llvm/test/MC/ARM/
pr22395.s 11 @ CHECK: vldmia r0, {d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31}
neon-v8.s 30 vcvtn.s32.f32 d15, d17
31 @ CHECK: vcvtn.s32.f32 d15, d17 @ encoding: [0x21,0xf1,0xbb,0xf3]
thumb-neon-v8.s 30 vcvtn.s32.f32 d15, d17
31 @ CHECK: vcvtn.s32.f32 d15, d17 @ encoding: [0xbb,0xff,0x21,0xf1]
  /frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/
armVCM4P10_InterpolateLuma_HalfDiagVerHor4x4_unsafe_s.S 47 VLD1.8 {d16,d17},[r12],r1
71 VADDL.U8 q3,d7,d17
  /frameworks/rs/cpu_ref/
rsCpuIntrinsics_neon_Blend.S 130 vmull.u8 q12, d15, d17
158 vrshrn.u16 d17, q12, #8
174 vmull.u8 q12, d7, d17
274 vmull.u8 q4, d7, d17
321 vmull.u8 q4, d7, d17
361 vmull.u8 q12, d1, d17
458 vld4.8 {d17,d19,d21,d23}, [r1]!
511 .if \ldsrc ; vld1.64 {d17}, [r1]! ; .endif
rsCpuIntrinsics_neon_Resize.S 66 vmlsl.u16 q13, d17, d6[0]
484 vaddw.s16 q13, q12, d17
565 vqrshrn.s32 d17, q9, #15
613 vqrshrn.s32 d17, q9, #15
645 vqrshrun.s16 d17, q9, #VERTBITS - 8
683 vqrshrn.s32 d17, q9, #15
698 vqrshrun.s16 d17, q9, #VERTBITS - 8
775 vmov.u8 d16, d17
  /external/boringssl/src/crypto/curve25519/asm/
x25519-asm-arm.S 285 vst1.8 {d16-d17},[r2,: 128]!
337 vmlal.s32 q10,d17,d22
342 vmlal.s32 q13,d17,d23
347 vmlal.s32 q1,d17,d0
352 vmlal.s32 q14,d17,d1
361 vmlal.s32 q15,d11,d17
375 vmlal.s32 q0,d13,d17
433 vtrn.32 d17,d3
438 vst1.8 d17,[r4,: 64]!
453 vtrn.32 d17,d
    [all...]
  /external/boringssl/src/crypto/poly1305/
poly1305_arm_asm.S 221 # asm 2: vld1.8 {>x01=d16->x01=d17},[<input_0=r0,: 128]!
222 vld1.8 {d16-d17},[r0,: 128]!
479 # asm 2: vmlal.u32 <r4=q15,<x01=d17,<z34=d10
480 vmlal.u32 q15,d17,d10
529 # asm 2: vmlal.u32 <r3=q4,<x01=d17,<z12=d5
530 vmlal.u32 q4,d17,d5
589 # asm 2: vmlal.u32 <r2=q13,<x01=d17,<z12=d4
590 vmlal.u32 q13,d17,d4
624 # asm 2: vmlal.u32 <r1=q14,<x01=d17,<z0=d6
625 vmlal.u32 q14,d17,d6
    [all...]
  /external/v8/test/cctest/
test-disasm-arm.cc 691 COMPARE(vadd(d16, d17, d18),
692 "ee710ba2 vadd.f64 d16, d17, d18");
694 COMPARE(vsub(d16, d17, d18),
695 "ee710be2 vsub.f64 d16, d17, d18");
697 COMPARE(vmul(d16, d17, d18),
698 "ee610ba2 vmul.f64 d16, d17, d18");
700 COMPARE(vdiv(d16, d17, d18),
701 "eec10ba2 vdiv.f64 d16, d17, d18");
703 COMPARE(vcmp(d16, d17),
704 "eef40b61 vcmp.f64 d16, d17");
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/arm/neon/
vp8_subpixelvariance16x16_neon.asm 104 vqrshrn.u16 d17, q10, #7
113 vst1.u8 {d14, d15, d16, d17}, [lr]! ;store result
170 vqrshrn.u16 d17, q2, #7
175 vst1.u8 {d14, d15, d16, d17}, [lr]!
292 vqrshrn.u16 d17, q10, #7
299 vst1.u8 {d16, d17}, [r3]!
  /external/v8/test/mjsunit/asm/embenchen/
box2d.js     [all...]
  /external/valgrind/none/tests/arm/
neon64.c     [all...]
  /external/vixl/examples/
custom-disassembler.cc 130 __ Fadd(d30, d16, d17);
  /toolchain/binutils/binutils-2.25/gas/testsuite/gas/arm/
vldconst.s 143 # d17 should not reuse high part of d11 and s12.
145 vldr d17, =0xff0000040000fff0

Completed in 3913 milliseconds

1 2 3 4 5 6 7 8 9