/external/libhevc/common/arm/ |
ihevc_itrans_recon_32x32.s |
  485  vsub.s32 q5,q10,q12
  488  vsub.s32 q12,q11,q13
  491  vsub.s32 q13,q8,q14
  495  vsub.s32 q14,q9,q15
  824  vsub.s32 q5,q10,q12
  827  vsub.s32 q12,q11,q13
  830  vsub.s32 q13,q8,q14
  834  vsub.s32 q14,q9,q15
  1133 vsub.s32 q5,q10,q12
  1136 vsub.s32 q12,q11,q1 [all...]
ihevc_sao_edge_offset_class0_chroma.s |
  170 VSUB.U8 Q10,Q9,Q8   @sign_left = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  197 VSUB.U8 Q11,Q9,Q8   @sign_right = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  204 VSUB.U8 Q10,Q12,Q13 @II sign_left = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  213 VSUB.U8 Q11,Q12,Q13 @II sign_right = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  333 VSUB.U8 Q10,Q9,Q8   @sign_left = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  358 VSUB.U8 Q11,Q9,Q8   @sign_right = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  367 VSUB.U8 Q10,Q12,Q13 @II sign_left = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  373 VSUB.U8 Q11,Q12,Q13 @II sign_right = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
ihevc_sao_edge_offset_class1.s |
  137 VSUB.U8 Q8,Q7,Q6    @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  155 VSUB.U8 Q10,Q7,Q6   @sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  168 VSUB.U8 Q4,Q12,Q11  @II sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  228 VSUB.U8 Q10,Q7,Q6   @sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  279 VSUB.U8 Q8,Q7,Q6    @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  297 VSUB.U8 Q10,Q7,Q6   @sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  308 VSUB.U8 Q10,Q12,Q11 @II sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  347 VSUB.U8 Q10,Q7,Q6   @sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
ihevc_sao_edge_offset_class1_chroma.s |
  142 VSUB.U8 Q8,Q7,Q6    @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  160 VSUB.U8 Q10,Q7,Q6   @sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  173 VSUB.U8 Q14,Q12,Q11 @II sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  241 VSUB.U8 Q10,Q7,Q6   @sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  297 VSUB.U8 Q8,Q7,Q6    @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  315 VSUB.U8 Q10,Q7,Q6   @sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  326 VSUB.U8 Q10,Q12,Q11 @II sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  378 VSUB.U8 Q10,Q7,Q6   @sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
ihevc_intra_pred_filters_luma_mode_11_to_17.s |
  310 vsub.s8 d8, d8, d26  @ref_main_idx (row 0)
  313 vsub.s8 d7, d28, d6  @32-fract
  433 vsub.s8 d8, d8, d26  @ref_main_idx
  479 vsub.s8 d7, d28, d6  @32-fract
  633 vsub.s8 d7, d28, d6  @32-fract
  636 vsub.s8 d8, d8, d26  @ref_main_idx
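The fract / 32-fract pair flagged in the mode 11-17 listing above is the weight pair of HEVC's two-tap angular interpolation at 1/32-pel precision. A scalar sketch of that filter (the names ref, idx, and fract are illustrative, not taken from the file):

    /* Each predicted sample blends two reference samples; the vsub.s8
     * lines above compute the (32 - fract) weight. */
    uint8_t pred = (uint8_t)(((32 - fract) * ref[idx] + fract * ref[idx + 1] + 16) >> 5);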
ihevc_sao_edge_offset_class2.s |
  256 VSUB.U8 Q7,Q8,Q7    @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  297 VSUB.U8 Q5,Q9,Q5    @I sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  368 VSUB.U8 Q12,Q11,Q12 @II sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  385 VSUB.U8 Q5,Q9,Q5    @III sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  468 VSUB.U8 Q5,Q9,Q5    @sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  568 VSUB.U8 Q7,Q8,Q7    @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  [all...]
ihevc_sao_edge_offset_class2_chroma.s |
  356 VSUB.U8 Q7,Q8,Q7    @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  405 VSUB.U8 Q11,Q11,Q10 @I sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  488 VSUB.U8 Q12,Q12,Q11 @II sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  531 VSUB.U8 Q11,Q11,Q10 @III sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  [all...]
ihevc_sao_edge_offset_class3.s |
  276 VSUB.U8 Q7,Q8,Q7    @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  310 VSUB.U8 Q5,Q9,Q5    @I sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  393 VSUB.U8 Q12,Q13,Q12 @II sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  419 VSUB.U8 Q5,Q9,Q5    @III sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  505 VSUB.U8 Q12,Q13,Q12 @sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  603 VSUB.U8 Q7,Q8,Q7    @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  [all...]
ihevc_sao_edge_offset_class3_chroma.s |
  338 VSUB.U8 Q7,Q8,Q7    @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  397 VSUB.U8 Q11,Q11,Q10 @I sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  495 VSUB.U8 Q12,Q12,Q11 @II sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  527 VSUB.U8 Q11,Q10,Q11 @III sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
  [all...]
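All of the ihevc_sao_edge_offset_* files above use the same trick for the edge sign: two unsigned compares produce all-ones lane masks, and subtracting one mask from the other wraps to exactly -1, 0, or +1 per lane. A minimal intrinsics sketch of the pattern named in the comments (the function and argument names are illustrative):

    #include <arm_neon.h>

    static inline int8x16_t edge_sign(uint8x16_t cur, uint8x16_t neighbor)
    {
        uint8x16_t cmp_gt = vcgtq_u8(cur, neighbor); /* 0xFF where cur > neighbor */
        uint8x16_t cmp_lt = vcltq_u8(cur, neighbor); /* 0xFF where cur < neighbor */
        /* 0x00 - 0xFF wraps to 0x01 and 0xFF - 0x00 stays 0xFF, so each lane
         * reinterprets to +1, -1, or 0: the sign of (cur - neighbor). */
        return vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt));
    }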
/frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/ |
Syn_filt_32_neon.s |
  84  VSUB.S32 Q10, Q15, Q10
  106 VSUB.S32 Q12, Q12, Q11
syn_filt_neon.s | 84 VSUB.S32 Q9, Q10, Q7
|
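Both AMR-WB filter files above implement the synthesis filter 1/A(z); the VSUB.S32 lines are the accumulator subtractions, four lanes at a time. A hedged scalar reference (the Q12 scaling and rounding are assumptions for illustration, and the real code saturates where this sketch does not):

    /* y[n] = x[n] - sum_{i=1..m} a[i] * y[n - i], with a[] assumed Q12. */
    void syn_filt_ref(const short *a, const short *x, short *y, int n, int m)
    {
        for (int i = 0; i < n; i++) {
            int acc = (int)x[i] << 12;               /* align x with the Q12 products */
            for (int j = 1; j <= m; j++)
                acc -= a[j] * y[i - j];              /* assumes m history samples before y[0] */
            y[i] = (short)((acc + (1 << 11)) >> 12); /* round back to Q0 */
        }
    }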
/external/valgrind/none/tests/arm/ |
vfp.c | [all...] |
/frameworks/av/services/audioflinger/ |
AudioResamplerFirProcessNeon.h |
  678 "vsub.s16 q9, q9, q8 \n"// (1) interpolate (step1) 1st set of coefs
  679 "vsub.s16 q11, q11, q10 \n"// (1) interpolate (step1) 2nd set of coefs
  754 "vsub.s16 q9, q9, q8 \n"// (1) interpolate (step1) 1st set of coefs
  755 "vsub.s16 q11, q11, q10 \n"// (1) interpolate (step1) 2nd set of coefs
  [all...]
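The numbered steps in these comments are linear interpolation between adjacent polyphase coefficient sets: step 1 (the vsub.s16) forms the delta, step 2 scales it by the fractional phase and adds it back. A scalar sketch of the idea (the Q15 phase format is an assumption; the real kernel likely uses a saturating doubling multiply for step 2):

    short interp_coef(short c0, short c1, short phase_q15)
    {
        short delta = (short)(c1 - c0);                   /* step 1: the vsub.s16 */
        return (short)(c0 + ((delta * phase_q15) >> 15)); /* step 2: scale and add */
    }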
/bionic/libm/arm/ |
floor.S | 133 vsub.f64 d16, d16, d18
|
/external/llvm/lib/Target/ARM/ |
ARMHazardRecognizer.cpp | 41 // Look for special VMLA / VMLS hazards. A VMUL / VADD / VSUB following
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/arm/neon/ |
buildintrapredictorsmby_neon.asm |
  247 vsub.s16 q4, q4, q7
  248 vsub.s16 q5, q5, q7
  530 vsub.s16 q4, q4, q7
  531 vsub.s16 q5, q5, q7
/external/v8/test/cctest/ |
test-disasm-arm.cc |
  510 COMPARE(vsub(d0, d1, d2),
  511 "ee310b42 vsub.f64 d0, d1, d2");
  512 COMPARE(vsub(d3, d4, d5, ne),
  515 COMPARE(vsub(s0, s1, s2),
  516 "ee300ac1 vsub.f32 s0, s1, s2");
  517 COMPARE(vsub(s3, s4, s5, ne),
  694 COMPARE(vsub(d16, d17, d18),
  695 "ee710be2 vsub.f64 d16, d17, d18");
  [all...]
/external/opencv3/3rdparty/libwebp/dsp/ |
dec_neon.c |
  333 "vsub.s32 q2, q2, q3 \n" // tmp[8] = a0 - a1
  335 "vsub.s32 q3, q5, q4 \n" // tmp[12] = a3 - a2
  349 "vsub.s32 q8, q1, q2 \n" // a2 = tmp[1] - tmp[2]
  350 "vsub.s32 q9, q0, q3 \n" // a3 = dc - tmp[3]
  356 "vsub.s32 q2, q6, q7 \n"
  358 "vsub.s32 q3, q9, q8 \n"
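The commented names above are the butterflies of libwebp's inverse Walsh-Hadamard transform on the DC block. A scalar sketch of the first pass, consistent with those names (the in/tmp arrays and loop bounds are assumptions; the second pass mirrors this structure row-wise):

    /* tmp[8] = a0 - a1 and tmp[12] = a3 - a2 are the vsub.s32 lines above. */
    for (int i = 0; i < 4; ++i) {
        const int a0 = in[0 + i] + in[12 + i];
        const int a1 = in[4 + i] + in[8 + i];
        const int a2 = in[4 + i] - in[8 + i];
        const int a3 = in[0 + i] - in[12 + i];
        tmp[0 + i]  = a0 + a1;
        tmp[4 + i]  = a3 + a2;
        tmp[8 + i]  = a0 - a1;
        tmp[12 + i] = a3 - a2;
    }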
/bionic/libc/arch-arm/generic/bionic/ |
memcmp.S |
  81 vsub.i8 q0, q2
  82 vsub.i8 q1, q3
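The two vsub.i8 lines in memcmp rely on a byte difference of zero meaning equality: subtract the blocks, OR the results, and any nonzero lane flags a mismatch. A hedged intrinsics sketch of that idea (the helper name and the final reduction are illustrative, not bionic's actual sequence):

    #include <arm_neon.h>
    #include <stdint.h>

    /* Nonzero return: some byte in the two 32-byte blocks differs. */
    static int blocks_differ(const uint8_t *p, const uint8_t *q)
    {
        uint8x16_t d0 = vsubq_u8(vld1q_u8(p), vld1q_u8(q));           /* zero lane = equal */
        uint8x16_t d1 = vsubq_u8(vld1q_u8(p + 16), vld1q_u8(q + 16));
        uint8x16_t any = vorrq_u8(d0, d1);
        uint8x8_t fold = vorr_u8(vget_low_u8(any), vget_high_u8(any));
        return vget_lane_u64(vreinterpret_u64_u8(fold), 0) != 0;
    }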
/external/llvm/test/CodeGen/Thumb2/ |
float-ops.ll |
  29 ; HARD: vsub.f32 s
  39 ; DP: vsub.f64 d0, d0, d1
/external/llvm/test/MC/ARM/ |
fullfp16-neon-neg.s |
  11 vsub.f16 d0, d1, d2
  12 vsub.f16 q0, q1, q2
/external/llvm/test/MC/Disassembler/ARM/ |
fp-encoding.txt |
  10 # CHECK: vsub.f64 d16, d17, d16
  13 # CHECK: vsub.f32 s0, s1, s0
/frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/ |
omxVCM4P10_PredictIntra_16x16_s.S |
  222 VSUB.I64 q12,q12,q11
  232 VSUB.I16 q13,q11,q13
/external/llvm/lib/Target/Hexagon/ |
HexagonInstrInfoV60.td | [all...] |
/external/opencv3/modules/core/src/ |
arithm.cpp |
  370 FUNCTOR_TEMPLATE(VSub);
  371 FUNCTOR_CLOSURE_2arg(VSub, uchar,  return _mm256_subs_epu8 (a, b));
  372 FUNCTOR_CLOSURE_2arg(VSub, schar,  return _mm256_subs_epi8 (a, b));
  373 FUNCTOR_CLOSURE_2arg(VSub, ushort, return _mm256_subs_epu16(a, b));
  374 FUNCTOR_CLOSURE_2arg(VSub, short,  return _mm256_subs_epi16(a, b));
  375 FUNCTOR_CLOSURE_2arg(VSub, int,    return _mm256_sub_epi32 (a, b));
  376 FUNCTOR_CLOSURE_2arg(VSub, float,  return _mm256_sub_ps    (a, b));
  377 FUNCTOR_CLOSURE_2arg(VSub, double, return _mm256_sub_pd    (a, b));
  509 FUNCTOR_TEMPLATE(VSub);
  510 FUNCTOR_CLOSURE_2arg(VSub, uchar,  return _mm_subs_epu8 (a, b))
  [all...]
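The OpenCV closures above choose saturating subtracts for the narrow integer types because cv::subtract clamps rather than wraps; only the 32-bit and floating-point cases can use the plain instruction. A minimal SSE2 illustration of the difference (the function names are illustrative):

    #include <emmintrin.h>

    /* Saturating: per byte, 10 - 20 clamps to 0 (matches saturate_cast<uchar>). */
    __m128i sub_u8_saturate(__m128i a, __m128i b) { return _mm_subs_epu8(a, b); }

    /* Wrapping: per byte, 10 - 20 becomes 246, which is why the uchar closure
     * above uses _mm_subs_epu8 rather than _mm_sub_epi8. */
    __m128i sub_u8_wrap(__m128i a, __m128i b)     { return _mm_sub_epi8(a, b); }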