/external/llvm/test/MC/ARM/ |
neon-bitwise-encoding.s |
    65  veor q4, q7, q3
    66  veor.8 q4, q7, q3
    67  veor.16 q4, q7, q3
    68  veor.32 q4, q7, q3
    69  veor.64 q4, q7, q3
    71  veor.i8 q4, q7, q3
    72  veor.i16 q4, q7, q3
    73  veor.i32 q4, q7, q3
    74  veor.i64 q4, q7, q3
    76  veor.s8 q4, q7, q [all...]
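These LLVM MC tests verify that every data-type suffix on veor assembles to the identical encoding: exclusive-OR is a pure bitwise operation, so the element size is irrelevant. A minimal C intrinsics sketch of the same operation (the function name is illustrative):

    #include <arm_neon.h>

    /* veor q4, q7, q3 -- one 128-bit bitwise XOR; the .8/.16/.32/.64
       (and .iN/.sN) suffixes accepted above all map to this encoding. */
    uint8x16_t xor_q(uint8x16_t a, uint8x16_t b)
    {
        return veorq_u8(a, b);
    }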
neon-v8.s |
    5   vmaxnm.f32 q2, q4, q6
    6   @ CHECK: vmaxnm.f32 q2, q4, q6 @ encoding: [0x5c,0x4f,0x08,0xf3]
    16  vcvta.s32.f32 q4, q6
    17  @ CHECK: vcvta.s32.f32 q4, q6 @ encoding: [0x4c,0x80,0xbb,0xf3]
    18  vcvta.u32.f32 q4, q10
    19  @ CHECK: vcvta.u32.f32 q4, q10 @ encoding: [0xe4,0x80,0xbb,0xf3]
    43  vcvtp.s32.f32 q4, q15
    44  @ CHECK: vcvtp.s32.f32 q4, q15 @ encoding: [0x6e,0x82,0xbb,0xf3]
    50  vrintn.f32 q1, q4
    51  @ CHECK: vrintn.f32 q1, q4 @ encoding: [0x48,0x24,0xba,0xf3 [all...]
thumb-neon-v8.s |
    5   vmaxnm.f32 q2, q4, q6
    6   @ CHECK: vmaxnm.f32 q2, q4, q6 @ encoding: [0x08,0xff,0x5c,0x4f]
    16  vcvta.s32.f32 q4, q6
    17  @ CHECK: vcvta.s32.f32 q4, q6 @ encoding: [0xbb,0xff,0x4c,0x80]
    18  vcvta.u32.f32 q4, q10
    19  @ CHECK: vcvta.u32.f32 q4, q10 @ encoding: [0xbb,0xff,0xe4,0x80]
    43  vcvtp.s32.f32 q4, q15
    44  @ CHECK: vcvtp.s32.f32 q4, q15 @ encoding: [0xbb,0xff,0x6e,0x82]
    50  vrintn.f32 q1, q4
    51  @ CHECK: vrintn.f32 q1, q4 @ encoding: [0xba,0xff,0x48,0x24 [all...]
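neon-v8.s and thumb-neon-v8.s check the ARM and Thumb-2 encodings of the same ARMv8 instructions: vmaxnm implements IEEE 754-2008 maxNum (a number beats a NaN), vcvta converts to integer rounding to nearest with ties away from zero, and vrintn rounds to nearest even without converting. A hedged intrinsics sketch, assuming a compiler targeting ARMv8 NEON where these intrinsics are available:

    #include <arm_neon.h>

    float32x4_t maxnm(float32x4_t a, float32x4_t b) { return vmaxnmq_f32(a, b); }  /* vmaxnm.f32    */
    int32x4_t   cvta (float32x4_t a)                { return vcvtaq_s32_f32(a); }  /* vcvta.s32.f32 */
    float32x4_t rintn(float32x4_t a)                { return vrndnq_f32(a); }      /* vrintn.f32    */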
neon-shiftaccum-encoding.s |
    10  vsra.s64 q8, q4, #64
    18  vsra.u64 q4, q5, #25
    28  vsra.s64 q4, #64
    45  @ CHECK: vsra.s64 q8, q4, #64 @ encoding: [0xd8,0x01,0xc0,0xf2]
    53  @ CHECK: vsra.u64 q4, q5, #25 @ encoding: [0xda,0x81,0xa7,0xf3]
    62  @ CHECK: vsra.s64 q4, q4, #64 @ encoding: [0xd8,0x81,0x80,0xf2]
    82  vrsra.s32 q3, q4, #32
    83  vrsra.s64 q4, q5, #64
    100 vrsra.s32 q4, #3 [all...]
neont2-shiftaccum-encoding.s |
    12  vsra.s64 q8, q4, #64
    20  vsra.u64 q4, q5, #25
    30  vsra.s64 q4, #64
    47  @ CHECK: vsra.s64 q8, q4, #64 @ encoding: [0xc0,0xef,0xd8,0x01]
    55  @ CHECK: vsra.u64 q4, q5, #25 @ encoding: [0xa7,0xff,0xda,0x81]
    64  @ CHECK: vsra.s64 q4, q4, #64 @ encoding: [0x80,0xef,0xd8,0x81]
    85  vrsra.s32 q3, q4, #32
    86  vrsra.s64 q4, q5, #64
    103 vrsra.s32 q4, #3 [all...]
neon-shift-encoding.s |
    116 vsra.s64 q4, q5, #63
    122 vsra.s8 q4, #7
    134 @ CHECK: vsra.s64 q4, q5, #63 @ encoding: [0xda,0x81,0x81,0xf2]
    139 @ CHECK: vsra.s8 q4, q4, #7 @ encoding: [0x58,0x81,0x89,0xf2]
    152 vsra.u64 q4, q5, #63
    158 vsra.u8 q4, #7
    170 @ CHECK: vsra.u64 q4, q5, #63 @ encoding: [0xda,0x81,0x81,0xf3]
    175 @ CHECK: vsra.u8 q4, q4, #7 @ encoding: [0x58,0x81,0x89,0xf3 [all...]
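All three shift files exercise vsra, shift-right-and-accumulate: each lane is shifted right by an immediate and added into the destination. The two-operand spellings (vsra.s64 q4, #64) implicitly reuse the destination as the first source, which is why the CHECK lines print q4, q4; note the immediate may go up to the full element width. A sketch with the matching intrinsic:

    #include <arm_neon.h>

    /* vsra.s64 q4, q5, #63: per 64-bit lane, acc += (x >> 63). */
    int64x2_t shift_accumulate(int64x2_t acc, int64x2_t x)
    {
        return vsraq_n_s64(acc, x, 63);
    }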
vfp4.s |
    18  @ ARM: vfma.f32 q2, q4, q0 @ encoding: [0x50,0x4c,0x08,0xf2]
    19  @ THUMB: vfma.f32 q2, q4, q0 @ encoding: [0x08,0xef,0x50,0x4c]
    20  vfma.f32 q2, q4, q0
    44  @ ARM: vfms.f32 q2, q4, q0 @ encoding: [0x50,0x4c,0x28,0xf2]
    45  @ THUMB: vfms.f32 q2, q4, q0 @ encoding: [0x28,0xef,0x50,0x4c]
    46  vfms.f32 q2, q4, q0
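vfma/vfms are the VFPv4 fused multiply-accumulate forms: unlike vmla, the product is not rounded before the addition, so the whole operation rounds once. A sketch with the corresponding intrinsic (available when the target has VFPv4/NEONv2):

    #include <arm_neon.h>

    /* vfma.f32 q2, q4, q0: acc + a*b with a single rounding step. */
    float32x4_t fused_mla(float32x4_t acc, float32x4_t a, float32x4_t b)
    {
        return vfmaq_f32(acc, a, b);
    }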
neon-minmax-encoding.s |
    20  vmax.s16 q4, q5, q6
    31  vmax.u16 q4, q5
    50  @ CHECK: vmax.s16 q4, q5, q6 @ encoding: [0x4c,0x86,0x1a,0xf2]
    60  @ CHECK: vmax.u16 q4, q4, q5 @ encoding: [0x4a,0x86,0x18,0xf3]
    82  vmin.s16 q4, q5, q6
    93  vmin.u16 q4, q5
    112 @ CHECK: vmin.s16 q4, q5, q6 @ encoding: [0x5c,0x86,0x1a,0xf2]
    122 @ CHECK: vmin.u16 q4, q4, q5 @ encoding: [0x5a,0x86,0x18,0xf3 [all...]
neont2-minmax-encoding.s |
    22  vmax.s16 q4, q5, q6
    33  vmax.u16 q4, q5
    52  @ CHECK: vmax.s16 q4, q5, q6 @ encoding: [0x1a,0xef,0x4c,0x86]
    62  @ CHECK: vmax.u16 q4, q4, q5 @ encoding: [0x18,0xff,0x4a,0x86]
    84  vmin.s16 q4, q5, q6
    95  vmin.u16 q4, q5
    114 @ CHECK: vmin.s16 q4, q5, q6 @ encoding: [0x1a,0xef,0x5c,0x86]
    124 @ CHECK: vmin.u16 q4, q4, q5 @ encoding: [0x18,0xff,0x5a,0x86 [all...]
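As with vsra above, the two-operand forms vmax.u16 q4, q5 and vmin.u16 q4, q5 are aliases that reuse the destination as the first source, matching the three-operand CHECK output. Sketch:

    #include <arm_neon.h>

    /* vmax.s16 / vmin.s16 over eight 16-bit lanes. */
    int16x8_t lane_max(int16x8_t a, int16x8_t b) { return vmaxq_s16(a, b); }
    int16x8_t lane_min(int16x8_t a, int16x8_t b) { return vminq_s16(a, b); }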
/external/chromium_org/third_party/freetype/src/base/ |
ftbbox.c |
    227 FT_Pos q1, q2, q3, q4;
    233 q4 = p4;
    240 if ( q1 + q2 > q3 + q4 )   /* first half */
    242 q4 = q4 + q3;
    245 q4 = q4 + q3;
    247 q4 = ( q4 + q3 ) / 8;
    255 q3 = q3 + q4; [all...]
/external/freetype/src/base/ |
ftbbox.c |
    227 FT_Pos q1, q2, q3, q4;
    233 q4 = p4;
    240 if ( q1 + q2 > q3 + q4 )   /* first half */
    242 q4 = q4 + q3;
    245 q4 = q4 + q3;
    247 q4 = ( q4 + q3 ) / 8;
    255 q3 = q3 + q4; [all...]
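In both copies of ftbbox.c, q1..q4 hold the control points of a cubic Bezier arc that BBox_Cubic_Check halves repeatedly until an extremum is bracketed; the divisions by 8 keep the fixed-point midpoint arithmetic in range for FT_Pos. For orientation, a plain de Casteljau halving step without FreeType's pre-scaling (a sketch, not the library's exact code):

    /* Replace q1..q4 with the control points of the first half of
       the cubic; q4 ends up as the midpoint of the curve. */
    static void cubic_split_left(long *q1, long *q2, long *q3, long *q4)
    {
        long q12  = (*q1 + *q2) / 2;
        long q23  = (*q2 + *q3) / 2;
        long q34  = (*q3 + *q4) / 2;
        long q123 = (q12 + q23) / 2;
        long q234 = (q23 + q34) / 2;

        *q2 = q12;
        *q3 = q123;
        *q4 = (q123 + q234) / 2;
    }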
/external/libvpx/libvpx/vp8/common/arm/neon/ |
dequant_idct_neon.asm |
    26  vld1.16 {q3, q4}, [r0]
    40  vmul.i16 q2, q4, q6
    47  vqdmulh.s16 q4, q2, d0[0]
    53  vshr.s16 q4, q4, #1
    56  vqadd.s16 q4, q4, q2
    76  vqdmulh.s16 q4, q2, d0[0]
    84  vshr.s16 q4, q4, # [all...]
idct_dequant_full_2x_neon.asm |
    28  vld1.16 {q4, q5}, [r0]          ; r q
    46  vmul.i16 q4, q4, q0
    52  ; q4: l4r4 q5: l12r12
    61  vqdmulh.s16 q6, q4, d0[2]       ; sinpi8sqrt2
    63  vqdmulh.s16 q8, q4, d0[0]       ; cospi8sqrt2minus1
    80  ; q4: 4 + 4 * cospi : d1/temp1
    82  vqadd.s16 q4, q4, q8
    88  vqadd.s16 q3, q4, q [all...]
sixtappredict4x4_neon.asm |
    62  vld1.u8 {q4}, [r0], r1
    88  vmov q4, q3                     ;keep original src data in q4 q6
    93  vshr.u64 q9, q4, #8             ;construct src_ptr[-1]
    100 vshr.u64 q3, q4, #32            ;construct src_ptr[2]
    107 vshr.u64 q9, q4, #16            ;construct src_ptr[0]
    114 vshr.u64 q3, q4, #24            ;construct src_ptr[1]
    125 vld1.u8 {q4}, [r0], r1
    154 vmov q4, q3                     ;keep original src data in q4 q [all...]
shortidct4x4llm_neon.asm |
    48  vqdmulh.s16 q4, q2, d0[0]
    54  vshr.s16 q4, q4, #1
    57  vqadd.s16 q4, q4, q2
    80  vqdmulh.s16 q4, q2, d0[0]
    86  vshr.s16 q4, q4, #1
    89  vqadd.s16 q4, q4, q [all...]
vp8_subpixelvariance16x16s_neon.asm |
    57  vext.8 q5, q4, q5, #1
    63  vrhadd.u8 q2, q4, q5
    66  vsubl.u8 q4, d0, d22            ;diff
    75  vpadal.s16 q8, q4               ;sum
    147 vld1.u8 {q4}, [r0], r1
    155 vrhadd.u8 q2, q2, q4
    156 vrhadd.u8 q4, q4, q6
    255 vext.8 q5, q4, q5, #1
    260 vrhadd.u8 q2, q4, q [all...]
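The idct files above all lean on vqdmulh.s16, paired with vshr.s16 #1 and vqadd.s16: a saturating doubling multiply that keeps the high halfword of the product. A scalar model of one lane (a sketch):

    #include <stdint.h>

    /* vqdmulh.s16: ((a * b) * 2) >> 16, saturated. The only overflow
       is a == b == INT16_MIN, where the doubled product exceeds the
       32-bit range and the lane clamps to INT16_MAX. */
    static int16_t qdmulh_s16(int16_t a, int16_t b)
    {
        int32_t p = (int32_t)a * (int32_t)b;
        if (p == 0x40000000)            /* -32768 * -32768 */
            return INT16_MAX;
        return (int16_t)((2 * p) >> 16);
    }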
/external/valgrind/main/none/tests/arm/ |
neon128.c |
    439 TESTINSN_bin("vand q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57);
    445 TESTINSN_bin("vbic q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57);
    452 TESTINSN_bin("vorr q4, q4, q4", q4, q4, i16, 0xff, q4, i16, 0xff) [all...]
neon128.stdout.exp |
    120 vand q4, q6, q5 :: Qd 0x00570057 0x00570057 0x00570057 0x00570057 Qm (i8)0x000000ff Qn (i16)0x00000057
    125 vbic q4, q6, q5 :: Qd 0xffa8ffa8 0xffa8ffa8 0xffa8ffa8 0xffa8ffa8 Qm (i8)0x000000ff Qn (i16)0x00000057
    131 vorr q4, q4, q4 :: Qd 0x00ff00ff 0x00ff00ff 0x00ff00ff 0x00ff00ff Qm (i16)0x000000ff Qn (i16)0x000000ff
    136 vorn q4, q4, q4 :: Qd 0xffffffff 0xffffffff 0xffffffff 0xffffffff Qm (i16)0x000000ff Qn (i16)0x000000ff
    140 veor q4, q6, q5 :: Qd 0xffa8ffa8 0xffa8ffa8 0xffa8ffa8 0xffa8ffa8 Qm (i8)0x000000ff Qn (i16)0x00000057
    145 veor q4, q4, q4 :: Qd 0x00000000 0x00000000 0x00000000 0x00000000 Qm (i16)0x000000ff Qn (i16)0x000000f [all...]
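The expected values follow directly from the bitwise definitions once the immediates are splatted: (i8)0xff fills every byte of a word (0xffffffff) and (i16)0x57 every halfword (0x00570057). A tiny self-check (sketch):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t qm = 0xffffffffu;  /* (i8)0xff splatted  */
        uint32_t qn = 0x00570057u;  /* (i16)0x57 splatted */

        assert((qm &  qn) == 0x00570057u);  /* vand            */
        assert((qm & ~qn) == 0xffa8ffa8u);  /* vbic            */
        assert((qm ^  qn) == 0xffa8ffa8u);  /* veor            */
        assert((qn ^  qn) == 0x00000000u);  /* veor q4, q4, q4 */
        return 0;
    }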
/frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/ |
armVCM4P10_InterpolateLuma_HalfHor4x4_unsafe_s.S |
    24  VADDL.U8 q4,d14,d15
    36  VADDL.U8 q4,d16,d17
    48  VADDL.U8 q4,d18,d19
    60  VADDL.U8 q4,d20,d21
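VADDL.U8 is a widening add: two vectors of bytes are summed into eight 16-bit lanes, giving the 6-tap luma interpolation filter headroom before its final narrowing. Sketch:

    #include <arm_neon.h>

    /* VADDL.U8 q4, d14, d15: u8 + u8 -> u16, no wraparound. */
    uint16x8_t widening_add(uint8x8_t a, uint8x8_t b)
    {
        return vaddl_u8(a, b);
    }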
/external/libvpx/libvpx/vp9/common/arm/neon/ |
vp9_short_idct32x32_add_neon.asm |
    171 ; q4-q7 contain the results (out[j * 32 + 0-31])
    183 vrshr.s16 q4, q4, #6
    188 vaddw.u8 q4, q4, d4
    193 vqmovun.s16 d4, q4
    205 ; q4-q7 contain the results (out[j * 32 + 0-31])
    217 vrshr.s16 q4, q4, #6
    222 vaddw.u8 q4, q4, d [all...]
vp9_short_idct16x16_add_neon.asm |
    206 vsub.s16 q13, q4, q5            ; step2[5] = step1[4] - step1[5];
    207 vadd.s16 q4, q4, q5             ; step2[4] = step1[4] + step1[5];
    251 vadd.s16 q11, q3, q4            ; step2[3] = step1[3] + step1[4];
    252 vsub.s16 q12, q3, q4            ; step2[4] = step1[3] - step1[4];
    340 vmull.s16 q4, d17, d13
    348 vmlal.s16 q4, d31, d12
    359 vqrshrn.s32 d15, q4, #14        ; >> 14
    375 vmull.s16 q4, d24, d31
    383 vmlal.s16 q4, d22, d3 [all...]
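Both idct files finish with the same reconstruction idiom: vrshr.s16 #6 applies the final rounding shift, vaddw.u8 adds the widened prediction bytes in one step, and vqmovun.s16 narrows back to pixels with unsigned saturation. An equivalent intrinsics sketch (vmovl_u8 plus vaddq_s16 stand in for the fused vaddw.u8):

    #include <arm_neon.h>

    uint8x8_t reconstruct(int16x8_t row, uint8x8_t pred)
    {
        int16x8_t v   = vrshrq_n_s16(row, 6);                       /* vrshr.s16 #6 */
        int16x8_t sum = vaddq_s16(v,
                            vreinterpretq_s16_u16(vmovl_u8(pred))); /* vaddw.u8     */
        return vqmovun_s16(sum);                                    /* vqmovun.s16  */
    }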
/external/libvpx/libvpx/vp8/encoder/arm/neon/ |
fastquantizeb_neon.asm |
    27  vstmdb sp!, {q4-q7}
    37  vabs.s16 q4, q0                 ; calculate x = abs(z)
    49  vadd.s16 q4, q6                 ; x + Round
    54  vqdmulh.s16 q4, q8              ; y = ((Round+abs(z)) * Quant) >> 16
    63  veor.s16 q4, q2                 ; y^sz
    74  vshr.s16 q4, #1                 ; right shift 1 after vqdmulh
    79  vsub.s16 q4, q2                 ; x1=(y^sz)-sz = (y^sz)-(-1) (2's complement)
    90  vst1.s16 {q4, q5}, [r7]         ; store: qcoeff = x1
    98  vmul.s16 q2, q6, q4             ; x * Dequant
    112 vtst.16 q14, q4, q8             ; now find eo [all...]
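The comments in this excerpt spell out VP8's fast quantizer: work on |z|, add the rounding constant, take the high half of the multiply by the quantizer (vqdmulh plus the extra right shift), then restore the sign with the xor/subtract trick. A scalar model of one coefficient (a sketch; the names are illustrative):

    #include <stdint.h>
    #include <stdlib.h>

    /* sz is the sign mask of z (0 or -1), so (y ^ sz) - sz negates y
       exactly when z was negative. */
    static void quantize_one(int16_t z, int16_t round, int16_t quant,
                             int16_t dq, int16_t *qcoeff, int16_t *dqcoeff)
    {
        int16_t sz = (int16_t)(z >> 15);                  /* 0 or -1     */
        int16_t x  = (int16_t)abs(z);                     /* x = abs(z)  */
        int16_t y  = (int16_t)(((x + round) * quant) >> 16);
        int16_t x1 = (int16_t)((y ^ sz) - sz);            /* sign of z   */
        *qcoeff  = x1;
        *dqcoeff = (int16_t)(x1 * dq);                    /* x * Dequant */
    }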
/external/libvpx/libvpx/vp9/common/ |
vp9_loopfilter_filters.c |
    57  uint8_t q3, uint8_t q4) {
    60  mask |= (abs(q4 - q0) > thresh) * -1;
    223 q4 = *oq4, q5 = *oq5, q6 = *oq6, q7 = *oq7;
    235 q0 + q1 + q2 + q3 + q4, 4);
    237 q0 + q1 + q2 + q3 + q4 + q5, 4);
    239 q0 + q1 + q2 + q3 + q4 + q5 + q6, 4);
    241 q0 * 2 + q1 + q2 + q3 + q4 + q5 + q6 + q7, 4);
    243 q0 + q1 * 2 + q2 + q3 + q4 + q5 + q6 + q7 * 2, 4);
    245 q0 + q1 + q2 * 2 + q3 + q4 + q5 + q6 + q7 * 3, 4);
    247 q0 + q1 + q2 + q3 * 2 + q4 + q5 + q6 + q7 * 4, 4) [all...]
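Two details here are easy to miss: multiplying a comparison by -1 turns its 0/1 result into an all-zeros/all-ones byte mask the filter can AND against, and the lines around 235 are the wide flat filter, a sliding weighted average over the p7..q7 neighborhood rounded by the final ", 4)" (a divide by 16 with rounding). The mask idiom in isolation (sketch):

    #include <stdint.h>
    #include <stdlib.h>

    /* (cond) * -1 maps {0, 1} to {0x00, 0xff}: a branch-free lane mask,
       mirroring mask |= (abs(q4 - q0) > thresh) * -1. */
    static int8_t flat_mask(uint8_t q0, uint8_t q4, uint8_t thresh)
    {
        return (int8_t)((abs(q4 - q0) > thresh) * -1);
    }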
/hardware/samsung_slsi/exynos5/libswconverter/ |
csc_ARGB8888_to_YUV420SP_NEON.s |
    38  @q4: B
    78  vand.u16 q4,#0x00FF             @R
    84  vmls.u16 q8,q4,q11              @q0:U -(38 * R[k]) @128<<6+ 32 + u>>2
    89  vmla.u16 q7,q4,q13              @112 * R[k]
    106 vmul.u16 q7,q4,q14              @q0 = 66 *R[k]
    120 vshr.u16 q4,q4,#8               @R
    124 vmul.u16 q0,q4,q14              @q0 = 66 *R[k]
    151 vand.u16 q4,#0x00FF             @R
    157 vmul.u16 q7,q4,q14              @q0 = 66 *R[k [all...]
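The coefficients in the comments (66*R for Y, 112*R for V, -38*R for U) identify this as the usual 8-bit fixed-point BT.601 RGB-to-YUV conversion. A scalar model for one pixel (a sketch; the G and B coefficients are the standard companions of the ones visible in the excerpt, not taken from this file):

    #include <stdint.h>

    static void argb_to_yuv(uint8_t r, uint8_t g, uint8_t b,
                            uint8_t *y, uint8_t *u, uint8_t *v)
    {
        *y = (uint8_t)((( 66 * r + 129 * g +  25 * b + 128) >> 8) +  16);
        *u = (uint8_t)(((-38 * r -  74 * g + 112 * b + 128) >> 8) + 128);
        *v = (uint8_t)(((112 * r -  94 * g -  18 * b + 128) >> 8) + 128);
    }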
/external/libvpx/libvpx/vp9/common/mips/dspr2/ |
vp9_loopfilter_masks_dspr2.h |
    282 uint32_t q3, uint32_t q4,
    297 /* flat |= (abs(q4 - q0) > thresh) */
    298 "subu_s.qb %[c], %[q4], %[q0] \n\t"
    299 "subu_s.qb %[r_k], %[q0], %[q4] \n\t"
    358 [q2] "r" (q2), [q3] "r" (q3), [q4] "r" (q4),
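The paired subu_s.qb instructions compute abs(q4 - q0) without a branch: a saturating unsigned subtract clamps at zero, so OR-ing the two subtraction directions leaves exactly the absolute difference. Per-byte scalar model (sketch):

    #include <stdint.h>

    /* |a - b| from two zero-clamping subtracts, as subu_s.qb does
       per byte lane. */
    static uint8_t absdiff_u8(uint8_t a, uint8_t b)
    {
        uint8_t d1 = (uint8_t)(a > b ? a - b : 0);  /* subu_s.qb a, b */
        uint8_t d2 = (uint8_t)(b > a ? b - a : 0);  /* subu_s.qb b, a */
        return (uint8_t)(d1 | d2);
    }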