/frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/
  omxVCM4P10_DequantTransformResidualFromPairAndAdd_s.s
    136  dIn0      DN  D0.S16
    137  dIn1      DN  D1.S16
    138  dIn2      DN  D2.S16
    139  dIn3      DN  D3.S16
    144  dZero     DN  D4.S16
    145  de0       DN  D5.S16
    146  de1       DN  D6.S16
    147  de2       DN  D7.S16
    148  de3       DN  D8.S16
    149  dIn1RS    DN  D7.S16
    [all...]
  armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe_s.s
    31   dCoeff5   DN  30.S16
    32   dCoeff20  DN  31.S16
    46   qSrcb     QN  2.S16
    47   qSrcc     QN  1.S16
    48   dSrcB     DN  4.S16
    49   dSrcC     DN  2.S16
    51   qRes0     QN  5.S16
    52   qRes1     QN  6.S16
    53   qRes2     QN  7.S16
    54   qRes3     QN  8.S16
    [all...]
  armVCM4P10_InterpolateLuma_HalfHor4x4_unsafe_s.s
    33   dCoeff5   DN  30.S16
    34   dCoeff20  DN  31.S16
    64   qTemp01   QN  4.S16
    65   qTemp23   QN  6.S16
    66   dTemp0    DN  8.S16
    67   dTemp2    DN  12.S16
    69   qRes01    QN  11.S16
    70   qRes23    QN  12.S16
    71   qRes45    QN  13.S16
    72   qRes67    QN  14.S16
    [all...]
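The dCoeff5 / dCoeff20 aliases in the two interpolation files hold the 5 and 20 constants of the H.264 six-tap (1, -5, 20, 20, -5, 1) luma half-pel filter. A rough scalar reference for what those NEON constants implement (function and parameter names are illustrative, not taken from the OpenMAX DL sources):

    #include <stdint.h>

    /* Illustrative scalar version of the H.264 six-tap luma half-pel filter that
     * the dCoeff5/dCoeff20 constants vectorize.  Only the arithmetic
     * (1, -5, 20, 20, -5, 1 taps, +16, >>5, clip to 8 bits) follows the standard;
     * the names and calling convention are made up for this sketch. */
    static uint8_t clip_u8(int v) { return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v); }

    static uint8_t halfpel_h(const uint8_t *p)  /* p points at the third of six pixels */
    {
        int acc = p[-2] - 5 * p[-1] + 20 * p[0] + 20 * p[1] - 5 * p[2] + p[3];
        return clip_u8((acc + 16) >> 5);
    }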
/external/libhevc/common/arm/
  ihevc_itrans_recon_4x4_ttype1.s
    143  vmull.s16 q3,d1,d4[2]    @74 * pi2_src[1]
    144  vmlal.s16 q3,d0,d4[0]    @74 * pi2_src[1] + 29 * pi2_src[0]
    145  vmlal.s16 q3,d3,d4[1]    @74 * pi2_src[1] + 29 * pi2_src[0] + 55 * pi2_src[3]
    146  vmlal.s16 q3,d2,d4[3]    @pi2_out[0] = 29* pi2_src[0] + 74 * pi2_src[1] + 84* pi2_src[2] + 55 * pi2_src[3]
    148  vmull.s16 q4,d1,d4[2]    @74 * pi2_src[1]
    149  vmlal.s16 q4,d0,d4[1]    @74 * pi2_src[1] + 55 * pi2_src[0]
    150  vmlsl.s16 q4,d2,d4[0]    @74 * pi2_src[1] + 55 * pi2_src[0] - 29 * pi2_src[2]
    151  vmlsl.s16 q4,d3,d4[3]    @pi2_out[1] = 74 * pi2_src[1] + 55 * pi2_src[0] - 29 * pi2_src[2] - 84 * pi2_src[3])
    153  vmull.s16 q5,d0,d4[2]    @ 74 * pi2_src[0]
    154  vmlsl.s16 q5,d2,d4[2]    @ 74 * pi2_src[0] - 74 * pi2_src[2
    [all...]
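The comments spell out the 4-point inverse DST-VII used for 4x4 intra luma in HEVC, built from the coefficient set {29, 55, 74, 84}. A scalar sketch of one transform pass follows; the fourth output row and the per-pass rounding shifts (7 and 12 at 8-bit depth) are filled in from memory of the HEVC transform rather than from the quoted lines:

    #include <stdint.h>

    /* One pass of the 4-point inverse DST-VII that the vmull/vmlal/vmlsl chain
     * computes, written out in scalar C.  "shift" is the per-pass rounding
     * shift (7 for the first pass, 12 for the second at 8-bit depth, as far as
     * I recall the HEVC spec). */
    static int16_t clip16(int v)
    {
        return (int16_t)(v < -32768 ? -32768 : v > 32767 ? 32767 : v);
    }

    static void idst4_pass(const int16_t src[4], int16_t dst[4], int shift)
    {
        int add = 1 << (shift - 1);
        int s0 = src[0], s1 = src[1], s2 = src[2], s3 = src[3];

        dst[0] = clip16((29 * s0 + 74 * s1 + 84 * s2 + 55 * s3 + add) >> shift);
        dst[1] = clip16((55 * s0 + 74 * s1 - 29 * s2 - 84 * s3 + add) >> shift);
        dst[2] = clip16((74 * s0           - 74 * s2 + 74 * s3 + add) >> shift);
        dst[3] = clip16((84 * s0 - 74 * s1 + 55 * s2 - 29 * s3 + add) >> shift);
    }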
/external/llvm/test/MC/ARM/
  neon-abs-encoding.s
    5   @ CHECK: vabs.s16 d16, d16   @ encoding: [0x20,0x03,0xf5,0xf3]
    6   vabs.s16 d16, d16
    13  @ CHECK: vabs.s16 q8, q8     @ encoding: [0x60,0x03,0xf5,0xf3]
    14  vabs.s16 q8, q8
    22  @ CHECK: vqabs.s16 d16, d16  @ encoding: [0x20,0x07,0xf4,0xf3]
    23  vqabs.s16 d16, d16
    28  @ CHECK: vqabs.s16 q8, q8    @ encoding: [0x60,0x07,0xf4,0xf3]
    29  vqabs.s16 q8, q8
  neon-neg-encoding.s
    5   @ CHECK: vneg.s16 d16, d16   @ encoding: [0xa0,0x03,0xf5,0xf3]
    6   vneg.s16 d16, d16
    13  @ CHECK: vneg.s16 q8, q8     @ encoding: [0xe0,0x03,0xf5,0xf3]
    14  vneg.s16 q8, q8
    21  @ CHECK: vqneg.s16 d16, d16  @ encoding: [0xa0,0x07,0xf4,0xf3]
    22  vqneg.s16 d16, d16
    27  @ CHECK: vqneg.s16 q8, q8    @ encoding: [0xe0,0x07,0xf4,0xf3]
    28  vqneg.s16 q8, q8
  neont2-abs-encoding.s
    7   @ CHECK: vabs.s16 d16, d16   @ encoding: [0xf5,0xff,0x20,0x03]
    8   vabs.s16 d16, d16
    15  @ CHECK: vabs.s16 q8, q8     @ encoding: [0xf5,0xff,0x60,0x03]
    16  vabs.s16 q8, q8
    24  @ CHECK: vqabs.s16 d16, d16  @ encoding: [0xf4,0xff,0x20,0x07]
    25  vqabs.s16 d16, d16
    30  @ CHECK: vqabs.s16 q8, q8    @ encoding: [0xf4,0xff,0x60,0x07]
    31  vqabs.s16 q8, q8
  neont2-neg-encoding.s
    7   @ CHECK: vneg.s16 d16, d16   @ encoding: [0xf5,0xff,0xa0,0x03]
    8   vneg.s16 d16, d16
    15  @ CHECK: vneg.s16 q8, q8     @ encoding: [0xf5,0xff,0xe0,0x03]
    16  vneg.s16 q8, q8
    23  @ CHECK: vqneg.s16 d16, d16  @ encoding: [0xf4,0xff,0xa0,0x07]
    24  vqneg.s16 d16, d16
    29  @ CHECK: vqneg.s16 q8, q8    @ encoding: [0xf4,0xff,0xe0,0x07]
    30  vqneg.s16 q8, q8
  neont2-mul-encoding.s
    30  vqdmulh.s16 d16, d16, d17
    32  vqdmulh.s16 q8, q8, q9
    34  vqdmulh.s16 d11, d2, d3[0]
    36  @ CHECK: vqdmulh.s16 d16, d16, d17   @ encoding: [0x50,0xef,0xa1,0x0b]
    38  @ CHECK: vqdmulh.s16 q8, q8, q9      @ encoding: [0x50,0xef,0xe2,0x0b]
    40  @ CHECK: vqdmulh.s16 d11, d2, d3[0]  @ encoding: [0x92,0xef,0x43,0xbc]
    43  vqrdmulh.s16 d16, d16, d17
    45  vqrdmulh.s16 q8, q8, q9
    48  @ CHECK: vqrdmulh.s16 d16, d16, d17  @ encoding: [0x50,0xff,0xa1,0x0b]
    50  @ CHECK: vqrdmulh.s16 q8, q8, q9     @ encoding: [0x50,0xff,0xe2,0x0b
    [all...]
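The last of these encoding tests exercises VQDMULH / VQRDMULH. Per 16-bit lane they compute a saturating doubling multiply and return the high half, with the "R" variant rounding before the shift; a scalar model of that arithmetic (the NEON semantics as I understand them, not code from the test file):

    #include <stdint.h>

    /* Per-lane arithmetic of VQDMULH.S16 / VQRDMULH.S16.  Both form 2*a*b and
     * return the high 16 bits; VQRDMULH adds a rounding constant first.  The
     * only saturating case for 16-bit lanes is a == b == -32768.  The product
     * is kept in 64 bits so the doubling never overflows. */
    static int16_t sat16(int64_t v)
    {
        return (int16_t)(v < -32768 ? -32768 : v > 32767 ? 32767 : v);
    }

    static int16_t vqdmulh_lane(int16_t a, int16_t b)
    {
        int64_t p = 2 * (int64_t)a * (int64_t)b;        /* doubling product */
        return sat16(p >> 16);                          /* high half, saturated */
    }

    static int16_t vqrdmulh_lane(int16_t a, int16_t b)
    {
        int64_t p = 2 * (int64_t)a * (int64_t)b + 0x8000;  /* rounding constant */
        return sat16(p >> 16);
    }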
/external/chromium_org/third_party/libvpx/source/libvpx/vp8/encoder/arm/neon/
  vp8_shortwalsh4x4_neon.asm
    40  vadd.s16 d4, d0, d2   ; ip[0] + ip[2]
    41  vadd.s16 d5, d1, d3   ; ip[1] + ip[3]
    42  vsub.s16 d6, d1, d3   ; ip[1] - ip[3]
    43  vsub.s16 d7, d0, d2   ; ip[0] - ip[2]
    45  vshl.s16 d4, d4, #2   ; a1 = (ip[0] + ip[2]) << 2
    46  vshl.s16 d5, d5, #2   ; d1 = (ip[1] + ip[3]) << 2
    47  vshl.s16 d6, d6, #2   ; c1 = (ip[1] - ip[3]) << 2
    48  vceq.s16 d16, d4, #0  ; a1 == 0
    49  vshl.s16 d7, d7, #2   ; b1 = (ip[0] - ip[2]) << 2
    51  vadd.s16 d0, d4, d5   ; a1 + d
    [all...]
/external/libvpx/libvpx/vp8/encoder/arm/neon/
  vp8_shortwalsh4x4_neon.asm
    40  vadd.s16 d4, d0, d2   ; ip[0] + ip[2]
    41  vadd.s16 d5, d1, d3   ; ip[1] + ip[3]
    42  vsub.s16 d6, d1, d3   ; ip[1] - ip[3]
    43  vsub.s16 d7, d0, d2   ; ip[0] - ip[2]
    45  vshl.s16 d4, d4, #2   ; a1 = (ip[0] + ip[2]) << 2
    46  vshl.s16 d5, d5, #2   ; d1 = (ip[1] + ip[3]) << 2
    47  vshl.s16 d6, d6, #2   ; c1 = (ip[1] - ip[3]) << 2
    48  vceq.s16 d16, d4, #0  ; a1 == 0
    49  vshl.s16 d7, d7, #2   ; b1 = (ip[0] - ip[2]) << 2
    51  vadd.s16 d0, d4, d5   ; a1 + d
    [all...]
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/arm/neon/
  vp8_shortwalsh4x4_neon.asm
    40  vadd.s16 d4, d0, d2   ; ip[0] + ip[2]
    41  vadd.s16 d5, d1, d3   ; ip[1] + ip[3]
    42  vsub.s16 d6, d1, d3   ; ip[1] - ip[3]
    43  vsub.s16 d7, d0, d2   ; ip[0] - ip[2]
    45  vshl.s16 d4, d4, #2   ; a1 = (ip[0] + ip[2]) << 2
    46  vshl.s16 d5, d5, #2   ; d1 = (ip[1] + ip[3]) << 2
    47  vshl.s16 d6, d6, #2   ; c1 = (ip[1] - ip[3]) << 2
    48  vceq.s16 d16, d4, #0  ; a1 == 0
    49  vshl.s16 d7, d7, #2   ; b1 = (ip[0] - ip[2]) << 2
    51  vadd.s16 d0, d4, d5   ; a1 + d
    [all...]
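The comments trace the first (row) pass of VP8's 4x4 Walsh-Hadamard transform over the DC coefficients. A scalar sketch of that pass, as far as I recall the libvpx reference C (the vceq ... #0 result feeds the "+ (a1 != 0)" correction on op[0]; the second, column pass with its final rounding is omitted):

    #include <stdint.h>

    /* First pass of VP8's 4x4 Walsh-Hadamard transform, following the comments
     * in the snippet above.  "pitch" is in bytes, hence the /2, mirroring the
     * libvpx convention as I recall it. */
    static void walsh4x4_pass1(const int16_t *ip, int16_t *op, int pitch)
    {
        for (int i = 0; i < 4; i++) {
            int a1 = (ip[0] + ip[2]) << 2;
            int d1 = (ip[1] + ip[3]) << 2;
            int c1 = (ip[1] - ip[3]) << 2;
            int b1 = (ip[0] - ip[2]) << 2;

            op[0] = (int16_t)(a1 + d1 + (a1 != 0));  /* what the vceq a1 == 0 feeds */
            op[1] = (int16_t)(b1 + c1);
            op[2] = (int16_t)(b1 - c1);
            op[3] = (int16_t)(a1 - d1);

            ip += pitch / 2;
            op += 4;
        }
    }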
/frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/
  omxVCM4P10_DequantTransformResidualFromPairAndAdd_s.S
    64   VHADD.S16 d7,d1,d4
    65   VHADD.S16 d8,d3,d4
    77   VHADD.S16 d7,d1,d4
    78   VHADD.S16 d8,d3,d4
    85   VRSHR.S16 d0,d0,#6
    86   VRSHR.S16 d1,d1,#6
    87   VRSHR.S16 d2,d2,#6
    88   VRSHR.S16 d3,d3,#6
    109  VQMOVUN.S16 d0,q3
    110  VQMOVUN.S16 d1,q
    [all...]
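The VRSHR.S16 #6 / VQMOVUN.S16 pair is the usual H.264 reconstruction tail: the inverse-transformed residual is rounded by 2^6, added to the prediction, and saturated to an unsigned byte. A scalar sketch of that tail (function and parameter names are illustrative):

    #include <stdint.h>

    /* Scalar equivalent of the VRSHR.S16 #6 + add + VQMOVUN.S16 sequence:
     * residuals leave the inverse transform scaled by 64, are rounded back to
     * pixel range, added to the prediction already in dst, and clipped to
     * 0..255. */
    static void add_residual_4x4(uint8_t *dst, int dst_stride,
                                 const int16_t *res /* 4x4, row-major */)
    {
        for (int y = 0; y < 4; y++) {
            for (int x = 0; x < 4; x++) {
                int v = dst[y * dst_stride + x] + ((res[4 * y + x] + 32) >> 6);
                dst[y * dst_stride + x] = (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
            }
        }
    }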
/external/chromium_org/third_party/libvpx/source/libvpx/vp9/common/arm/neon/
  vp9_idct32x32_add_neon.asm
    65   vld1.s16 {q14}, [r0]
    67   vld1.s16 {q13}, [r0]
    81   vld1.s16 {$reg1}, [r1]
    83   vld1.s16 {$reg2}, [r1]
    107  vld1.s16 {d8}, [r10], r2
    108  vld1.s16 {d11}, [r9], r11
    109  vld1.s16 {d9}, [r10]
    110  vld1.s16 {d10}, [r9]
    112  vrshr.s16 q7, q7, #6
    113  vrshr.s16 q8, q8, #
    [all...]
/external/libvpx/libvpx/vp9/common/arm/neon/
  vp9_idct32x32_add_neon.asm
    65   vld1.s16 {q14}, [r0]
    67   vld1.s16 {q13}, [r0]
    81   vld1.s16 {$reg1}, [r1]
    83   vld1.s16 {$reg2}, [r1]
    107  vld1.s16 {d8}, [r10], r2
    108  vld1.s16 {d11}, [r9], r11
    109  vld1.s16 {d9}, [r10]
    110  vld1.s16 {d10}, [r9]
    112  vrshr.s16 q7, q7, #6
    113  vrshr.s16 q8, q8, #
    [all...]
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/arm/neon/
  vp9_idct32x32_add_neon.asm
    65   vld1.s16 {q14}, [r0]
    67   vld1.s16 {q13}, [r0]
    81   vld1.s16 {$reg1}, [r1]
    83   vld1.s16 {$reg2}, [r1]
    107  vld1.s16 {d8}, [r10], r2
    108  vld1.s16 {d11}, [r9], r11
    109  vld1.s16 {d9}, [r10]
    110  vld1.s16 {d10}, [r9]
    112  vrshr.s16 q7, q7, #6
    113  vrshr.s16 q8, q8, #
    [all...]
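The vld1.s16 / vrshr.s16 #6 lines belong to the output stage of the 32x32 IDCT, where reconstructed residuals are rounded by 2^6 and added back to the destination pixels. The same step written with NEON intrinsics, as an orientation aid (pointer names are illustrative, not the registers used in the .asm file):

    #include <arm_neon.h>
    #include <stdint.h>

    /* Intrinsics sketch of the output stage: load 8 residual values, apply the
     * rounding shift ((x + 32) >> 6), widen-add the 8 destination pixels,
     * saturate back to bytes, and store. */
    static void round_shift_add_8(uint8_t *dst, const int16_t *residual)
    {
        int16x8_t q = vld1q_s16(residual);
        q = vrshrq_n_s16(q, 6);                                /* vrshr.s16 #6 */
        uint8x8_t d = vld1_u8(dst);
        q = vaddq_s16(q, vreinterpretq_s16_u16(vmovl_u8(d)));  /* add prediction */
        vst1_u8(dst, vqmovun_s16(q));                          /* clip to 0..255 */
    }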
/external/chromium_org/third_party/libvpx/source/libvpx/vp8/common/arm/neon/
  buildintrapredictorsmby_neon.asm
    251  vsub.s16 q4, q4, q7
    252  vsub.s16 q5, q5, q7
    268  vqadd.s16 q8, q0, q4
    269  vqadd.s16 q9, q0, q5
    271  vqadd.s16 q10, q1, q4
    272  vqadd.s16 q11, q1, q5
    274  vqadd.s16 q12, q2, q4
    275  vqadd.s16 q13, q2, q5
    277  vqadd.s16 q14, q3, q4
    278  vqadd.s16 q15, q3, q
    [all...]
/external/libvpx/libvpx/vp8/common/arm/neon/
  buildintrapredictorsmby_neon.asm
    247  vsub.s16 q4, q4, q7
    248  vsub.s16 q5, q5, q7
    264  vqadd.s16 q8, q0, q4
    265  vqadd.s16 q9, q0, q5
    267  vqadd.s16 q10, q1, q4
    268  vqadd.s16 q11, q1, q5
    270  vqadd.s16 q12, q2, q4
    271  vqadd.s16 q13, q2, q5
    273  vqadd.s16 q14, q3, q4
    274  vqadd.s16 q15, q3, q
    [all...]
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/arm/neon/
  buildintrapredictorsmby_neon.asm
    247  vsub.s16 q4, q4, q7
    248  vsub.s16 q5, q5, q7
    264  vqadd.s16 q8, q0, q4
    265  vqadd.s16 q9, q0, q5
    267  vqadd.s16 q10, q1, q4
    268  vqadd.s16 q11, q1, q5
    270  vqadd.s16 q12, q2, q4
    271  vqadd.s16 q13, q2, q5
    273  vqadd.s16 q14, q3, q4
    274  vqadd.s16 q15, q3, q
    [all...]
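The vsub/vqadd pattern here looks like VP8's TM ("TrueMotion") 16x16 intra predictor: each output pixel is above[c] + left[r] - top_left, clamped to pixel range. A scalar sketch with illustrative names (the per-register layout in the .asm is not reproduced):

    #include <stdint.h>

    /* Scalar picture of the TM_PRED pattern the vsub.s16/vqadd.s16 lines
     * vectorize: pred[r][c] = clip(left[r] + above[c] - top_left). */
    static void tm_predict_16x16(uint8_t *dst, int stride,
                                 const uint8_t above[16],
                                 const uint8_t left[16],
                                 uint8_t top_left)
    {
        for (int r = 0; r < 16; r++) {
            for (int c = 0; c < 16; c++) {
                int v = left[r] + above[c] - top_left;
                dst[r * stride + c] = (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
            }
        }
    }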
/frameworks/av/services/audioflinger/
  AudioResamplerFirProcessNeon.h
    30   // issues with S16 coefs. Consider this later.
    77   "vmlal.s16 q0, d4, d17 \n"// (1+0d) multiply (reversed)samples by coef
    78   "vmlal.s16 q0, d5, d16 \n"// (1) multiply (reversed)samples by coef
    79   "vmlal.s16 q0, d6, d20 \n"// (1) multiply neg samples
    80   "vmlal.s16 q0, d7, d21 \n"// (1) multiply neg samples
    130  "vmlal.s16 q0, d4, d17 \n"// (1) multiply (reversed) samples left
    131  "vmlal.s16 q0, d5, d16 \n"// (1) multiply (reversed) samples left
    132  "vmlal.s16 q4, d6, d17 \n"// (1) multiply (reversed) samples right
    133  "vmlal.s16 q4, d7, d16 \n"// (1) multiply (reversed) samples right
    134  "vmlal.s16 q0, d10, d20 \n"// (1) multiply samples lef
    [all...]
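Each vmlal.s16 here widens four 16-bit sample-times-coefficient products to 32 bits and adds them to an accumulator, which is how the resampler builds its FIR dot products without intermediate overflow. A scalar sketch of that accumulation (the real code splits the filter into reversed "positive" and "negative" halves per channel; this flattens it into one tap array for clarity, and the names are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    /* Scalar view of a chain of vmlal.s16 instructions: a FIR dot product of
     * int16 samples and int16 coefficients accumulated in 32 bits, so each
     * individual product cannot overflow. */
    static int32_t fir_accumulate(const int16_t *samples, const int16_t *coefs,
                                  size_t taps)
    {
        int32_t acc = 0;
        for (size_t i = 0; i < taps; i++)
            acc += (int32_t)samples[i] * (int32_t)coefs[i];  /* one vmlal lane */
        return acc;
    }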
/external/libhevc/decoder/arm/
  ihevcd_itrans_recon_dc_chroma.s
    80   vdup.s16 q0,r6
    115  vqmovun.s16 d2,q15
    116  vqmovun.s16 d4,q14
    117  vqmovun.s16 d6,q13
    118  vqmovun.s16 d8,q12
    122  vqmovun.s16 d10,q11
    123  vqmovun.s16 d12,q10
    124  vqmovun.s16 d14,q9
    125  vqmovun.s16 d16,q15
    174  vqmovun.s16 d2,q1
    [all...]
  ihevcd_itrans_recon_dc_luma.s
    81   vdup.s16 q0,r6
    116  vqmovun.s16 d2,q15
    117  vqmovun.s16 d3,q14
    118  vqmovun.s16 d4,q13
    119  vqmovun.s16 d5,q12
    120  vqmovun.s16 d6,q11
    121  vqmovun.s16 d7,q10
    122  vqmovun.s16 d8,q9
    123  vqmovun.s16 d9,q8
    172  vqmovun.s16 d2,q1
    [all...]
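These DC-only reconstruction paths broadcast a single residual value (vdup.s16 q0, r6), add it to every predicted pixel, and saturate the sums back to bytes (vqmovun.s16). A scalar sketch with illustrative names; how dc_value itself is derived from the quantized DC coefficient is not shown here:

    #include <stdint.h>

    /* Scalar picture of the DC-only path: one residual value added to the
     * whole prediction block, clipped to 0..255. */
    static void recon_dc_block(uint8_t *dst, int stride, int size, int dc_value)
    {
        for (int y = 0; y < size; y++) {
            for (int x = 0; x < size; x++) {
                int v = dst[y * stride + x] + dc_value;
                dst[y * stride + x] = (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
            }
        }
    }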
/frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/
  Syn_filt_32_neon.s
    52  VLD1.S16 {D0, D1, D2, D3}, [r0]!     @a[1] ~ a[16]
    56  VLD1.S16 {D4, D5, D6, D7}, [r10]!    @ sig_hi[-16] ~ sig_hi[-1]
    59  VLD1.S16 {D8, D9, D10, D11}, [r11]!  @ sig_lo[-16] ~ sig_lo[-1]
    68  VMULL.S16 Q10, D8, D3
    70  VMLAL.S16 Q10, D9, D2
    71  VMLAL.S16 Q10, D10, D1
    72  VMLAL.S16 Q10, D11, D0
    82  VMULL.S16 Q11, D4, D3
    83  VMLAL.S16 Q11, D5, D2
    87  VMLAL.S16 Q11, D6, D
    [all...]
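Syn_filt_32 keeps the 32-bit synthesis-filter state split into 16-bit halves (sig_hi / sig_lo), and each VMULL.S16 / VMLAL.S16 chain forms a 32-bit dot product of the 16 LPC coefficients with one of those halves. A scalar sketch of what one such chain computes; the AMR-WB-specific shifts that recombine the two accumulators into the new sig_hi/sig_lo pair are deliberately left out, since I have not reproduced them here:

    #include <stdint.h>

    /* One VMULL.S16 / VMLAL.S16 chain: a 32-bit dot product of the LPC
     * coefficients a[1..16] with one half (sig_hi or sig_lo) of the past
     * synthesis samples.  Ordering of the past samples and the final
     * recombination are simplified away in this sketch. */
    static int32_t syn_half_dot(const int16_t a[16], const int16_t past[16])
    {
        int32_t acc = 0;
        for (int j = 0; j < 16; j++)
            acc += (int32_t)a[j] * (int32_t)past[j];
        return acc;
    }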
/external/chromium_org/third_party/libyuv/source/
  compare_neon.cc
    42  "vmlal.s16 q8, d4, d4   \n"
    43  "vmlal.s16 q9, d6, d6   \n"
    44  "vmlal.s16 q10, d5, d5  \n"
    45  "vmlal.s16 q11, d7, d7  \n"
/external/libyuv/files/source/
  compare_neon.cc
    35  "vmlal.s16 q8, d4, d4   \n"
    36  "vmlal.s16 q9, d6, d6   \n"
    37  "vmlal.s16 q10, d5, d5  \n"
    38  "vmlal.s16 q11, d7, d7  \n"
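In both copies of libyuv's compare_neon.cc, vmlal.s16 multiplies a register of signed 16-bit pixel differences by itself, accumulating the squares into 32-bit lanes; the four quoted lines cover sixteen pixels across four accumulators (q8..q11). A scalar sketch of the sum-of-squared-errors loop this implements, with illustrative names:

    #include <stddef.h>
    #include <stdint.h>

    /* Scalar view of the "vmlal.s16 q8, d4, d4" pattern: square each pixel
     * difference and accumulate.  The 64-bit total stands in for the final
     * horizontal add of the four NEON accumulators. */
    static uint64_t sum_square_error(const uint8_t *a, const uint8_t *b, size_t n)
    {
        uint64_t sse = 0;
        for (size_t i = 0; i < n; i++) {
            int d = (int)a[i] - (int)b[i];   /* signed 16-bit difference */
            sse += (uint64_t)(d * d);        /* one vmlal.s16 lane */
        }
        return sse;
    }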