/external/libhevc/common/arm/
ihevc_itrans_recon_32x32.s | 211 vmull.s16 q12,d8,d0[1] @// y1 * cos1(part of b0) 212 vmull.s16 q13,d8,d0[3] @// y1 * cos3(part of b1) 213 vmull.s16 q14,d8,d1[1] @// y1 * sin3(part of b2) 214 vmull.s16 q15,d8,d1[3] @// y1 * sin1(part of b3) 216 vmlal.s16 q12,d9,d0[3] @// y1 * cos1 + y3 * cos3(part of b0) 217 vmlal.s16 q13,d9,d2[1] @// y1 * cos3 - y3 * sin1(part of b1) 218 vmlal.s16 q14,d9,d3[3] @// y1 * sin3 - y3 * cos1(part of b2) 219 vmlal.s16 q15,d9,d5[1] @// y1 * sin1 - y3 * sin3(part of b3) 225 vmull.s16 q10,d10,d0[0] 226 vmlal.s16 q10,d11,d0[2 [all...] |
ihevc_itrans_recon_16x16.s | 242 vmull.s16 q12,d6,d0[1] @// y1 * cos1(part of b0) 243 vmull.s16 q13,d6,d0[3] @// y1 * cos3(part of b1) 244 vmull.s16 q14,d6,d1[1] @// y1 * sin3(part of b2) 245 vmull.s16 q15,d6,d1[3] @// y1 * sin1(part of b3) 247 vmlal.s16 q12,d7,d0[3] @// y1 * cos1 + y3 * cos3(part of b0) 248 vmlal.s16 q13,d7,d2[1] @// y1 * cos3 - y3 * sin1(part of b1) 249 vmlal.s16 q14,d7,d3[3] @// y1 * sin3 - y3 * cos1(part of b2) 250 vmlsl.s16 q15,d7,d2[3] @// y1 * sin1 - y3 * sin3(part of b3) 257 vmull.s16 q6,d10,d0[0] 258 vmlal.s16 q6,d11,d0[2 [all...] |
ihevc_itrans_recon_8x8.s | 187 vmull.s16 q10,d2,d0[0] @// y0 * cos4(part of c0 and c1) 189 vmull.s16 q9,d3,d1[2] @// y2 * sin2 (q3 is freed by this time)(part of d1) 192 vmull.s16 q12,d6,d0[1] @// y1 * cos1(part of b0) 194 vmull.s16 q13,d6,d0[3] @// y1 * cos3(part of b1) 196 vmull.s16 q14,d6,d1[1] @// y1 * sin3(part of b2) 198 vmull.s16 q15,d6,d1[3] @// y1 * sin1(part of b3) 200 vmlal.s16 q12,d7,d0[3] @// y1 * cos1 + y3 * cos3(part of b0) 202 vmlsl.s16 q13,d7,d1[3] @// y1 * cos3 - y3 * sin1(part of b1) 204 vmlsl.s16 q14,d7,d0[1] @// y1 * sin3 - y3 * cos1(part of b2) 206 vmlsl.s16 q15,d7,d1[1] @// y1 * sin1 - y3 * sin3(part of b3 [all...] |
ihevc_inter_pred_chroma_vert_w16inp.s | 139 vmull.s16 q0,d0,d12 @vmull_s16(src_tmp1, coeff_0) 142 vmull.s16 q4,d2,d12 @vmull_s16(src_tmp2, coeff_0) 144 vmlal.s16 q0,d2,d13 146 vmlal.s16 q4,d3,d13 149 vmlal.s16 q0,d3,d14 150 vmlal.s16 q4,d6,d14 151 vmlal.s16 q0,d6,d15 152 vmlal.s16 q4,d2,d15 155 vqrshrun.s16 d0,q0,#6 @rounding shift 156 vqrshrun.s16 d30,q15,#6 @rounding shif [all...] |
ihevc_weighted_pred_bi_default.s | 136 vadd.s16 q0,q0,q2 181 vld1.s16 {d6},[r0]! @load and increment the pi2_src1 183 vld1.s16 {d7},[r1]! @load and increment the pi2_src2 184 vld1.s16 {d8},[r11],r3 @load and increment the pi2_src1 ii iteration 185 vqadd.s16 d18,d6,d7 186 vqadd.s16 d18,d18,d0 @vaddq_s32(i4_tmp1_t1, tmp_lvl_shift_t) 187 vld1.s16 {d9},[r12],r4 @load and increment the pi2_src2 ii iteration 188 vqadd.s16 d20,d8,d9 @vaddq_s32(i4_tmp2_t1, i4_tmp2_t2) 189 vqadd.s16 d19,d20,d0 @vaddq_s32(i4_tmp2_t1, tmp_lvl_shift_t) 190 vqshrun.s16 d20,q9,# [all...] |
ihevc_inter_pred_filters_luma_vert_w16inp.s | 148 vmull.s16 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@ 150 vmlal.s16 q4,d0,d22 @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_0)@ 152 vmlal.s16 q4,d2,d24 @mul_res1 = vmlal_u8(mul_res1, src_tmp3, coeffabs_2)@ 154 vmlal.s16 q4,d3,d25 @mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_3)@ 156 vmlal.s16 q4,d4,d26 @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_4)@ 158 vmlal.s16 q4,d5,d27 @mul_res1 = vmlal_u8(mul_res1, src_tmp2, coeffabs_5)@ 159 vmlal.s16 q4,d6,d28 @mul_res1 = vmlal_u8(mul_res1, src_tmp3, coeffabs_6)@ 160 vmlal.s16 q4,d7,d29 @mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_7)@ 164 vmull.s16 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@ 166 vmlal.s16 q5,d1,d22 @mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_0) [all...] |
ihevc_inter_pred_luma_vert_w16inp_w16out.s | 158 vmull.s16 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@ 160 vmlal.s16 q4,d0,d22 @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_0)@ 162 vmlal.s16 q4,d2,d24 @mul_res1 = vmlal_u8(mul_res1, src_tmp3, coeffabs_2)@ 164 vmlal.s16 q4,d3,d25 @mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_3)@ 166 vmlal.s16 q4,d4,d26 @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_4)@ 168 vmlal.s16 q4,d5,d27 @mul_res1 = vmlal_u8(mul_res1, src_tmp2, coeffabs_5)@ 169 vmlal.s16 q4,d6,d28 @mul_res1 = vmlal_u8(mul_res1, src_tmp3, coeffabs_6)@ 170 vmlal.s16 q4,d7,d29 @mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_7)@ 174 vmull.s16 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@ 176 vmlal.s16 q5,d1,d22 @mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_0) [all...] |
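The ihevc_itrans_recon_* and ihevc_inter_pred_* kernels above all follow the same pattern: widen 16-bit inputs into 32-bit accumulators with vmull.s16, then fold in the remaining taps with vmlal.s16/vmlsl.s16 against a scalar coefficient lane. A minimal C-intrinsics sketch of one odd-part butterfly, assuming a hypothetical coefficient layout {cos1, cos3, sin3, sin1} in one vector (the real kernels spread the table across d0/d1 and use different shifts per stage):

#include <arm_neon.h>

/* c holds {cos1, cos3, sin3, sin1} in lanes 0..3 -- an assumed layout. */
static inline void odd_butterfly(int16x4_t y1, int16x4_t y3, int16x4_t c,
                                 int32x4_t *b0, int32x4_t *b1)
{
    *b0 = vmull_lane_s16(y1, c, 0);        /* y1 * cos1               */
    *b0 = vmlal_lane_s16(*b0, y3, c, 1);   /* y1 * cos1 + y3 * cos3   */
    *b1 = vmull_lane_s16(y1, c, 1);        /* y1 * cos3               */
    *b1 = vmlsl_lane_s16(*b1, y3, c, 3);   /* y1 * cos3 - y3 * sin1   */
    /* further odd inputs (y5, y7, ...) accumulate into the same registers;
     * vqrshrn_n_s32 then narrows back to 16 bits with the stage shift. */
}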
/hardware/intel/common/libmix/mix_audio/src/ |
pvt.h | 7 typedef signed short s16; typedef |
/frameworks/rs/cpu_ref/ |
rsCpuIntrinsics_neon_Convolve.S | 69 vmull.s16 q8, d4, d0[0] 70 vmlal.s16 q8, d5, d0[1] 71 vmlal.s16 q8, d6, d0[2] 72 vmlal.s16 q8, d8, d0[3] 73 vmlal.s16 q8, d9, d1[0] 74 vmlal.s16 q8, d10, d1[1] 75 vmlal.s16 q8, d12, d1[2] 76 vmlal.s16 q8, d13, d1[3] 77 vmlal.s16 q8, d14, d2[0] 79 vmull.s16 q9, d5, d0[0 [all...] |
rsCpuIntrinsics_neon_Resize.S | 193 vld1.s16 {q5}, [r9] 195 vdup.s16 q6, r2 196 vdup.s16 q7, r3 197 vmla.s16 q6, q5, q7 // vxf 198 vshl.s16 q7, q7, #VECSHIFT // vxinc 243 vdup.s16 d24, d25[0] 244 vst1.s16 {q12}, [r12] 245 vld1.s16 {d24}, [r8] 246 vst1.s16 {d24}, [r9] 250 vst1.s16 {q11,q12}, [r12 [all...] |
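rsCpuIntrinsics_neon_Convolve.S accumulates one output row by multiplying each 16-bit pixel vector by a single coefficient lane, exactly the vmull.s16/vmlal.s16 lane pattern shown above. A sketch of that accumulation in intrinsics (the pixel array and coefficient vector names are hypothetical):

#include <arm_neon.h>

/* c0/c1/c2 carry the nine 3x3 kernel coefficients across their lanes. */
static inline int32x4_t convolve3x3_acc(const int16x4_t px[9],
                                        int16x4_t c0, int16x4_t c1, int16x4_t c2)
{
    int32x4_t acc = vmull_lane_s16(px[0], c0, 0);
    acc = vmlal_lane_s16(acc, px[1], c0, 1);
    acc = vmlal_lane_s16(acc, px[2], c0, 2);
    acc = vmlal_lane_s16(acc, px[3], c0, 3);
    acc = vmlal_lane_s16(acc, px[4], c1, 0);
    acc = vmlal_lane_s16(acc, px[5], c1, 1);
    acc = vmlal_lane_s16(acc, px[6], c1, 2);
    acc = vmlal_lane_s16(acc, px[7], c1, 3);
    acc = vmlal_lane_s16(acc, px[8], c2, 0);
    return acc;   /* 32-bit sums; shift/narrow happens in a later step */
}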
/external/libavc/encoder/arm/ |
ih264e_half_pel.s | 170 vqrshrun.s16 d20, q4, #5 @// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,row0) 171 vqrshrun.s16 d21, q5, #5 @// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2,row0) 172 vqrshrun.s16 d22, q6, #5 @// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column3,row0) 173 vqrshrun.s16 d23, q7, #5 @// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,row1) 174 vqrshrun.s16 d24, q8, #5 @// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2,row1) 175 vqrshrun.s16 d25, q9, #5 @// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column3,row1) 316 vqrshrun.s16 d2, q10, #5 @// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,row0) 318 vqrshrun.s16 d3, q11, #5 @// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2,row0) 321 vaddl.s16 q13, d31, d20 @// a0 + a5 (set1) 323 vmlal.s16 q13, d30, d0[1] @// a0 + a5 + 20a2 (set1 [all...] |
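The repeated vqrshrun.s16 #5 in ih264e_half_pel.s is the rounding, saturating narrow of the 6-tap (1, -5, 20, 20, -5, 1) half-pel filter sum back to unsigned 8-bit pixels. A sketch of one horizontal row in intrinsics, with assumed names (this mirrors the filter shape the comments describe, not the kernel's actual register allocation):

#include <arm_neon.h>

static inline uint8x8_t h264_hpel_horz(uint8x8_t a0, uint8x8_t a1, uint8x8_t a2,
                                       uint8x8_t a3, uint8x8_t a4, uint8x8_t a5)
{
    int16x8_t sum = vreinterpretq_s16_u16(vaddl_u8(a0, a5));             /* a0 + a5       */
    sum = vmlaq_n_s16(sum, vreinterpretq_s16_u16(vaddl_u8(a2, a3)), 20); /* + 20*(a2+a3)  */
    sum = vmlsq_n_s16(sum, vreinterpretq_s16_u16(vaddl_u8(a1, a4)), 5);  /* -  5*(a1+a4)  */
    return vqrshrun_n_s16(sum, 5);   /* (sum + 16) >> 5, clamped to [0, 255] */
}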
/external/libvpx/libvpx/vp8/encoder/arm/neon/ |
shortfdct_neon.asm | 50 vadd.s16 d4, d0, d3 ; a1 = ip[0] + ip[3] 51 vadd.s16 d5, d1, d2 ; b1 = ip[1] + ip[2] 52 vsub.s16 d6, d1, d2 ; c1 = ip[1] - ip[2] 53 vsub.s16 d7, d0, d3 ; d1 = ip[0] - ip[3] 55 vshl.s16 q2, q2, #3 ; (a1, b1) << 3 56 vshl.s16 q3, q3, #3 ; (c1, d1) << 3 58 vadd.s16 d0, d4, d5 ; op[0] = a1 + b1 59 vsub.s16 d2, d4, d5 ; op[2] = a1 - b1 61 vmlal.s16 q9, d7, d16 ; d1*5352 + 14500 62 vmlal.s16 q10, d7, d17 ; d1*2217 + 750 [all...] |
fastquantizeb_neon.asm | 37 vabs.s16 q4, q0 ; calculate x = abs(z) 38 vabs.s16 q5, q1 41 vshr.s16 q2, q0, #15 ; sz 42 vshr.s16 q3, q1, #15 44 vld1.s16 {q6, q7}, [r6@128] ; load round_ptr [0-15] 45 vld1.s16 {q8, q9}, [r5@128] ; load quant_ptr [0-15] 49 vadd.s16 q4, q6 ; x + Round 50 vadd.s16 q5, q7 54 vqdmulh.s16 q4, q8 ; y = ((Round+abs(z)) * Quant) >> 16 55 vqdmulh.s16 q5, q [all...] |
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/arm/neon/ |
shortfdct_neon.asm | 50 vadd.s16 d4, d0, d3 ; a1 = ip[0] + ip[3] 51 vadd.s16 d5, d1, d2 ; b1 = ip[1] + ip[2] 52 vsub.s16 d6, d1, d2 ; c1 = ip[1] - ip[2] 53 vsub.s16 d7, d0, d3 ; d1 = ip[0] - ip[3] 55 vshl.s16 q2, q2, #3 ; (a1, b1) << 3 56 vshl.s16 q3, q3, #3 ; (c1, d1) << 3 58 vadd.s16 d0, d4, d5 ; op[0] = a1 + b1 59 vsub.s16 d2, d4, d5 ; op[2] = a1 - b1 61 vmlal.s16 q9, d7, d16 ; d1*5352 + 14500 62 vmlal.s16 q10, d7, d17 ; d1*2217 + 750 [all...] |
fastquantizeb_neon.asm | 37 vabs.s16 q4, q0 ; calculate x = abs(z) 38 vabs.s16 q5, q1 41 vshr.s16 q2, q0, #15 ; sz 42 vshr.s16 q3, q1, #15 44 vld1.s16 {q6, q7}, [r6@128] ; load round_ptr [0-15] 45 vld1.s16 {q8, q9}, [r5@128] ; load quant_ptr [0-15] 49 vadd.s16 q4, q6 ; x + Round 50 vadd.s16 q5, q7 54 vqdmulh.s16 q4, q8 ; y = ((Round+abs(z)) * Quant) >> 16 55 vqdmulh.s16 q5, q [all...] |
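Both copies of fastquantizeb_neon.asm implement VP8's fast quantizer: take abs(z), add the round term, doubling-multiply-high against the quant table, then restore the sign from the z >> 15 mask. A C-intrinsics sketch of that flow; the >>1 here is only to undo vqdmulh's built-in doubling, the real kernel may fold that scaling into its tables:

#include <arm_neon.h>

static inline int16x8_t fast_quantize8(int16x8_t z, int16x8_t round, int16x8_t quant)
{
    int16x8_t sz = vshrq_n_s16(z, 15);      /* 0 or -1: the sign of z             */
    int16x8_t x  = vabsq_s16(z);            /* x = abs(z)                         */
    x = vaddq_s16(x, round);                /* x + round                          */
    int16x8_t y = vqdmulhq_s16(x, quant);   /* doubling mul-high: (x*quant*2)>>16 */
    y = vshrq_n_s16(y, 1);                  /* >>1 to undo the doubling           */
    y = veorq_s16(y, sz);                   /* restore sign: (y ^ sz) - sz        */
    return vsubq_s16(y, sz);
}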
/external/lldb/test/lang/cpp/char1632_t/ |
main.cpp | 15 char16_t *s16 = (char16_t *)u"???"; local 19 s16 = (char16_t *)u"??????????"; |
/external/lldb/test/lang/cpp/rdar12991846/ |
main.cpp | 15 char16_t *s16 = (char16_t *)u"???"; local 19 s16 = (char16_t *)u"??????????"; |
/external/libvpx/libvpx/vp9/common/arm/neon/ |
vp9_idct16x16_add_neon.asm | 53 vld2.s16 {q8,q9}, [r0]! 54 vld2.s16 {q9,q10}, [r0]! 55 vld2.s16 {q10,q11}, [r0]! 56 vld2.s16 {q11,q12}, [r0]! 57 vld2.s16 {q12,q13}, [r0]! 58 vld2.s16 {q13,q14}, [r0]! 59 vld2.s16 {q14,q15}, [r0]! 60 vld2.s16 {q1,q2}, [r0]! 61 vmov.s16 q15, q1 88 vmull.s16 q2, d18, d [all...] |
vp9_iht8x8_add_neon.asm | 131 vmull.s16 q2, d18, d0 132 vmull.s16 q3, d19, d0 135 vmull.s16 q5, d26, d2 136 vmull.s16 q6, d27, d2 139 vmlsl.s16 q2, d30, d1 140 vmlsl.s16 q3, d31, d1 143 vmlsl.s16 q5, d22, d3 144 vmlsl.s16 q6, d23, d3 155 vmull.s16 q2, d18, d1 156 vmull.s16 q3, d19, d [all...] |
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/arm/neon/ |
vp9_idct16x16_add_neon.asm | 53 vld2.s16 {q8,q9}, [r0]! 54 vld2.s16 {q9,q10}, [r0]! 55 vld2.s16 {q10,q11}, [r0]! 56 vld2.s16 {q11,q12}, [r0]! 57 vld2.s16 {q12,q13}, [r0]! 58 vld2.s16 {q13,q14}, [r0]! 59 vld2.s16 {q14,q15}, [r0]! 60 vld2.s16 {q1,q2}, [r0]! 61 vmov.s16 q15, q1 88 vmull.s16 q2, d18, d [all...] |
vp9_iht8x8_add_neon.asm | 131 vmull.s16 q2, d18, d0 132 vmull.s16 q3, d19, d0 135 vmull.s16 q5, d26, d2 136 vmull.s16 q6, d27, d2 139 vmlsl.s16 q2, d30, d1 140 vmlsl.s16 q3, d31, d1 143 vmlsl.s16 q5, d22, d3 144 vmlsl.s16 q6, d23, d3 155 vmull.s16 q2, d18, d1 156 vmull.s16 q3, d19, d [all...] |
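The vld2.s16 {qA,qB}, [r0]! loads in the idct16x16 load loop are de-interleaving loads: of the 16 contiguous int16 values read, the even-indexed elements land in the first register and the odd-indexed ones in the second. Minimal intrinsic form (how the kernel then folds these into its transpose is not shown here):

#include <arm_neon.h>

static inline void load_deinterleave(const int16_t *src,
                                     int16x8_t *even, int16x8_t *odd)
{
    int16x8x2_t v = vld2q_s16(src);  /* reads 16 contiguous int16 values */
    *even = v.val[0];
    *odd  = v.val[1];
}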
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/arm/neon/ |
shortidct4x4llm_neon.asm | 47 vqdmulh.s16 q3, q2, d0[2] 48 vqdmulh.s16 q4, q2, d0[0] 50 vqadd.s16 d12, d2, d3 ;a1 51 vqsub.s16 d13, d2, d3 ;b1 53 vshr.s16 q3, q3, #1 54 vshr.s16 q4, q4, #1 56 vqadd.s16 q3, q3, q2 ;modify since sinpi8sqrt2 > 65536/2 (negtive number) 57 vqadd.s16 q4, q4, q2 64 vqsub.s16 d10, d6, d9 ;c1 65 vqadd.s16 d11, d7, d8 ;d [all...] |
/external/libavc/common/arm/ |
ih264_inter_pred_luma_horz_hpel_vert_hpel_a9q.s | 124 vmov.s16 d0, #20 @ Filter coeff 20 125 vmov.s16 d1, #5 @ Filter coeff 5 149 vmls.s16 q12, q11, d1[0] @ temp -= temp2 * 5 154 vmls.s16 q13, q10, d1[0] @ temp -= temp2 * 5 158 vmls.s16 q14, q10, d1[0] @ temp -= temp2 * 5 164 vaddl.s16 q1, d20, d24 @// a0 + a5 (column1) 165 vaddl.s16 q15, d21, d25 @// a0 + a5 (column1) 166 vmlal.s16 q1, d22, d0[0] @// a0 + a5 + 20a2 (column1) 167 vmlal.s16 q15, d23, d0[0] @// a0 + a5 + 20a2 (column1) 170 vmlsl.s16 q1, d22, d1[0] @// a0 + a5 + 20a2 + 20a3 - 5a1 (column1 [all...] |
/external/linux-tools-perf/src/tools/perf/util/ |
types.h | 15 typedef signed short s16; typedef |
/external/llvm/test/MC/ARM/ |
basic-arm-instructions-v8.1a.s | 43 vqrdmlah.s16 d0, d1, d2 44 //CHECK-V81aARM: vqrdmlah.s16 d0, d1, d2 @ encoding: [0x12,0x0b,0x11,0xf3] 45 //CHECK-V81aTHUMB: vqrdmlah.s16 d0, d1, d2 @ encoding: [0x11,0xff,0x12,0x0b] 47 //CHECK-V8: vqrdmlah.s16 d0, d1, d2 57 vqrdmlah.s16 q0, q1, q2 58 //CHECK-V81aARM: vqrdmlah.s16 q0, q1, q2 @ encoding: [0x54,0x0b,0x12,0xf3] 59 //CHECK-V81aTHUMB: vqrdmlah.s16 q0, q1, q2 @ encoding: [0x12,0xff,0x54,0x0b] 61 //CHECK-V8: vqrdmlah.s16 q0, q1, q2 72 vqrdmlsh.s16 d7, d6, d5 73 //CHECK-V81aARM: vqrdmlsh.s16 d7, d6, d5 @ encoding: [0x15,0x7c,0x16,0xf3 [all...] |
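vqrdmlah.s16/vqrdmlsh.s16 are the ARMv8.1-A rounding doubling multiply-accumulate instructions whose encodings this MC test checks. ACLE exposes them as intrinsics behind the __ARM_FEATURE_QRDMX feature macro (for example when building with -march=armv8.1-a+rdma); a small usage sketch:

#include <arm_neon.h>

#ifdef __ARM_FEATURE_QRDMX
int16x8_t qrdmlah_example(int16x8_t acc, int16x8_t a, int16x8_t b)
{
    acc = vqrdmlahq_s16(acc, a, b);   /* acc = sat(acc + ((a*b*2 + 0x8000) >> 16)) */
    acc = vqrdmlshq_s16(acc, a, b);   /* acc = sat(acc - ((a*b*2 + 0x8000) >> 16)) */
    return acc;
}
#endif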