    Searched refs: q5 (Results 26 - 50 of 205)

  /external/libavc/encoder/arm/
ih264e_evaluate_intra_chroma_modes_a9q.s 100 vld1.32 {q5}, [r1]!
102 vuzp.u8 q4, q5 @
129 vzip.u16 q4, q5
131 vadd.u16 q7, q5, q6
134 vqrshrn.u16 d16, q5, #2
141 vzip.u16 q4, q5
142 vqrshrn.u16 d16, q5, #2
149 vzip.u16 q4, q5
161 vld1.32 {q5}, [r12]!
326 vmov q15, q5
    [all...]
  /external/libhevc/common/arm/
ihevc_itrans_recon_4x4_ttype1.s 153 vmull.s16 q5,d0,d4[2] @ 74 * pi2_src[0]
154 vmlsl.s16 q5,d2,d4[2] @ 74 * pi2_src[0] - 74 * pi2_src[2]
155 vmlal.s16 q5,d3,d4[2] @pi2_out[2] = 74 * pi2_src[0] - 74 * pi2_src[2] + 74 * pi2_src[3]
164 vqrshrn.s32 d16,q5,#shift_stage1_idct @ (pi2_out[2] + rounding ) >> shift_stage1_idct
192 vmull.s16 q5,d14,d4[2] @ 74 * pi2_src[0]
193 vmlsl.s16 q5,d16,d4[2] @ 74 * pi2_src[0] - 74 * pi2_src[2]
194 vmlal.s16 q5,d17,d4[2] @pi2_out[2] = 74 * pi2_src[0] - 74 * pi2_src[2] + 74 * pi2_src[3]
204 vqrshrn.s32 d2,q5,#shift_stage2_idct @ (pi2_out[2] + rounding ) >> shift_stage1_idct
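
The ihevc_itrans_recon_4x4_ttype1.s hits above compute one partial output of the 4x4 DST-style inverse transform, 74 * pi2_src[0] - 74 * pi2_src[2] + 74 * pi2_src[3], then round-shift and narrow. A minimal arm_neon.h sketch of that pattern follows; the shift value is an assumption for 8-bit first-stage scaling, not read from the file.

    #include <arm_neon.h>

    #define SHIFT_STAGE1 7                        /* assumed value of shift_stage1_idct */

    /* one partial row of the ttype1 inverse transform, as in the hits above */
    static inline int16x4_t itrans4_ttype1_row2(int16x4_t src0, int16x4_t src2,
                                                int16x4_t src3)
    {
        int32x4_t acc = vmull_n_s16(src0, 74);    /* 74 * pi2_src[0]                 */
        acc = vmlsl_n_s16(acc, src2, 74);         /* - 74 * pi2_src[2]               */
        acc = vmlal_n_s16(acc, src3, 74);         /* + 74 * pi2_src[3]               */
        return vqrshrn_n_s32(acc, SHIFT_STAGE1);  /* (sum + rounding) >> shift, sat  */
    }
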
ihevc_weighted_pred_uni.s 170 vmull.s16 q5,d8,d0[0] @vmull_n_s16(pi2_src_val1, (int16_t) wgt0) iii iteration
173 vadd.i32 q5,q5,q15 @vaddq_s32(i4_tmp1_t, tmp_lvl_shift_t) iii iteration
181 vshl.s32 q5,q5,q14 @vshlq_s32(i4_tmp1_t, tmp_shift_t) iii iteration
187 vqmovun.s32 d10,q5 @vqmovun_s32(sto_res_tmp1) iii iteration
196 vqmovn.u16 d10,q5 @vqmovn_u16(sto_res_tmp3) iii iteration
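
The ihevc_weighted_pred_uni.s hits apply uni-directional weighted prediction four pixels at a time: widen-multiply by the weight, add the combined offset/rounding term, shift (the shift vector is negative, so vshlq_s32 shifts right), then saturate-narrow. A sketch with assumed parameter names; a second such half is later packed to bytes with vqmovn_u16, as the last hit shows.

    #include <arm_neon.h>

    static inline uint16x4_t weighted_pred_uni_4(int16x4_t pi2_src, int16_t wgt0,
                                                 int32x4_t tmp_lvl_shift,
                                                 int32x4_t tmp_shift /* negative */)
    {
        int32x4_t t = vmull_n_s16(pi2_src, wgt0); /* pi2_src * wgt0              */
        t = vaddq_s32(t, tmp_lvl_shift);          /* + offset/rounding term      */
        t = vshlq_s32(t, tmp_shift);              /* arithmetic right shift      */
        return vqmovun_s32(t);                    /* saturate to unsigned 16-bit */
    }
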
ihevc_inter_pred_filters_luma_vert.s 176 vmull.u8 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
179 vmlsl.u8 q5,d1,d22 @mul_res2 = vmlsl_u8(mul_res2, src_tmp2, coeffabs_0)@
182 vmlsl.u8 q5,d3,d24 @mul_res2 = vmlsl_u8(mul_res2, src_tmp4, coeffabs_2)@
185 vmlal.u8 q5,d4,d25 @mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_3)@
187 vmlal.u8 q5,d5,d26 @mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_4)@
189 vmlsl.u8 q5,d6,d27 @mul_res2 = vmlsl_u8(mul_res2, src_tmp3, coeffabs_5)@
192 vmlal.u8 q5,d7,d28 @mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_6)@
195 vmlsl.u8 q5,d16,d29 @mul_res2 = vmlsl_u8(mul_res2, src_tmp1, coeffabs_7)@
213 vqrshrun.s16 d10,q5,#6 @sto_res = vqmovun_s16(sto_res_tmp)@
267 vmull.u8 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)
    [all...]
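
The ihevc_inter_pred_filters_luma_vert.s hits accumulate an 8-tap filter with unsigned widening multiplies: each tap uses vmlal_u8 or vmlsl_u8 according to the sign of its coefficient (the absolute values live in coeffabs_0..7), and the result is round-shifted by 6 into bytes. A sketch of that tap pattern with illustrative array names:

    #include <arm_neon.h>

    /* s[0..7]: eight source rows, c[0..7]: absolute coefficient values */
    static inline uint8x8_t luma_filt8(const uint8x8_t s[8], const uint8x8_t c[8])
    {
        uint16x8_t acc = vmull_u8(s[1], c[1]);    /* + coeffabs_1 */
        acc = vmlsl_u8(acc, s[0], c[0]);          /* - coeffabs_0 */
        acc = vmlsl_u8(acc, s[2], c[2]);          /* - coeffabs_2 */
        acc = vmlal_u8(acc, s[3], c[3]);          /* + coeffabs_3 */
        acc = vmlal_u8(acc, s[4], c[4]);          /* + coeffabs_4 */
        acc = vmlsl_u8(acc, s[5], c[5]);          /* - coeffabs_5 */
        acc = vmlal_u8(acc, s[6], c[6]);          /* + coeffabs_6 */
        acc = vmlsl_u8(acc, s[7], c[7]);          /* - coeffabs_7 */
        /* round, shift by 6, saturate to u8, treating the sum as signed */
        return vqrshrun_n_s16(vreinterpretq_s16_u16(acc), 6);
    }
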
ihevc_weighted_pred_bi.s 198 vmull.s16 q5,d2,d7[0] @vmull_n_s16(pi2_src1_val2, (int16_t) wgt0) ii iteration
209 vadd.s32 q5,q5,q6 @vaddq_s32(i4_tmp2_t1, i4_tmp2_t2) ii iteration
214 vadd.s32 q5,q5,q15 @vaddq_s32(i4_tmp2_t1, tmp_lvl_shift_t) ii iteration
218 vshl.s32 q5,q5,q14 @vshlq_s32(i4_tmp2_t1, tmp_shift_t) ii iteration
223 vqmovun.s32 d10,q5 @vqmovun_s32(sto_res_tmp1) ii iteration
235 vqmovn.u16 d10,q5 @vqmovn_u16(sto_res_tmp3) ii iteration
ihevc_inter_pred_filters_luma_horz.s 233 vmull.u8 q5,d15,d27 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
235 vmlsl.u8 q5,d14,d26 @mul_res = vmlsl_u8(src[0_2], coeffabs_2)@
237 vmlal.u8 q5,d16,d28 @mul_res = vmlal_u8(src[0_4], coeffabs_4)@
239 vmlsl.u8 q5,d17,d29 @mul_res = vmlsl_u8(src[0_5], coeffabs_5)@
241 vmlal.u8 q5,d18,d30 @mul_res = vmlal_u8(src[0_6], coeffabs_6)@
242 vmlsl.u8 q5,d19,d31 @mul_res = vmlsl_u8(src[0_7], coeffabs_7)@
244 vmlsl.u8 q5,d12,d24 @mul_res = vmlsl_u8(src[0_0], coeffabs_0)@
245 vmlal.u8 q5,d13,d25 @mul_res = vmlal_u8(src[0_1], coeffabs_1)@
249 vqrshrun.s16 d8,q5,#6 @right shift and saturating narrow result 2
340 vmull.u8 q5,d2,d25 @mul_res = vmlal_u8(src[0_1], coeffabs_1)
    [all...]
ihevc_intra_pred_luma_planar.s 224 vdup.16 q5, r4 @(4)
236 vmlal.u8 q5, d5, d0 @(4)
239 vmlal.u8 q5, d8, d1 @(4)
242 vmlal.u8 q5, d6, d3 @(4)
245 vmlal.u8 q5, d9, d23 @(4)
264 vshl.s16 q5, q5, q7 @(4)shr
268 vmovn.i16 d10, q5 @(4)
393 vdup.16 q5, r4 @(4)
406 vmlal.u8 q5, d5, d0 @(4
    [all...]
ihevc_intra_pred_chroma_horz.s 121 vld1.16 {q5},[r12] @load 16 values. d1[7] will have the 1st value.
203 vdup.16 q5,d1[3]
218 vst1.16 {q5},[r2],r3
220 vdup.16 q5,d0[2]
239 vst1.16 {q5},[r2],r3
253 @vdup.8 q5,d0[2]
263 @vst1.8 {q5},[r2],r3
ihevc_intra_pred_chroma_mode_27_to_33.s 175 vmull.u8 q5,d8,d30 @(i row)vmull_u8(ref_main_idx, dup_const_32_fract)
178 vmlal.u8 q5,d9,d31 @(i row)vmull_u8(ref_main_idx_1, dup_const_fract)
193 vrshrn.i16 d10,q5,#5 @(i row)shift_res = vrshrn_n_u16(add_res, 5)
236 vmull.u8 q5,d8,d30 @(v)vmull_u8(ref_main_idx, dup_const_32_fract)
239 vmlal.u8 q5,d9,d31 @(v)vmull_u8(ref_main_idx_1, dup_const_fract)
259 vrshrn.i16 d10,q5,#5 @(v)shift_res = vrshrn_n_u16(add_res, 5)
313 vmull.u8 q5,d8,d30 @(i)vmull_u8(ref_main_idx, dup_const_32_fract)
317 vmlal.u8 q5,d9,d31 @(i)vmull_u8(ref_main_idx_1, dup_const_fract)
335 vrshrn.i16 d10,q5,#5 @(i)shift_res = vrshrn_n_u16(add_res, 5)
388 vmull.u8 q5,d8,d30 @(v)vmull_u8(ref_main_idx, dup_const_32_fract
    [all...]
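
The intra-angular files in this group (chroma and luma modes 27 to 33, and modes 19 to 25 below) repeat a single two-tap interpolation on q5: (32 - fract) * ref[x] + fract * ref[x + 1], rounded and shifted right by 5. A sketch of that step with illustrative names:

    #include <arm_neon.h>

    static inline uint8x8_t angular_interp(uint8x8_t ref_main_idx,
                                           uint8x8_t ref_main_idx_1,
                                           uint8x8_t fract)
    {
        uint8x8_t fract32 = vsub_u8(vdup_n_u8(32), fract);  /* dup_const_32_fract */
        uint16x8_t acc = vmull_u8(ref_main_idx, fract32);   /* ref[x] * (32 - f)  */
        acc = vmlal_u8(acc, ref_main_idx_1, fract);         /* + ref[x+1] * f     */
        return vrshrn_n_u16(acc, 5);                        /* (sum + 16) >> 5    */
    }
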
ihevc_intra_pred_filters_luma_mode_19_to_25.s 287 vmull.u8 q5,d8,d30 @(i row)vmull_u8(ref_main_idx, dup_const_32_fract)
290 vmlal.u8 q5,d9,d31 @(i row)vmull_u8(ref_main_idx_1, dup_const_fract)
304 vrshrn.i16 d10,q5,#5 @(i row)shift_res = vrshrn_n_u16(add_res, 5)
344 vmull.u8 q5,d8,d30 @(v)vmull_u8(ref_main_idx, dup_const_32_fract)
347 vmlal.u8 q5,d9,d31 @(v)vmull_u8(ref_main_idx_1, dup_const_fract)
366 vrshrn.i16 d10,q5,#5 @(v)shift_res = vrshrn_n_u16(add_res, 5)
420 vmull.u8 q5,d8,d30 @(i)vmull_u8(ref_main_idx, dup_const_32_fract)
423 vmlal.u8 q5,d9,d31 @(i)vmull_u8(ref_main_idx_1, dup_const_fract)
442 vrshrn.i16 d10,q5,#5 @(i)shift_res = vrshrn_n_u16(add_res, 5)
496 vmull.u8 q5,d8,d30 @(v)vmull_u8(ref_main_idx, dup_const_32_fract
    [all...]
ihevc_intra_pred_luma_mode_27_to_33.s 178 vmull.u8 q5,d8,d30 @(i row)vmull_u8(ref_main_idx, dup_const_32_fract)
181 vmlal.u8 q5,d9,d31 @(i row)vmull_u8(ref_main_idx_1, dup_const_fract)
196 vrshrn.i16 d10,q5,#5 @(i row)shift_res = vrshrn_n_u16(add_res, 5)
238 vmull.u8 q5,d8,d30 @(v)vmull_u8(ref_main_idx, dup_const_32_fract)
241 vmlal.u8 q5,d9,d31 @(v)vmull_u8(ref_main_idx_1, dup_const_fract)
261 vrshrn.i16 d10,q5,#5 @(v)shift_res = vrshrn_n_u16(add_res, 5)
314 vmull.u8 q5,d8,d30 @(i)vmull_u8(ref_main_idx, dup_const_32_fract)
318 vmlal.u8 q5,d9,d31 @(i)vmull_u8(ref_main_idx_1, dup_const_fract)
336 vrshrn.i16 d10,q5,#5 @(i)shift_res = vrshrn_n_u16(add_res, 5)
389 vmull.u8 q5,d8,d30 @(v)vmull_u8(ref_main_idx, dup_const_32_fract
    [all...]
ihevc_itrans_recon_8x8.s 242 vadd.s32 q5,q10,q11 @// c0 = y0 * cos4 + y4 * cos4(part of a0 and a1)
250 vadd.s32 q7,q5,q3 @// a0 = c0 + d0(part of r0,r7)
251 vsub.s32 q5,q5,q3 @// a3 = c0 - d0(part of r3,r4)
264 vadd.s32 q13,q5,q15 @// a3 + b3(part of r3)
265 vsub.s32 q15,q5,q15 @// a3 - b3(part of r4)
319 vsub.s32 q5,q10,q3 @// a3 = c0 - d0(part of r3,r4)
332 vadd.s32 q13,q5,q15 @// a3 + b3(part of r3)
333 vsub.s32 q15,q5,q15 @// a3 - b3(part of r4)
421 vtrn.16 q5,q7 @//[r7,r5],[r6,r4] third qudrant transposin
    [all...]
  /external/libjpeg-turbo/simd/
jsimd_arm_neon.S 111 JLONG q1, q2, q3, q4, q5, q6, q7; \
124 q5 = row7 + row3; \ define
126 q6 = MULTIPLY(q5, FIX_1_175875602_MINUS_1_961570560) + \
128 q7 = MULTIPLY(q5, FIX_1_175875602) + \
140 q5 = q7; \ define
150 q5 += MULTIPLY(row5, FIX_2_053119869_MINUS_2_562915447) + \
166 tmp1 = q5; \
289 vmov q5, q7
298 vmlal.s16 q5, ROW5L, XFIX_2_053119869_MINUS_2_562915447
300 vmlsl.s16 q5, ROW3L, XFIX_2_56291544
    [all...]
  /external/valgrind/none/tests/arm/
neon128.stdout.exp 8 vmov.i32 q5, #0x700 :: Qd 0x00000700 0x00000700 0x00000700 0x00000700
9 vmov.i32 q5, #0x700 :: Qd 0x00000700 0x00000700 0x00000700 0x00000700
33 vmvn.i32 q5, #0x700 :: Qd 0xfffff8ff 0xfffff8ff 0xfffff8ff 0xfffff8ff
34 vmvn.i32 q5, #0x700 :: Qd 0xfffff8ff 0xfffff8ff 0xfffff8ff 0xfffff8ff
65 vbic.i32 q5, #0x700 :: Qd 0x55555055 0x55555055 0x55555055 0x55555055
66 vbic.i32 q5, #0x700 :: Qd 0x151d181d 0x141c181c 0x131b181b 0x121f181f
120 vand q4, q6, q5 :: Qd 0x00570057 0x00570057 0x00570057 0x00570057 Qm (i8)0x000000ff Qn (i16)0x00000057
125 vbic q4, q6, q5 :: Qd 0xffa8ffa8 0xffa8ffa8 0xffa8ffa8 0xffa8ffa8 Qm (i8)0x000000ff Qn (i16)0x00000057
140 veor q4, q6, q5 :: Qd 0xffa8ffa8 0xffa8ffa8 0xffa8ffa8 0xffa8ffa8 Qm (i8)0x000000ff Qn (i16)0x00000057
149 vbsl q4, q6, q5 :: Qd 0x55575557 0x55575557 0x55575557 0x55575557 Qm (i8)0x000000ff Qn (i16)0x0000005
    [all...]
  /external/llvm/test/MC/ARM/
neont2-pairwise-encoding.s 22 vpaddl.s16 q5, q6
23 vpaddl.s32 q6, q5
35 @ CHECK: vpaddl.s16 q5, q6 @ encoding: [0xb4,0xff,0x4c,0xa2]
36 @ CHECK: vpaddl.s32 q6, q5 @ encoding: [0xb8,0xff,0x4a,0xc2]
49 vpadal.s16 q5, q11
62 @ CHECK: vpadal.s16 q5, q11 @ encoding: [0xb4,0xff,0x66,0xa6]
neon-bitwise-encoding.s 286 vand q6, q5
287 vand.s8 q6, q5
292 veor q6, q5
293 veor.8 q6, q5
298 veor q6, q5
299 veor.i8 q6, q5
304 vclt.s16 q5, #0
307 vceq.s16 q5, q3
310 vcgt.s16 q5, q3
313 vcge.s16 q5, q
    [all...]
  /external/swiftshader/third_party/LLVM/test/MC/ARM/
neont2-pairwise-encoding.s 22 vpaddl.s16 q5, q6
23 vpaddl.s32 q6, q5
35 @ CHECK: vpaddl.s16 q5, q6 @ encoding: [0xb4,0xff,0x4c,0xa2]
36 @ CHECK: vpaddl.s32 q6, q5 @ encoding: [0xb8,0xff,0x4a,0xc2]
49 vpadal.s16 q5, q11
62 @ CHECK: vpadal.s16 q5, q11 @ encoding: [0xb4,0xff,0x66,0xa6]
  /frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/
omxVCM4P10_TransformDequantLumaDCFromPair_s.S 62 VMOV.I32 q5,#0x2
66 VMLAL.S16 q5,d2,d5
70 VSHRN.I32 d2,q5,#2
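
The omxVCM4P10_TransformDequantLumaDCFromPair_s.S hits show a rounded scaling step: seed q5 with the rounding constant 2, multiply-accumulate the DC row against the scale factors, then narrow with a shift by 2. The same arithmetic as an intrinsics sketch with illustrative names:

    #include <arm_neon.h>

    static inline int16x4_t dequant_dc_round(int16x4_t dc_row, int16x4_t scale)
    {
        int32x4_t acc = vdupq_n_s32(2);           /* rounding term                  */
        acc = vmlal_s16(acc, dc_row, scale);      /* + dc_row * scale               */
        return vshrn_n_s32(acc, 2);               /* >> 2, narrowed back to 16-bit  */
    }
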
  /external/libvpx/libvpx/vpx_dsp/mips/
loopfilter_macros_dspr2.h 341 "preceu.ph.qbl %[q5_l], %[q5] \n\t" \
349 [q4] "r"(q4), [q5] "r"(q5), [q6] "r"(q6), [q7] "r"(q7)); \
379 "preceu.ph.qbr %[q5_r], %[q5] \n\t" \
387 [q4] "r"(q4), [q5] "r"(q5), [q6] "r"(q6), [q7] "r"(q7)); \
417 "precr.qb.ph %[q5], %[q5_l], %[q5_r] \n\t" \
421 [q3] "=&r"(q3), [q4] "=&r"(q4), [q5] "=&r"(q5), [q6] "=&r"(q6) \
loopfilter_16_msa.c 83 v16u8 p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7; local
94 LD_UB8(src, pitch, q0, q1, q2, q3, q4, q5, q6, q7);
95 VP9_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
233 q5_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q5);
241 q5_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q5);
369 /* q5 */
383 q5 = __msa_bmnz_v(q5, (v16u8)r_out, flat2);
384 ST_UB(q5, src);
432 v16u8 p3, p2, p1, p0, q3, q2, q1, q0, p7, p6, p5, p4, q4, q5, q6, q7 local
665 v16u8 p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7; local
689 v16u8 p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7; local
702 v16u8 p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7; local
827 v16u8 p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7; local
1148 v16u8 p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7; local
    [all...]
  /frameworks/rs/cpu_ref/
rsCpuIntrinsics_neon_YuvToRGB.S 37 vmull.u8 q5, d17, d15 // g1 = y1 * 149
46 vaddw.u8 q4, q5, d14 // r1 = y1 * 149 + (v >> 1)
50 vadd.u16 q6, q5, q7 // b1 = y1 * 149 + (u << 2)
60 vqadd.u16 q5, q14 // g1 = satu16(g1 + (-16 * 149 + 128 * 50 + 128 * 104) >> 0)
67 vqsub.u16 q5, q8 // g1 = satu16(g1 - g2)
74 vqrshrn.u16 d3, q5, #7
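
The rsCpuIntrinsics_neon_YuvToRGB.S hits use q5 for the green channel of a fixed-point YUV-to-RGB conversion: y * 149 plus a folded bias, minus a chroma term, then a rounding shift by 7. A sketch of just that channel; the chroma term u * 50 + v * 104 is an assumption inferred from the bias constant (-16 * 149 + 128 * 50 + 128 * 104) quoted in the comments.

    #include <arm_neon.h>

    static inline uint8x8_t yuv_to_green(uint8x8_t y, uint8x8_t u, uint8x8_t v)
    {
        uint16x8_t g1 = vmull_u8(y, vdup_n_u8(149));   /* y * 149        */
        uint16x8_t g2 = vmull_u8(u, vdup_n_u8(50));    /* u * 50 (assumed) */
        g2 = vmlal_u8(g2, v, vdup_n_u8(104));          /* + v * 104 (assumed) */
        uint16x8_t bias = vdupq_n_u16((uint16_t)(-16 * 149 + 128 * 50 + 128 * 104));
        g1 = vqaddq_u16(g1, bias);                     /* add bias, saturating */
        g1 = vqsubq_u16(g1, g2);                       /* subtract chroma term, saturating */
        return vqrshrn_n_u16(g1, 7);                   /* (g + 64) >> 7  */
    }
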
  /external/libmpeg2/common/arm/
impeg2_idct.s 158 vaddw.u8 q5, q15, d1
162 vqmovun.s16 d1, q5
229 vmovl.s16 q5, d3
231 vraddhn.s32 d13, q0, q5
239 vmovl.s16 q5, d3
241 vraddhn.s32 d13, q0, q5
249 vmovl.s16 q5, d3
251 vraddhn.s32 d13, q0, q5
259 vmovl.s16 q5, d3
261 vraddhn.s32 d13, q0, q5
    [all...]
  /external/libavc/common/arm/
ih264_inter_pred_luma_horz_hpel_vert_qpel_a9q.s 168 vaddl.u8 q5, d2, d3
173 vmla.u16 q4, q5, q11
175 vaddl.u8 q5, d1, d4
177 vmls.u16 q4, q5, q12
179 vaddl.u8 q5, d0, d5
187 vmla.u16 q5, q6, q11
191 vmls.u16 q5, q6, q12
198 vst1.32 {q5}, [r9], r6 @ store temp buffer 2
235 vadd.s16 q15, q5, q6
263 vadd.s16 q14, q5, q
    [all...]
ih264_resi_trans_quant_a9.s 189 vmov.s32 q5, q4 @copy round fact for row 2
198 vmlal.s16 q5, d1, d29 @Multiply and add row 2
203 vshl.s32 q12, q5, q10 @Shift row 2
215 vceq.s16 q5, q15, #0 @I compare with zero row 1 and 2 blk 1
222 vmovn.u16 d14, q5 @I Narrow the comparison for row 1 and 2 blk 1
384 vmov.s32 q5, q4 @copy round fact for row 2
393 vmlal.s16 q5, d1, d29 @Multiply and add row 2
398 vshl.s32 q12, q5, q10 @Shift row 2
410 vceq.s16 q5, q15, #0 @I compare with zero row 1 and 2 blk 1
416 vmovn.u16 d14, q5 @I Narrow the comparison for row 1 and 2 blk
    [all...]
  /external/boringssl/linux-arm/crypto/aes/
aesv8-armx32.S 319 veor q5,q8,q7
371 veor q8,q8,q5
414 veor q8,q8,q5
463 veor q5,q2,q7
504 veor q5,q5,q1
509 vst1.8 {q5},[r1]!
550 veor q5,q6,q7
559 veor q5,q5,q
    [all...]

