    Searched refs: q0 (Results 176 - 200 of 363)


  /external/libvpx/libvpx/vpx_dsp/mips/
loopfilter_mb_vert_dspr2.c 33 uint32_t p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7; local
86 "lw %[q0], (%[s4]) \n\t"
93 [q0] "=&r" (q0), [q7] "=&r" (q7), [q6] "=&r" (q6),
136 /* transpose q0, q1, q2, q3
142 q0 q0_0 q0_1 q0_2 q0_3
149 q0 q0_0 q1_0 q2_0 q3_0
154 "precrq.qb.ph %[prim3], %[q1], %[q0] \n\t"
155 "precr.qb.ph %[prim4], %[q1], %[q0] \n\t"
158 "precr.qb.ph %[q0], %[prim1], %[prim2] \n\t
    [all...]
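
The comment block quoted from loopfilter_mb_vert_dspr2.c describes a 4x4 byte transpose: four 32-bit registers, each holding the four pixels of one line, are reshuffled by the precrq.qb.ph/precr.qb.ph pairs so that q0 ends up holding q0_0, q1_0, q2_0, q3_0. A minimal scalar sketch of that reshuffle (illustrative names, little-endian byte order assumed; not the libvpx API):

    #include <stdint.h>

    /* Transpose a 4x4 block of bytes packed into four 32-bit words.
     * in[i] holds pixels qi_0..qi_3 in its four bytes; afterwards out[j]
     * holds q0_j, q1_j, q2_j, q3_j, the layout the listing's diagram shows. */
    static void transpose4x4_bytes(const uint32_t in[4], uint32_t out[4]) {
      for (int j = 0; j < 4; ++j) {
        uint32_t w = 0;
        for (int i = 0; i < 4; ++i) {
          uint32_t b = (in[i] >> (8 * j)) & 0xff;  /* j-th pixel of line i */
          w |= b << (8 * i);
        }
        out[j] = w;
      }
    }
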
loopfilter_mb_horiz_dspr2.c 35 uint32_t p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7; local
97 "lw %[q0], (%[sq0]) \n\t"
106 : [q3] "=&r" (q3), [q2] "=&r" (q2), [q1] "=&r" (q1), [q0] "=&r" (q0),
113 p1, p0, p3, p2, q0, q1, q2, q3,
116 flatmask5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, &flat2);
121 filter1_dspr2(mask, hev, p1, p0, q0, q1,
179 "sw %[q0], (%[sq0]) \n\t"
183 [q2] "r" (q2), [q1] "r" (q1), [q0] "r" (q0),
    [all...]
  /external/opencv/cv/src/
cvmoments.cpp 539 double q0 = t0 * t0, q1 = t1 * t1; local
547 HuState->hu4 = q0 + q1;
548 HuState->hu6 = d * (q0 - q1) + n4 * t0 * t1;
550 t0 *= q0 - 3 * q1;
551 t1 *= 3 * q0 - q1;
553 q0 = nu30 - 3 * nu12;
556 HuState->hu3 = q0 * q0 + q1 * q1;
557 HuState->hu5 = q0 * t0 + q1 * t1;
558 HuState->hu7 = q1 * t0 - q0 * t1
    [all...]
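
The cvmoments.cpp hit is OpenCV building Hu's rotation-invariant moments; the quoted lines form hu3..hu7 from the temporaries q0 = nu30 - 3*nu12 and q1 (plus t0, t1). For orientation, the first three Hu invariants, computed from normalized central moments, look like this (a sketch from the textbook definitions, not OpenCV's exact code path):

    /* First three Hu invariants from normalized central moments nu_pq.
     * hu[2] matches the quoted line hu3 = q0*q0 + q1*q1 with
     * q0 = nu30 - 3*nu12 and q1 = 3*nu21 - nu03. */
    static void hu_first_three(double nu20, double nu11, double nu02,
                               double nu30, double nu21, double nu12,
                               double nu03, double hu[3]) {
      const double q0 = nu30 - 3 * nu12;
      const double q1 = 3 * nu21 - nu03;
      hu[0] = nu20 + nu02;
      hu[1] = (nu20 - nu02) * (nu20 - nu02) + 4 * nu11 * nu11;
      hu[2] = q0 * q0 + q1 * q1;
    }
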
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/mips/dspr2/
vp9_mblpf_vert_loopfilter_dspr2.c 34 uint32_t p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7; local
87 "lw %[q0], (%[s4]) \n\t"
94 [q0] "=&r" (q0), [q7] "=&r" (q7), [q6] "=&r" (q6),
137 /* transpose q0, q1, q2, q3
143 q0 q0_0 q0_1 q0_2 q0_3
150 q0 q0_0 q1_0 q2_0 q3_0
155 "precrq.qb.ph %[prim3], %[q1], %[q0] \n\t"
156 "precr.qb.ph %[prim4], %[q1], %[q0] \n\t"
159 "precr.qb.ph %[q0], %[prim1], %[prim2] \n\t
    [all...]
vp9_mblpf_horiz_loopfilter_dspr2.c 36 uint32_t p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7; local
98 "lw %[q0], (%[sq0]) \n\t"
107 : [q3] "=&r" (q3), [q2] "=&r" (q2), [q1] "=&r" (q1), [q0] "=&r" (q0),
114 p1, p0, p3, p2, q0, q1, q2, q3,
117 vp9_flatmask5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, &flat2);
122 vp9_filter1_dspr2(mask, hev, p1, p0, q0, q1,
180 "sw %[q0], (%[sq0]) \n\t"
184 [q2] "r" (q2), [q1] "r" (q1), [q0] "r" (q0),
    [all...]
  /external/libavc/common/arm/
ih264_weighted_bi_pred_a9q.s 150 vdup.16 q0, r10 @Q0 = -(log_wd + 1) (32-bit)
190 vrshl.s16 q2, q2, q0 @rounds off the weighted samples from rows 1,2
191 vrshl.s16 q4, q4, q0 @rounds off the weighted samples from rows 3,4
238 vrshl.s16 q2, q2, q0 @rounds off the weighted samples from row 1
239 vrshl.s16 q4, q4, q0 @rounds off the weighted samples from row 2
240 vrshl.s16 q6, q6, q0 @rounds off the weighted samples from row 3
242 vrshl.s16 q8, q8, q0 @rounds off the weighted samples from row 4
311 vrshl.s16 q10, q10, q0 @rounds off the weighted samples from row 1L
315 vrshl.s16 q2, q2, q0 @rounds off the weighted samples from row 1
    [all...]
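
In the ih264_weighted_bi_pred_a9q.s hits, q0 is loaded with -(log_wd + 1) and vrshl then applies a rounding right shift to the weighted sums. Per sample, the operation those comments describe is roughly the following (a sketch of H.264-style bi-directional weighted prediction; the helper name and scalar form are assumptions, the real routine works on whole NEON rows):

    #include <stdint.h>

    /* One bi-weighted sample: weighted sum, rounding shift right by
     * (log_wd + 1) -- the vrshl by -(log_wd + 1) in the listing --
     * then the averaged offset and a clip to 8 bits.  Sketch only. */
    static uint8_t weighted_bi_pred_sample(uint8_t p0, uint8_t p1,
                                           int w0, int w1, int o0, int o1,
                                           int log_wd) {
      int32_t sum = p0 * w0 + p1 * w1;
      int32_t val = ((sum + (1 << log_wd)) >> (log_wd + 1)) + ((o0 + o1 + 1) >> 1);
      if (val < 0) val = 0;
      if (val > 255) val = 255;
      return (uint8_t)val;
    }
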
ih264_ihadamard_scaling_a9.s 135 vadd.s32 q0, q12, q13 @pi4_tmp_ptr[0] = x0 + x1
141 vmul.s32 q0, q0, q9 @ Q0 = p[i] = (x[i] * trns_coeff[i]) where i = 0..3
146 vshl.s32 q0, q0, q10 @ Q0 = q[i] = (p[i] << (qP/6)) where i = 0..3
151 vqrshrn.s32 d0, q0, #0x6 @ D0 = c[i] = ((q[i] + 32) >> 4) where i = 0..3
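
The ih264_ihadamard_scaling_a9.s comments spell out the scaling applied after the inverse Hadamard transform: p[i] = x[i] * trns_coeff[i], q[i] = p[i] << (qP/6), then a rounding narrow by 6 bits (the vqrshrn.s32 #6). A scalar sketch following those comments (names are illustrative):

    #include <stdint.h>

    /* Scale four inverse-Hadamard DC values as the quoted comments describe:
     * multiply by the dequant coefficient, shift up by qP/6, then a rounding
     * shift right by 6 while narrowing to 16 bits.  Sketch only. */
    static void ihadamard_scale4(const int32_t x[4], const int32_t trns_coeff[4],
                                 int qp, int16_t c[4]) {
      const int shift_up = qp / 6;
      for (int i = 0; i < 4; ++i) {
        int32_t p = x[i] * trns_coeff[i];   /* p[i] = x[i] * trns_coeff[i] */
        int32_t q = p << shift_up;          /* q[i] = p[i] << (qP/6)       */
        c[i] = (int16_t)((q + 32) >> 6);    /* rounding shift right by 6   */
      }
    }
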
ih264_resi_trans_quant_a9.s 128 vsubl.u8 q0, d30, d31 @find residue row 1
185 vabs.s16 q0, q12 @Abs val of row 1 blk 1
213 vneg.s16 q4, q0 @Get negative
216 vceq.s16 q6, q0 , #0 @I compare with zero row 1 and 2 blk 1
219 vbsl.s16 q3, q4, q0 @Restore sign of row 3 and 4
323 vsubl.u8 q0, d10, d11 @find residue row 1
380 vabs.s16 q0, q12 @Abs val of row 1 blk 1
408 vneg.s16 q4, q0 @Get negative
411 vceq.s16 q6, q0 , #0 @I compare with zero row 1 and 2 blk 1
414 vbsl.s16 q3, q4, q0 @Restore sign of row 3 and
    [all...]
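
The ih264_resi_trans_quant_a9.s hits use the standard abs / quantize / restore-sign idiom: vabs takes the residual magnitude, the magnitude is quantized, and vceq + vbsl put the original sign back (keeping exact zeros at zero). A scalar sketch of that idiom (the scale/round/shift parameters are assumed inputs, not libavc's actual interface):

    #include <stdint.h>
    #include <stdlib.h>

    /* Quantize one residual coefficient by working on |x| and restoring
     * the sign afterwards, mirroring the vabs / vneg / vceq / vbsl
     * sequence in the listing. */
    static int16_t quantize_with_sign(int16_t x, int32_t scale,
                                      int32_t round, int shift) {
      int32_t mag = abs(x);                        /* vabs: magnitude         */
      int32_t q = (mag * scale + round) >> shift;  /* quantize the magnitude  */
      return (int16_t)(x < 0 ? -q : q);            /* restore sign; 0 stays 0 */
    }
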
  /external/libvpx/libvpx/vpx_dsp/x86/
loopfilter_avx2.c 69 // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
72 // mask |= (abs(q1 - q0) > limit) * -1;
410 __m128i p4, p3, p2, p1, p0, q0, q1, q2, q3, q4; local
449 q0 = _mm256_castsi256_si128(q256_0);
458 const __m128i abs_q1q0 = _mm_or_si128(_mm_subs_epu8(q1, q0),
459 _mm_subs_epu8(q0, q1));
462 __m128i abs_p0q0 = _mm_or_si128(_mm_subs_epu8(p0, q0),
463 _mm_subs_epu8(q0, p0));
475 // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
478 // mask |= (abs(q1 - q0) > limit) * -1
    [all...]
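
The comments quoted from loopfilter_avx2.c are the scalar definition that the AVX2 code vectorizes: a pixel column is filtered only if abs(p0 - q0) * 2 + abs(p1 - q1) / 2 stays within blimit and every neighbouring difference stays within limit. A plain-C sketch of that mask test, reduced to the four pixels shown (the full reference also checks p3..p2 and q2..q3):

    #include <stdint.h>
    #include <stdlib.h>

    /* Scalar form of the mask computation in the quoted comments.
     * Each failed test ORs in -1 (all bits set); the final inversion
     * leaves the mask all-ones exactly when the edge should be filtered. */
    static int8_t filter_mask_sketch(uint8_t p1, uint8_t p0,
                                     uint8_t q0, uint8_t q1,
                                     uint8_t blimit, uint8_t limit) {
      int8_t mask = 0;
      mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
      mask |= (abs(p1 - p0) > limit) * -1;
      mask |= (abs(q1 - q0) > limit) * -1;
      return (int8_t)~mask;
    }
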
  /external/libmpeg2/common/arm/
impeg2_idct.s 218 vdup.s32 q0, r4
230 vraddhn.s32 d12, q0, q4
231 vraddhn.s32 d13, q0, q5
240 vraddhn.s32 d12, q0, q4
241 vraddhn.s32 d13, q0, q5
250 vraddhn.s32 d12, q0, q4
251 vraddhn.s32 d13, q0, q5
260 vraddhn.s32 d12, q0, q4
261 vraddhn.s32 d13, q0, q5
270 vraddhn.s32 d12, q0, q
    [all...]
  /frameworks/base/core/java/android/hardware/
SensorManager.java 1561 float q0; local
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/arm/neon/
vp9_iht8x8_add_neon.asm 120 ; will be stored back into q8-q15 registers. This macro will touch q0-q7
232 vadd.s16 q0, q9, q15 ; output[0] = step[0] + step[3]
271 vadd.s16 q8, q0, q7 ; output[0] = step1[0] + step1[7];
278 vsub.s16 q15, q0, q7 ; output[7] = step1[0] - step1[7];
284 ; q0 - q7 registers and use them as buffer during calculation.
380 vmull.s16 q0, d18, d30
384 vmlal.s16 q0, d28, d31
412 vadd.s32 q9, q4, q0
416 vsub.s32 q4, q4, q0
436 vmull.s16 q0, d3, d3
    [all...]
  /external/libopus/celt/arm/
celt_pitch_xcorr_arm.s 49 ; q0 = opus_val32 sum[4]
51 ; q0 = opus_val32 sum[4]
86 VMLAL.S16 q0, d3, d6[0]
88 VMLAL.S16 q0, d4, d7[0]
90 VMLAL.S16 q0, d16, d6[1]
92 VMLAL.S16 q0, d17, d7[1]
94 VMLAL.S16 q0, d16, d6[2]
96 VMLAL.S16 q0, d17, d7[2]
98 VMLAL.S16 q0, d16, d6[3]
99 VMLAL.S16 q0, d17, d7[3
    [all...]
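
celt_pitch_xcorr_arm.s keeps four running cross-correlation sums in q0 ("q0 = opus_val32 sum[4]") and feeds them with VMLAL against four consecutive lags of the second signal. The scalar kernel those comments describe is roughly this (a sketch, not Opus's actual celt_pitch_xcorr signature):

    #include <stdint.h>

    /* Four cross-correlation sums at consecutive lags -- the quantity the
     * listing accumulates in q0.  x and y are 16-bit signals, len is the
     * correlation length; y must have at least len + 3 valid samples. */
    static void xcorr4_sketch(const int16_t *x, const int16_t *y,
                              int len, int32_t sum[4]) {
      sum[0] = sum[1] = sum[2] = sum[3] = 0;
      for (int j = 0; j < len; ++j) {
        for (int k = 0; k < 4; ++k) {
          sum[k] += (int32_t)x[j] * y[j + k];  /* one MAC per lag, as VMLAL does */
        }
      }
    }
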
  /external/libhevc/common/arm/
ihevc_inter_pred_luma_copy_w16out.s 104 vmovl.u8 q0,d0 @vmovl_u8(vld1_u8(pu1_src_tmp)
107 vshl.i64 q0,q0,#6 @vshlq_n_s64(temp, 6)
157 vshl.i16 q0,q8,#6 @vshlq_n_s16(tmp, 6)
198 vshl.i16 q0,q8,#6 @vshlq_n_s16(tmp, 6)
232 vshl.i16 q0,q8,#6 @vshlq_n_s16(tmp, 6)
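
The ihevc_inter_pred_luma_copy_w16out.s comments show the whole operation in two steps: widen each 8-bit source pixel (vmovl.u8) and shift it left by 6 (vshl #6) into the 16-bit intermediate buffer HEVC bi-prediction uses. A scalar sketch (function name and stride convention are assumptions):

    #include <stdint.h>

    /* Copy an 8-bit block into a 16-bit destination shifted up by 6,
     * as the vmovl.u8 + vshl #6 pair in the listing does.
     * Strides are in elements of the respective buffers. */
    static void copy_w16out_sketch(const uint8_t *src, int src_stride,
                                   int16_t *dst, int dst_stride,
                                   int width, int height) {
      for (int y = 0; y < height; ++y) {
        for (int x = 0; x < width; ++x) {
          dst[y * dst_stride + x] = (int16_t)(src[y * src_stride + x] << 6);
        }
      }
    }
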
ihevc_inter_pred_chroma_copy.s 230 vld1.8 {q0},[r0]! @vld1_u8(pu1_src_tmp)
232 vst1.8 {q0},[r1]! @vst1_u8(pu1_dst_tmp, tmp_src)
257 vld1.8 {q0},[r0]! @vld1_u8(pu1_src_tmp)
259 vst1.8 {q0},[r1]! @vst1_u8(pu1_dst_tmp, tmp_src)
  /bionic/libc/arch-arm/cortex-a9/bionic/
memset.S 75 vdup.8 q0, r1
80 vmov q1, q0
  /external/llvm/test/MC/AArch64/
inline-asm-modifiers.s 132 ldr q0, [sp]
147 ldr q0, [sp]
  /toolchain/binutils/binutils-2.25/gas/testsuite/gas/aarch64/
programmer-friendly.d 22 34: 9c000160 ldr q0, 60 <\.text\+0x60>
  /frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/
armVCM4P10_TransformResidual4x4_s.S 45 VTRN.32 q0,q1
omxVCM4P10_DequantTransformResidualFromPairAndAdd_s.S 74 VTRN.32 q0,q1
88 VTRN.32 q0,q1
121 VADDW.U8 q3,q0,d4
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/arm/neon/
variance_neon.asm 36 vld1.8 {q0}, [r0], r1 ;Load up source and reference
68 vpaddl.s32 q0, q8 ;accumulate sum
109 vld1.8 {q0}, [r0], r1 ;Load up source and reference
138 vpaddl.s32 q0, q8 ;accumulate sum
193 vpaddl.s32 q0, q8 ;accumulate sum
258 vpaddl.s32 q0, q8 ;accumulate sum
  /prebuilts/gdb/darwin-x86/lib/python2.7/
fractions.py 243 p0, q0, p1, q1 = 0, 1, 1, 0
247 q2 = q0+a*q1
250 p0, q0, p1, q1 = p1, q1, p0+a*p1, q2
253 k = (max_denominator-q0)//q1
254 bound1 = Fraction(p0+k*p1, q0+k*q1)
  /prebuilts/gdb/linux-x86/lib/python2.7/
fractions.py 243 p0, q0, p1, q1 = 0, 1, 1, 0
247 q2 = q0+a*q1
250 p0, q0, p1, q1 = p1, q1, p0+a*p1, q2
253 k = (max_denominator-q0)//q1
254 bound1 = Fraction(p0+k*p1, q0+k*q1)
  /prebuilts/python/darwin-x86/2.7.5/lib/python2.7/
fractions.py 243 p0, q0, p1, q1 = 0, 1, 1, 0
247 q2 = q0+a*q1
250 p0, q0, p1, q1 = p1, q1, p0+a*p1, q2
253 k = (max_denominator-q0)//q1
254 bound1 = Fraction(p0+k*p1, q0+k*q1)
  /prebuilts/python/linux-x86/2.7.5/lib/python2.7/
fractions.py 243 p0, q0, p1, q1 = 0, 1, 1, 0
247 q2 = q0+a*q1
250 p0, q0, p1, q1 = p1, q1, p0+a*p1, q2
253 k = (max_denominator-q0)//q1
254 bound1 = Fraction(p0+k*p1, q0+k*q1)
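
The four fractions.py hits are the same continued-fraction loop from Fraction.limit_denominator: convergents p/q are built with p2 = p0 + a*p1, q2 = q0 + a*q1, and the loop stops once the next denominator would exceed max_denominator (the quoted code then also weighs one extra semiconvergent bound, which this sketch omits). The recurrence, written in C for consistency with the other sketches:

    #include <stdint.h>

    /* Walk the continued-fraction convergents of n/d, stopping before the
     * denominator exceeds max_den -- the p0,q0,p1,q1 recurrence shown in
     * the fractions.py hits.  Returns the last convergent in *pn / *pd. */
    static void best_convergent(int64_t n, int64_t d, int64_t max_den,
                                int64_t *pn, int64_t *pd) {
      int64_t p0 = 0, q0 = 1, p1 = 1, q1 = 0;
      while (d != 0) {
        int64_t a = n / d;
        int64_t q2 = q0 + a * q1;
        if (q2 > max_den) break;
        int64_t p2 = p0 + a * p1;
        p0 = p1; q0 = q1; p1 = p2; q1 = q2;
        int64_t t = n - a * d;   /* continue with the remainder */
        n = d;
        d = t;
      }
      *pn = p1;
      *pd = q1;
    }
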

