    Searched refs: q4 (Results 1 - 25 of 243)


  /frameworks/rs/cpu_ref/
rsCpuIntrinsics_neon_Convolve.S 31 vpush {q4-q7}
57 vmovl.u8 q4, d28
100 vpop {q4-q7}
120 vpush {q4-q7}
164 vmull.s16 q4, d18, d0[0]
165 vmlal.s16 q4, d19, d0[1]
166 vmlal.s16 q4, d20, d0[2]
167 vmlal.s16 q4, d21, d0[3]
168 vmlal.s16 q4, d22, d1[0]
170 vmlal.s16 q4, d24, d1[1]
    [all...]
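
  The matches above are the inner loop of a NEON convolution: vpush {q4-q7}/vpop {q4-q7} save and restore q4-q7 because d8-d15 (aliased by q4-q7) are callee-saved under the ARM AAPCS, and the vmull.s16/vmlal.s16 run at lines 164-170 is a widening multiply-accumulate against scalar lanes. A minimal C-intrinsics sketch of that accumulate pattern (hypothetical helper name and operand layout, not the file's actual code):

    #include <arm_neon.h>

    /* acc = s0*c[0] + s1*c[1] + s2*c[2] + s3*c[3], widened to 32 bits,
       mirroring the vmull.s16/vmlal.s16-by-lane run in the listing. */
    static int32x4_t widening_mac(int16x4_t s0, int16x4_t s1,
                                  int16x4_t s2, int16x4_t s3,
                                  int16x4_t coeffs)
    {
        int32x4_t acc = vmull_lane_s16(s0, coeffs, 0); /* vmull.s16 q4, d, d0[0] */
        acc = vmlal_lane_s16(acc, s1, coeffs, 1);      /* vmlal.s16 q4, d, d0[1] */
        acc = vmlal_lane_s16(acc, s2, coeffs, 2);
        acc = vmlal_lane_s16(acc, s3, coeffs, 3);
        return acc;
    }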
  /external/libvpx/libvpx/vp8/common/arm/neon/
idct_dequant_full_2x_neon.c 21 int16x8_t q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11; local
41 q4 = vld1q_s16(q);
68 q4 = vmulq_s16(q4, q0);
74 dLow1 = vget_low_s16(q4);
75 dHigh1 = vget_high_s16(q4);
77 q4 = vcombine_s16(dHigh0, dHigh1);
86 q6 = vqdmulhq_n_s16(q4, sinpi8sqrt2);
88 q8 = vqdmulhq_n_s16(q4, cospi8sqrt2minus1);
97 q4 = vqaddq_s16(q4, q8);
    [all...]
dequant_idct_neon.c 27 int16x8_t q1, q2, q3, q4, q5, q6; local
38 q4 = vld1q_s16(input);
59 vmulq_u16(vreinterpretq_u16_s16(q4), vreinterpretq_u16_s16(q6)));
67 q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1);
69 q4 = vshrq_n_s16(q4, 1);
71 q4 = vqaddq_s16(q4, q2);
73 d10 = vqsub_s16(vget_low_s16(q3), vget_high_s16(q4));
74 d11 = vqadd_s16(vget_high_s16(q3), vget_low_s16(q4));
    [all...]
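
  Both vp8 IDCT files above lean on the same fixed-point trick around vqdmulhq_n_s16, which returns the saturated high half of a doubling multiply, i.e. (2*a*k) >> 16. Since sqrt(2)*cos(pi/8) ≈ 1.307 does not fit in a signed 16-bit fraction, libvpx stores the Q16 constant for the value minus one (cospi8sqrt2minus1, 20091) and adds the input back afterwards; the shift right by one at line 69 undoes vqdmulh's doubling. A minimal sketch of the pattern at lines 67-71, with a hypothetical helper name:

    #include <arm_neon.h>

    /* a * sqrt(2)*cos(pi/8): k is the Q16 constant for the value minus one,
       so a*c == a*(c - 1) + a. */
    static int16x8_t mul_cospi8sqrt2(int16x8_t a, int16_t k /* cospi8sqrt2minus1 */)
    {
        int16x8_t t = vqdmulhq_n_s16(a, k); /* saturating (2*a*k) >> 16 */
        t = vshrq_n_s16(t, 1);              /* halve: net effect (a*k) >> 16 */
        return vqaddq_s16(t, a);            /* add a back: a*(c-1) + a */
    }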
mbloopfilter_neon.c 18 uint8x16_t q4, // p2
38 q11u8 = vabdq_u8(q3, q4);
39 q12u8 = vabdq_u8(q4, q5);
70 q4 = veorq_u8(q4, q0u8);
137 q0s8 = vqaddq_s8(vreinterpretq_s8_u8(q4), q0s8);
157 uint8x16_t qblimit, qlimit, qthresh, q3, q4; local
168 q4 = vld1q_u8(src);
182 vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8, q9,
183 q10, &q4, &q5, &q6, &q7, &q8, &q9);
205 uint8x16_t qblimit, qlimit, qthresh, q3, q4; local
292 uint8x16_t qblimit, qlimit, qthresh, q3, q4; local
456 uint8x16_t qblimit, qlimit, qthresh, q3, q4; local
    [all...]
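
  The mbloopfilter matches build per-lane filter masks from absolute pixel differences: vabdq_u8 at lines 38-39 measures the gaps between neighbouring rows of pixels, which are then compared against blimit/limit/thresh, and the veorq_u8 at line 70 is most likely the usual XOR-with-0x80 conversion of unsigned pixels to signed before the filter arithmetic. A minimal sketch of the mask step (hypothetical helper, assuming limit is already broadcast across the vector):

    #include <arm_neon.h>

    /* 0xFF in every lane where |a - b| <= limit, 0x00 elsewhere. */
    static uint8x16_t edge_mask(uint8x16_t a, uint8x16_t b, uint8x16_t limit)
    {
        uint8x16_t d = vabdq_u8(a, b); /* absolute difference, as at line 38 */
        return vcgeq_u8(limit, d);     /* limit >= d  ->  all-ones lane */
    }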
  /external/llvm/test/MC/ARM/
neon-bitwise-encoding.s 110 veor q4, q7, q3
111 veor.8 q4, q7, q3
112 veor.16 q4, q7, q3
113 veor.32 q4, q7, q3
114 veor.64 q4, q7, q3
116 veor.i8 q4, q7, q3
117 veor.i16 q4, q7, q3
118 veor.i32 q4, q7, q3
119 veor.i64 q4, q7, q3
121 veor.s8 q4, q7, q3
    [all...]
neon-v8.s 5 vmaxnm.f32 q2, q4, q6
6 @ CHECK: vmaxnm.f32 q2, q4, q6 @ encoding: [0x5c,0x4f,0x08,0xf3]
16 vcvta.s32.f32 q4, q6
17 @ CHECK: vcvta.s32.f32 q4, q6 @ encoding: [0x4c,0x80,0xbb,0xf3]
18 vcvta.u32.f32 q4, q10
19 @ CHECK: vcvta.u32.f32 q4, q10 @ encoding: [0xe4,0x80,0xbb,0xf3]
43 vcvtp.s32.f32 q4, q15
44 @ CHECK: vcvtp.s32.f32 q4, q15 @ encoding: [0x6e,0x82,0xbb,0xf3]
50 vrintn.f32 q1, q4
51 @ CHECK: vrintn.f32 q1, q4 @ encoding: [0x48,0x24,0xba,0xf3]
    [all...]
thumb-neon-v8.s 5 vmaxnm.f32 q2, q4, q6
6 @ CHECK: vmaxnm.f32 q2, q4, q6 @ encoding: [0x08,0xff,0x5c,0x4f]
16 vcvta.s32.f32 q4, q6
17 @ CHECK: vcvta.s32.f32 q4, q6 @ encoding: [0xbb,0xff,0x4c,0x80]
18 vcvta.u32.f32 q4, q10
19 @ CHECK: vcvta.u32.f32 q4, q10 @ encoding: [0xbb,0xff,0xe4,0x80]
43 vcvtp.s32.f32 q4, q15
44 @ CHECK: vcvtp.s32.f32 q4, q15 @ encoding: [0xbb,0xff,0x6e,0x82]
50 vrintn.f32 q1, q4
51 @ CHECK: vrintn.f32 q1, q4 @ encoding: [0xba,0xff,0x48,0x24]
    [all...]
diagnostics-noneon.s 5 vmov q4, q5
neon-shiftaccum-encoding.s 10 vsra.s64 q8, q4, #64
18 vsra.u64 q4, q5, #25
28 vsra.s64 q4, #64
45 @ CHECK: vsra.s64 q8, q4, #64 @ encoding: [0xd8,0x01,0xc0,0xf2]
53 @ CHECK: vsra.u64 q4, q5, #25 @ encoding: [0xda,0x81,0xa7,0xf3]
62 @ CHECK: vsra.s64 q4, q4, #64 @ encoding: [0xd8,0x81,0x80,0xf2]
82 vrsra.s32 q3, q4, #32
83 vrsra.s64 q4, q5, #64
100 vrsra.s32 q4, #3
    [all...]
neont2-shiftaccum-encoding.s 12 vsra.s64 q8, q4, #64
20 vsra.u64 q4, q5, #25
30 vsra.s64 q4, #64
47 @ CHECK: vsra.s64 q8, q4, #64 @ encoding: [0xc0,0xef,0xd8,0x01]
55 @ CHECK: vsra.u64 q4, q5, #25 @ encoding: [0xa7,0xff,0xda,0x81]
64 @ CHECK: vsra.s64 q4, q4, #64 @ encoding: [0x80,0xef,0xd8,0x81]
85 vrsra.s32 q3, q4, #32
86 vrsra.s64 q4, q5, #64
103 vrsra.s32 q4, #3
    [all...]
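
  The two shift-accumulate test files above (ARM and Thumb-2 encodings of the same instructions) exercise VSRA/VRSRA, which shift each element right by an immediate and add the result into the destination. NEON right-shift immediates run from 1 up to the element width, which is why #64 is legal for .s64, and the CHECK lines confirm that the two-operand form vsra.s64 q4, #64 assembles as vsra.s64 q4, q4, #64. The same operation from C (hypothetical helper name):

    #include <arm_neon.h>

    /* acc += v >> 63 for each 64-bit lane (vsra.s64). */
    static int64x2_t shift_right_accumulate(int64x2_t acc, int64x2_t v)
    {
        return vsraq_n_s64(acc, v, 63);
    }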
basic-arm-instructions-v8.1a.s 15 vqrdmlsh.f32 q3, q4, q5
25 //CHECK-ERROR: vqrdmlsh.f32 q3, q4, q5
37 //CHECK-V8: vqrdmlsh.f32 q3, q4, q5
93 vqrdmlsh.s32 q3, q4, q5
94 //CHECK-V81aARM: vqrdmlsh.s32 q3, q4, q5 @ encoding: [0x5a,0x6c,0x28,0xf3]
95 //CHECK-V81aTHUMB: vqrdmlsh.s32 q3, q4, q5 @ encoding: [0x28,0xff,0x5a,0x6c]
97 //CHECK-V8: vqrdmlsh.s32 q3, q4, q5
103 vqrdmlsh.f32 q3, q4, d5[1]
113 //CHECK-ERROR: vqrdmlsh.f32 q3, q4, d5[1]
neon-shift-encoding.s 116 vsra.s64 q4, q5, #63
122 vsra.s8 q4, #7
134 @ CHECK: vsra.s64 q4, q5, #63 @ encoding: [0xda,0x81,0x81,0xf2]
139 @ CHECK: vsra.s8 q4, q4, #7 @ encoding: [0x58,0x81,0x89,0xf2]
152 vsra.u64 q4, q5, #63
158 vsra.u8 q4, #7
170 @ CHECK: vsra.u64 q4, q5, #63 @ encoding: [0xda,0x81,0x81,0xf3]
175 @ CHECK: vsra.u8 q4, q4, #7 @ encoding: [0x58,0x81,0x89,0xf3]
    [all...]
directive-fpu-instrs.s 10 vmov q4, q11 @ v4si
  /toolchain/binutils/binutils-2.25/ld/testsuite/ld-aarch64/
emit-relocs-299.s 6 ldr q4, [x3, #:lo12:tempy]
  /external/freetype/src/base/
ftbbox.c 253 FT_Pos q4 )
271 FT_ABS( q4 ) ) );
282 q4 <<= shift;
289 q4 >>= -shift;
297 if ( q1 + q2 > q3 + q4 ) /* first half */
299 q4 = q4 + q3;
302 q4 = q4 + q3;
304 q4 = ( q4 + q3 ) / 8;
    [all...]
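
  The ftbbox.c matches are FreeType's cubic bounding-box refinement: the control values q1..q4 are first scaled into a safe range (the shifts at lines 282-289), then repeatedly halved with a de Casteljau step, keeping whichever half can contain the extremum. A sketch of one halving step consistent with the matched lines (hypothetical helper; FT_Pos is a signed integer type, and the /8, /4, /2 renormalise the doubled midpoint sums):

    /* One de Casteljau halving of cubic control values q1..q4 at t = 1/2. */
    static void cubic_half_step(long *q1, long *q2, long *q3, long *q4)
    {
        if (*q1 + *q2 > *q3 + *q4) {            /* extremum in first half  */
            *q4 += *q3; *q3 += *q2; *q2 += *q1; /* first pass of sums (x2) */
            *q4 += *q3; *q3 += *q2;             /* second pass (x4)        */
            *q4 = (*q4 + *q3) / 8;              /* third pass, rescaled    */
            *q3 /= 4;
            *q2 /= 2;
        } else {                                /* extremum in second half */
            *q1 += *q2; *q2 += *q3; *q3 += *q4;
            *q1 += *q2; *q2 += *q3;
            *q1 = (*q1 + *q2) / 8;
            *q2 /= 4;
            *q3 /= 2;
        }
    }

  The /external/pdfium entry below is the same FreeType source vendored into third_party, so its matches are identical.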
  /external/pdfium/third_party/freetype/src/base/
ftbbox.c 253 FT_Pos q4 )
271 FT_ABS( q4 ) ) );
282 q4 <<= shift;
289 q4 >>= -shift;
297 if ( q1 + q2 > q3 + q4 ) /* first half */
299 q4 = q4 + q3;
302 q4 = q4 + q3;
304 q4 = ( q4 + q3 ) / 8;
    [all...]
  /external/libavc/common/arm/
ih264_inter_pred_filters_luma_horz_a9q.s 127 vaddl.u8 q4, d31, d2 @// a0 + a5 (column1,row0)
135 vmlal.u8 q4, d31, d1 @// a0 + a5 + 20a2 (column1,row0)
143 vmlal.u8 q4, d31, d1 @// a0 + a5 + 20a2 + 20a3 (column1,row0)
151 vmlsl.u8 q4, d31, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 (column1,row0)
159 vmlsl.u8 q4, d31, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 (column1,row0)
165 vqrshrun.s16 d20, q4, #5 @// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,row0)
195 vaddl.u8 q4, d31, d2 @// a0 + a5 (column1,row0)
198 vmlal.u8 q4, d29, d1 @// a0 + a5 + 20a2 + 20a3 (column1,row0)
199 vmlal.u8 q4, d30, d1 @// a0 + a5 + 20a2 (column1,row0)
200 vmlsl.u8 q4, d27, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 (column1,row0)
    [all...]
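
  The comments in this file spell out the H.264 6-tap half-pel luma filter: out = clip((a0 - 5*a1 + 20*a2 + 20*a3 - 5*a4 + a5 + 16) >> 5), with d0 holding the coefficient 5 and d1 holding 20. The same computation in C intrinsics (hypothetical helper; the real code interleaves loads and runs several accumulators):

    #include <arm_neon.h>

    /* 6-tap filter (1,-5,20,20,-5,1), rounded, shifted, saturated to u8. */
    static uint8x8_t h264_6tap(uint8x8_t a0, uint8x8_t a1, uint8x8_t a2,
                               uint8x8_t a3, uint8x8_t a4, uint8x8_t a5)
    {
        const uint8x8_t k5  = vdup_n_u8(5);
        const uint8x8_t k20 = vdup_n_u8(20);
        uint16x8_t acc = vaddl_u8(a0, a5);  /* a0 + a5        (vaddl.u8) */
        acc = vmlal_u8(acc, a2, k20);       /* + 20*a2        (vmlal.u8) */
        acc = vmlal_u8(acc, a3, k20);       /* + 20*a3                   */
        acc = vmlsl_u8(acc, a1, k5);        /* - 5*a1         (vmlsl.u8) */
        acc = vmlsl_u8(acc, a4, k5);        /* - 5*a4                    */
        /* (sum + 16) >> 5, saturating narrow to u8 (vqrshrun.s16 #5). */
        return vqrshrun_n_s16(vreinterpretq_s16_u16(acc), 5);
    }

  The quarter-pel entry below builds on the same 6-tap kernel.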
ih264_inter_pred_luma_horz_qpel_a9q.s 134 vaddl.u8 q4, d31, d2 @// a0 + a5 (column1,row0)
142 vmlal.u8 q4, d31, d1 @// a0 + a5 + 20a2 (column1,row0)
150 vmlal.u8 q4, d31, d1 @// a0 + a5 + 20a2 + 20a3 (column1,row0)
158 vmlsl.u8 q4, d31, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 (column1,row0)
166 vmlsl.u8 q4, d31, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 (column1,row0)
173 vqrshrun.s16 d20, q4, #5 @// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,row0)
207 vaddl.u8 q4, d31, d2 @// a0 + a5 (column1,row0)
210 vmlal.u8 q4, d29, d1 @// a0 + a5 + 20a2 + 20a3 (column1,row0)
211 vmlal.u8 q4, d30, d1 @// a0 + a5 + 20a2 (column1,row0)
212 vmlsl.u8 q4, d27, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 (column1,row0)
    [all...]
  /toolchain/binutils/binutils-2.25/gas/testsuite/gas/arm/
neon-omit.s 11 vshl.u16 q3,q4
33 vmls.s32 q3,q4
35 vacgt.f q3,q4
49 vrsra.u16 q4,#6
60 vshl.u16 q3,q4,q5
78 vmls.s32 q3,q4,q5
79 vacge.f q1,q4,q2
80 vacgt.f q3,q1,q4
94 vrsra.u16 q15,q4,#6
  /external/valgrind/none/tests/arm/
neon128.c 439 TESTINSN_bin("vand q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57);
445 TESTINSN_bin("vbic q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57);
452 TESTINSN_bin("vorr q4, q4, q4", q4, q4, i16, 0xff, q4, i16, 0xff);
    [all...]
  /external/clang/test/SemaTemplate/
instantiate-exception-spec.cpp 24 void (*q4)() throw(T) = p2; // ok
  /external/libhevc/common/arm/
ihevc_inter_pred_filters_luma_horz.s 216 vmull.u8 q4,d1,d25 @mul_res = vmlal_u8(src[0_1], coeffabs_1)@
218 vmlal.u8 q4,d3,d27 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
220 vmlsl.u8 q4,d0,d24 @mul_res = vmlsl_u8(src[0_0], coeffabs_0)@
222 vmlsl.u8 q4,d2,d26 @mul_res = vmlsl_u8(src[0_2], coeffabs_2)@
224 vmlal.u8 q4,d4,d28 @mul_res = vmlal_u8(src[0_4], coeffabs_4)@
226 vmlsl.u8 q4,d5,d29 @mul_res = vmlsl_u8(src[0_5], coeffabs_5)@
228 vmlal.u8 q4,d6,d30 @mul_res = vmlal_u8(src[0_6], coeffabs_6)@
230 vmlsl.u8 q4,d7,d31 @mul_res = vmlsl_u8(src[0_7], coeffabs_7)@
240 vqrshrun.s16 d20,q4,#6 @right shift and saturating narrow result 1
296 vmull.u8 q4,d2,d25 @mul_res = vmlal_u8(src[0_1], coeffabs_1)@
    [all...]
ihevc_inter_pred_luma_vert_w16inp_w16out.s 158 vmull.s16 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
160 vmlal.s16 q4,d0,d22 @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_0)@
162 vmlal.s16 q4,d2,d24 @mul_res1 = vmlal_u8(mul_res1, src_tmp3, coeffabs_2)@
164 vmlal.s16 q4,d3,d25 @mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_3)@
166 vmlal.s16 q4,d4,d26 @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_4)@
168 vmlal.s16 q4,d5,d27 @mul_res1 = vmlal_u8(mul_res1, src_tmp2, coeffabs_5)@
169 vmlal.s16 q4,d6,d28 @mul_res1 = vmlal_u8(mul_res1, src_tmp3, coeffabs_6)@
170 vmlal.s16 q4,d7,d29 @mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_7)@
187 vsub.s32 q4, q4, q1
    [all...]
ihevc_intra_pred_chroma_horz.s 130 vdup.16 q4,d1[0]
139 vst1.16 {q4},[r2],r3
140 vst1.16 {q4},[r9],r3
146 vdup.16 q4,d0[0]
155 vst1.16 {q4},[r2],r3
156 vst1.16 {q4},[r9],r3
162 vdup.16 q4,d11[0]
171 vst1.16 {q4},[r2],r3
172 vst1.16 {q4},[r9],r3
179 vdup.16 q4,d10[0]
    [all...]
ihevc_intra_pred_luma_horz.s 128 vdup.8 q4,d1[4]
137 vst1.8 {q4},[r2],r3
138 vst1.8 {q4},[r9],r3
144 vdup.8 q4,d1[0]
153 vst1.8 {q4},[r2],r3
154 vst1.8 {q4},[r9],r3
160 vdup.8 q4,d0[4]
169 vst1.8 {q4},[r2],r3
170 vst1.8 {q4},[r9],r3
177 vdup.8 q4,d0[0]
    [all...]
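
  Both intra horizontal-prediction files above do the same thing: replicate one left-neighbour reference sample across a whole output row with vdup, then store the row; the chroma variant uses 16-bit lanes because the Cb/Cr samples are interleaved. A minimal sketch of the luma case (hypothetical helper name):

    #include <arm_neon.h>

    /* Horizontal intra prediction: every pixel of the row is the row's
       left-neighbour sample (vdup.8 + vst1.8 in the listing above). */
    static void horz_pred_row(uint8_t *dst, uint8_t left)
    {
        vst1q_u8(dst, vdupq_n_u8(left));
    }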

