Searched refs: vmull (Results 1 - 25 of 80)


  /frameworks/rs/cpu_ref/
rsCpuIntrinsics_neon_Blend.S 84 vmull.u8 q12, d15, d1
85 vmull.u8 q0, d14, d0
86 vmull.u8 q13, d15, d3
87 vmull.u8 q1, d14, d2
88 vmull.u8 q14, d15, d5
89 vmull.u8 q2, d14, d4
90 vmull.u8 q15, d15, d7
91 vmull.u8 q3, d14, d6
130 vmull.u8 q12, d15, d17
131 vmull.u8 q8, d14, d1
    [all...]
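
The Blend kernel above multiplies 8-bit channel values by 8-bit weights with vmull.u8, which widens every product to 16 bits so the scaled channels can be rounded back down without overflow. A minimal C sketch of the same idea, assuming the <arm_neon.h> intrinsics; scale_by_alpha and its arguments are illustrative, not the RenderScript kernel itself:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Scale 8 samples by 8 weights: dst = (src * alpha + 128) >> 8.
     * vmull_u8 is the intrinsic form of vmull.u8: 8x8-bit -> 8x16-bit products. */
    void scale_by_alpha(uint8_t dst[8], const uint8_t src[8], const uint8_t alpha[8]) {
        uint8x8_t  s = vld1_u8(src);
        uint8x8_t  a = vld1_u8(alpha);
        uint16x8_t p = vmull_u8(s, a);        /* widening multiply, no overflow  */
        vst1_u8(dst, vrshrn_n_u16(p, 8));     /* round and narrow back to 8 bits */
    }
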
  /external/swiftshader/third_party/LLVM/test/MC/ARM/
neont2-mul-encoding.s 41 @ CHECK: vmull.s8 q8, d16, d17 @ encoding: [0xc0,0xef,0xa1,0x0c]
42 vmull.s8 q8, d16, d17
43 @ CHECK: vmull.s16 q8, d16, d17 @ encoding: [0xd0,0xef,0xa1,0x0c]
44 vmull.s16 q8, d16, d17
45 @ CHECK: vmull.s32 q8, d16, d17 @ encoding: [0xe0,0xef,0xa1,0x0c]
46 vmull.s32 q8, d16, d17
47 @ CHECK: vmull.u8 q8, d16, d17 @ encoding: [0xc0,0xff,0xa1,0x0c]
48 vmull.u8 q8, d16, d17
49 @ CHECK: vmull.u16 q8, d16, d17 @ encoding: [0xd0,0xff,0xa1,0x0c]
50 vmull.u16 q8, d16, d17
    [all...]
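
These assembler tests exercise one encoding of vmull per element type: the .s8/.s16/.s32, .u8/.u16/.u32 and .p8 suffixes select signed, unsigned or polynomial (carry-less) widening multiplies, and each maps to an intrinsic whose result lanes are twice as wide as the inputs. A small C sketch of the corresponding intrinsic signatures, assuming <arm_neon.h> (illustrative wrappers, not part of the test):

    #include <arm_neon.h>

    /* Each vmull.<dt> form widens: N lanes of M bits in -> N lanes of 2*M bits out. */
    int16x8_t  mull_s8 (int8x8_t   a, int8x8_t   b) { return vmull_s8(a, b);  }
    int32x4_t  mull_s16(int16x4_t  a, int16x4_t  b) { return vmull_s16(a, b); }
    int64x2_t  mull_s32(int32x2_t  a, int32x2_t  b) { return vmull_s32(a, b); }
    uint16x8_t mull_u8 (uint8x8_t  a, uint8x8_t  b) { return vmull_u8(a, b);  }
    uint32x4_t mull_u16(uint16x4_t a, uint16x4_t b) { return vmull_u16(a, b); }
    uint64x2_t mull_u32(uint32x2_t a, uint32x2_t b) { return vmull_u32(a, b); }
    poly16x8_t mull_p8 (poly8x8_t  a, poly8x8_t  b) { return vmull_p8(a, b);  }
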
  /external/libhevc/common/arm/
ihevc_itrans_recon_8x8.s 193 vmull.s16 q10,d2,d0[0] @// y0 * cos4(part of c0 and c1)
195 vmull.s16 q9,d3,d1[2] @// y2 * sin2 (q3 is freed by this time)(part of d1)
198 vmull.s16 q12,d6,d0[1] @// y1 * cos1(part of b0)
200 vmull.s16 q13,d6,d0[3] @// y1 * cos3(part of b1)
202 vmull.s16 q14,d6,d1[1] @// y1 * sin3(part of b2)
204 vmull.s16 q15,d6,d1[3] @// y1 * sin1(part of b3)
214 vmull.s16 q11,d10,d0[0] @// y4 * cos4(part of c0 and c1)
216 vmull.s16 q3,d3,d0[2] @// y2 * cos2(part of d0)
308 vmull.s16 q12,d6,d0[1] @// y1 * cos1(part of b0)
309 vmull.s16 q13,d6,d0[3] @// y1 * cos3(part of b1)
    [all...]
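
In the 8x8 inverse-transform listing, vmull.s16 with a lane operand (d0[0], d0[1], ...) multiplies a row of coefficients by one cosine/sine constant held in a lane of d0/d1, producing 32-bit partial sums for the butterfly. A rough C equivalent of one such step, assuming <arm_neon.h>; the names y1, y3 and consts follow the comments in the listing, not the actual register allocation:

    #include <arm_neon.h>

    /* b0 = y1*cos1 + y3*cos3: one partial sum of the IDCT butterfly.
     * vmull_lane_s16 / vmlal_lane_s16 mirror vmull.s16 / vmlal.s16 with the
     * scalar taken from a lane of the constants vector. */
    int32x4_t idct_partial(int16x4_t y1, int16x4_t y3, int16x4_t consts) {
        int32x4_t b0 = vmull_lane_s16(y1, consts, 1);   /* y1 * cos1 (widening)   */
        b0 = vmlal_lane_s16(b0, y3, consts, 3);         /* + y3 * cos3 (accumulate) */
        return b0;
    }
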
ihevc_itrans_recon_4x4_ttype1.s 150 vmull.s16 q3,d1,d4[2] @74 * pi2_src[1]
155 vmull.s16 q4,d1,d4[2] @74 * pi2_src[1]
160 vmull.s16 q5,d0,d4[2] @ 74 * pi2_src[0]
164 vmull.s16 q6,d2,d4[1] @ 55 * pi2_src[2]
189 vmull.s16 q3,d15,d4[2] @74 * pi2_src[1]
194 vmull.s16 q4,d15,d4[2] @74 * pi2_src[1]
199 vmull.s16 q5,d14,d4[2] @ 74 * pi2_src[0]
204 vmull.s16 q6,d16,d4[1] @ 55 * pi2_src[2]
ihevc_inter_pred_chroma_vert.s 149 vmull.u8 q3,d9,d1 @vmull_u8(vreinterpret_u8_u32(src_tmp2), coeffabs_1)
154 vmull.u8 q2,d4,d1
194 vmull.u8 q2,d7,d1 @vmull_u8(vreinterpret_u8_u32(src_tmp2), coeffabs_1)
238 vmull.u8 q15,d5,d1 @mul with coeff 1
246 vmull.u8 q14,d6,d1 @mul_res 2
256 vmull.u8 q13,d7,d1
266 vmull.u8 q12,d8,d1
285 vmull.u8 q15,d5,d1 @mul with coeff 1
297 vmull.u8 q14,d6,d1 @mul_res 2
310 vmull.u8 q13,d7,d1
    [all...]
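
The chroma vertical interpolation multiplies each source row by one 8-bit filter coefficient with vmull.u8 and folds the remaining taps into the same 16-bit accumulator (the vmlal/vmlsl forms not shown in this excerpt). A compact C sketch of a two-tap slice of that pattern, assuming <arm_neon.h>; coeff0/coeff1 and the row pointers are illustrative stand-ins for coeffabs_0/coeffabs_1 and src_tmp:

    #include <arm_neon.h>
    #include <stdint.h>

    /* acc = row0*coeff0 + row1*coeff1, kept at 16-bit precision as in the kernel. */
    uint16x8_t vert_filter_2tap(const uint8_t *row0, const uint8_t *row1,
                                uint8_t coeff0, uint8_t coeff1) {
        uint8x8_t  c0  = vdup_n_u8(coeff0);
        uint8x8_t  c1  = vdup_n_u8(coeff1);
        uint16x8_t acc = vmull_u8(vld1_u8(row0), c0);   /* vmull.u8: first tap  */
        acc = vmlal_u8(acc, vld1_u8(row1), c1);         /* vmlal.u8: accumulate */
        return acc;
    }
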
ihevc_inter_pred_chroma_vert_w16out.s 150 vmull.u8 q3,d9,d1 @vmull_u8(vreinterpret_u8_u32(src_tmp2), coeffabs_1)
155 vmull.u8 q2,d4,d1
193 vmull.u8 q2,d7,d1 @vmull_u8(vreinterpret_u8_u32(src_tmp2), coeffabs_1)
237 vmull.u8 q15,d5,d1 @mul with coeff 1
245 vmull.u8 q14,d6,d1 @mul_res 2
254 vmull.u8 q13,d7,d1
263 vmull.u8 q12,d8,d1
280 vmull.u8 q15,d5,d1 @mul with coeff 1
291 vmull.u8 q14,d6,d1 @mul_res 2
302 vmull.u8 q13,d7,d1
    [all...]
ihevc_intra_pred_chroma_mode_27_to_33.s 150 vmull.u8 q1,d3,d0 @pos = ((row + 1) * intra_pred_ang)
179 vmull.u8 q5,d8,d30 @(i row)vmull_u8(ref_main_idx, dup_const_32_fract)
190 vmull.u8 q7,d12,d28 @(ii)vmull_u8(ref_main_idx, dup_const_32_fract)
205 vmull.u8 q9,d16,d26 @(iii)vmull_u8(ref_main_idx, dup_const_32_fract)
222 vmull.u8 q11,d20,d24 @(iv)vmull_u8(ref_main_idx, dup_const_32_fract)
240 vmull.u8 q5,d8,d30 @(v)vmull_u8(ref_main_idx, dup_const_32_fract)
256 vmull.u8 q7,d12,d28 @(vi)vmull_u8(ref_main_idx, dup_const_32_fract)
269 vmull.u8 q9,d16,d26 @(vii)vmull_u8(ref_main_idx, dup_const_32_fract)
281 vmull.u8 q1,d5,d0 @pos = ((row + 1) * intra_pred_ang)
301 vmull.u8 q11,d20,d24 @(viii)vmull_u8(ref_main_idx, dup_const_32_fract)
    [all...]
ihevc_intra_pred_luma_mode_27_to_33.s 153 vmull.u8 q1,d3,d0 @pos = ((row + 1) * intra_pred_ang)
181 vmull.u8 q5,d8,d30 @(i row)vmull_u8(ref_main_idx, dup_const_32_fract)
192 vmull.u8 q7,d12,d28 @(ii)vmull_u8(ref_main_idx, dup_const_32_fract)
207 vmull.u8 q9,d16,d26 @(iii)vmull_u8(ref_main_idx, dup_const_32_fract)
223 vmull.u8 q11,d20,d24 @(iv)vmull_u8(ref_main_idx, dup_const_32_fract)
241 vmull.u8 q5,d8,d30 @(v)vmull_u8(ref_main_idx, dup_const_32_fract)
257 vmull.u8 q7,d12,d28 @(vi)vmull_u8(ref_main_idx, dup_const_32_fract)
270 vmull.u8 q9,d16,d26 @(vii)vmull_u8(ref_main_idx, dup_const_32_fract)
282 vmull.u8 q1,d5,d0 @pos = ((row + 1) * intra_pred_ang)
301 vmull.u8 q11,d20,d24 @(viii)vmull_u8(ref_main_idx, dup_const_32_fract)
    [all...]
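
In these angular intra-prediction kernels each output row is a weighted blend of two neighbouring reference samples: vmull.u8 multiplies ref_main_idx by (32 - fract), a following vmlal.u8 adds ref_main_idx_1 * fract, and the sum is rounded and shifted right by 5. A small C sketch of that blend, assuming <arm_neon.h>; the argument names mirror the comments (ref_main_idx, fract) rather than any real function in the library:

    #include <arm_neon.h>
    #include <stdint.h>

    /* pred = (ref0*(32 - fract) + ref1*fract + 16) >> 5 for 8 pixels, fract in [0,32). */
    uint8x8_t intra_blend(uint8x8_t ref0, uint8x8_t ref1, uint8_t fract) {
        uint8x8_t  w1  = vdup_n_u8(fract);
        uint8x8_t  w0  = vdup_n_u8((uint8_t)(32 - fract));
        uint16x8_t acc = vmull_u8(ref0, w0);    /* ref_main_idx * (32 - fract) */
        acc = vmlal_u8(acc, ref1, w1);          /* + ref_main_idx_1 * fract    */
        return vrshrn_n_u16(acc, 5);            /* round and narrow to 8 bits  */
    }
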
ihevc_inter_pred_chroma_vert_w16inp.s 145 vmull.s16 q0,d0,d12 @vmull_s16(src_tmp1, coeff_0)
148 vmull.s16 q4,d2,d12 @vmull_s16(src_tmp2, coeff_0)
193 vmull.s16 q15,d0,d12 @vmull_s16(src_tmp1, coeff_0)
201 vmull.s16 q14,d1,d12 @vmull_s16(src_tmp2, coeff_0)
212 vmull.s16 q13,d2,d12 @vmull_s16(src_tmp2, coeff_0)
223 vmull.s16 q12,d3,d12 @vmull_s16(src_tmp2, coeff_0)
239 vmull.s16 q15,d0,d12 @vmull_s16(src_tmp1, coeff_0)
250 vmull.s16 q14,d1,d12 @vmull_s16(src_tmp2, coeff_0)
262 vmull.s16 q13,d2,d12 @vmull_s16(src_tmp2, coeff_0)
275 vmull.s16 q12,d3,d12 @vmull_s16(src_tmp2, coeff_0)
    [all...]
ihevc_inter_pred_chroma_vert_w16inp_w16out.s 145 vmull.s16 q0,d0,d12 @vmull_s16(src_tmp1, coeff_0)
148 vmull.s16 q4,d2,d12 @vmull_s16(src_tmp2, coeff_0)
192 vmull.s16 q15,d0,d12 @vmull_s16(src_tmp1, coeff_0)
200 vmull.s16 q14,d1,d12 @vmull_s16(src_tmp2, coeff_0)
211 vmull.s16 q13,d2,d12 @vmull_s16(src_tmp2, coeff_0)
221 vmull.s16 q12,d3,d12 @vmull_s16(src_tmp2, coeff_0)
236 vmull.s16 q15,d0,d12 @vmull_s16(src_tmp1, coeff_0)
246 vmull.s16 q14,d1,d12 @vmull_s16(src_tmp2, coeff_0)
257 vmull.s16 q13,d2,d12 @vmull_s16(src_tmp2, coeff_0)
269 vmull.s16 q12,d3,d12 @vmull_s16(src_tmp2, coeff_0)
    [all...]
ihevc_intra_pred_chroma_mode_3_to_9.s 155 vmull.s8 q11, d30, d31 @(col+1)*intra_pred_angle [0:7](col)
211 vmull.u8 q12, d12, d7 @mul (row 0)
221 vmull.u8 q11, d16, d7 @mul (row 1)
232 vmull.u8 q10, d14, d7 @mul (row 2)
243 vmull.u8 q9, d10, d7 @mul (row 3)
254 vmull.u8 q12, d12, d7 @mul (row 4)
267 vmull.u8 q11, d16, d7 @mul (row 5)
278 vmull.u8 q10, d14, d7 @mul (row 6)
282 vmull.u8 q9, d10, d7 @mul (row 7)
310 vmull.s8 q6, d30, d31 @(col+1)*intra_pred_angle [0:7](col)
    [all...]
ihevc_intra_pred_filters_luma_mode_11_to_17.s 269 vmull.s8 q11, d30, d31 @(col+1)*intra_pred_angle [0:7](col)
323 vmull.u8 q12, d12, d7 @mul (row 0)
333 vmull.u8 q11, d16, d7 @mul (row 1)
344 vmull.u8 q10, d14, d7 @mul (row 2)
355 vmull.u8 q9, d10, d7 @mul (row 3)
366 vmull.u8 q12, d12, d7 @mul (row 4)
377 vmull.u8 q11, d16, d7 @mul (row 5)
388 vmull.u8 q10, d14, d7 @mul (row 6)
392 vmull.u8 q9, d10, d7 @mul (row 7)
421 vmull.s8 q6, d30, d31 @(col+1)*intra_pred_angle [0:7](col)
    [all...]
ihevc_intra_pred_luma_mode_3_to_9.s 161 vmull.s8 q11, d30, d31 @(col+1)*intra_pred_angle [0:7](col)
213 vmull.u8 q12, d12, d7 @mul (row 0)
223 vmull.u8 q11, d16, d7 @mul (row 1)
234 vmull.u8 q10, d14, d7 @mul (row 2)
245 vmull.u8 q9, d10, d7 @mul (row 3)
256 vmull.u8 q12, d12, d7 @mul (row 4)
267 vmull.u8 q11, d16, d7 @mul (row 5)
278 vmull.u8 q10, d14, d7 @mul (row 6)
282 vmull.u8 q9, d10, d7 @mul (row 7)
311 vmull.s8 q6, d30, d31 @(col+1)*intra_pred_angle [0:7](col)
    [all...]
ihevc_intra_pred_filters_chroma_mode_19_to_25.s 260 vmull.s8 q1,d3,d0 @pos = ((row + 1) * intra_pred_ang)
288 vmull.u8 q5,d8,d30 @(i row)vmull_u8(ref_main_idx, dup_const_32_fract)
298 vmull.u8 q7,d12,d28 @(ii)vmull_u8(ref_main_idx, dup_const_32_fract)
313 vmull.u8 q9,d16,d26 @(iii)vmull_u8(ref_main_idx, dup_const_32_fract)
329 vmull.u8 q11,d20,d24 @(iv)vmull_u8(ref_main_idx, dup_const_32_fract)
346 vmull.u8 q5,d8,d30 @(v)vmull_u8(ref_main_idx, dup_const_32_fract)
361 vmull.u8 q7,d12,d28 @(vi)vmull_u8(ref_main_idx, dup_const_32_fract)
377 vmull.u8 q9,d16,d26 @(vii)vmull_u8(ref_main_idx, dup_const_32_fract)
389 vmull.s8 q1,d5,d0 @pos = ((row + 1) * intra_pred_ang)
411 vmull.u8 q11,d20,d24 @(viii)vmull_u8(ref_main_idx, dup_const_32_fract)
    [all...]
ihevc_itrans_recon_16x16.s 243 vmull.s16 q12,d6,d0[1] @// y1 * cos1(part of b0)
244 vmull.s16 q13,d6,d0[3] @// y1 * cos3(part of b1)
245 vmull.s16 q14,d6,d1[1] @// y1 * sin3(part of b2)
246 vmull.s16 q15,d6,d1[3] @// y1 * sin1(part of b3)
258 vmull.s16 q6,d10,d0[0]
260 vmull.s16 q7,d10,d0[0]
262 vmull.s16 q8,d10,d0[0]
264 vmull.s16 q9,d10,d0[0]
420 vmull.s16 q12,d6,d2[1] @// y1 * cos1(part of b0)
421 vmull.s16 q13,d6,d2[3] @// y1 * cos3(part of b1)
    [all...]
ihevc_weighted_pred_bi.s 204 vmull.s16 q2,d0,d7[0] @vmull_n_s16(pi2_src1_val1, (int16_t) wgt0)
206 vmull.s16 q4,d1,d7[1] @vmull_n_s16(pi2_src2_val1, (int16_t) wgt1)
211 vmull.s16 q5,d2,d7[0] @vmull_n_s16(pi2_src1_val2, (int16_t) wgt0) ii iteration
215 vmull.s16 q7,d0,d7[0] @vmull_n_s16(pi2_src1_val1, (int16_t) wgt0) iii iteration
218 vmull.s16 q6,d3,d7[1] @vmull_n_s16(pi2_src2_val2, (int16_t) wgt1) ii iteration
225 vmull.s16 q8,d1,d7[1] @vmull_n_s16(pi2_src2_val1, (int16_t) wgt1) iii iteration
232 vmull.s16 q9,d2,d7[0] @vmull_n_s16(pi2_src1_val2, (int16_t) wgt0) iv iteration
237 vmull.s16 q10,d3,d7[1] @vmull_n_s16(pi2_src2_val2, (int16_t) wgt1) iv iteration
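
The bi-directional weighted prediction multiplies each 16-bit source sample by a scalar weight, which the comments express as vmull_n_s16(pi2_src1_val1, wgt0); the assembly realises this as vmull.s16 against a lane of d7 that holds the two weights. A short C sketch combining the two weighted sources, assuming <arm_neon.h>; wgt0/wgt1 are illustrative parameters and the rounding/offset step of the real function is omitted:

    #include <arm_neon.h>
    #include <stdint.h>

    /* acc = src0*wgt0 + src1*wgt1 at 32-bit precision. */
    int32x4_t weighted_bi_pred(int16x4_t src0, int16x4_t src1,
                               int16_t wgt0, int16_t wgt1) {
        int32x4_t acc = vmull_n_s16(src0, wgt0);   /* vmull_n_s16: widen * scalar */
        acc = vmlal_n_s16(acc, src1, wgt1);        /* accumulate second source    */
        return acc;
    }
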
  /external/llvm/test/MC/ARM/
neont2-mul-encoding.s 54 vmull.s8 q8, d16, d17
55 vmull.s16 q8, d16, d17
56 vmull.s32 q8, d16, d17
57 vmull.u8 q8, d16, d17
58 vmull.u16 q8, d16, d17
59 vmull.u32 q8, d16, d17
60 vmull.p8 q8, d16, d17
62 @ CHECK: vmull.s8 q8, d16, d17 @ encoding: [0xc0,0xef,0xa1,0x0c]
63 @ CHECK: vmull.s16 q8, d16, d17 @ encoding: [0xd0,0xef,0xa1,0x0c]
64 @ CHECK: vmull.s32 q8, d16, d17 @ encoding: [0xe0,0xef,0xa1,0x0c]
    [all...]
neon-mul-encoding.s 82 vmull.s8 q8, d16, d17
83 vmull.s16 q8, d16, d17
84 vmull.s32 q8, d16, d17
85 vmull.u8 q8, d16, d17
86 vmull.u16 q8, d16, d17
87 vmull.u32 q8, d16, d17
88 vmull.p8 q8, d16, d17
90 @ CHECK: vmull.s8 q8, d16, d17 @ encoding: [0xa1,0x0c,0xc0,0xf2]
91 @ CHECK: vmull.s16 q8, d16, d17 @ encoding: [0xa1,0x0c,0xd0,0xf2]
92 @ CHECK: vmull.s32 q8, d16, d17 @ encoding: [0xa1,0x0c,0xe0,0xf2
    [all...]
  /external/capstone/suite/MC/ARM/
neont2-mul-encoding.s.cs 22 0xc0,0xef,0xa1,0x0c = vmull.s8 q8, d16, d17
23 0xd0,0xef,0xa1,0x0c = vmull.s16 q8, d16, d17
24 0xe0,0xef,0xa1,0x0c = vmull.s32 q8, d16, d17
25 0xc0,0xff,0xa1,0x0c = vmull.u8 q8, d16, d17
26 0xd0,0xff,0xa1,0x0c = vmull.u16 q8, d16, d17
27 0xe0,0xff,0xa1,0x0c = vmull.u32 q8, d16, d17
28 0xc0,0xef,0xa1,0x0e = vmull.p8 q8, d16, d17
neon-mul-encoding.s.cs 36 0xa1,0x0c,0xc0,0xf2 = vmull.s8 q8, d16, d17
37 0xa1,0x0c,0xd0,0xf2 = vmull.s16 q8, d16, d17
38 0xa1,0x0c,0xe0,0xf2 = vmull.s32 q8, d16, d17
39 0xa1,0x0c,0xc0,0xf3 = vmull.u8 q8, d16, d17
40 0xa1,0x0c,0xd0,0xf3 = vmull.u16 q8, d16, d17
41 0xa1,0x0c,0xe0,0xf3 = vmull.u32 q8, d16, d17
42 0xa1,0x0e,0xc0,0xf2 = vmull.p8 q8, d16, d17
  /external/libmpeg2/common/arm/
icv_variance_a9.s 89 vmull.u8 q10, d0, d0
90 vmull.u8 q11, d1, d1
91 vmull.u8 q12, d2, d2
92 vmull.u8 q13, d3, d3
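
The variance routine squares each 8-bit sample by multiplying a vector with itself (vmull.u8 q10, d0, d0), so the 16-bit products can be widened and pairwise-added into a sum of squares without overflow. A minimal C sketch of that sum-of-squares step over 8 samples, assuming <arm_neon.h>; the final horizontal reduction shown here is just one workable choice:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Sum of squares of 8 bytes: vmull.u8 of a vector with itself. */
    uint32_t sum_sq_u8x8(const uint8_t p[8]) {
        uint8x8_t  v  = vld1_u8(p);
        uint16x8_t sq = vmull_u8(v, v);            /* 16-bit squares            */
        uint32x4_t s  = vpaddlq_u16(sq);           /* pairwise widen to 32 bits */
        uint32x2_t h  = vadd_u32(vget_low_u32(s), vget_high_u32(s));
        h = vpadd_u32(h, h);                       /* final horizontal add      */
        return vget_lane_u32(h, 0);
    }
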
impeg2_idct.s 442 vmull.s16 q10, d2, d0[0] @// y0 * cos4(part of c0 and c1)
444 vmull.s16 q9, d3, d1[2] @// y2 * sin2 (Q3 is freed by this time)(part of d1)
447 vmull.s16 q12, d6, d0[1] @// y1 * cos1(part of b0)
449 vmull.s16 q13, d6, d0[3] @// y1 * cos3(part of b1)
451 vmull.s16 q14, d6, d1[1] @// y1 * sin3(part of b2)
453 vmull.s16 q15, d6, d1[3] @// y1 * sin1(part of b3)
463 vmull.s16 q11, d10, d0[0] @// y4 * cos4(part of c0 and c1)
465 vmull.s16 q3, d3, d0[2] @// y2 * cos2(part of d0)
561 vmull.s16 q12, d6, d0[1] @// y1 * cos1(part of b0)
562 vmull.s16 q13, d6, d0[3] @// y1 * cos3(part of b1)
    [all...]
  /external/libavc/common/arm/
ih264_inter_pred_chroma_a9q.s 148 vmull.u8 q5, d0, d28
156 vmull.u8 q6, d6, d30
175 vmull.u8 q5, d0, d28
181 vmull.u8 q6, d6, d30
200 vmull.u8 q2, d2, d30
216 vmull.u8 q2, d0, d28
232 vmull.u8 q2, d0, d28
240 vmull.u8 q4, d2, d28
  /external/boringssl/ios-arm/crypto/fipsmodule/
ghash-armv4.S 440 vmull.p8 q8, d16, d6 @ F = A1*B
442 vmull.p8 q0, d26, d0 @ E = A*B1
444 vmull.p8 q9, d18, d6 @ H = A2*B
446 vmull.p8 q11, d26, d22 @ G = A*B2
449 vmull.p8 q10, d20, d6 @ J = A3*B
452 vmull.p8 q0, d26, d0 @ I = A*B3
458 vmull.p8 q11, d26, d22 @ K = A*B4
469 vmull.p8 q0, d26, d6 @ D = A*B
478 vmull.p8 q8, d16, d6 @ F = A1*B
480 vmull.p8 q1, d28, d2 @ E = A*B1
    [all...]
  /external/boringssl/linux-arm/crypto/fipsmodule/
ghash-armv4.S 429 vmull.p8 q8, d16, d6 @ F = A1*B
431 vmull.p8 q0, d26, d0 @ E = A*B1
433 vmull.p8 q9, d18, d6 @ H = A2*B
435 vmull.p8 q11, d26, d22 @ G = A*B2
438 vmull.p8 q10, d20, d6 @ J = A3*B
441 vmull.p8 q0, d26, d0 @ I = A*B3
447 vmull.p8 q11, d26, d22 @ K = A*B4
458 vmull.p8 q0, d26, d6 @ D = A*B
467 vmull.p8 q8, d16, d6 @ F = A1*B
469 vmull.p8 q1, d28, d2 @ E = A*B1
    [all...]
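
The GHASH fallback path uses vmull.p8, the polynomial (carry-less) variant: each pair of 8-bit lanes is multiplied in GF(2), i.e. partial products are combined with XOR rather than addition, and the listing stitches several such products (F = A1*B, E = A*B1, ...) into a full 64x64-bit carry-less multiply. A tiny C sketch of the single building block, assuming <arm_neon.h>; it is only the lane-wise primitive, not the ghash-armv4 combination and reduction:

    #include <arm_neon.h>

    /* One vmull.p8 step: 8 carry-less 8x8-bit products, each widened to 16 bits.
     * Example: 0x53 * 0x02 in GF(2)[x] is 0xa6 (a pure left shift, no carries). */
    poly16x8_t clmul_lanes(poly8x8_t a, poly8x8_t b) {
        return vmull_p8(a, b);
    }
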
