    Searched refs: v22 (Results 26 - 50 of 159)


  /external/libhevc/common/arm64/
ihevc_intra_pred_chroma_mode_3_to_9.s 133 smull v22.8h, v30.8b, v31.8b //(col+1)*intra_pred_angle [0:7](col)
150 xtn v6.8b, v22.8h
157 sshr v22.8h, v22.8h,#5
163 sqxtn v2.8b, v22.8h
199 umull v22.8h, v16.8b, v7.8b //mul (row 1)
200 umlal v22.8h, v17.8b, v6.8b //mul (row 1)
207 rshrn v22.8b, v22.8h,#5 //round shft (row 1)
217 st1 {v22.8b},[x2], x3 //st (row 1
    [all...]
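Note: the lines matched above compute (col+1)*intra_pred_angle into v22, keep the low byte as the fractional weight (xtn) and arithmetic-shift by 5 for the integer reference offset (sshr/sqxtn). A minimal scalar sketch of that split, with illustrative names not taken from libhevc:

    #include <stdint.h>

    /* Split pos = (col+1)*angle into an integer reference offset and a
     * 5-bit fraction, as the smull / sshr #5 / xtn sequence does per lane. */
    static inline void angular_pos(int col, int intra_pred_angle,
                                   int *ref_offset, int *fract)
    {
        int pos = (col + 1) * intra_pred_angle;  /* smull v22.8h, v30.8b, v31.8b */
        *ref_offset = pos >> 5;                  /* sshr v22.8h, v22.8h, #5      */
        *fract = pos & 31;                       /* low bits kept by xtn         */
    }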
ihevc_intra_pred_chroma_mode_27_to_33.s 212 umull v22.8h, v20.8b, v24.8b //(iv)vmull_u8(ref_main_idx, dup_const_32_fract)
216 umlal v22.8h, v21.8b, v25.8b //(iv)vmull_u8(ref_main_idx_1, dup_const_fract)
236 rshrn v22.8b, v22.8h,#5 //(iv)shift_res = vrshrn_n_u16(add_res, 5)
252 st1 {v22.8b},[x0],x3 //(iv)
294 umull v22.8h, v20.8b, v24.8b //(viii)vmull_u8(ref_main_idx, dup_const_32_fract)
298 umlal v22.8h, v21.8b, v25.8b //(viii)vmull_u8(ref_main_idx_1, dup_const_fract)
318 rshrn v22.8b, v22.8h,#5 //(viii)shift_res = vrshrn_n_u16(add_res, 5)
337 st1 {v22.8b},[x0] //(viii
    [all...]
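Note: the umull/umlal/rshrn triplet matched in ihevc_intra_pred_chroma_mode_27_to_33.s is the standard two-tap fractional interpolation, out = (ref[idx]*(32-fract) + ref[idx+1]*fract + 16) >> 5. A minimal NEON-intrinsics sketch of the same step; the wrapper and argument names are illustrative:

    #include <arm_neon.h>

    /* out[i] = (ref_main_idx[i]*(32-fract) + ref_main_idx_1[i]*fract + 16) >> 5 */
    static inline uint8x8_t interp_2tap(uint8x8_t ref_main_idx,
                                        uint8x8_t ref_main_idx_1,
                                        uint8_t fract)
    {
        uint8x8_t dup_const_fract    = vdup_n_u8(fract);
        uint8x8_t dup_const_32_fract = vdup_n_u8(32 - fract);
        uint16x8_t add_res = vmull_u8(ref_main_idx, dup_const_32_fract); /* umull    */
        add_res = vmlal_u8(add_res, ref_main_idx_1, dup_const_fract);    /* umlal    */
        return vrshrn_n_u16(add_res, 5);                                 /* rshrn #5 */
    }

The luma variants matched below (mode_19_to_25, mode_27_to_33) use the same pattern.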
ihevc_intra_pred_filters_luma_mode_19_to_25.s 325 umull v22.8h, v20.8b, v24.8b //(iv)vmull_u8(ref_main_idx, dup_const_32_fract)
329 umlal v22.8h, v21.8b, v25.8b //(iv)vmull_u8(ref_main_idx_1, dup_const_fract)
348 rshrn v22.8b, v22.8h,#5 //(iv)shift_res = vrshrn_n_u16(add_res, 5)
363 st1 {v22.8b},[x0],x3 //(iv)
406 umull v22.8h, v20.8b, v24.8b //(viii)vmull_u8(ref_main_idx, dup_const_32_fract)
409 umlal v22.8h, v21.8b, v25.8b //(viii)vmull_u8(ref_main_idx_1, dup_const_fract)
432 rshrn v22.8b, v22.8h,#5 //(viii)shift_res = vrshrn_n_u16(add_res, 5)
450 st1 {v22.8b},[x0] //(viii
    [all...]
ihevc_intra_pred_luma_mode_27_to_33.s 217 umull v22.8h, v20.8b, v24.8b //(iv)vmull_u8(ref_main_idx, dup_const_32_fract)
221 umlal v22.8h, v21.8b, v25.8b //(iv)vmull_u8(ref_main_idx_1, dup_const_fract)
241 rshrn v22.8b, v22.8h,#5 //(iv)shift_res = vrshrn_n_u16(add_res, 5)
257 st1 {v22.8b},[x0],x3 //(iv)
299 umull v22.8h, v20.8b, v24.8b //(viii)vmull_u8(ref_main_idx, dup_const_32_fract)
303 umlal v22.8h, v21.8b, v25.8b //(viii)vmull_u8(ref_main_idx_1, dup_const_fract)
324 rshrn v22.8b, v22.8h,#5 //(viii)shift_res = vrshrn_n_u16(add_res, 5)
343 st1 {v22.8b},[x0] //(viii
    [all...]
ihevc_sao_edge_offset_class2_chroma.s 445 cmhi v22.16b, v18.16b , v5.16b //I vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
446 SUB v22.16b, v22.16b , v20.16b //I sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
449 ADD v18.16b, v18.16b , v22.16b //I edge_idx = vaddq_s8(edge_idx, sign_down)
452 NEG v17.16b, v22.16b //I sign_up = vnegq_s8(sign_down)
458 AND v22.16b, v18.16b , v1.16b //I edge_idx = vandq_s8(edge_idx, au1_mask)
459 mov v23.d[0],v22.d[1]
462 UZP1 v31.8b, v22.8b, v23.8b
463 UZP2 v23.8b, v22.8b, v23.8b //I
464 mov v22.8b,v31.8
    [all...]
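Note: the cmhi/SUB/NEG sequence matched in ihevc_sao_edge_offset_class2_chroma.s builds sign(cur - next) as the difference of two comparison masks, and the UZP1/UZP2 pair afterwards de-interleaves the chroma U and V bytes. A hedged sketch of the sign step only, mirroring the intrinsic names quoted in the comments:

    #include <arm_neon.h>

    /* sign_down[i] = sign(cur[i] - next[i]) in {-1, 0, +1}; the all-ones
     * comparison masks (0xFF) read as -1 once reinterpreted as signed. */
    static inline int8x16_t sao_sign(uint8x16_t cur, uint8x16_t next)
    {
        uint8x16_t cmp_gt = vcgtq_u8(cur, next);   /* cmhi          */
        uint8x16_t cmp_lt = vcltq_u8(cur, next);   /* cmhi, swapped */
        return vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt));
    }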
ihevc_intra_pred_filters_chroma_mode_11_to_17.s 251 smull v22.8h, v30.8b, v31.8b //(col+1)*intra_pred_angle [0:7](col)
269 xtn v6.8b, v22.8h
276 sshr v22.8h, v22.8h,#5
284 sqxtn v19.8b, v22.8h
321 umull v22.8h, v16.8b, v7.8b //mul (row 1)
322 umlal v22.8h, v17.8b, v6.8b //mul (row 1)
329 rshrn v22.8b, v22.8h,#5 //round shft (row 1)
339 st1 {v22.8b},[x2], x3 //st (row 1
    [all...]
ihevc_itrans_recon_32x32.s 234 smull v22.4s, v10.4h, v0.h[0]
235 smlal v22.4s, v11.4h, v1.h[2]
270 smlal v22.4s, v12.4h, v3.h[0]
271 smlal v22.4s, v13.4h, v4.h[2]
304 smlal v22.4s, v10.4h, v6.h[0]
305 smlal v22.4s, v11.4h, v7.h[2]
344 smlsl v22.4s, v12.4h, v7.h[0]
345 smlsl v22.4s, v13.4h, v5.h[2]
380 smlsl v22.4s, v10.4h, v0.h[0]
381 smlsl v22.4s, v11.4h, v2.h[2
    [all...]
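Note: the smull/smlal/smlsl chains matched in ihevc_itrans_recon_32x32.s accumulate 16-bit coefficient rows into 32-bit partial sums, one transform-matrix entry (a lane of v0..v7) at a time. A minimal intrinsics sketch of one such partial sum; the coefficient values are placeholders, not the HEVC cosine table:

    #include <arm_neon.h>

    static inline int32x4_t itrans_partial(int16x4_t y0, int16x4_t y1,
                                           int16x4_t y2, int16x4_t y3)
    {
        int32x4_t acc = vmull_n_s16(y0, 64);   /* smull v22.4s, v10.4h, v0.h[0] */
        acc = vmlal_n_s16(acc, y1, 83);        /* smlal: accumulate a term      */
        acc = vmlal_n_s16(acc, y2, 36);
        acc = vmlsl_n_s16(acc, y3, 18);        /* smlsl: subtract a term        */
        return acc;                            /* rounded/narrowed in stage 2   */
    }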
ihevc_inter_pred_filters_luma_horz.s 402 umull v22.8h, v3.8b, v25.8b
404 umlsl v22.8h, v1.8b, v24.8b
408 umlal v22.8h, v7.8b, v27.8b
415 umlsl v22.8h, v5.8b, v26.8b
419 umlal v22.8h, v13.8b, v28.8b
423 umlal v22.8h, v17.8b, v30.8b
426 umlsl v22.8h, v15.8b, v29.8b
429 umlsl v22.8h, v19.8b, v31.8b
442 sqrshrun v11.8b, v22.8h,#6
467 sqrshrun v11.8b, v22.8h,#
    [all...]
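Note: the umull/umlsl/umlal run matched in ihevc_inter_pred_filters_luma_horz.s is an 8-tap horizontal filter; positive taps accumulate with umlal, negative taps with umlsl, and sqrshrun #6 rounds, shifts and clips the result to bytes. A hedged sketch with illustrative tap values, not the exact HEVC coefficients:

    #include <arm_neon.h>

    /* 8-tap filter on eight byte vectors s[0..7]; the taps here sum to 64. */
    static inline uint8x8_t luma_horz_8tap(const uint8x8_t s[8])
    {
        uint16x8_t sum = vmull_u8(s[3], vdup_n_u8(40));   /* umull: centre taps  */
        sum = vmlal_u8(sum, s[4], vdup_n_u8(40));         /* umlal               */
        sum = vmlsl_u8(sum, s[2], vdup_n_u8(11));         /* umlsl: negative tap */
        sum = vmlsl_u8(sum, s[5], vdup_n_u8(11));
        sum = vmlal_u8(sum, s[1], vdup_n_u8(4));
        sum = vmlal_u8(sum, s[6], vdup_n_u8(4));
        sum = vmlsl_u8(sum, s[0], vdup_n_u8(1));
        sum = vmlsl_u8(sum, s[7], vdup_n_u8(1));
        return vqrshrun_n_s16(vreinterpretq_s16_u16(sum), 6); /* sqrshrun #6 */
    }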
ihevc_inter_pred_luma_horz_w16out.s 213 ld1 {v22.2s},[x8],x15 //vector load pu1_src + src_strd
216 zip1 v0.2s, v20.2s, v22.2s
217 zip2 v12.2s, v20.2s, v22.2s //vector zip the i iteration and ii iteration in single register
223 ld1 {v22.2s},[x8],x15
226 zip1 v2.2s, v20.2s, v22.2s
227 zip2 v14.2s, v20.2s, v22.2s
233 ld1 {v22.2s},[x8],x15
236 zip1 v4.2s, v20.2s, v22.2s
237 zip2 v16.2s, v20.2s, v22.2s
243 ld1 {v22.2s},[x8],x1
    [all...]
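Note: the ld1/zip1/zip2 pattern matched in ihevc_inter_pred_luma_horz_w16out.s interleaves two strided loads so one register holds matching halves of the i and ii iterations. A minimal sketch using the AArch64-only vzip1/vzip2 intrinsics; names are illustrative:

    #include <arm_neon.h>

    static inline void zip_two_rows(const uint8_t *row_i, const uint8_t *row_ii,
                                    uint32x2_t *lo, uint32x2_t *hi)
    {
        uint32x2_t a = vreinterpret_u32_u8(vld1_u8(row_i));   /* ld1 {v20.2s} */
        uint32x2_t b = vreinterpret_u32_u8(vld1_u8(row_ii));  /* ld1 {v22.2s} */
        *lo = vzip1_u32(a, b);                                /* zip1 v0.2s   */
        *hi = vzip2_u32(a, b);                                /* zip2 v12.2s  */
    }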
ihevc_intra_pred_chroma_horz.s 300 sqadd v22.8h, v26.8h , v24.8h
303 sqxtun v22.8b, v22.8h
344 sqadd v22.8h, v26.8h , v24.8h
346 sqxtun v22.8b, v22.8h
348 st1 {v22.s}[0],[x2],x3
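Note: the sqadd/sqxtun pair matched in ihevc_intra_pred_chroma_horz.s is the usual "add a signed 16-bit correction, then clip to [0,255]" step. A minimal sketch:

    #include <arm_neon.h>

    static inline uint8x8_t add_and_clip_u8(int16x8_t base, int16x8_t delta)
    {
        int16x8_t sum = vqaddq_s16(base, delta);  /* sqadd v22.8h, v26.8h, v24.8h */
        return vqmovun_s16(sum);                  /* sqxtun v22.8b, v22.8h        */
    }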
ihevc_sao_edge_offset_class3.s 343 Uxtl2 v22.8h, v5.16b //I pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
351 SADDW2 v22.8h, v22.8h , v3.16b //I pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)
353 SMAX v22.8h, v22.8h , v2.8h //I pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip)
354 UMIN v22.8h, v22.8h , v4.8h //I pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip))
364 xtn2 v20.16b, v22.8h //I vmovn_s16(pi2_tmp_cur_row.val[1])
468 Uxtl2 v22.8h, v16.16b //III pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
472 SADDW2 v22.8h, v22.8h , v3.16b //III pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset
    [all...]
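Note: the uxtl2/saddw2/smax/umin/xtn2 chain matched in ihevc_sao_edge_offset_class3.s applies the signed SAO offset in 16-bit precision and clamps back to the pixel range. A hedged sketch of the low-half equivalent (the listing uses the *2 high-half forms); the clip limits assume 8-bit content:

    #include <arm_neon.h>

    static inline uint8x8_t sao_apply(uint8x8_t cur, int8x8_t offset)
    {
        int16x8_t row = vreinterpretq_s16_u16(vmovl_u8(cur));     /* uxtl / uxtl2         */
        row = vaddw_s8(row, offset);                              /* saddw / saddw2       */
        row = vmaxq_s16(row, vdupq_n_s16(0));                     /* smax, const_min_clip */
        row = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(row),
                                              vdupq_n_u16(255))); /* umin, const_max_clip */
        return vmovn_u16(vreinterpretq_u16_s16(row));             /* xtn / xtn2           */
    }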
ihevc_weighted_pred_bi_default.s 206 ld1 {v22.4h},[x11],x3 //load and increment the pi2_src1 iii iteration
208 sqadd v30.4h,v22.4h,v23.4h
300 sqadd v22.8h,v28.8h,v30.8h //vaddq_s32(i4_tmp2_t1, i4_tmp2_t2)
302 sqadd v22.8h,v22.8h,v0.8h //vaddq_s32(i4_tmp2_t1, tmp_lvl_shift_t)
306 sqshrun v21.8b, v22.8h,#7
360 sqadd v22.8h,v28.8h,v30.8h //vaddq_s32(i4_tmp2_t1, i4_tmp2_t2)
361 sqadd v22.8h,v22.8h,v0.8h //vaddq_s32(i4_tmp2_t1, tmp_lvl_shift_t)
363 sqshrun v21.8b, v22.8h,#
    [all...]
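Note: the two sqadd instructions plus sqshrun #7 matched in ihevc_weighted_pred_bi_default.s implement the default bi-prediction average: add the two 16-bit intermediates and a combined rounding/level-shift constant, then shift-saturate back to bytes. A sketch assuming 8-bit content and zero level shifts, so the constant reduces to the rounding term 64:

    #include <arm_neon.h>

    static inline uint8x8_t bi_pred_default(int16x8_t src1, int16x8_t src2)
    {
        int16x8_t tmp_lvl_shift = vdupq_n_s16(64);   /* rounding for the >>7 below   */
        int16x8_t sum = vqaddq_s16(src1, src2);      /* sqadd v22.8h, v28.8h, v30.8h */
        sum = vqaddq_s16(sum, tmp_lvl_shift);        /* sqadd v22.8h, v22.8h, v0.8h  */
        return vqshrun_n_s16(sum, 7);                /* sqshrun v21.8b, v22.8h, #7   */
    }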
ihevc_inter_pred_filters_luma_vert_w16inp.s 128 dup v22.4h, v0.h[0] //coeffabs_0 = vdup_lane_u8(coeffabs, 0)//
157 smlal v19.4s, v0.4h, v22.4h //mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_0)//
174 smlal v20.4s, v1.4h, v22.4h //mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_0)//
190 smlal v21.4s, v2.4h, v22.4h
203 smlal v30.4s, v3.4h, v22.4h
231 smlal v19.4s, v0.4h, v22.4h //mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_0)//
247 smlal v20.4s, v1.4h, v22.4h //mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_0)//
265 smlal v21.4s, v2.4h, v22.4h
290 smlal v30.4s, v3.4h, v22.4h
317 smlal v19.4s, v0.4h, v22.4h //mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_0)/
    [all...]
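Note: the dup + smlal pairs matched in ihevc_inter_pred_filters_luma_vert_w16inp.s broadcast one filter tap into v22 and accumulate each 16-bit source row into a 32-bit result. A sketch of the first two taps; the remaining taps accumulate the same way, and the w16out variant below follows the same pattern:

    #include <arm_neon.h>

    static inline int32x4_t vert_filter_acc(int16x4_t coeffs,
                                            int16x4_t src_row0, int16x4_t src_row1)
    {
        int16x4_t coeffabs_0 = vdup_lane_s16(coeffs, 0);     /* dup v22.4h, v0.h[0]       */
        int16x4_t coeffabs_1 = vdup_lane_s16(coeffs, 1);
        int32x4_t mul_res = vmull_s16(src_row0, coeffabs_0); /* smull                     */
        return vmlal_s16(mul_res, src_row1, coeffabs_1);     /* smlal v19.4s, ..., v22.4h */
    }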
ihevc_inter_pred_luma_vert_w16inp_w16out.s 137 dup v22.4h,v0.h[0] //coeffabs_0 = vdup_lane_u8(coeffabs, 0)//
168 smlal v19.4s,v0.4h,v22.4h //mul_res1 = smlal_u8(mul_res1, src_tmp1, coeffabs_0)//
185 smlal v20.4s,v1.4h,v22.4h //mul_res2 = smlal_u8(mul_res2, src_tmp2, coeffabs_0)//
201 smlal v21.4s,v2.4h,v22.4h
215 smlal v31.4s,v3.4h,v22.4h
245 smlal v19.4s,v0.4h,v22.4h //mul_res1 = smlal_u8(mul_res1, src_tmp1, coeffabs_0)//
262 smlal v20.4s,v1.4h,v22.4h //mul_res2 = smlal_u8(mul_res2, src_tmp2, coeffabs_0)//
281 smlal v21.4s,v2.4h,v22.4h
307 smlal v31.4s,v3.4h,v22.4h
335 smlal v19.4s,v0.4h,v22.4h //mul_res1 = smlal_u8(mul_res1, src_tmp1, coeffabs_0)/
    [all...]
  /external/libavc/common/armv8/
ih264_inter_pred_luma_vert_qpel_av8.s 122 movi v22.8h, #20 // Filter coeff 0x14 into Q11
145 mla v14.8h, v12.8h , v22.8h // temp += temp1 * 20
148 mla v20.8h, v18.8h , v22.8h // temp4 += temp3 * 20
155 mla v16.8h, v12.8h , v22.8h
162 mla v14.8h, v12.8h , v22.8h
171 mla v18.8h, v12.8h , v22.8h
178 mla v16.8h, v12.8h , v22.8h
188 mla v14.8h, v12.8h , v22.8h
194 mla v18.8h, v12.8h , v22.8h
212 mla v14.8h, v12.8h , v22.8h // temp += temp1 * 2
    [all...]
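Note: the movi #20 / mla pairs matched in ih264_inter_pred_luma_vert_qpel_av8.s are the H.264 half-pel 6-tap filter, acc = (r0 + r5) - 5*(r1 + r4) + 20*(r2 + r3), with the constant 20 held in v22. A hedged sketch of one output row before the final rounding shift:

    #include <arm_neon.h>

    static inline int16x8_t qpel_6tap(uint8x8_t r0, uint8x8_t r1, uint8x8_t r2,
                                      uint8x8_t r3, uint8x8_t r4, uint8x8_t r5)
    {
        int16x8_t c20 = vdupq_n_s16(20);                          /* movi v22.8h, #20 */
        int16x8_t c5  = vdupq_n_s16(5);
        int16x8_t acc = vreinterpretq_s16_u16(vaddl_u8(r0, r5));
        int16x8_t t23 = vreinterpretq_s16_u16(vaddl_u8(r2, r3));  /* temp1            */
        int16x8_t t14 = vreinterpretq_s16_u16(vaddl_u8(r1, r4));
        acc = vmlaq_s16(acc, t23, c20);   /* mla ..., v22.8h: temp += temp1 * 20 */
        acc = vmlsq_s16(acc, t14, c5);
        return acc;                       /* later rounded with a saturating shift */
    }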
ih264_deblk_luma_av8.s 114 uabd v22.16b, v6.16b, v0.16b
128 cmhs v18.16b, v22.16b, v20.16b
132 cmhi v22.16b, v16.16b , v30.16b //Q11=(Aq<Beta)
151 sub v18.16b, v18.16b , v22.16b //Q9 = C0 + (Ap < Beta) + (Aq < Beta)
153 and v22.16b, v22.16b , v12.16b //
189 and v30.16b, v22.16b , v30.16b //condition check Aq<beta
275 uabd v22.16b , v14.16b, v4.16b
278 cmhi v22.16b, v2.16b , v22.16b //Aq < Bet
    [all...]
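Note: the uabd/cmhi/cmhs lines matched in ih264_deblk_luma_av8.s build the per-pixel filter condition from absolute differences against alpha and beta. A hedged sketch of the basic mask only; the real routine also folds in the Ap/Aq checks and C0 clipping:

    #include <arm_neon.h>

    /* 0xFF in each lane where |p0-q0| < alpha, |p1-p0| < beta and |q1-q0| < beta. */
    static inline uint8x16_t deblk_filter_mask(uint8x16_t p0, uint8x16_t q0,
                                               uint8x16_t p1, uint8x16_t q1,
                                               uint8x16_t alpha, uint8x16_t beta)
    {
        uint8x16_t m = vcgtq_u8(alpha, vabdq_u8(p0, q0));   /* uabd + cmhi */
        m = vandq_u8(m, vcgtq_u8(beta, vabdq_u8(p1, p0)));
        m = vandq_u8(m, vcgtq_u8(beta, vabdq_u8(q1, q0)));
        return m;
    }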
ih264_weighted_pred_av8.s 222 uxtl v22.8h, v9.8b //converting row 3H to 16-bit
227 mul v22.8h, v22.8h , v2.h[0] //weight mult. for row 3H
240 srshl v22.8h, v22.8h , v0.8h //rounds off the weighted samples from row 3H
247 saddw v22.8h, v22.8h , v3.8b //adding offset for row 3H
250 sqxtun v9.8b, v22.8h //saturating row 3H to unsigned 8-bit
417 uxtl v22.8h, v10.8b //converting row 3L to 16-bit
420 mul v22.8h, v22.8h , v2.8h //weight mult. for row 3
    [all...]
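Note: the uxtl/mul/srshl/saddw/sqxtun chain matched in ih264_weighted_pred_av8.s applies explicit weighted prediction: widen, multiply by the weight, round-shift right by log2_denom (the asm does this with srshl and a negated shift count), add the offset, and saturate back to bytes. A minimal sketch with illustrative names:

    #include <arm_neon.h>

    static inline uint8x8_t weight_row(uint8x8_t src, int16_t wt, int8_t ofst,
                                       int log2_denom)
    {
        int16x8_t row = vreinterpretq_s16_u16(vmovl_u8(src));      /* uxtl            */
        row = vmulq_n_s16(row, wt);                                /* mul ... v2.h[0] */
        row = vrshlq_s16(row, vdupq_n_s16((int16_t)-log2_denom));  /* srshl           */
        row = vaddw_s8(row, vdup_n_s8(ofst));                      /* saddw           */
        return vqmovun_s16(row);                                   /* sqxtun          */
    }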
ih264_iquant_itrans_recon_av8.s 142 mul v22.4h, v22.4h, v28.4h // x[i]=(scale[i] * dequant[i]) where i = 8..11
147 smull v4.4s, v18.4h, v22.4h // q2 = p[i] = (x[i] * trns_coeff[i]) where i = 8..11
213 sub v22.4h, v15.4h, v16.4h // x1 - x2
217 mov v22.d[1], v23.d[0]
220 srshr v22.8h, v22.8h, #6
223 uaddw v22.8h, v22.8h , v31.8b
226 sqxtun v1.8b, v22.8
    [all...]
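Note: the tail of ih264_iquant_itrans_recon_av8.s matched above rounds the inverse-transform residual (srshr #6), adds the predictor with a widening add (uaddw) and saturates to bytes (sqxtun). A minimal sketch of that reconstruction step:

    #include <arm_neon.h>

    static inline uint8x8_t recon_row(int16x8_t residual, uint8x8_t pred)
    {
        int16x8_t r = vrshrq_n_s16(residual, 6);                  /* srshr #6 */
        r = vreinterpretq_s16_u16(
                vaddw_u8(vreinterpretq_u16_s16(r), pred));        /* uaddw    */
        return vqmovun_s16(r);                                    /* sqxtun   */
    }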
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/ppc/
platform_altivec.asm 30 W v22, r3
48 R v22, r3
  /cts/tests/tests/jni/src/android/jni/cts/
InstanceFromNative.java 87 int v20, int v21, int v22, int v23, int v24,
100 (v20 == 20) && (v21 == 21) && (v22 == 22) && (v23 == 23) &&
StaticFromNative.java 84 int v20, int v21, int v22, int v23, int v24,
97 (v20 == 20) && (v21 == 21) && (v22 == 22) && (v23 == 23) &&
  /external/llvm/test/MC/AArch64/
neon-max-min-pairwise.s 85 fminp v10.4h, v15.4h, v22.4h
87 fminp v10.2s, v15.2s, v22.2s
91 // CHECK: fminp v10.4h, v15.4h, v22.4h // encoding: [0xea,0x35,0xd6,0x2e]
93 // CHECK: fminp v10.2s, v15.2s, v22.2s // encoding: [0xea,0xf5,0xb6,0x2e]
115 fminnmp v10.4h, v15.4h, v22.4h
117 fminnmp v10.2s, v15.2s, v22.2s
121 // CHECK: fminnmp v10.4h, v15.4h, v22.4h // encoding: [0xea,0x05,0xd6,0x2e]
123 // CHECK: fminnmp v10.2s, v15.2s, v22.2s // encoding: [0xea,0xc5,0xb6,0x2e]
  /external/libmpeg2/common/armv8/
impeg2_idct.s 196 uaddw v22.8h, v30.8h , v7.8b
200 sqxtun v7.8b, v22.8h
408 smull v22.4s, v10.4h, v0.4h[0] //// y4 * cos4(part of c0 and c1)
442 add v10.4s, v20.4s , v22.4s //// c0 = y0 * cos4 + y4 * cos4(part of a0 and a1)
443 sub v20.4s, v20.4s , v22.4s //// c1 = y0 * cos4 - y4 * cos4(part of a0 and a1)
452 sub v22.4s, v20.4s , v18.4s //// a2 = c1 - d1(part of x2,x5)
458 add v24.4s, v22.4s , v28.4s //// a2 + b2(part of x2)
459 sub v22.4s, v22.4s , v28.4s //// a2 - b2(part of x5)
470 sqrshrn v14.4h, v22.4s, #idct_stg1_shift //// x5 = (a2 - b2 + rnd) >> 7(IDCT_STG1_SHIFT
    [all...]
  /toolchain/binutils/binutils-2.25/gas/testsuite/gas/ppc/
altivec.d 24 38: (7e d0 60 4c|4c 60 d0 7e) lvsr v22,r16,r12
28 48: (7e c1 10 ce|ce 10 c1 7e) lvx v22,r1,r2
48 98: (12 da db 80|80 db da 12) vaddsws v22,v26,v27
61 cc: (13 36 54 42|42 54 36 13) vavguh v25,v22,v10
81 11c: (12 16 33 06|06 33 16 12) vcmpgtsb v16,v22,v6
98 160: (12 c0 d9 ca|ca d9 c0 12) vlogefp v22,v27
103 174: (12 63 b1 82|82 b1 63 12) vmaxsw v19,v3,v22
107 184: (12 cd 2d a0|a0 2d cd 12) vmhaddshs v22,v13,v5,v22
119 1b4: (12 00 b0 8c|8c b0 00 12) vmrghw v16,v0,v22
    [all...]
  /frameworks/rs/cpu_ref/
rsCpuIntrinsics_advsimd_ColorMatrix.S 223 vmxx_f32 \i, 4, v16.4s, v22.4s, v2.s[0]
237 vmxx_f32 \i^31, 4, v16.4s, v22.4s, v2.s[0]
251 vmxx_f32 \i, 4, v17.4s, v22.4s, v2.s[1]
265 vmxx_f32 \i^31, 4, v17.4s, v22.4s, v2.s[1]
279 vmxx_f32 \i, 4, v18.4s, v22.4s, v2.s[2]
293 vmxx_f32 \i^31, 4, v18.4s, v22.4s, v2.s[2]
307 vmxx_f32 \i, 4, v19.4s, v22.4s, v2.s[3]
321 vmxx_f32 \i^31, 4, v19.4s, v22.4s, v2.s[3]
330 ld4 {v20.8b,v21.8b,v22.8b,v23.8b}, [x1], #32
333 uxtl v22.8h, v22.8
    [all...]
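Note: the ColorMatrix kernel matched above de-interleaves RGBA with ld4, widens each channel (uxtl) and accumulates one matrix column per channel via the vmxx_f32 macro, a lane-indexed multiply-accumulate. A hedged sketch of a single channel's contribution for four pixels; the names and the float pipeline are illustrative:

    #include <arm_neon.h>

    static inline float32x4_t colormatrix_b_term(float32x4_t acc,
                                                 const uint8_t *px,
                                                 float32x4_t matrix_col)
    {
        uint8x8x4_t rgba = vld4_u8(px);                /* ld4 {v20.8b-v23.8b} */
        uint16x8_t b16 = vmovl_u8(rgba.val[2]);        /* uxtl v22.8h, v22.8b */
        float32x4_t b = vcvtq_f32_u32(vmovl_u16(vget_low_u16(b16)));
        return vfmaq_laneq_f32(acc, b, matrix_col, 2); /* ... v22.4s, v2.s[2] */
    }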

Completed in 2700 milliseconds
