    Searched refs: v24 (Results 1 - 25 of 104)


  /external/libhevc/common/arm64/
ihevc_intra_pred_luma_vert.s 188 dup v24.16b,w12 //src[2nt+1]
213 sqxtun v24.8b, v28.8h
214 sqxtun2 v24.16b, v0.8h
218 rev64 v24.16b, v24.16b
219 mov v25.d[0], v24.d[1]
223 bsl v18.8b, v24.8b , v16.8b //only select row values from q12(predpixel)
239 bsl v1.8b, v24.8b , v16.8b
264 bsl v18.8b, v24.8b , v16.8b //only select row values from q12(predpixel)
277 bsl v1.8b, v24.8b , v16.8b
    [all...]
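
The dup/sqxtun/bsl sequence above is the usual NEON select pattern: replicate a reference pixel, saturating-narrow a filtered 16-bit column back to bytes, then merge it lane-wise into the plain vertical prediction. A minimal C sketch of that pattern, assuming intrinsics equivalent to the listed instructions (names and operand roles are mine, not the library's):

    #include <arm_neon.h>

    /* Hedged sketch of the sqxtun + bsl pattern: "filtered" is the 16-bit
     * filtered first column, "mask" picks the lanes that take it instead
     * of the unfiltered vertical prediction. */
    static inline uint8x8_t merge_filtered_column(int16x8_t filtered,
                                                  uint8x8_t mask,
                                                  uint8x8_t vert_pred)
    {
        uint8x8_t col = vqmovun_s16(filtered); /* sqxtun v24.8b, v28.8h */
        return vbsl_u8(mask, col, vert_pred);  /* bsl v18.8b, v24.8b, v16.8b */
    }
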
ihevc_inter_pred_chroma_vert_w16inp.s 224 smull v24.4s, v3.4h, v16.4h //vmull_s16(src_tmp2, coeff_0)
226 smlal v24.4s, v4.4h, v17.4h
228 smlal v24.4s, v5.4h, v18.4h
230 smlal v24.4s, v6.4h, v19.4h
248 sqshrn v24.4h, v24.4s,#6 //right shift
263 sqrshrun v24.8b, v24.8h,#6 //rounding shift
269 st1 {v24.s}[0],[x9] //stores the loaded value
279 smull v24.4s, v3.4h, v16.4h //vmull_s16(src_tmp2, coeff_0)
    [all...]
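
The comments in this excerpt spell out the operation: a 4-tap vertical filter built from one widening multiply and three multiply-accumulates, narrowed twice by 6 bits. A hedged C intrinsics sketch with made-up parameter names (the real code keeps everything in registers):

    #include <arm_neon.h>

    /* 4-tap vertical chroma filter on 16-bit input, per the excerpt's
     * vmull_s16/vmlal_s16 comments. The asm pairs two 4-lane results
     * before the final narrow; here one half is duplicated for brevity. */
    static inline uint8x8_t chroma_vert_4tap(int16x4_t s0, int16x4_t s1,
                                             int16x4_t s2, int16x4_t s3,
                                             int16x4_t c0, int16x4_t c1,
                                             int16x4_t c2, int16x4_t c3)
    {
        int32x4_t acc = vmull_s16(s0, c0);    /* smull v24.4s, v3.4h, v16.4h */
        acc = vmlal_s16(acc, s1, c1);         /* smlal, three times */
        acc = vmlal_s16(acc, s2, c2);
        acc = vmlal_s16(acc, s3, c3);
        int16x4_t mid = vqshrn_n_s32(acc, 6); /* sqshrn #6: right shift */
        int16x8_t two = vcombine_s16(mid, mid);
        return vqrshrun_n_s16(two, 6);        /* sqrshrun #6: rounding shift */
    }
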
ihevc_intra_pred_luma_horz.s 209 usubl v24.8h, v30.8b, v28.8b
212 sshr v24.8h, v24.8h,#1
215 sqadd v22.8h, v26.8h , v24.8h
223 usubl v24.8h, v31.8b, v28.8b
226 sshr v24.8h, v24.8h,#1
229 sqadd v22.8h, v26.8h , v24.8h
291 usubl v24.8h, v30.8b, v28.8b
294 sshr v24.8h, v24.8h,#1
    [all...]
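
The repeated usubl/sshr/sqadd triple here matches the HEVC horizontal-mode edge filter, pred = clip(left + ((top - topleft) >> 1)). A sketch under that reading (the operand roles are inferred from the excerpt, not stated in it):

    #include <arm_neon.h>

    /* Horizontal intra edge filter: widen the top-minus-topleft difference,
     * halve it, and saturating-add it to the replicated left pixel. */
    static inline uint8x8_t horz_edge_filter(uint8x8_t top, uint8x8_t topleft,
                                             int16x8_t left)
    {
        int16x8_t d = vreinterpretq_s16_u16(vsubl_u8(top, topleft)); /* usubl */
        d = vshrq_n_s16(d, 1);                                       /* sshr #1 */
        int16x8_t sum = vqaddq_s16(left, d);                         /* sqadd */
        return vqmovun_s16(sum);          /* narrow with unsigned saturation */
    }
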
ihevc_itrans_recon_8x8.s 192 smull v24.4s, v6.4h, v0.4h[1] //// y1 * cos1(part of b0)
200 smlal v24.4s, v7.4h, v0.4h[3] //// y1 * cos1 + y3 * cos3(part of b0)
234 smlal v24.4s, v14.4h, v1.4h[1] //// y1 * cos1 + y3 * cos3 + y5 * sin3(part of b0)
245 smlal v24.4s, v15.4h, v1.4h[3] //// b0 = y1 * cos1 + y3 * cos3 + y5 * sin3 + y7 * sin1(part of x0,x7)
255 add v20.4s, v14.4s , v24.4s //// a0 + b0(part of x0)
256 sub v6.4s, v14.4s , v24.4s //// a0 - b0(part of x7)
258 add v24.4s, v22.4s , v28.4s //// a2 + b2(part of x2)
269 sqrshrn v3.4h, v24.4s,#shift_stage1_idct //// x2 = (a2 + b2 + rnd) >> 7(shift_stage1_idct)
304 smull v24.4s, v6.4h, v0.4h[1] //// y1 * cos1(part of b0)
309 smlal v24.4s, v7.4h, v0.4h[3] //// y1 * cos1 + y3 * cos3(part of b0)
    [all...]
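
The //// comments document the inverse-transform butterfly accumulated in v24: b0 = y1*cos1 + y3*cos3 + y5*sin3 + y7*sin1, combined with the even part a0 as x0 = a0 + b0 and x7 = a0 - b0, then rounded down by shift_stage1_idct (7, per the comment on line 269). A hedged sketch using plain coefficient vectors where the asm uses lane-indexed multiplies:

    #include <arm_neon.h>

    #define SHIFT_STAGE1_IDCT 7 /* assumption, taken from the ">> 7" comment */

    /* Odd-part accumulation and butterfly of the 8x8 IDCT first stage. */
    static inline void idct_butterfly_b0(int16x4_t y1, int16x4_t y3,
                                         int16x4_t y5, int16x4_t y7,
                                         int16x4_t cos1, int16x4_t cos3,
                                         int16x4_t sin3, int16x4_t sin1,
                                         int32x4_t a0,
                                         int16x4_t *x0, int16x4_t *x7)
    {
        int32x4_t b0 = vmull_s16(y1, cos1); /* smull v24.4s, v6.4h, v0.4h[1] */
        b0 = vmlal_s16(b0, y3, cos3);
        b0 = vmlal_s16(b0, y5, sin3);
        b0 = vmlal_s16(b0, y7, sin1);
        *x0 = vqrshrn_n_s32(vaddq_s32(a0, b0), SHIFT_STAGE1_IDCT); /* a0 + b0 */
        *x7 = vqrshrn_n_s32(vsubq_s32(a0, b0), SHIFT_STAGE1_IDCT); /* a0 - b0 */
    }
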
ihevc_itrans_recon_32x32.s 216 smull v24.4s, v8.4h, v0.4h[1] //// y1 * cos1(part of b0)
221 smlal v24.4s, v9.4h, v0.4h[3] //// y1 * cos1 + y3 * cos3(part of b0)
256 smlal v24.4s, v14.4h, v1.4h[1]
262 smlal v24.4s, v15.4h, v1.4h[3]
286 smlal v24.4s, v8.4h, v2.4h[1] //// y1 * cos1(part of b0)
291 smlal v24.4s, v9.4h, v2.4h[3] //// y1 * cos1 + y3 * cos3(part of b0)
330 smlal v24.4s, v14.4h, v3.4h[1]
336 smlal v24.4s, v15.4h, v3.4h[3]
362 smlal v24.4s, v8.4h, v4.4h[1] //// y1 * cos1(part of b0)
367 smlal v24.4s, v9.4h, v4.4h[3] //// y1 * cos1 + y3 * cos3(part of b0)
    [all...]
ihevc_inter_pred_chroma_vert.s 268 umull v24.8h, v16.8b, v1.8b
270 umlsl v24.8h, v7.8b, v0.8b
273 umlal v24.8h, v17.8b, v2.8b
275 umlsl v24.8h, v18.8b, v3.8b
300 sqrshrun v24.8b, v24.8h,#6
308 st1 {v24.8b},[x7],x3 //stores the loaded value
337 umull v24.8h, v16.8b, v1.8b
348 umlsl v24.8h, v7.8b, v0.8b
351 umlal v24.8h, v17.8b, v2.8b
    [all...]
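
This is the same 4-tap vertical filter on 8-bit input: the negative outer taps are applied with umlsl against the coefficient magnitudes, and sqrshrun #6 rounds and clips back to bytes. A hedged sketch:

    #include <arm_neon.h>

    /* 4-tap vertical chroma filter on u8 input; c0..c3 are the absolute
     * coefficient values, with the sign folded into umlsl as in the asm. */
    static inline uint8x8_t chroma_vert_u8(uint8x8_t s0, uint8x8_t s1,
                                           uint8x8_t s2, uint8x8_t s3,
                                           uint8x8_t c0, uint8x8_t c1,
                                           uint8x8_t c2, uint8x8_t c3)
    {
        uint16x8_t acc = vmull_u8(s1, c1); /* umull v24.8h, v16.8b, v1.8b */
        acc = vmlsl_u8(acc, s0, c0);       /* umlsl: negative tap */
        acc = vmlal_u8(acc, s2, c2);       /* umlal */
        acc = vmlsl_u8(acc, s3, c3);       /* umlsl: negative tap */
        return vqrshrun_n_s16(vreinterpretq_s16_u16(acc), 6); /* sqrshrun #6 */
    }
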
ihevc_inter_pred_chroma_vert_w16inp_w16out.s 222 smull v24.4s, v3.4h, v16.4h //vmull_s16(src_tmp2, coeff_0)
224 smlal v24.4s, v4.4h, v17.4h
226 smlal v24.4s, v5.4h, v18.4h
228 smlal v24.4s, v6.4h, v19.4h
245 sqshrn v24.4h, v24.4s,#6 //right shift
264 st1 {v24.2s},[x9] //stores the loaded value
273 smull v24.4s, v3.4h, v16.4h //vmull_s16(src_tmp2, coeff_0)
275 smlal v24.4s, v4.4h, v17.4h
278 smlal v24.4s, v5.4h, v18.4h
    [all...]
ihevc_intra_pred_luma_mode_3_to_9.s 190 umull v24.8h, v12.8b, v7.8b //mul (row 0)
191 umlal v24.8h, v13.8b, v6.8b //mul (row 0)
197 rshrn v24.8b, v24.8h,#5 //round shft (row 0)
207 st1 {v24.8b},[x2], x3 //st (row 0)
233 umull v24.8h, v12.8b, v7.8b //mul (row 4)
234 umlal v24.8h, v13.8b, v6.8b //mul (row 4)
241 rshrn v24.8b, v24.8h,#5 //round shft (row 4)
251 st1 {v24.8b},[x2], x3 //st (row 4)
    [all...]
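
The angular intra modes interpolate each row from two reference pixels with 5-bit fractional weights: pred = (ref[idx]*(32 - frac) + ref[idx+1]*frac + 16) >> 5, which is exactly what umull/umlal plus rshrn #5 computes. A sketch under that reading (frac in [0, 31] is an HEVC invariant, assumed here):

    #include <arm_neon.h>

    /* Two-tap angular interpolation, per the "mul (row n)" comments. */
    static inline uint8x8_t angular_interp(uint8x8_t ref0, uint8x8_t ref1,
                                           uint8_t frac)
    {
        uint8x8_t w1 = vdup_n_u8(frac);
        uint8x8_t w0 = vdup_n_u8((uint8_t)(32 - frac));
        uint16x8_t acc = vmull_u8(ref0, w0); /* umull v24.8h, v12.8b, v7.8b */
        acc = vmlal_u8(acc, ref1, w1);       /* umlal v24.8h, v13.8b, v6.8b */
        return vrshrn_n_u16(acc, 5);         /* rshrn #5: round shift */
    }
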
ihevc_sao_edge_offset_class0_chroma.s 198 cmhi v24.16b, v28.16b , v30.16b //II vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
223 SUB v20.16b, v24.16b , v26.16b //II sign_left = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
227 cmhi v24.16b, v28.16b , v30.16b //II vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
237 SUB v22.16b, v24.16b , v26.16b //II sign_right = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
239 ADD v24.16b, v2.16b , v20.16b //II edge_idx = vaddq_s8(const_2, sign_left)
243 ADD v24.16b, v24.16b , v22.16b //II edge_idx = vaddq_s8(edge_idx, sign_right)
249 TBL v24.16b, {v5.16b},v24.16b //II vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
257 AND v24.16b, v24.16b , v3.16b //II edge_idx = vandq_s8(edge_idx, au1_mask)
    [all...]
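
The SAO comments describe the classification step: each neighbour comparison yields a sign in {-1, 0, +1} as the difference of two unsigned compares, the signs index an edge table via TBL, and the result is masked to the valid lanes. A hedged sketch of that pipeline:

    #include <arm_neon.h>

    /* SAO edge-offset classification: edge_idx = tbl[2 + sign_left +
     * sign_right] & au1_mask, with sign = (cur < n) - (cur > n). */
    static inline int8x16_t sao_edge_idx(uint8x16_t cur, uint8x16_t left,
                                         uint8x16_t right, int8x16_t edge_tbl,
                                         int8x16_t au1_mask)
    {
        int8x16_t sign_left = vreinterpretq_s8_u8(
            vsubq_u8(vcltq_u8(cur, left), vcgtq_u8(cur, left)));   /* cmhi pair */
        int8x16_t sign_right = vreinterpretq_s8_u8(
            vsubq_u8(vcltq_u8(cur, right), vcgtq_u8(cur, right)));
        int8x16_t idx = vaddq_s8(vdupq_n_s8(2), sign_left);        /* const_2 */
        idx = vaddq_s8(idx, sign_right);
        idx = vqtbl1q_s8(edge_tbl, vreinterpretq_u8_s8(idx));      /* TBL */
        return vandq_s8(idx, au1_mask);                            /* AND */
    }
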
ihevc_inter_pred_chroma_horz_w16out.s 131 dup v24.8b, v2.8b[0] //coeffabs_0 = vdup_lane_u8(coeffabs, 0)
210 umlsl v30.8h, v0.8b, v24.8b //mul_res = vmlsl_u8(src[0_2], coeffabs_2)//
229 umlsl v28.8h, v1.8b, v24.8b
267 umlsl v22.8h, v29.8b, v24.8b //mul_res = vmlsl_u8(src[0_2], coeffabs_2)//
290 umlsl v20.8h, v31.8b, v24.8b //mul_res = vmlsl_u8(src[0_2], coeffabs_2)//
308 umlsl v30.8h, v0.8b, v24.8b //mul_res = vmlsl_u8(src[0_2], coeffabs_2)//
322 umlsl v28.8h, v1.8b, v24.8b
352 umlsl v22.8h, v29.8b, v24.8b //mul_res = vmlsl_u8(src[0_2], coeffabs_2)//
371 umlsl v20.8h, v31.8b, v24.8b //mul_res = vmlsl_u8(src[0_2], coeffabs_2)//
389 umlsl v30.8h, v0.8b, v24.8b //mul_res = vmlsl_u8(src[0_2], coeffabs_2)//
    [all...]
ihevc_intra_pred_filters_chroma_mode_11_to_17.s 311 umull v24.8h, v12.8b, v7.8b //mul (row 0)
312 umlal v24.8h, v13.8b, v6.8b //mul (row 0)
318 rshrn v24.8b, v24.8h,#5 //round shft (row 0)
328 st1 {v24.8b},[x2], x3 //st (row 0)
354 umull v24.8h, v12.8b, v7.8b //mul (row 4)
355 umlal v24.8h, v13.8b, v6.8b //mul (row 4)
364 rshrn v24.8b, v24.8h,#5 //round shft (row 4)
374 st1 {v24.8b},[x2], x3 //st (row 4)
    [all...]
ihevc_weighted_pred_bi_default.s 210 ld1 {v24.4h},[x11],x3 //load and increment the pi2_src1 iv iteration
212 sqadd v18.4h,v24.4h,v25.4h //vaddq_s32(i4_tmp2_t1, i4_tmp2_t2) iv iteration
292 ld1 { v24.8h},[x0],#16 //load and increment the pi2_src1
295 sqadd v24.8h,v24.8h,v26.8h
297 sqadd v24.8h,v24.8h,v0.8h //vaddq_s32(i4_tmp1_t1, tmp_lvl_shift_t)
303 sqshrun v20.8b, v24.8h,#7
352 ld1 { v24.8h},[x0],#16 //load and increment the pi2_src1
355 sqadd v24.8h,v24.8h,v26.8h
    [all...]
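
The default bi-prediction path is two saturating adds and a saturating unsigned narrow: the two 16-bit sources are summed with a precomputed level-shift term (which also carries the rounding offset) and shifted down by 7. A minimal sketch:

    #include <arm_neon.h>

    /* Default weighted bi-prediction: out = sat_u8((src1 + src2 +
     * tmp_lvl_shift) >> 7). Names are mine; the shift matches the excerpt. */
    static inline uint8x8_t bipred_default(int16x8_t src1, int16x8_t src2,
                                           int16x8_t tmp_lvl_shift)
    {
        int16x8_t sum = vqaddq_s16(src1, src2); /* sqadd v24.8h,v24.8h,v26.8h */
        sum = vqaddq_s16(sum, tmp_lvl_shift);   /* sqadd with lvl-shift term */
        return vqshrun_n_s16(sum, 7);           /* sqshrun #7 */
    }
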
ihevc_intra_pred_chroma_horz.s 294 usubl v24.8h, v30.8b, v28.8b
297 sshr v24.8h, v24.8h,#1
300 sqadd v22.8h, v26.8h , v24.8h
338 usubl v24.8h, v30.8b, v28.8b
341 sshr v24.8h, v24.8h,#1
344 sqadd v22.8h, v26.8h , v24.8h
ihevc_inter_pred_chroma_horz.s 131 dup v24.8b, v2.8b[0] //coeffabs_0 = vdup_lane_u8(coeffabs, 0)
195 umlsl v30.8h, v0.8b, v24.8b //mul_res = vmlsl_u8(src[0_2], coeffabs_2)//
212 umlsl v28.8h, v1.8b, v24.8b
247 umlsl v22.8h, v29.8b, v24.8b //mul_res = vmlsl_u8(src[0_2], coeffabs_2)//
281 umlsl v20.8h, v9.8b, v24.8b //mul_res = vmlsl_u8(src[0_2], coeffabs_2)//
301 umlsl v30.8h, v0.8b, v24.8b //mul_res = vmlsl_u8(src[0_2], coeffabs_2)//
323 umlsl v28.8h, v1.8b, v24.8b
359 umlsl v22.8h, v29.8b, v24.8b //mul_res = vmlsl_u8(src[0_2], coeffabs_2)//
376 umlsl v20.8h, v9.8b, v24.8b //mul_res = vmlsl_u8(src[0_2], coeffabs_2)//
393 umlsl v30.8h, v0.8b, v24.8b //mul_res = vmlsl_u8(src[0_2], coeffabs_2)//
    [all...]
ihevc_itrans_recon_4x4.s 164 trn1 v24.4h, v28.4h, v29.4h
168 trn1 v0.2s, v24.2s, v26.2s
169 trn2 v2.2s, v24.2s, v26.2s
200 trn1 v24.4h, v28.4h, v29.4h
204 trn1 v0.2s, v24.2s, v26.2s
205 trn2 v2.2s, v24.2s, v26.2s
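
The trn1/trn2 pairs implement an in-register transpose: one level interleaves 16-bit lanes of adjacent rows, and a second level interleaves the resulting 32-bit pairs. A hedged sketch of one such step (register roles inferred from the excerpt):

    #include <arm_neon.h>

    /* Two-level 4x4 transpose step: 16-bit trn, then 32-bit trn. */
    static inline void transpose_step(int16x4_t r0, int16x4_t r1,
                                      int16x4_t r2, int16x4_t r3,
                                      int32x2_t *c0, int32x2_t *c1)
    {
        int16x4_t t0 = vtrn1_s16(r0, r1); /* trn1 v24.4h, v28.4h, v29.4h */
        int16x4_t t1 = vtrn1_s16(r2, r3);
        int32x2_t p0 = vreinterpret_s32_s16(t0);
        int32x2_t p1 = vreinterpret_s32_s16(t1);
        *c0 = vtrn1_s32(p0, p1);          /* trn1 v0.2s, v24.2s, v26.2s */
        *c1 = vtrn2_s32(p0, p1);          /* trn2 v2.2s, v24.2s, v26.2s */
    }
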
ihevc_sao_edge_offset_class1_chroma.s 195 cmhi v24.16b, v30.16b , v18.16b //II vcltq_u8(pu1_cur_row, pu1_top_row)
197 SUB v28.16b, v24.16b , v22.16b //II sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
232 TBL v24.8b, {v7.16b},v22.8b //offset = vtbl1_s8(offset_tbl, vget_low_s8(edge_idx))
236 ZIP1 v27.8b, v24.8b, v25.8b
237 ZIP2 v25.8b, v24.8b, v25.8b
238 mov v24.8b,v27.8b
244 SADDW v26.8h, v26.8h , v24.8b //II pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
287 TBL v24.8b, {v7.16b},v22.8b
289 ZIP1 v27.8b, v24.8b, v25.8b
290 ZIP2 v25.8b, v24.8b, v25.8b
    [all...]
ihevc_inter_pred_chroma_vert_w16out.s 267 umull v24.8h, v16.8b, v1.8b
269 umlsl v24.8h, v7.8b, v0.8b
272 umlal v24.8h, v17.8b, v2.8b
274 umlsl v24.8h, v18.8b, v3.8b
305 st1 { v24.16b},[x7],x3 //stores the loaded value
328 umull v24.8h, v16.8b, v1.8b
340 umlsl v24.8h, v7.8b, v0.8b
344 umlal v24.8h, v17.8b, v2.8b
347 umlsl v24.8h, v18.8b, v3.8b
365 st1 { v24.16b},[x7],x3 //stores the loaded value
    [all...]
  /external/chromium_org/third_party/libvpx/source/libvpx/vp8/common/ppc/
platform_altivec.asm 32 W v24, r3
50 R v24, r3
filter_bilinear_altivec.asm 72 ;# v24 tmp
79 vperm v24, v21, v21, v10 ;# v24 = 0123 1234 2345 3456
82 vmsummbm v24, v20, v24, v18
85 vpkswus v24, v24, v25 ;# v24 = 0 4 8 C 1 5 9 D (16-bit)
87 vsrh v24, v24, v19 ;# divide v24 by 128
89 vpkuhus \V, v24, v24 ;# \V = scrambled 8-bit result
    [all...]
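
Per its comments, the AltiVec routine computes a two-tap bilinear filter as a multiply-sum (vmsummbm), shifts by 7 ("divide by 128"), and packs with unsigned saturation. A scalar C sketch of the per-pixel arithmetic, assuming the accumulator seeded by v18 carries the +64 rounding term:

    #include <stdint.h>

    /* One bilinear tap: out = sat_u8((a*(128 - f) + b*f + 64) >> 7). */
    static inline uint8_t bilinear_tap(uint8_t a, uint8_t b, int f)
    {
        int32_t acc = a * (128 - f) + b * f + 64; /* vmsummbm-style dot */
        acc >>= 7;                                /* vsrh: divide by 128 */
        if (acc < 0)   acc = 0;                   /* vpkswus/vpkuhus clamp */
        if (acc > 255) acc = 255;
        return (uint8_t)acc;
    }

The same two files recur below under /external/libvpx/ and /hardware/intel/, so the sketch applies to those hits as well.
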
  /external/libvpx/libvpx/vp8/common/ppc/
platform_altivec.asm 32 W v24, r3
50 R v24, r3
filter_bilinear_altivec.asm 72 ;# v24 tmp
79 vperm v24, v21, v21, v10 ;# v24 = 0123 1234 2345 3456
82 vmsummbm v24, v20, v24, v18
85 vpkswus v24, v24, v25 ;# v24 = 0 4 8 C 1 5 9 D (16-bit)
87 vsrh v24, v24, v19 ;# divide v24 by 128
89 vpkuhus \V, v24, v24 ;# \V = scrambled 8-bit result
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/ppc/
platform_altivec.asm 32 W v24, r3
50 R v24, r3
filter_bilinear_altivec.asm 72 ;# v24 tmp
79 vperm v24, v21, v21, v10 ;# v24 = 0123 1234 2345 3456
82 vmsummbm v24, v20, v24, v18
85 vpkswus v24, v24, v25 ;# v24 = 0 4 8 C 1 5 9 D (16-bit)
87 vsrh v24, v24, v19 ;# divide v24 by 128
89 vpkuhus \V, v24, v24 ;# \V = scrambled 8-bit result
    [all...]
  /cts/tests/tests/jni/src/android/jni/cts/
InstanceFromNative.java 87 int v20, int v21, int v22, int v23, int v24,
101 (v24 == 24) && (v25 == 25) && (v26 == 26) && (v27 == 27) &&
StaticFromNative.java 84 int v20, int v21, int v22, int v23, int v24,
98 (v24 == 24) && (v25 == 25) && (v26 == 26) && (v27 == 27) &&

