    Searched refs: v18 (Results 1 - 25 of 122)


  /external/libhevc/common/arm64/
ihevc_sao_edge_offset_class3_chroma.s 351 movi v18.16b, #0
382 movi v18.16b, #0 //I
392 mov v18.4h[7], w5 //I vsetq_lane_u8
396 EXT v18.16b, v18.16b , v16.16b,#14 //I pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_row, 14)
429 cmhi v20.16b, v5.16b , v18.16b //I vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
431 cmhi v22.16b, v18.16b , v5.16b //I vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
434 ADD v18.16b, v0.16b , v17.16b //I edge_idx = vaddq_s8(const_2, sign_up)
435 ADD v18.16b, v18.16b , v22.16b //I edge_idx = vaddq_s8(edge_idx, sign_down)
    [all...]
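
The cmhi pairs above are the SAO sign computation: two unsigned compares whose difference yields -1/0/+1 per byte, exactly as the vcgtq_u8/vcltq_u8/vsubq_u8 comments describe. A minimal C sketch of that stage with ARM NEON intrinsics (function and variable names are illustrative, not from the source):

    #include <arm_neon.h>

    /* sign(cur - next) as -1/0/+1 per byte: the cmhi/cmhi/SUB pattern above */
    static int8x16_t sao_sign(uint8x16_t cur, uint8x16_t next)
    {
        uint8x16_t gt = vcgtq_u8(cur, next);   /* 0xFF where cur > next */
        uint8x16_t lt = vcltq_u8(cur, next);   /* 0xFF where cur < next */
        /* lt - gt is 0x01, 0x00 or 0xFF, i.e. +1/0/-1 as signed bytes */
        return vreinterpretq_s8_u8(vsubq_u8(lt, gt));
    }
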
ihevc_weighted_pred_bi.s 251 smull v18.4s, v2.4h, v7.4h[0] //vmull_n_s16(pi2_src1_val2, (int16_t) wgt0) iv iteration
263 add v18.4s, v18.4s , v20.4s //vaddq_s32(i4_tmp2_t1, i4_tmp2_t2) iv iteration
266 add v18.4s, v18.4s , v30.4s //vaddq_s32(i4_tmp2_t1, tmp_lvl_shift_t) iv iteration
271 sshl v18.4s,v18.4s,v28.4s
279 sqxtun v18.4h, v18.4s //vqmovun_s32(sto_res_tmp1) iv iteration
280 //mov v19, v18 //vcombine_u16(sto_res_tmp2, sto_res_tmp2)
    [all...]
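
The smull/add/sshl/sqxtun sequence in ihevc_weighted_pred_bi.s is bi-prediction weighting: each source is multiplied by its weight, the rounding/level-shift term is added, the sum is shifted down (sshl with a negative shift count), and the result saturates through vqmovun_s32. A hedged sketch of one 4-lane iteration (parameter names illustrative):

    #include <arm_neon.h>

    static uint16x4_t weighted_bi(int16x4_t src1, int16x4_t src2,
                                  int16_t wgt0, int16_t wgt1,
                                  int32x4_t tmp_lvl_shift, int32x4_t neg_shift)
    {
        int32x4_t t1 = vmull_n_s16(src1, wgt0);  /* smull: src1 * wgt0 */
        int32x4_t t2 = vmull_n_s16(src2, wgt1);  /* smull: src2 * wgt1 */
        int32x4_t t  = vaddq_s32(t1, t2);        /* add the two products */
        t = vaddq_s32(t, tmp_lvl_shift);         /* add tmp_lvl_shift_t */
        t = vshlq_s32(t, neg_shift);             /* sshl: negative count = right shift */
        return vqmovun_s32(t);                   /* sqxtun: saturate to unsigned 16-bit */
    }
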
ihevc_sao_edge_offset_class2.s 284 movi v18.16b, #0
292 mov v18.8b[0], w5 //I pu1_next_row_tmp = vsetq_lane_u8(pu1_src_cpy[src_strd + 16], pu1_next_row_tmp, 0)
294 EXT v18.16b, v16.16b , v18.16b,#1 //I pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tmp, 1)
313 cmhi v3.16b, v5.16b , v18.16b //I vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
316 cmhi v18.16b, v18.16b , v5.16b //I vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
317 SUB v3.16b, v18.16b , v3.16b //I sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
320 TBL v18.16b, {v6.16b},v24.16b //I vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
323 AND v18.16b, v18.16b , v1.16b //I edge_idx = vandq_s8(edge_idx, au1_mask)
    [all...]
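
The TBL/AND pair in ihevc_sao_edge_offset_class2.s is the edge-class lookup: edge_idx (0..4) indexes a small table and lanes outside the active mask are cleared. A sketch using the AArch64 16-byte table-lookup intrinsic (the comments name vtbl1_s8, its 64-bit predecessor):

    #include <arm_neon.h>

    static int8x16_t sao_edge_lookup(int8x16_t edge_idx_tbl, uint8x16_t edge_idx,
                                     int8x16_t au1_mask)
    {
        int8x16_t e = vqtbl1q_s8(edge_idx_tbl, edge_idx); /* TBL v18, {v6}, v24 */
        return vandq_s8(e, au1_mask);                     /* AND: mask inactive lanes */
    }
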
ihevc_sao_edge_offset_class3.s 290 movi v18.16b, #0
305 mov v18.16b[15], w8 //I vsetq_lane_u8
308 EXT v18.16b, v18.16b , v16.16b,#15 //I pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_row, 15)
326 cmhi v3.16b, v5.16b , v18.16b //I vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
327 cmhi v18.16b, v18.16b , v5.16b //I vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
328 SUB v3.16b, v18.16b , v3.16b //I sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
330 ADD v18.16b, v0.16b , v17.16b //I edge_idx = vaddq_s8(const_2, sign_up)
331 ADD v18.16b, v18.16b , v3.16b //I edge_idx = vaddq_s8(edge_idx, sign_down)
    [all...]
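
The movi/mov-lane/EXT triple in ihevc_sao_edge_offset_class3.s builds the diagonally shifted neighbour row: one scalar is inserted at lane 15 of a zero vector, then EXT #15 prepends it to the next row. Roughly, in intrinsics (names illustrative):

    #include <arm_neon.h>

    static uint8x16_t shift_next_row(uint8x16_t next_row, uint8_t corner)
    {
        uint8x16_t tmp = vdupq_n_u8(0);        /* movi v18.16b, #0 */
        tmp = vsetq_lane_u8(corner, tmp, 15);  /* mov v18.16b[15], w8 */
        /* result = tmp[15] followed by next_row[0..14] */
        return vextq_u8(tmp, next_row, 15);    /* EXT v18, v18, v16, #15 */
    }
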
ihevc_sao_edge_offset_class0.s 162 cmhi v18.16b, v21.16b , v17.16b //vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
165 SUB v20.16b, v18.16b , v16.16b //sign_left = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
188 cmhi v18.16b, v21.16b , v17.16b //vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
192 SUB v22.16b, v18.16b , v16.16b //sign_right = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
199 Uxtl v18.8h, v17.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
216 SADDW v18.8h, v18.8h , v16.8b
218 SMAX v18.8h, v18.8h , v4.8h //pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
221 UMIN v18.8h, v18.8h , v6.8h //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u (…)
    [all...]
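
The Uxtl/SADDW/SMAX/UMIN tail of ihevc_sao_edge_offset_class0.s widens the pixels to 16 bits, adds the signed offsets, and clamps the result back into pixel range. A sketch of the low half (clamp constants passed in, as in the source comments):

    #include <arm_neon.h>

    static int16x8_t sao_apply_offset(uint8x16_t cur_row, int8x8_t offset,
                                      int16x8_t const_min_clip,
                                      uint16x8_t const_max_clip)
    {
        /* Uxtl: zero-extend the low 8 pixels, viewed as signed 16-bit */
        int16x8_t t = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(cur_row)));
        t = vaddw_s8(t, offset);          /* SADDW: add the per-pixel offset */
        t = vmaxq_s16(t, const_min_clip); /* SMAX: clamp from below */
        /* UMIN: clamp from above through an unsigned view */
        return vreinterpretq_s16_u16(
            vminq_u16(vreinterpretq_u16_s16(t), const_max_clip));
    }
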
ihevc_sao_edge_offset_class2_chroma.s 399 movi v18.16b, #0
403 mov v18.4h[0], w5 //I pu1_next_row_tmp = vsetq_lane_u8(pu1_src_cpy[src_strd + 16], pu1_next_row_tmp, 0)
407 EXT v18.16b, v16.16b , v18.16b,#2 //I pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tmp, 2)
443 cmhi v20.16b, v5.16b , v18.16b //I vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
445 cmhi v22.16b, v18.16b , v5.16b //I vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
448 ADD v18.16b, v0.16b , v17.16b //I edge_idx = vaddq_s8(const_2, sign_up)
449 ADD v18.16b, v18.16b , v22.16b //I edge_idx = vaddq_s8(edge_idx, sign_down)
451 TBL v18.16b, {v30.16b},v18.16b //I vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
    [all...]
ihevc_intra_pred_chroma_horz.s 120 ld1 { v18.8h},[x12] //load 16 values. d1[7] will have the 1st value.
149 dup v2.8h, v18.4h[7]
153 dup v4.8h, v18.4h[6]
157 dup v6.8h, v18.4h[5]
161 dup v1.8h, v18.4h[4]
165 dup v2.8h, v18.4h[3]
169 dup v4.8h, v18.4h[2]
173 dup v6.8h, v18.4h[1]
178 dup v1.8h, v18.4h[0]
206 dup v18.8h, v0.4h[7]
    [all...]
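
ihevc_intra_pred_chroma_horz.s produces one output row per dup, each broadcasting a different 16-bit lane of the reference vector in v18. The lane-broadcast primitive, shown here for lane 7 (the lane index must be a compile-time constant):

    #include <arm_neon.h>

    /* dup v2.8h, v18.4h[7]: broadcast lane 7 across all eight halfwords */
    static uint16x8_t horz_row7(uint16x8_t ref)
    {
        return vdupq_laneq_u16(ref, 7);
    }
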
ihevc_sao_edge_offset_class1.s 145 LD1 {v18.16b},[x10] //pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
148 cmhi v5.16b, v3.16b , v18.16b //vcgtq_u8(pu1_cur_row, pu1_top_row)
151 cmhi v17.16b, v18.16b , v3.16b //vcltq_u8(pu1_cur_row, pu1_top_row)
155 Uxtl v26.8h, v18.8b //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
158 Uxtl2 v28.8h, v18.16b //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
161 cmhi v22.16b, v18.16b , v30.16b //II vcgtq_u8(pu1_cur_row, pu1_top_row)
165 cmhi v24.16b, v30.16b , v18.16b //II vcltq_u8(pu1_cur_row, pu1_top_row)
222 LD1 {v18.16b},[x10] //pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
223 cmhi v5.16b, v3.16b , v18.16b //vcgtq_u8(pu1_cur_row, pu1_top_row)
224 cmhi v17.16b, v18.16b , v3.16b //vcltq_u8(pu1_cur_row, pu1_top_row)
    [all...]
ihevc_itrans_recon_8x8.s 189 smull v18.4s, v3.4h, v1.4h[2] //// y2 * sin2 (q3 is freed by this time)(part of d1)
239 smlsl v18.4s, v11.4h, v0.4h[2] //// d1 = y2 * sin2 - y6 * cos2(part of a0 and a1)
252 sub v22.4s, v20.4s , v18.4s //// a2 = c1 - d1(part of x2,x5)
253 add v18.4s, v20.4s , v18.4s //// a1 = c1 + d1(part of x1,x6)
261 add v28.4s, v18.4s , v26.4s //// a1 + b1(part of x1)
262 sub v18.4s, v18.4s , v26.4s //// a1 - b1(part of x6)
272 sqrshrn v11.4h, v18.4s,#shift_stage1_idct //// x6 = (a1 - b1 + rnd) >> 7(shift_stage1_idct)
314 smull v18.4s, v3.4h, v1.4h[2] //// y2 * sin2 (q3 is freed by this time)(part of d1)
    [all...]
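
In ihevc_itrans_recon_8x8.s the smull/smlsl lines build the even/odd partial sums, and the add/sub pair is the stage-1 IDCT butterfly (a1 = c1 + d1, a2 = c1 - d1) followed by a saturating rounding shift. A simplified sketch; the real code also folds in the b0..b3 odd sums before narrowing:

    #include <arm_neon.h>

    #define SHIFT_STAGE1_IDCT 7

    static void idct_butterfly(int32x4_t c1, int32x4_t d1,
                               int16x4_t *hi, int16x4_t *lo)
    {
        int32x4_t a1 = vaddq_s32(c1, d1);           /* a1 = c1 + d1 (part of x1,x6) */
        int32x4_t a2 = vsubq_s32(c1, d1);           /* a2 = c1 - d1 (part of x2,x5) */
        *hi = vqrshrn_n_s32(a1, SHIFT_STAGE1_IDCT); /* sqrshrn: (x + rnd) >> 7 */
        *lo = vqrshrn_n_s32(a2, SHIFT_STAGE1_IDCT);
    }
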
ihevc_intra_pred_chroma_dc.s 118 mov v18.s[0], w9
119 mov v18.s[1], w9
145 uadalp v18.1d, v3.2s
154 uadalp v18.1d, v3.2s
168 uadalp v18.1d, v3.2s
177 uadalp v18.1d, v3.2s
183 smov x1, v18.2s[0]
266 uadalp v18.1d, v2.2s
275 uadalp v18.1d, v2.2s
278 smov x11, v18.2s[0]
    [all...]
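
ihevc_intra_pred_chroma_dc.s accumulates the reference samples for the DC value with uadalp, a pairwise add that accumulates two 32-bit lanes into one 64-bit lane. One step, as an intrinsic:

    #include <arm_neon.h>

    /* uadalp v18.1d, v3.2s: acc += partial[0] + partial[1] */
    static uint64x1_t dc_accumulate(uint64x1_t acc, uint32x2_t partial)
    {
        return vpadal_u32(acc, partial);
    }
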
ihevc_intra_pred_luma_mode_3_to_9.s 222 umull v18.8h, v23.8b, v7.8b //mul (row 3)
223 umlal v18.8h, v25.8b, v6.8b //mul (row 3)
230 rshrn v18.8b, v18.8h,#5 //round shft (row 3)
240 st1 {v18.8b},[x2], x3 //st (row 3)
259 umull v18.8h, v23.8b, v7.8b //mul (row 7)
260 umlal v18.8h, v25.8b, v6.8b //mul (row 7)
264 rshrn v18.8b, v18.8h,#5 //round shft (row 7)
270 st1 {v18.8b},[x2], x3 //st (row 7)
    [all...]
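
The umull/umlal/rshrn triple in ihevc_intra_pred_luma_mode_3_to_9.s is the 2-tap angular interpolation ref[x]*(32-frac) + ref[x+1]*frac, rounded and shifted by 5. A sketch of one row (weight vectors assumed pre-splatted):

    #include <arm_neon.h>

    static uint8x8_t angular_interp(uint8x8_t ref0, uint8x8_t ref1,
                                    uint8x8_t w0, uint8x8_t w1)
    {
        uint16x8_t acc = vmull_u8(ref0, w0); /* umull: ref * (32 - frac) */
        acc = vmlal_u8(acc, ref1, w1);       /* umlal: += next ref * frac */
        return vrshrn_n_u16(acc, 5);         /* rshrn: round-shift by 5 */
    }
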
ihevc_itrans_recon_32x32.s 240 smull v18.4s, v10.4h, v0.4h[0]
241 smlal v18.4s, v11.4h, v3.4h[2]
274 smlal v18.4s, v12.4h, v7.4h[0]
275 smlsl v18.4s, v13.4h, v5.4h[2]
310 smlsl v18.4s, v10.4h, v2.4h[0]
311 smlsl v18.4s, v11.4h, v1.4h[2]
348 smlsl v18.4s, v12.4h, v5.4h[0]
349 smlal v18.4s, v13.4h, v7.4h[2]
386 smlal v18.4s, v10.4h, v0.4h[0]
387 smlal v18.4s, v11.4h, v0.4h[2]
    [all...]
ihevc_sao_edge_offset_class0_chroma.s 185 cmhi v18.16b, v21.16b , v19.16b //vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
188 SUB v20.16b, v18.16b , v16.16b //sign_left = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
212 cmhi v18.16b, v21.16b , v19.16b //vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
215 SUB v22.16b, v18.16b , v16.16b //sign_right = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
241 Uxtl v18.8h, v19.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
253 SADDW v18.8h, v18.8h , v16.8b //pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
255 SMAX v18.8h, v18.8h , v4.8h //pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
259 UMIN v18.8h, v18.8h , v6.8h //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u (…)
    [all...]
ihevc_intra_pred_chroma_mode_3_to_9.s 221 umull v18.8h, v19.8b, v7.8b //mul (row 3)
222 umlal v18.8h, v23.8b, v6.8b //mul (row 3)
229 rshrn v18.8b, v18.8h,#5 //round shft (row 3)
239 st1 {v18.8b},[x2], x3 //st (row 3)
260 umull v18.8h, v19.8b, v7.8b //mul (row 7)
261 umlal v18.8h, v23.8b, v6.8b //mul (row 7)
265 rshrn v18.8b, v18.8h,#5 //round shft (row 7)
271 st1 {v18.8b},[x2], x3 //st (row 7)
    [all...]
ihevc_intra_pred_filters_chroma_mode_11_to_17.s 343 umull v18.8h, v23.8b, v7.8b //mul (row 3)
344 umlal v18.8h, v25.8b, v6.8b //mul (row 3)
351 rshrn v18.8b, v18.8h,#5 //round shft (row 3)
361 st1 {v18.8b},[x2], x3 //st (row 3)
382 umull v18.8h, v23.8b, v7.8b //mul (row 7)
383 umlal v18.8h, v25.8b, v6.8b //mul (row 7)
387 rshrn v18.8b, v18.8h,#5 //round shft (row 7)
393 st1 {v18.8b},[x2], x3 //st (row 7)
    [all...]
  /external/openssl/crypto/modes/asm/
ghashv8-armx-64.S 13 ushr v18.2d,v16.2d,#63
14 ext v16.16b,v18.16b,v16.16b,#8 //t0=0xc2....01
73 ext v18.16b,v0.16b,v0.16b,#8
75 eor v17.16b,v17.16b,v18.16b //v17.16b is rotated inp^Xi
86 eor v18.16b,v0.16b,v2.16b
89 eor v1.16b,v1.16b,v18.16b
90 pmull v18.1q,v0.1d,v19.1d //1st phase
97 eor v0.16b,v1.16b,v18.16b
100 ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
102 eor v18.16b,v18.16b,v2.16b
    [all...]
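
ghashv8-armx-64.S is built around the ARMv8 crypto extension's carry-less 64x64 -> 128-bit multiply (pmull), with ext #8 rotating the 128-bit halves for the Karatsuba middle term and the two-phase reduction. The two primitives as intrinsics (requires a crypto-capable target, e.g. -march=armv8-a+crypto):

    #include <arm_neon.h>

    /* pmull vX.1q, vA.1d, vB.1d: carry-less multiply */
    static poly128_t clmul(poly64_t a, poly64_t b)
    {
        return vmull_p64(a, b);
    }

    /* ext vX.16b, v0.16b, v0.16b, #8: swap the 64-bit halves */
    static uint8x16_t swap_halves(uint8x16_t x)
    {
        return vextq_u8(x, x, 8);
    }
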
  /external/chromium_org/third_party/libvpx/source/libvpx/vp8/common/ppc/
variance_subpixel_altivec.asm 56 vspltisw v18, 3
57 vslw v18, v21, v18 ;# 0x00000040000000400000004000000040
69 ;# v18 rounding
97 vmsummbm v24, v20, v24, v18
98 vmsummbm v25, v20, v25, v18
109 vadduhm v22, v18, v22
111 vadduhm v23, v18, v23
120 vmrghh \P0, v22, v23 ;# v18 v19 = 16-bit result in order
166 compute_sum_sse \V, v16, v18, v19, v20, v21, v2
    [all...]
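
In the AltiVec filters the per-word rounding constant 0x40 is built from immediates, since vspltisw can only splat -16..15: splat 3, then shift another splatted value left by it. A sketch assuming v21 held a splat of 8 (8 << 3 = 0x40), which the truncated listing does not show:

    #include <altivec.h>

    /* vspltisw + vslw: build {0x40, 0x40, 0x40, 0x40} */
    static vector signed int rounding_vector(void)
    {
        vector signed int   eight = vec_splat_s32(8);
        vector unsigned int three = vec_splat_u32(3);
        return vec_sl(eight, three);   /* 8 << 3 = 64 per word */
    }
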
filter_bilinear_altivec.asm 53 vspltisw v18, 3
54 vslw v18, v21, v18 ;# 0x00000040000000400000004000000040
66 ;# v18 rounding
82 vmsummbm v24, v20, v24, v18
83 vmsummbm v25, v20, v25, v18
133 vadduhm v22, v18, v22
135 vadduhm v23, v18, v23
144 vmrghh \P0, v22, v23 ;# v18 v19 = 16-bit result in order
207 vspltish v18,
    [all...]
  /external/libvpx/libvpx/vp8/common/ppc/
variance_subpixel_altivec.asm 56 vspltisw v18, 3
57 vslw v18, v21, v18 ;# 0x00000040000000400000004000000040
69 ;# v18 rounding
97 vmsummbm v24, v20, v24, v18
98 vmsummbm v25, v20, v25, v18
109 vadduhm v22, v18, v22
111 vadduhm v23, v18, v23
120 vmrghh \P0, v22, v23 ;# v18 v19 = 16-bit result in order
166 compute_sum_sse \V, v16, v18, v19, v20, v21, v2
    [all...]
filter_bilinear_altivec.asm 53 vspltisw v18, 3
54 vslw v18, v21, v18 ;# 0x00000040000000400000004000000040
66 ;# v18 rounding
82 vmsummbm v24, v20, v24, v18
83 vmsummbm v25, v20, v25, v18
133 vadduhm v22, v18, v22
135 vadduhm v23, v18, v23
144 vmrghh \P0, v22, v23 ;# v18 v19 = 16-bit result in order
207 vspltish v18,
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/ppc/
variance_subpixel_altivec.asm 56 vspltisw v18, 3
57 vslw v18, v21, v18 ;# 0x00000040000000400000004000000040
69 ;# v18 rounding
97 vmsummbm v24, v20, v24, v18
98 vmsummbm v25, v20, v25, v18
109 vadduhm v22, v18, v22
111 vadduhm v23, v18, v23
120 vmrghh \P0, v22, v23 ;# v18 v19 = 16-bit result in order
166 compute_sum_sse \V, v16, v18, v19, v20, v21, v2
    [all...]
filter_bilinear_altivec.asm 53 vspltisw v18, 3
54 vslw v18, v21, v18 ;# 0x00000040000000400000004000000040
66 ;# v18 rounding
82 vmsummbm v24, v20, v24, v18
83 vmsummbm v25, v20, v25, v18
133 vadduhm v22, v18, v22
135 vadduhm v23, v18, v23
144 vmrghh \P0, v22, v23 ;# v18 v19 = 16-bit result in order
207 vspltish v18,
    [all...]
  /cts/tests/tests/jni/src/android/jni/cts/
InstanceFromNative.java 86 int v15, int v16, int v17, int v18, int v19,
99 (v18 == 18) && (v19 == 19) &&
StaticFromNative.java 83 int v15, int v16, int v17, int v18, int v19,
96 (v18 == 18) && (v19 == 19) &&
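
These CTS tests check that a long run of int arguments survives a native-to-Java call; v18 is simply the 19th parameter, expected to equal 18. A hedged C sketch of the native side (method name and signature are illustrative, not the actual CTS entry points):

    #include <jni.h>

    /* call a Java method taking 20 ints and returning boolean */
    static jboolean call_many_ints(JNIEnv *env, jobject obj)
    {
        jclass cls = (*env)->GetObjectClass(env, obj);
        jmethodID mid = (*env)->GetMethodID(env, cls, "takesManyInts",
                                            "(IIIIIIIIIIIIIIIIIIII)Z");
        return (*env)->CallBooleanMethod(env, obj, mid,
                                         0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
                                         10, 11, 12, 13, 14, 15, 16, 17, 18, 19);
    }
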
  /external/llvm/test/MC/AArch64/
neon-simd-ldst-multi-elem.s 103 st1 { v15.8h, v16.8h, v17.8h, v18.8h }, [x15]
107 st1 { v15.4h, v16.4h, v17.4h, v18.4h }, [x15]
111 // CHECK: st1 { v15.8h, v16.8h, v17.8h, v18.8h }, [x15] // encoding: [0xef,0x25,0x00,0x4c]
115 // CHECK: st1 { v15.4h, v16.4h, v17.4h, v18.4h }, [x15] // encoding: [0xef,0x25,0x00,0x0c]
120 st1 { v15.8h-v18.8h }, [x15]
124 st1 { v15.4h-v18.4h }, [x15]
128 // CHECK: st1 { v15.8h, v16.8h, v17.8h, v18.8h }, [x15] // encoding: [0xef,0x25,0x00,0x4c]
132 // CHECK: st1 { v15.4h, v16.4h, v17.4h, v18.4h }, [x15] // encoding: [0xef,0x25,0x00,0x0c]
206 st4 { v15.8h, v16.8h, v17.8h, v18.8h }, [x15]
210 st4 { v15.4h, v16.4h, v17.4h, v18.4h }, [x15]
    [all...]
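
The MC test exercises both spellings of a four-register list ({ v15.8h, v16.8h, v17.8h, v18.8h } and { v15.8h-v18.8h } assemble to the same encoding). st1 stores the four registers back-to-back, while st4 interleaves them element-wise; the st4 form corresponds to the vst4 intrinsics:

    #include <arm_neon.h>

    /* st4 { v0.8h-v3.8h }, [dst]: element-interleaved store of four vectors */
    static void store_interleaved(uint16_t *dst, uint16x8x4_t val)
    {
        vst4q_u16(dst, val);
    }
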

