    Searched refs:v28 (Results 1 - 25 of 108)


  /external/libhevc/common/arm64/
ihevc_sao_edge_offset_class0.s 161 mov v28.8b[15], w11 //II Iteration vsetq_lane_u8(pu1_src_left[ht - row], pu1_cur_row_tmp, 15)
169 EXT v28.16b, v28.16b , v26.16b,#15 //II Iteration pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, 15)
174 cmhi v30.16b, v26.16b , v28.16b //II vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
180 cmhi v0.16b, v28.16b , v26.16b //II vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
181 mov v28.8b[0], w11 //II pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[16], pu1_cur_row_tmp, 0)
189 EXT v28.16b, v26.16b , v28.16b,#1 //II pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tmp, 1)
203 cmhi v30.16b, v26.16b , v28.16b //II vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
205 cmhi v0.16b, v28.16b , v26.16b //II vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
    [all...]
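
The comments in the hits above are the NEON intrinsics the assembly was written from. A minimal C sketch of the class-0 horizontal-neighbour sign computation they describe (the function name and the zero-initialised temporary are illustrative; the final subtract follows the sign_down pattern visible in the class1 hits further down):

    #include <arm_neon.h>

    /* Sign of (current pixel - left neighbour) for one 16-pixel row.
     * Only lane 15 of pu1_cur_row_tmp matters after the vextq below,
     * so initialising the rest with zeros is harmless. */
    static int8x16_t sao_class0_sign_left(uint8x16_t pu1_cur_row, uint8_t left_pixel)
    {
        uint8x16_t pu1_cur_row_tmp = vdupq_n_u8(0);
        pu1_cur_row_tmp = vsetq_lane_u8(left_pixel, pu1_cur_row_tmp, 15);
        /* element i of the shifted vector now holds pixel i-1 */
        pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, 15);
        uint8x16_t cmp_gt = vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp); /* cmhi */
        uint8x16_t cmp_lt = vcltq_u8(pu1_cur_row, pu1_cur_row_tmp); /* cmhi, operands swapped */
        /* per lane: +1 where cur > left, -1 where cur < left, 0 where equal */
        return vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt));
    }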
ihevc_sao_edge_offset_class0_chroma.s 184 mov v28.4h[7], w11 //II vsetq_lane_u16(pu1_src_left[ht - row], pu1_cur_row_tmp, 14,15)
192 EXT v28.16b, v28.16b , v30.16b,#14 //II pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, 14)
195 cmhi v26.16b, v30.16b , v28.16b //II vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
198 cmhi v24.16b, v28.16b , v30.16b //II vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
206 mov v28.8b[0], w11 //II pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[16], pu1_cur_row_tmp, 0)
213 mov v28.8b[1], w11 //II pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[17], pu1_cur_row_tmp, 1)
216 EXT v28.16b, v30.16b , v28.16b,#2 //II pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tmp, 2)
225 cmhi v26.16b, v30.16b , v28.16b //II vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
    [all...]
ihevc_inter_pred_chroma_vert_w16inp.s 200 smull v28.4s, v1.4h, v16.4h //vmull_s16(src_tmp2, coeff_0)
203 smlal v28.4s, v2.4h, v17.4h
205 smlal v28.4s, v3.4h, v18.4h
207 smlal v28.4s, v4.4h, v19.4h
221 sqshrn v28.4h, v28.4s,#6 //right shift
236 sqrshrun v28.8b, v28.8h,#6 //rounding shift
244 st1 {v28.s}[0],[x9],x3 //stores the loaded value
252 smull v28.4s, v1.4h, v16.4h //vmull_s16(src_tmp2, coeff_0)
    [all...]
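
The vmull_s16/smlal sequence above is the 4-tap vertical chroma filter applied to 16-bit intermediate samples. A sketch of one accumulation, assuming c0..c3 hold the replicated filter taps (the real kernel loads them once and reuses them across rows):

    #include <arm_neon.h>

    /* One 4-sample column of the vertical filter on 16-bit input. */
    static int16x4_t chroma_vert_w16inp_acc(int16x4_t src0, int16x4_t src1,
                                            int16x4_t src2, int16x4_t src3,
                                            int16x4_t c0, int16x4_t c1,
                                            int16x4_t c2, int16x4_t c3)
    {
        int32x4_t acc = vmull_s16(src0, c0);   /* smull v28.4s, v1.4h, v16.4h */
        acc = vmlal_s16(acc, src1, c1);        /* smlal v28.4s, v2.4h, v17.4h */
        acc = vmlal_s16(acc, src2, c2);
        acc = vmlal_s16(acc, src3, c3);
        return vqshrn_n_s32(acc, 6);           /* sqshrn v28.4h, v28.4s, #6 */
    }

Two such 4h halves are then combined and narrowed to unsigned bytes with vqrshrun_n_s16(..., 6), which is the sqrshrun v28.8b, v28.8h, #6 line before the store.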
ihevc_sao_edge_offset_class1_chroma.s 152 LD1 {v28.16b},[x11],#16 //pu1_top_row = vld1q_u8(pu1_src_top_cpy || pu1_src - src_strd)
159 cmhi v5.16b, v3.16b , v28.16b //vcgtq_u8(pu1_cur_row, pu1_top_row)
162 cmhi v19.16b, v28.16b , v3.16b //vcltq_u8(pu1_cur_row, pu1_top_row)
187 Uxtl2 v28.8h, v18.16b //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
197 SUB v28.16b, v24.16b , v22.16b //II sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
205 NEG v16.16b, v28.16b //II sign_up = vnegq_s8(sign_down)
207 ADD v22.16b, v22.16b , v28.16b //II edge_idx = vaddq_s8(edge_idx, sign_down)
227 Uxtl2 v28.8h, v3.16b //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
231 SADDW v28.8h, v28.8h , v17.8b //pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)
    [all...]
ihevc_inter_pred_chroma_vert.s 245 umull v28.8h, v6.8b, v1.8b //mul_res 2
248 umlsl v28.8h, v5.8b, v0.8b
251 umlal v28.8h, v7.8b, v2.8b
253 umlsl v28.8h, v16.8b, v3.8b
264 sqrshrun v28.8b, v28.8h,#6
278 st1 {v28.8b},[x7],x3 //stores the loaded value
304 umull v28.8h, v6.8b, v1.8b //mul_res 2
307 umlsl v28.8h, v5.8b, v0.8b
310 umlal v28.8h, v7.8b, v2.8b
    [all...]
ihevc_inter_pred_chroma_vert_w16inp_w16out.s 199 smull v28.4s, v1.4h, v16.4h //vmull_s16(src_tmp2, coeff_0)
204 smlal v28.4s, v2.4h, v17.4h
205 smlal v28.4s, v3.4h, v18.4h
207 smlal v28.4s, v4.4h, v19.4h
219 sqshrn v28.4h, v28.4s,#6 //right shift
241 st1 {v28.2s},[x9],x3 //stores the loaded value
248 smull v28.4s, v1.4h, v16.4h //vmull_s16(src_tmp2, coeff_0)
249 smlal v28.4s, v2.4h, v17.4h
250 smlal v28.4s, v3.4h, v18.4h
    [all...]
ihevc_itrans_recon_16x16.s 257 smull v28.4s, v6.4h, v1.4h[1] //// y1 * sin3(part of b2)
262 smlal v28.4s, v7.4h, v3.4h[3] //// y1 * sin3 - y3 * cos1(part of b2)
284 smlsl v28.4s, v8.4h, v1.4h[3]
290 smlsl v28.4s, v9.4h, v0.4h[3]
333 smlsl v28.4s, v6.4h, v3.4h[1] //// y1 * sin3(part of b2)
338 smlal v28.4s, v7.4h, v2.4h[1] //// y1 * sin3 - y3 * cos1(part of b2)
345 smlal v28.4s, v8.4h, v0.4h[1]
351 smlal v28.4s, v9.4h, v2.4h[3]
390 add v14.4s, v16.4s , v28.4s
391 sub v26.4s, v16.4s , v28.4s
    [all...]
ihevc_intra_pred_chroma_planar.s 198 umull v28.8h, v5.8b, v0.8b
201 umlal v28.8h, v6.8b, v11.8b
205 umlal v28.8h, v31.8b, v4.8b
207 umlal v28.8h, v25.8b, v1.8b
215 add v28.8h, v28.8h , v16.8h
217 sshl v28.8h, v28.8h, v14.8h
230 xtn v13.8b, v28.8h
268 umull v28.8h, v18.8b, v0.8b
    [all...]
ihevc_itrans_recon_8x8.s 196 smull v28.4s, v6.4h, v1.4h[1] //// y1 * sin3(part of b2)
204 smlsl v28.4s, v7.4h, v0.4h[1] //// y1 * sin3 - y3 * cos1(part of b2)
236 smlal v28.4s, v14.4h, v1.4h[3] //// y1 * sin3 - y3 * cos1 + y5 * sin1(part of b2)
247 smlal v28.4s, v15.4h, v0.4h[3] //// b2 = y1 * sin3 - y3 * cos1 + y5 * sin1 + y7 * cos3(part of x2,x5)
258 add v24.4s, v22.4s , v28.4s //// a2 + b2(part of x2)
259 sub v22.4s, v22.4s , v28.4s //// a2 - b2(part of x5)
261 add v28.4s, v18.4s , v26.4s //// a1 + b1(part of x1)
271 sqrshrn v6.4h, v28.4s,#shift_stage1_idct //// x1 = (a1 + b1 + rnd) >> 7(shift_stage1_idct)
306 smull v28.4s, v6.4h, v1.4h[1] //// y1 * sin3(part of b2)
311 smlsl v28.4s, v7.4h, v0.4h[1] //// y1 * sin3 - y3 * cos1(part of b2)
    [all...]
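
The //// comments in the 8x8 inverse-transform hits spell out one odd-part term, b2 = y1*sin3 - y3*cos1 + y5*sin1 + y7*cos3, which is then added to and subtracted from the even-part term a2 and rounded down by shift_stage1_idct (7, per the comment on the sqrshrn line). A hedged sketch of that butterfly, with the sine/cosine constants passed as replicated vectors rather than the lane-indexed multiplies the assembly uses:

    #include <arm_neon.h>

    #define SHIFT_STAGE1_IDCT 7

    /* b2 accumulation and the a2 +/- b2 butterfly for one column group;
     * a2 and the coefficient values are computed elsewhere. */
    static void idct8_butterfly_x2_x5(int32x4_t a2,
                                      int16x4_t y1, int16x4_t y3,
                                      int16x4_t y5, int16x4_t y7,
                                      int16x4_t sin3, int16x4_t cos1,
                                      int16x4_t sin1, int16x4_t cos3,
                                      int16x4_t *x2, int16x4_t *x5)
    {
        int32x4_t b2 = vmull_s16(y1, sin3);    /* y1 * sin3 (part of b2) */
        b2 = vmlsl_s16(b2, y3, cos1);          /* - y3 * cos1            */
        b2 = vmlal_s16(b2, y5, sin1);          /* + y5 * sin1            */
        b2 = vmlal_s16(b2, y7, cos3);          /* + y7 * cos3            */
        /* x2 = (a2 + b2 + rnd) >> shift, x5 = (a2 - b2 + rnd) >> shift  */
        *x2 = vqrshrn_n_s32(vaddq_s32(a2, b2), SHIFT_STAGE1_IDCT);
        *x5 = vqrshrn_n_s32(vsubq_s32(a2, b2), SHIFT_STAGE1_IDCT);
    }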
ihevc_weighted_pred_bi.s 198 dup v28.4s,w10 //vmovq_n_s32(0-shift)
201 neg v28.4s, v28.4s
237 sshl v4.4s,v4.4s,v28.4s //vshlq_s32(i4_tmp1_t1, tmp_shift_t)
249 sshl v6.4s,v6.4s,v28.4s
259 sshl v19.4s,v19.4s,v28.4s
271 sshl v18.4s,v18.4s,v28.4s
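
The dup/neg/sshl pattern in the weighted-prediction hits above is the usual NEON idiom for a variable right shift: there is no shift-right-by-register, so the kernel negates the shift amount and uses the signed left shift, which shifts right for negative counts. A minimal sketch, assuming shift is the down-shift derived from the weighted-prediction parameters:

    #include <arm_neon.h>

    static int32x4_t downshift_by_variable(int32x4_t i4_tmp1_t1, int shift)
    {
        int32x4_t tmp_shift_t = vdupq_n_s32(-shift);  /* dup + neg in the listing  */
        return vshlq_s32(i4_tmp1_t1, tmp_shift_t);    /* sshl v4.4s, v4.4s, v28.4s */
    }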
ihevc_weighted_pred_uni.s 156 dup v28.4s,w6 //vmovq_n_s32(tmp_shift)
160 neg v28.4s, v28.4s
183 sshl v4.4s,v4.4s,v28.4s
193 sshl v6.4s,v6.4s,v28.4s
199 sshl v7.4s,v7.4s,v28.4s
208 sshl v16.4s,v16.4s,v28.4s
ihevc_sao_edge_offset_class1.s 158 Uxtl2 v28.8h, v18.16b //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
201 SADDW2 v28.8h, v28.8h , v24.16b //II pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)
207 SMAX v28.8h, v28.8h , v2.8h //II pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip)
208 UMIN v28.8h, v28.8h , v4.8h //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip))
213 xtn2 v30.16b, v28.8h //II vmovn_s16(pi2_tmp_cur_row.val[1])
240 Uxtl2 v28.8h, v3.16b //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
241 SADDW2 v28.8h, v28.8h , v24.16b //pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)
    [all...]
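
The uxtl2/saddw2/smax/umin/xtn2 sequence above applies the signed edge offsets to the upper eight pixels of a row and clamps the result back to the sample range. A sketch following the intrinsic names in the comments (const_min_clip and const_max_clip are the clip vectors set up outside the loop):

    #include <arm_neon.h>

    static uint8x8_t sao_apply_offset_hi(uint8x16_t pu1_cur_row, int8x16_t offset,
                                         int16x8_t const_min_clip,
                                         uint16x8_t const_max_clip)
    {
        /* uxtl2: widen the high 8 pixels to signed 16 bit */
        int16x8_t row = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)));
        /* saddw2: add the signed 8-bit edge offsets, widening on the fly */
        row = vaddw_s8(row, vget_high_s8(offset));
        /* smax / umin: clamp to the legal sample range */
        row = vmaxq_s16(row, const_min_clip);
        row = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(row),
                                              const_max_clip));
        /* xtn2 in the listing writes this into the top half of a 16-byte vector */
        return vmovn_u16(vreinterpretq_u16_s16(row));
    }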
ihevc_itrans_recon_4x4.s 159 sqrshrn v28.4h, v19.4s,#shift_stage1_idct //pi2_out[0] = clip_s16((e[0] + o[0] + add)>>shift) )
164 trn1 v24.4h, v28.4h, v29.4h
165 trn2 v25.4h, v28.4h, v29.4h
194 sqrshrn v28.4h, v19.4s,#shift_stage2_idct //pi2_out[0] = clip_s16((e[0] + o[0] + add)>>shift) )
200 trn1 v24.4h, v28.4h, v29.4h
201 trn2 v25.4h, v28.4h, v29.4h
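
The trn1/trn2 pair above interleaves two rows of 16-bit coefficients; together with further interleave steps it transposes the 4x4 block between the two IDCT passes. A sketch of one pair:

    #include <arm_neon.h>

    static void trn_pair_s16(int16x4_t row0, int16x4_t row1,
                             int16x4_t *even, int16x4_t *odd)
    {
        *even = vtrn1_s16(row0, row1);   /* trn1 v24.4h, v28.4h, v29.4h */
        *odd  = vtrn2_s16(row0, row1);   /* trn2 v25.4h, v28.4h, v29.4h */
    }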
ihevc_inter_pred_chroma_vert_w16out.s 246 umull v28.8h, v6.8b, v1.8b //mul_res 2
249 umlsl v28.8h, v5.8b, v0.8b
252 umlal v28.8h, v7.8b, v2.8b
254 umlsl v28.8h, v16.8b, v3.8b
279 st1 { v28.16b},[x7],x3 //stores the loaded value
301 umull v28.8h, v6.8b, v1.8b //mul_res 2
304 umlsl v28.8h, v5.8b, v0.8b
307 umlal v28.8h, v7.8b, v2.8b
310 umlsl v28.8h, v16.8b, v3.8b
348 st1 { v28.16b},[x7],x3 //stores the loaded value
    [all...]
ihevc_itrans_recon_32x32.s 218 smull v28.4s, v8.4h, v1.4h[1] //// y1 * sin3(part of b2)
223 smlal v28.4s, v9.4h, v3.4h[3] //// y1 * sin3 - y3 * cos1(part of b2)
258 smlal v28.4s, v14.4h, v6.4h[1]
264 smlsl v28.4s, v15.4h, v7.4h[1]
288 smlsl v28.4s, v8.4h, v4.4h[3] //// y1 * sin3(part of b2)
293 smlsl v28.4s, v9.4h, v2.4h[1] //// y1 * sin3 - y3 * cos1(part of b2)
332 smlsl v28.4s, v14.4h, v0.4h[1]
338 smlsl v28.4s, v15.4h, v2.4h[3]
364 smlsl v28.4s, v8.4h, v5.4h[1] //// y1 * sin3(part of b2)
369 smlsl v28.4s, v9.4h, v7.4h[3] //// y1 * sin3 - y3 * cos1(part of b2)
    [all...]
  /external/llvm/test/MC/AArch64/
neon-facge-facgt.s 27 facgt v31.4s, v29.4s, v28.4s
30 faclt v31.4s, v28.4s, v29.4s
34 // CHECK: facgt v31.4s, v29.4s, v28.4s // encoding: [0xbf,0xef,0xbc,0x6e]
37 // CHECK: facgt v31.4s, v29.4s, v28.4s // encoding: [0xbf,0xef,0xbc,0x6e]
neon-frsqrt-frecp.s 19 frecps v31.4s, v29.4s, v28.4s
23 // CHECK: frecps v31.4s, v29.4s, v28.4s // encoding: [0xbf,0xff,0x3c,0x4e]
neon-scalar-by-elem-mla.s 25 fmls s29, s10, v28.s[1]
32 // CHECK: fmls s29, s10, v28.s[1] // encoding: [0x5d,0x51,0xbc,0x5f]
neon-scalar-by-elem-saturating-mla.s 31 sqdmlsl d16, s18, v28.s[3]
39 // CHECK: sqdmlsl d16, s18, v28.s[3] // encoding: [0x50,0x7a,0xbc,0x5f]
neon-compare-instructions.s 13 cmeq v29.2s, v27.2s, v28.2s
21 // CHECK: cmeq v29.2s, v27.2s, v28.2s // encoding: [0x7d,0x8f,0xbc,0x2e]
35 cmhs v29.2s, v27.2s, v28.2s
43 cmls v29.2s, v28.2s, v27.2s
51 // CHECK: cmhs v29.2s, v27.2s, v28.2s // encoding: [0x7d,0x3f,0xbc,0x2e]
58 // CHECK: cmhs v29.2s, v27.2s, v28.2s // encoding: [0x7d,0x3f,0xbc,0x2e]
72 cmge v29.2s, v27.2s, v28.2s
80 cmle v29.2s, v28.2s, v27.2s
88 // CHECK: cmge v29.2s, v27.2s, v28.2s // encoding: [0x7d,0x3f,0xbc,0x0e]
95 // CHECK: cmge v29.2s, v27.2s, v28.2s // encoding: [0x7d,0x3f,0xbc,0x0e]
    [all...]
  /external/chromium_org/third_party/libvpx/source/libvpx/vp8/common/ppc/
platform_altivec.asm 36 W v28, r3
54 R v28, r3
  /external/libvpx/libvpx/vp8/common/ppc/
platform_altivec.asm 36 W v28, r3
54 R v28, r3
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/ppc/
platform_altivec.asm 36 W v28, r3
54 R v28, r3
  /cts/tests/tests/jni/src/android/jni/cts/
InstanceFromNative.java 88 int v25, int v26, int v27, int v28, int v29,
102 (v28 == 28) && (v29 == 29) &&
StaticFromNative.java 85 int v25, int v26, int v27, int v28, int v29,
99 (v28 == 28) && (v29 == 29) &&

