Searched refs: v20 (Results 1 - 25 of 164)


  /external/libmpeg2/common/armv8/
icv_variance_av8.s 86 umull v20.8h, v0.8b, v0.8b
91 uaddl v21.4s, v20.4h, v22.4h
93 uaddl2 v20.4s, v20.8h, v22.8h
96 add v20.4s, v20.4s, v21.4s
98 add v20.4s, v20.4s, v22.4s
99 addp v20.4s, v20.4s, v20.4
    [all...]
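
    The matches above are the sum-of-squares half of a block variance: v20 holds the squares
    of the source bytes (umull of v0 with itself) which are then widened and reduced with
    pairwise adds. A minimal scalar sketch of the same idea, with illustrative names
    (block_variance, wd, ht) that are not taken from the file:

    #include <stdint.h>

    /* Variance of a small wd x ht block of bytes from its running sum and
     * sum of squares, as the NEON code accumulates with umull/uaddl and
     * then collapses with add/addp. Integer math is fine for 8x8 blocks. */
    static uint32_t block_variance(const uint8_t *src, int stride, int wd, int ht)
    {
        uint32_t sum = 0, sum_sq = 0;
        for (int y = 0; y < ht; y++)
            for (int x = 0; x < wd; x++) {
                uint32_t p = src[y * stride + x];
                sum    += p;          /* lane sums kept in v21/v22 */
                sum_sq += p * p;      /* umull v20.8h, v0.8b, v0.8b, then widened adds */
            }
        uint32_t n = (uint32_t)(wd * ht);
        return (n * sum_sq - sum * sum) / (n * n);   /* E[x^2] - E[x]^2 */
    }
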
ideint_spatial_filter_av8.s 70 movi v20.8h, #0
115 uabal v20.8h, v2.8b, v4.8b
127 addp v20.8h, v20.8h, v20.8h
131 uaddlp v20.2s, v20.4h
140 mul v20.2s, v20.2s, v31.2s
149 smov x7, v20.2s[0
    [all...]
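
    Here v20 is a sum-of-absolute-differences accumulator: it is cleared with movi, fed by
    uabal, and finally collapsed with addp/uaddlp before being scaled. A hedged scalar sketch
    of that accumulation; the names (row_sad, len) are illustrative, not from
    ideint_spatial_filter_av8.s:

    #include <stdint.h>
    #include <stdlib.h>

    /* SAD between two pixel rows, the scalar analogue of the uabal chain. */
    static uint32_t row_sad(const uint8_t *a, const uint8_t *b, int len)
    {
        uint32_t sad = 0;                 /* movi v20.8h, #0 */
        for (int i = 0; i < len; i++)
            sad += abs(a[i] - b[i]);      /* uabal v20.8h, v2.8b, v4.8b, 8 pixels at a time */
        return sad;                        /* addp/uaddlp collapse the lanes */
    }
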
ideint_cac_av8.s 142 cmhs v20.8h, v16.8h, v18.8h
143 and v20.16b, v16.16b, v20.16b
145 // v20 now contains 8 absolute diff of sums above the threshold
148 mov v21.d[0], v20.d[1]
149 add v20.4h, v20.4h, v21.4h
151 // v20 has four adj values for two sub-blocks
186 add v20.4h, v0.4h, v20.4
    [all...]
  /external/libhevc/common/arm64/
ihevc_intra_pred_chroma_ver.s 117 ld2 {v20.8b, v21.8b}, [x6],#16 //16 loads (col 0:15)
127 st2 {v20.8b, v21.8b}, [x2],#16
128 st2 {v20.8b, v21.8b}, [x5],#16
129 st2 {v20.8b, v21.8b}, [x8],#16
130 st2 {v20.8b, v21.8b}, [x10],#16
140 st2 {v20.8b, v21.8b}, [x2],#16
141 st2 {v20.8b, v21.8b}, [x5],#16
142 st2 {v20.8b, v21.8b}, [x8],#16
143 st2 {v20.8b, v21.8b}, [x10],#16
153 st2 {v20.8b, v21.8b}, [x2],#1
    [all...]
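
    Vertical intra prediction is a pure copy: one ld2 fetches the interleaved Cb/Cr reference
    row above the block, and the repeated st2 stores replicate it into every output row. A
    short sketch under that assumption; parameter names are illustrative, not the library's API:

    #include <stdint.h>
    #include <string.h>

    /* Replicate the reference row above the block into every row of the
     * destination, for interleaved chroma (Cb/Cr pairs, hence 2 * nt bytes). */
    static void intra_pred_chroma_ver(const uint8_t *ref_above,   /* 2 * nt bytes */
                                      uint8_t *dst, int dst_stride, int nt)
    {
        for (int row = 0; row < nt; row++)
            memcpy(dst + row * dst_stride, ref_above, 2 * (size_t)nt);
    }
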
ihevc_intra_pred_luma_dc.s 237 uxtl v20.8h, v0.8b
240 add v20.8h, v20.8h , v24.8h //col 1::7 add 3dc+2 (prol)
243 sqshrun v2.8b, v20.8h,#2 //columns shx2 movn (prol)
263 bsl v20.8b, v3.8b , v16.8b //row 1 (prol)
268 st1 {v20.8b},[x2], x3 //store row 1 (prol)
280 bsl v20.8b, v3.8b , v16.8b //row 3 (prol)
285 st1 {v20.8b},[x2], x3 //store row 3 (prol)
296 bsl v20.8b, v3.8b , v16.8b //row 5 (prol)
301 st1 {v20.8b},[x2], x3 //store row 5 (prol
    [all...]
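
    The "3dc+2" and sqshrun #2 comments are the HEVC DC boundary filter: interior samples get
    the DC value, while the first row and column are smoothed toward their neighbours. A hedged
    sketch of that rule with illustrative names; the bsl/st1 sequence above is the per-row
    store of the same result:

    #include <stdint.h>

    /* HEVC DC intra prediction with boundary smoothing (luma, nt < 32). */
    static void intra_pred_dc(const uint8_t *top, const uint8_t *left,
                              uint8_t *dst, int stride, int nt)
    {
        int sum = 0;
        for (int i = 0; i < nt; i++)
            sum += top[i] + left[i];
        int dc = (sum + nt) / (2 * nt);

        for (int y = 0; y < nt; y++)
            for (int x = 0; x < nt; x++)
                dst[y * stride + x] = (uint8_t)dc;

        /* boundary filter: (neighbour + 3*dc + 2) >> 2, corner uses both neighbours */
        dst[0] = (uint8_t)((left[0] + 2 * dc + top[0] + 2) >> 2);
        for (int x = 1; x < nt; x++)
            dst[x] = (uint8_t)((top[x] + 3 * dc + 2) >> 2);
        for (int y = 1; y < nt; y++)
            dst[y * stride] = (uint8_t)((left[y] + 3 * dc + 2) >> 2);
    }
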
ihevc_inter_pred_filters_luma_vert_w16inp.s 171 smull v20.4s, v2.4h, v23.4h //mul_res2 = vmull_u8(src_tmp3, coeffabs_1)//
174 smlal v20.4s, v1.4h, v22.4h //mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_0)//
176 smlal v20.4s, v3.4h, v24.4h //mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_2)//
178 smlal v20.4s, v4.4h, v25.4h //mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_3)//
180 smlal v20.4s, v5.4h, v26.4h //mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_4)//
182 smlal v20.4s, v6.4h, v27.4h //mul_res2 = vmlal_u8(mul_res2, src_tmp3, coeffabs_5)//
183 smlal v20.4s, v7.4h, v28.4h //mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_6)//
184 smlal v20.4s, v16.4h, v29.4h //mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_7)//
199 sqshrn v20.4h, v20.4s,#
    [all...]
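
    The smull/smlal chain builds an 8-tap vertical FIR on 16-bit intermediates: each output is
    the dot product of eight vertically adjacent samples with coeffabs_0..coeffabs_7, narrowed
    back down by the trailing sqshrn. A hedged scalar sketch; the names and the shift parameter
    are assumptions, since the shift amount is truncated in the match above:

    #include <stdint.h>

    /* 8-tap vertical filter on 16-bit input, saturating narrow like sqshrn. */
    static int16_t filter_vert_8tap(const int16_t *src, int src_stride,
                                    const int16_t coeff[8], int shift)
    {
        int32_t acc = 0;                          /* v20.4s accumulator */
        for (int k = 0; k < 8; k++)
            acc += (int32_t)src[k * src_stride] * coeff[k];
        int32_t v = acc >> shift;                 /* sqshrn v20.4h, v20.4s, #shift */
        if (v >  32767) v =  32767;
        if (v < -32768) v = -32768;
        return (int16_t)v;
    }
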
ihevc_inter_pred_luma_vert_w16inp_w16out.s 182 smull v20.4s,v2.4h,v23.4h //mul_res2 = smull_u8(src_tmp3, coeffabs_1)//
185 smlal v20.4s,v1.4h,v22.4h //mul_res2 = smlal_u8(mul_res2, src_tmp2, coeffabs_0)//
187 smlal v20.4s,v3.4h,v24.4h //mul_res2 = smlal_u8(mul_res2, src_tmp4, coeffabs_2)//
189 smlal v20.4s,v4.4h,v25.4h //mul_res2 = smlal_u8(mul_res2, src_tmp1, coeffabs_3)//
191 smlal v20.4s,v5.4h,v26.4h //mul_res2 = smlal_u8(mul_res2, src_tmp2, coeffabs_4)//
193 smlal v20.4s,v6.4h,v27.4h //mul_res2 = smlal_u8(mul_res2, src_tmp3, coeffabs_5)//
194 smlal v20.4s,v7.4h,v28.4h //mul_res2 = smlal_u8(mul_res2, src_tmp4, coeffabs_6)//
195 smlal v20.4s,v16.4h,v29.4h //mul_res2 = smlal_u8(mul_res2, src_tmp1, coeffabs_7)//
210 sub v20.4s, v20.4s, v30.4
    [all...]
ihevc_sao_edge_offset_class1.s 154 SUB v20.16b, v17.16b , v5.16b //sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
160 ADD v5.16b, v5.16b , v20.16b //edge_idx = vaddq_s8(edge_idx, sign_down)
163 NEG v16.16b, v20.16b //sign_up = vnegq_s8(sign_down)
177 Uxtl v20.8h, v3.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
179 SADDW v20.8h, v20.8h , v5.8b //pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
181 SMAX v20.8h, v20.8h , v2.8h //pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
183 UMIN v20.8h, v20.8h , v4.8h //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi (…)
    [all...]
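
    The SUB/ADD/NEG lines compute the sign terms for SAO edge offset class 1 (vertical), and
    the SADDW/SMAX/UMIN chain applies the selected offset with clipping. A simplified per-sample
    sketch of that idea; the exact edge-index table layout follows the SAO spec and the
    implementation, and the names here are illustrative:

    #include <stdint.h>

    static inline int sign3(int v) { return (v > 0) - (v < 0); }

    /* SAO edge offset, vertical class: the signs of the differences with the
     * rows above and below pick an edge index (2 = flat, offset zero), the
     * offset is added and the result is clipped to 8 bits. */
    static uint8_t sao_edge_class1(uint8_t above, uint8_t cur, uint8_t below,
                                   const int8_t offset[5])
    {
        int edge_idx = 2 + sign3(cur - above) + sign3(cur - below);
        int out = cur + offset[edge_idx];
        if (out < 0)   out = 0;       /* SMAX with const_min_clip */
        if (out > 255) out = 255;     /* UMIN with const_max_clip */
        return (uint8_t)out;
    }
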
ihevc_sao_edge_offset_class1_chroma.s 183 SUB v20.16b, v19.16b , v5.16b //sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
189 ADD v5.16b, v5.16b , v20.16b //edge_idx = vaddq_s8(edge_idx, sign_down)
193 NEG v16.16b, v20.16b //sign_up = vnegq_s8(sign_down)
209 Uxtl v20.8h, v3.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
215 SADDW v20.8h, v20.8h , v5.8b //pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
217 SMAX v20.8h, v20.8h , v2.8h //pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
219 UMIN v20.8h, v20.8h , v4.8h //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi (…)
    [all...]
ihevc_inter_pred_filters_luma_vert.s 182 umull v20.8h, v2.8b, v23.8b //mul_res2 = vmull_u8(src_tmp3, coeffabs_1)//
186 umlsl v20.8h, v1.8b, v22.8b //mul_res2 = vmlsl_u8(mul_res2, src_tmp2, coeffabs_0)//
190 umlsl v20.8h, v3.8b, v24.8b //mul_res2 = vmlsl_u8(mul_res2, src_tmp4, coeffabs_2)//
193 umlal v20.8h, v4.8b, v25.8b //mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_3)//
196 umlal v20.8h, v5.8b, v26.8b //mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_4)//
199 umlsl v20.8h, v6.8b, v27.8b //mul_res2 = vmlsl_u8(mul_res2, src_tmp3, coeffabs_5)//
202 umlal v20.8h, v7.8b, v28.8b //mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_6)//
206 umlsl v20.8h, v16.8b, v29.8b //mul_res2 = vmlsl_u8(mul_res2, src_tmp1, coeffabs_7)//
224 sqrshrun v20.8b, v20.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)/
    [all...]
  /external/llvm/test/MC/AArch64/
noneon-diagnostics.s 5 fmla v1.2d, v30.2d, v20.2d
11 // CHECK-ERROR-NEXT: fmla v1.2d, v30.2d, v20.2d
18 fmls v1.2d, v30.2d, v20.2d
25 // CHECK-ERROR-NEXT: fmls v1.2d, v30.2d, v20.2d
33 fmls.2d v1, v30, v20
40 // CHECK-ERROR-NEXT: fmls.2d v1, v30, v20
neon-simd-copy.s 11 ins v20.s[0], w30
16 mov v20.s[0], w30
21 // CHECK: {{mov|ins}} v20.s[0], w30 // encoding: [0xd4,0x1f,0x04,0x4e]
26 // CHECK: {{mov|ins}} v20.s[0], w30 // encoding: [0xd4,0x1f,0x04,0x4e]
94 dup v17.2s, v20.s[0]
97 dup v17.4s, v20.s[0]
102 // CHECK: {{mov|dup}} v17.2s, v20.s[0] // encoding: [0x91,0x06,0x04,0x0e]
105 // CHECK: {{mov|dup}} v17.4s, v20.s[0] // encoding: [0x91,0x06,0x04,0x4e]
  /external/libavc/common/armv8/
ih264_inter_pred_luma_horz_hpel_vert_hpel_av8.s 110 uaddl v20.8h, v4.8b, v6.8b
113 mla v18.8h, v20.8h , v28.8h
115 uaddl v20.8h, v1.8b, v11.8b
117 mla v20.8h, v24.8h , v28.8h
121 mls v20.8h, v26.8h , v30.8h
126 ext v24.16b, v18.16b , v20.16b , #4
127 ext v26.16b, v18.16b , v20.16b , #6
129 ext v23.16b, v18.16b , v20.16b , #10
131 ext v24.16b, v18.16b , v20.16b , #2
132 ext v26.16b, v18.16b , v20.16b , #
    [all...]
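
    The uaddl/mla/mls pattern is the H.264 6-tap half-pel filter: a0 + a5 + 20*(a2 + a3)
    - 5*(a1 + a4), with 20 in v28 and 5 in v30. A hedged one-dimensional sketch; this file
    is the 2-D horizontal+vertical path, which keeps wider intermediates before the final
    rounding, so the (t + 16) >> 5 shown here is only the simple 1-D case:

    #include <stdint.h>

    /* H.264 6-tap half-pel interpolation of one output sample. */
    static uint8_t h264_6tap_hpel(const uint8_t a[6])
    {
        int t = a[0] + a[5] + 20 * (a[2] + a[3]) - 5 * (a[1] + a[4]);
        t = (t + 16) >> 5;
        if (t < 0)   t = 0;
        if (t > 255) t = 255;
        return (uint8_t)t;
    }
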
ih264_inter_pred_luma_horz_qpel_vert_hpel_av8.s 172 uaddl v20.8h, v4.8b, v6.8b
175 mla v18.8h, v20.8h , v28.8h
177 uaddl v20.8h, v1.8b, v11.8b
179 mla v20.8h, v24.8h , v28.8h
183 mls v20.8h, v26.8h , v30.8h
188 st1 {v20.4s}, [x9], #16
189 ext v24.16b, v18.16b , v20.16b , #4
190 ext v26.16b, v18.16b , v20.16b , #6
192 ext v22.16b, v18.16b , v20.16b , #10
194 ext v24.16b, v18.16b , v20.16b , #
    [all...]
ih264_inter_pred_chroma_av8.s 153 umull v20.8h, v0.8b, v28.8b
155 umlal v20.8h, v3.8b, v29.8b
157 umlal v20.8h, v5.8b, v30.8b
159 umlal v20.8h, v8.8b, v31.8b
160 sqrshrun v26.8b, v20.8h, #6
184 umull v20.8h, v10.8b, v28.8b
185 umlal v20.8h, v13.8b, v29.8b
186 umlal v20.8h, v0.8b, v30.8b
187 umlal v20.8h, v3.8b, v31.8b
188 sqrshrun v26.8b, v20.8h, #
    [all...]
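
    The four umull/umlal terms and the sqrshrun #6 implement H.264 chroma interpolation, a
    2x2 bilinear blend whose weights come from the fractional offsets. A hedged sketch with
    illustrative names:

    #include <stdint.h>

    /* Bilinear chroma interpolation: weights sum to 64, so (… + 32) >> 6
     * already stays in 0..255 and needs no extra clip. */
    static uint8_t h264_chroma_bilinear(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
                                        int dx, int dy)   /* dx, dy in 0..7 */
    {
        int w0 = (8 - dx) * (8 - dy), w1 = dx * (8 - dy);
        int w2 = (8 - dx) * dy,       w3 = dx * dy;
        return (uint8_t)((a * w0 + b * w1 + c * w2 + d * w3 + 32) >> 6);
    }
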
ih264_intra_pred_luma_16x16_av8.s 217 dup v20.16b, v0.b[5]
224 st1 {v20.16b}, [x1], x3
320 dup v20.16b, w15
327 dup v20.8h, w11
334 uqshl v0.8h, v0.8h, v20.8h
336 dup v20.16b, v0.b[0]
340 st1 { v20.16b}, [x1], x3
341 st1 { v20.16b}, [x1], x3
342 st1 { v20.16b}, [x1], x3
343 st1 { v20.16b}, [x1], x
    [all...]
ih264_resi_trans_quant_av8.s 164 sub v20.4h, v15.4h , v16.4h //x2 = x5-x6
167 shl v22.4h, v20.4h, #1 //u_shift(x2,1,shft)
174 add v25.4h, v23.4h , v20.4h //x6 = u_shift(x3,1,shft) + x2;
198 add v20.4s, v0.4s, v23.4s
205 sshl v20.4s, v20.4s, v24.4s //shift row 1
210 xtn v20.4h, v20.4s //narrow row 1
215 neg v24.8h, v20.8h //get negative
221 cmeq v0.4h, v20.4h, #
    [all...]
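
    The sub/shl/add lines ("x2 = x5-x6", "u_shift(x2,1,shft)") are the butterfly of the H.264
    forward 4x4 core transform, applied per row and then per column before the sshl/neg/cmeq
    quantisation steps. A hedged sketch of one row of that butterfly in its standard form;
    variable names are illustrative:

    #include <stdint.h>

    /* One row of the H.264 forward 4x4 core transform. */
    static void fwd_transform_row(const int16_t in[4], int16_t out[4])
    {
        int s0 = in[0] + in[3], s3 = in[0] - in[3];
        int s1 = in[1] + in[2], s2 = in[1] - in[2];
        out[0] = (int16_t)(s0 + s1);
        out[1] = (int16_t)(2 * s3 + s2);     /* the u_shift(x,1) + x terms */
        out[2] = (int16_t)(s0 - s1);
        out[3] = (int16_t)(s3 - 2 * s2);
    }
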
ih264_inter_pred_filters_luma_vert_av8.s 139 uaddl v20.8h, v1.8b, v11.8b // temp4 = src[0_8] + src[5_8]
141 mla v20.8h, v18.8h , v22.8h // temp4 += temp3 * 20
149 mls v20.8h, v26.8h , v24.8h // temp4 -= temp5 * 5
157 sqrshrun v31.8b, v20.8h, #5 // dst[0_8] = CLIP_U8((temp4 +16) >> 5)
163 uaddl v20.8h, v6.8b, v0.8b
170 mls v18.8h, v20.8h , v24.8h
176 uaddl v20.8h, v8.8b, v2.8b
185 mls v14.8h, v20.8h , v24.8h
192 uaddl v20.8h, v9.8b, v7.8b // temp4 = src[0_8] + src[5_8]
201 mla v20.8h, v18.8h , v22.8h // temp4 += temp3 * 2
    [all...]
  /external/libavc/encoder/armv8/
ih264e_half_pel_av8.s 172 sqrshrun v20.8b, v8.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,row0)
179 st1 {v20.8b, v21.8b}, [x1], #16 ////Store dest row0
303 uaddl v20.8h, v2.8b, v17.8b //// a0 + a5 (column1,row0)
305 umlal v20.8h, v8.8b, v1.8b //// a0 + a5 + 20a2 (column1,row0)
306 umlal v20.8h, v11.8b, v1.8b //// a0 + a5 + 20a2 + 20a3 (column1,row0)
307 umlsl v20.8h, v5.8b, v31.8b //// a0 + a5 + 20a2 + 20a3 - 5a1 (column1,row0)
308 umlsl v20.8h, v14.8b, v31.8b //// a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 (column1,row0)
309 mov v21.d[0], v20.d[1]
316 ext v30.8b, v20.8b , v21.8b , #4
321 ext v29.8b, v20.8b , v21.8b , #
    [all...]
ih264e_evaluate_intra16x16_modes_av8.s 133 dup v20.8h, w11
140 uqshl v0.8h, v0.8h, v20.8h
166 dup v20.8b, v9.b[15] ///HORIZONTAL VALUE ROW=0//
176 uabdl v26.8h, v0.8b, v20.8b
187 dup v20.8b, v9.b[14] ///HORIZONTAL VALUE ROW=1//
197 uabal v26.8h, v2.8b, v20.8b
204 dup v20.8b, v9.b[13] ///HORIZONTAL VALUE ROW=2//
213 uabal v26.8h, v4.8b, v20.8b
220 dup v20.8b, v9.b[12] ///HORIZONTAL VALUE ROW=3//
229 uabal v26.8h, v6.8b, v20.8
    [all...]
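
    Here each dup broadcasts one left-neighbour byte across a predicted row and the
    uabdl/uabal pairs accumulate the SAD against the source, i.e. the cost of the horizontal
    intra 16x16 mode. A hedged scalar sketch with illustrative names:

    #include <stdint.h>
    #include <stdlib.h>

    /* Cost of the horizontal intra 16x16 mode: each predicted row is the
     * left neighbour replicated, and the cost is the SAD against the source. */
    static uint32_t sad_horz_mode_16x16(const uint8_t *src, int stride,
                                        const uint8_t left[16])
    {
        uint32_t sad = 0;
        for (int row = 0; row < 16; row++)
            for (int col = 0; col < 16; col++)
                sad += abs(src[row * stride + col] - left[row]);
        return sad;
    }
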
  /external/libhevc/decoder/arm64/
ihevcd_fmt_conv_420sp_to_rgba8888.s 203 sMULL v20.4s, v6.4h, v0.h[0] ////(V-128)*C1 FOR R
217 sqshrn v7.4h, v20.4s,#13 ////D10 = (V-128)*C1>>13 4 16-BIT VALUES
230 UADDW v20.8h, v5.8h , v31.8b ////Q10 - HAS Y + B
239 sqxtun v20.8b, v20.8h
251 ZIP1 v27.8b, v20.8b, v21.8b
252 ZIP2 v21.8b, v20.8b, v21.8b
253 mov v20.d[0], v27.d[0]
259 mov v20.d[1], v21.d[0]
266 ZIP1 v25.8h, v20.8h, v22.8
    [all...]
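
    The sMULL/sqshrn/UADDW/sqxtun/ZIP sequence is a fixed-point YUV to RGB conversion: the
    Q13 chroma products such as (V-128)*C1 >> 13 are added to Y, clamped to 8 bits, and the
    channels are interleaved. A hedged per-pixel sketch; the BT.601 constants below are an
    assumption for illustration, since the file's actual C1..C4 values are not visible in the
    matches above:

    #include <stdint.h>

    static inline uint8_t clamp_u8(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

    /* Fixed-point (Q13) YUV -> RGB for one pixel. */
    static void yuv_to_rgb(uint8_t y, uint8_t u, uint8_t v,
                           uint8_t *r, uint8_t *g, uint8_t *b)
    {
        const int c1 = (int)(1.402 * 8192);   /* V contribution to R */
        const int c2 = (int)(0.714 * 8192);   /* V contribution to G */
        const int c3 = (int)(0.344 * 8192);   /* U contribution to G */
        const int c4 = (int)(1.772 * 8192);   /* U contribution to B */
        int du = u - 128, dv = v - 128;
        *r = clamp_u8(y + ((dv * c1) >> 13));
        *g = clamp_u8(y - ((dv * c2 + du * c3) >> 13));
        *b = clamp_u8(y + ((du * c4) >> 13));
    }
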
  /external/valgrind/none/tests/arm64/
memory.stdout.exp 126 0000000000000000 v20.d[0] (xor, xfer vecreg #3)
127 0000000000000000 v20.d[1] (xor, xfer vecreg #3)
156 0000000000000000 v20.d[0] (xor, xfer vecreg #3)
157 0000000000000000 v20.d[1] (xor, xfer vecreg #3)
186 0000000000000000 v20.d[0] (xor, xfer vecreg #3)
187 0000000000000000 v20.d[1] (xor, xfer vecreg #3)
216 0000000000000000 v20.d[0] (xor, xfer vecreg #3)
217 0000000000000000 v20.d[1] (xor, xfer vecreg #3)
246 0000000000000000 v20.d[0] (xor, xfer vecreg #3)
247 0000000000000000 v20.d[1] (xor, xfer vecreg #3
    [all...]
  /frameworks/rs/cpu_ref/
rsCpuIntrinsics_advsimd_3DLUT.S 93 .ifc \dst, v20.16b
134 1: st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [x0], #32
189 lanepair dst=v20.8b, src0=v6.s[0], src1=v6.s[1], xr0=v0.h[0], xr1=v0.h[1], yr0=v1.b[0], yr1=v1.b[1], zr0=v2.h[0], zr1=v2.h[1]
192 lanepair dst=v20.16b, src0=v6.s[2], src1=v6.s[3], xr0=v0.h[2], xr1=v0.h[3], yr0=v1.b[2], yr1=v1.b[3], zr0=v2.h[2], zr1=v2.h[3]
200 uzp1 v6.16b, v20.16b, v21.16b
201 uzp2 v7.16b, v20.16b, v21.16b
202 uzp1 v20.16b, v6.16b, v7.16b
204 mov v21.d[0], v20.d[1]
214 st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [x0], #32
233 st4 {v20.b-v23.b}[0], [x0], #
    [all...]
rsCpuIntrinsics_advsimd_ColorMatrix.S 221 vmxx_f32 \i, 1, v16.4s, v20.4s, v0.s[0]
235 vmxx_f32 \i^31, 1, v16.4s, v20.4s, v0.s[0]
249 vmxx_f32 \i, 1, v17.4s, v20.4s, v0.s[1]
263 vmxx_f32 \i^31, 1, v17.4s, v20.4s, v0.s[1]
277 vmxx_f32 \i, 1, v18.4s, v20.4s, v0.s[2]
291 vmxx_f32 \i^31, 1, v18.4s, v20.4s, v0.s[2]
305 vmxx_f32 \i, 1, v19.4s, v20.4s, v0.s[3]
319 vmxx_f32 \i^31, 1, v19.4s, v20.4s, v0.s[3]
330 ld4 {v20.8b,v21.8b,v22.8b,v23.8b}, [x1], #32
331 uxtl v20.8h, v20.8
    [all...]
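
    The vmxx_f32 accumulations multiply the widened RGBA channels (uxtl after the ld4) by the
    rows of a 4x4 colour matrix held in v0 and v16..v19. A hedged scalar sketch of that per-pixel
    step; names are illustrative, not the RenderScript intrinsic's API:

    #include <stdint.h>

    /* Apply a 4x4 colour matrix to one RGBA sample: each output channel is a
     * dot product of the input vector with one matrix row. */
    static void color_matrix_px(const float m[4][4], const float in[4], float out[4])
    {
        for (int i = 0; i < 4; i++) {
            out[i] = 0.0f;
            for (int j = 0; j < 4; j++)
                out[i] += m[i][j] * in[j];
        }
    }
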
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/ppc/
variance_subpixel_altivec.asm 45 load_c v20, hfilter_b, r5, r12, r0
71 ;# v20 filter taps
94 vperm v24, v21, v21, \hp ;# v20 = 0123 1234 2345 3456
97 vmsummbm v24, v20, v24, v18
98 vmsummbm v25, v20, v25, v18
108 vmuleub v22, \P0, v20 ;# 64 + 4 positive taps
110 vmuloub v23, \P0, v20
166 compute_sum_sse \V, v16, v18, v19, v20, v21, v23
233 vspltish v20, 8
235 vslh v18, v20, v18 ;# 0x0040 0040 0040 0040 0040 0040 0040 004
    [all...]
