    Searched refs: v20 (Results 1 - 25 of 189)


  /toolchain/binutils/binutils-2.27/gas/testsuite/gas/s390/
zarch-z13.s 48 vmrh %v15,%v17,%v20,13
49 vmrhb %v15,%v17,%v20
50 vmrhh %v15,%v17,%v20
51 vmrhf %v15,%v17,%v20
52 vmrhg %v15,%v17,%v20
53 vmrl %v15,%v17,%v20,13
54 vmrlb %v15,%v17,%v20
55 vmrlh %v15,%v17,%v20
56 vmrlf %v15,%v17,%v20
57 vmrlg %v15,%v17,%v20
    [all...]
zarch-z13.d 54 .*: e7 f1 40 00 d6 61 [ ]*vmrh %v15,%v17,%v20,13
55 .*: e7 f1 40 00 06 61 [ ]*vmrhb %v15,%v17,%v20
56 .*: e7 f1 40 00 16 61 [ ]*vmrhh %v15,%v17,%v20
57 .*: e7 f1 40 00 26 61 [ ]*vmrhf %v15,%v17,%v20
58 .*: e7 f1 40 00 36 61 [ ]*vmrhg %v15,%v17,%v20
59 .*: e7 f1 40 00 d6 60 [ ]*vmrl %v15,%v17,%v20,13
60 .*: e7 f1 40 00 06 60 [ ]*vmrlb %v15,%v17,%v20
61 .*: e7 f1 40 00 16 60 [ ]*vmrlh %v15,%v17,%v20
62 .*: e7 f1 40 00 26 60 [ ]*vmrlf %v15,%v17,%v20
63 .*: e7 f1 40 00 36 60 [ ]*vmrlg %v15,%v17,%v20
    [all...]
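The .s file above feeds the assembler and the paired .d file pins the expected opcode bytes. Note how the element-size suffix (b/h/f/g) shows up as the high nibble of the sixth byte (06/16/26/36), and the generic vmrh/vmrl forms carry an explicit ",13" that exercises that m4 field with an arbitrary value (the d6 byte). As a reference point, here is a minimal C model of what VECTOR MERGE HIGH (byte) computes, assuming leftmost-first element order; the names are illustrative, not from binutils:

    #include <stdint.h>

    #define NELEM 16  /* byte elements in a 128-bit vector register */

    /* vmrhb: interleave the high (leftmost) halves of a and b into dst */
    static void vmrhb_model(uint8_t dst[NELEM],
                            const uint8_t a[NELEM],
                            const uint8_t b[NELEM]) {
        for (int i = 0; i < NELEM / 2; i++) {
            dst[2 * i]     = a[i];  /* even slots from the second operand */
            dst[2 * i + 1] = b[i];  /* odd slots from the third operand   */
        }
    }

vmrl is the same interleave taken from the low halves; the sized mnemonics just fix m4 instead of taking it as an operand.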
  /external/libmpeg2/common/armv8/
icv_variance_av8.s 86 umull v20.8h, v0.8b, v0.8b
91 uaddl v21.4s, v20.4h, v22.4h
93 uaddl2 v20.4s, v20.8h, v22.8h
96 add v20.4s, v20.4s, v21.4s
98 add v20.4s, v20.4s, v22.4s
99 addp v20.4s, v20.4s, v20.4s
    [all...]
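The variance kernel keeps two widening accumulators: a sum of pixels and a sum of squares (the umull v20.8h, v0.8b, v0.8b above), reduced at the end with pairwise adds. A minimal scalar sketch, assuming an 8x8 block; the function name and integer formulation are illustrative:

    #include <stdint.h>

    /* Returns 64 * variance (N*var = sum_sq - sum*sum/N, N = 64),
     * kept integral the way the fixed-point kernel would. */
    static uint32_t block_variance_8x8(const uint8_t *src, int stride) {
        uint32_t sum = 0, sum_sq = 0;
        for (int y = 0; y < 8; y++)
            for (int x = 0; x < 8; x++) {
                uint32_t p = src[y * stride + x];
                sum    += p;
                sum_sq += p * p;  /* what umull v20.8h, v0.8b, v0.8b feeds */
            }
        return sum_sq - (sum * sum) / 64;
    }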
ideint_spatial_filter_av8.s 70 movi v20.8h, #0
115 uabal v20.8h, v2.8b, v4.8b
127 addp v20.8h, v20.8h, v20.8h
131 uaddlp v20.2s, v20.4h
140 mul v20.2s, v20.2s, v31.2s
149 smov x7, v20.s[0]
    [all...]
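Here uabal accumulates widened absolute differences between rows (a SAD measure for the spatial deinterlace filter), and addp/uaddlp fold the lanes down before smov extracts the scalar. A scalar equivalent for one row of 8 pixels, with illustrative names:

    #include <stdint.h>
    #include <stdlib.h>

    static uint32_t sad_row8(const uint8_t *a, const uint8_t *b) {
        uint32_t acc = 0;
        for (int i = 0; i < 8; i++)
            acc += (uint32_t)abs((int)a[i] - (int)b[i]);  /* uabal lanes */
        return acc;
    }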
ideint_cac_av8.s 142 cmhs v20.8h, v16.8h, v18.8h
143 and v20.16b, v16.16b, v20.16b
145 // v20 now contains 8 absolute diff of sums above the threshold
148 mov v21.d[0], v20.d[1]
149 add v20.4h, v20.4h, v21.4h
151 // v20 has four adj values for two sub-blocks
186 add v20.4h, v0.4h, v20.4h
    [all...]
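The cmhs/and pair is the branch-free "keep the value only if it clears a threshold" idiom that the comment describes: cmhs yields an all-ones lane mask where the difference is at or above the threshold, and the AND applies it. Scalar model for one 16-bit lane:

    #include <stdint.h>

    static uint16_t keep_if_above(uint16_t diff, uint16_t thresh) {
        uint16_t mask = (diff >= thresh) ? 0xFFFF : 0x0000;  /* cmhs */
        return diff & mask;                                   /* and  */
    }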
  /external/libhevc/common/arm64/
ihevc_intra_pred_chroma_ver.s 117 ld2 {v20.8b, v21.8b}, [x6],#16 //16 loads (col 0:15)
127 st2 {v20.8b, v21.8b}, [x2],#16
128 st2 {v20.8b, v21.8b}, [x5],#16
129 st2 {v20.8b, v21.8b}, [x8],#16
130 st2 {v20.8b, v21.8b}, [x10],#16
140 st2 {v20.8b, v21.8b}, [x2],#16
141 st2 {v20.8b, v21.8b}, [x5],#16
142 st2 {v20.8b, v21.8b}, [x8],#16
143 st2 {v20.8b, v21.8b}, [x10],#16
153 st2 {v20.8b, v21.8b}, [x2],#16
    [all...]
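Vertical intra prediction simply replicates the reference row above the block into every output row; the ld2/st2 pair keeps the interleaved Cb/Cr samples paired through the copy. A scalar sketch for an interleaved CbCr block of width w pixels (2*w bytes per row); the signature is illustrative:

    #include <stdint.h>
    #include <string.h>

    static void intra_pred_chroma_ver(uint8_t *dst, int dst_stride,
                                      const uint8_t *top, int w, int h) {
        for (int y = 0; y < h; y++)
            memcpy(dst + y * dst_stride, top, 2 * (size_t)w);  /* CbCr pairs */
    }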
ihevc_intra_pred_luma_dc.s 237 uxtl v20.8h, v0.8b
240 add v20.8h, v20.8h , v24.8h //col 1::7 add 3dc+2 (prol)
243 sqshrun v2.8b, v20.8h,#2 //columns shx2 movn (prol)
263 bsl v20.8b, v3.8b , v16.8b //row 1 (prol)
268 st1 {v20.8b},[x2], x3 //store row 1 (prol)
280 bsl v20.8b, v3.8b , v16.8b //row 3 (prol)
285 st1 {v20.8b},[x2], x3 //store row 3 (prol)
296 bsl v20.8b, v3.8b , v16.8b //row 5 (prol)
301 st1 {v20.8b},[x2], x3 //store row 5 (prol)
    [all...]
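The "add 3dc+2" comment plus the sqshrun #2 matches HEVC's filtered DC edge: boundary samples of a DC-predicted block become (neighbour + 3*dc + 2) >> 2, with saturation to 8 bits. One step of that in scalar C, assuming 8-bit samples:

    #include <stdint.h>

    static uint8_t dc_filtered_edge(uint8_t neighbour, uint8_t dc) {
        int v = (neighbour + 3 * dc + 2) >> 2;  /* the sqshrun ..., #2 */
        return (uint8_t)(v > 255 ? 255 : v);    /* saturate like sqshrun */
    }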
ihevc_inter_pred_filters_luma_vert_w16inp.s 171 smull v20.4s, v2.4h, v23.4h //mul_res2 = vmull_u8(src_tmp3, coeffabs_1)//
174 smlal v20.4s, v1.4h, v22.4h //mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_0)//
176 smlal v20.4s, v3.4h, v24.4h //mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_2)//
178 smlal v20.4s, v4.4h, v25.4h //mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_3)//
180 smlal v20.4s, v5.4h, v26.4h //mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_4)//
182 smlal v20.4s, v6.4h, v27.4h //mul_res2 = vmlal_u8(mul_res2, src_tmp3, coeffabs_5)//
183 smlal v20.4s, v7.4h, v28.4h //mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_6)//
184 smlal v20.4s, v16.4h, v29.4h //mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_7)//
199 sqshrn v20.4h, v20.4s,#
    [all...]
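The smull/smlal chain is an 8-tap vertical FIR: one widening multiply starts the accumulator and seven multiply-accumulates finish it, one coefficient per source row. A scalar model over 16-bit intermediate input (the w16inp variant), with an illustrative signature:

    #include <stdint.h>

    static int32_t fir8_vert(const int16_t *src, int stride,
                             const int16_t coeff[8]) {
        int32_t acc = 0;                              /* smull starts it */
        for (int k = 0; k < 8; k++)
            acc += (int32_t)src[k * stride] * coeff[k];  /* 7x smlal */
        return acc;  /* later rounded/narrowed by the sqshrn step */
    }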
ihevc_inter_pred_luma_vert_w16inp_w16out.s 182 smull v20.4s,v2.4h,v23.4h //mul_res2 = smull_u8(src_tmp3, coeffabs_1)//
185 smlal v20.4s,v1.4h,v22.4h //mul_res2 = smlal_u8(mul_res2, src_tmp2, coeffabs_0)//
187 smlal v20.4s,v3.4h,v24.4h //mul_res2 = smlal_u8(mul_res2, src_tmp4, coeffabs_2)//
189 smlal v20.4s,v4.4h,v25.4h //mul_res2 = smlal_u8(mul_res2, src_tmp1, coeffabs_3)//
191 smlal v20.4s,v5.4h,v26.4h //mul_res2 = smlal_u8(mul_res2, src_tmp2, coeffabs_4)//
193 smlal v20.4s,v6.4h,v27.4h //mul_res2 = smlal_u8(mul_res2, src_tmp3, coeffabs_5)//
194 smlal v20.4s,v7.4h,v28.4h //mul_res2 = smlal_u8(mul_res2, src_tmp4, coeffabs_6)//
195 smlal v20.4s,v16.4h,v29.4h //mul_res2 = smlal_u8(mul_res2, src_tmp1, coeffabs_7)//
210 sub v20.4s, v20.4s, v30.4s
    [all...]
ihevc_sao_edge_offset_class1.s 154 SUB v20.16b, v17.16b , v5.16b //sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
160 ADD v5.16b, v5.16b , v20.16b //edge_idx = vaddq_s8(edge_idx, sign_down)
163 NEG v16.16b, v20.16b //sign_up = vnegq_s8(sign_down)
177 Uxtl v20.8h, v3.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
179 SADDW v20.8h, v20.8h , v5.8b //pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
181 SMAX v20.8h, v20.8h , v2.8h //pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
183 UMIN v20.8h, v20.8h , v4.8h //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi (…)
    [all...]
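SAO class-1 (vertical) edge offset classifies each sample by the signs of its differences with the rows above and below, looks up an offset for that class, and clamps; the SMAX/UMIN pair above is the clamp. A scalar sketch, assuming 8-bit samples; sign3 and the 5-entry offset table are illustrative:

    #include <stdint.h>

    static int sign3(int a, int b) { return (a > b) - (a < b); }

    static uint8_t sao_edge_class1(uint8_t above, uint8_t cur, uint8_t below,
                                   const int8_t offset[5]) {
        int edge_idx = 2 + sign3(cur, above) + sign3(cur, below);  /* 0..4 */
        int v = cur + offset[edge_idx];
        return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);  /* smax/umin */
    }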
ihevc_sao_edge_offset_class1_chroma.s 183 SUB v20.16b, v19.16b , v5.16b //sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
189 ADD v5.16b, v5.16b , v20.16b //edge_idx = vaddq_s8(edge_idx, sign_down)
193 NEG v16.16b, v20.16b //sign_up = vnegq_s8(sign_down)
209 Uxtl v20.8h, v3.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
215 SADDW v20.8h, v20.8h , v5.8b //pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
217 SMAX v20.8h, v20.8h , v2.8h //pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
219 UMIN v20.8h, v20.8h , v4.8h //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi (…)
    [all...]
ihevc_inter_pred_filters_luma_vert.s 182 umull v20.8h, v2.8b, v23.8b //mul_res2 = vmull_u8(src_tmp3, coeffabs_1)//
186 umlsl v20.8h, v1.8b, v22.8b //mul_res2 = vmlsl_u8(mul_res2, src_tmp2, coeffabs_0)//
190 umlsl v20.8h, v3.8b, v24.8b //mul_res2 = vmlsl_u8(mul_res2, src_tmp4, coeffabs_2)//
193 umlal v20.8h, v4.8b, v25.8b //mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_3)//
196 umlal v20.8h, v5.8b, v26.8b //mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_4)//
199 umlsl v20.8h, v6.8b, v27.8b //mul_res2 = vmlsl_u8(mul_res2, src_tmp3, coeffabs_5)//
202 umlal v20.8h, v7.8b, v28.8b //mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_6)//
206 umlsl v20.8h, v16.8b, v29.8b //mul_res2 = vmlsl_u8(mul_res2, src_tmp1, coeffabs_7)//
224 sqrshrun v20.8b, v20.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)//
    [all...]
  /external/llvm/test/MC/AArch64/
noneon-diagnostics.s 5 fmla v1.2d, v30.2d, v20.2d
11 // CHECK-ERROR-NEXT: fmla v1.2d, v30.2d, v20.2d
18 fmls v1.2d, v30.2d, v20.2d
25 // CHECK-ERROR-NEXT: fmls v1.2d, v30.2d, v20.2d
33 fmls.2d v1, v30, v20
40 // CHECK-ERROR-NEXT: fmls.2d v1, v30, v20
  /external/libavc/common/armv8/
ih264_inter_pred_luma_horz_hpel_vert_hpel_av8.s 114 uaddl v20.8h, v4.8b, v6.8b
117 mla v18.8h, v20.8h , v28.8h
119 uaddl v20.8h, v1.8b, v11.8b
121 mla v20.8h, v24.8h , v28.8h
125 mls v20.8h, v26.8h , v30.8h
130 ext v24.16b, v18.16b , v20.16b , #4
131 ext v26.16b, v18.16b , v20.16b , #6
133 ext v23.16b, v18.16b , v20.16b , #10
135 ext v24.16b, v18.16b , v20.16b , #2
136 ext v26.16b, v18.16b , v20.16b , #
    [all...]
ih264_inter_pred_luma_horz_qpel_vert_hpel_av8.s 176 uaddl v20.8h, v4.8b, v6.8b
179 mla v18.8h, v20.8h , v28.8h
181 uaddl v20.8h, v1.8b, v11.8b
183 mla v20.8h, v24.8h , v28.8h
187 mls v20.8h, v26.8h , v30.8h
192 st1 {v20.4s}, [x9], #16
193 ext v24.16b, v18.16b , v20.16b , #4
194 ext v26.16b, v18.16b , v20.16b , #6
196 ext v22.16b, v18.16b , v20.16b , #10
198 ext v24.16b, v18.16b , v20.16b , #
    [all...]
ih264_inter_pred_chroma_av8.s 159 umull v20.8h, v0.8b, v28.8b
161 umlal v20.8h, v3.8b, v29.8b
163 umlal v20.8h, v5.8b, v30.8b
165 umlal v20.8h, v8.8b, v31.8b
166 sqrshrun v26.8b, v20.8h, #6
190 umull v20.8h, v10.8b, v28.8b
191 umlal v20.8h, v13.8b, v29.8b
192 umlal v20.8h, v0.8b, v30.8b
193 umlal v20.8h, v3.8b, v31.8b
194 sqrshrun v26.8b, v20.8h, #6
    [all...]
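The four umull/umlal weights in v28..v31 are H.264's chroma bilinear factors (8-dx)(8-dy), dx(8-dy), (8-dx)dy and dx*dy; the weighted sum is rounded and scaled by the sqrshrun #6. Scalar model for one output sample from its four neighbours:

    #include <stdint.h>

    static uint8_t chroma_bilinear(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
                                   int dx, int dy) {  /* 0 <= dx,dy < 8 */
        int w0 = (8 - dx) * (8 - dy), w1 = dx * (8 - dy);
        int w2 = (8 - dx) * dy,       w3 = dx * dy;   /* weights sum to 64 */
        int v = (w0 * a + w1 * b + w2 * c + w3 * d + 32) >> 6; /* sqrshrun #6 */
        return (uint8_t)(v > 255 ? 255 : v);
    }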
ih264_intra_pred_luma_16x16_av8.s 219 dup v20.16b, v0.b[5]
226 st1 {v20.16b}, [x1], x3
323 dup v20.16b, w15
330 dup v20.8h, w11
337 uqshl v0.8h, v0.8h, v20.8h
339 dup v20.16b, v0.b[0]
343 st1 { v20.16b}, [x1], x3
344 st1 { v20.16b}, [x1], x3
345 st1 { v20.16b}, [x1], x3
346 st1 { v20.16b}, [x1], x3
    [all...]
ih264_resi_trans_quant_av8.s 142 sub v20.4h, v15.4h , v16.4h //x2 = x5-x6
145 shl v22.4h, v20.4h, #1 //u_shift(x2,1,shft)
152 add v25.4h, v23.4h , v20.4h //x6 = u_shift(x3,1,shft) + x2;
176 add v20.4s, v0.4s, v23.4s
183 sshl v20.4s, v20.4s, v24.4s //shift row 1
188 xtn v20.4h, v20.4s //narrow row 1
193 neg v24.8h, v20.8h //get negative
199 cmeq v0.4h, v20.4h, #0
    [all...]
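ih264_resi_trans_quant fuses the 4x4 forward core transform with quantisation; the "x2 = x5-x6" and "u_shift(x3,1,shft) + x2" comments are the classic H.264 butterfly with its factor-of-2 taps (the shl #1 above). One 4-point stage in scalar C:

    #include <stdint.h>

    static void h264_fwd4(const int16_t p[4], int16_t y[4]) {
        int16_t s0 = p[0] + p[3], s3 = p[0] - p[3];
        int16_t s1 = p[1] + p[2], s2 = p[1] - p[2];
        y[0] = s0 + s1;
        y[2] = s0 - s1;
        y[1] = (int16_t)(2 * s3 + s2);  /* the u_shift(x,1) tap */
        y[3] = (int16_t)(s3 - 2 * s2);
    }

The cmeq/neg lines afterward handle the sign of each coefficient around the unsigned quantiser multiply.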
  /external/libavc/encoder/armv8/
ih264e_half_pel_av8.s 174 sqrshrun v20.8b, v8.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,row0)
181 st1 {v20.8b, v21.8b}, [x1], #16 ////Store dest row0
307 uaddl v20.8h, v2.8b, v17.8b //// a0 + a5 (column1,row0)
309 umlal v20.8h, v8.8b, v1.8b //// a0 + a5 + 20a2 (column1,row0)
310 umlal v20.8h, v11.8b, v1.8b //// a0 + a5 + 20a2 + 20a3 (column1,row0)
311 umlsl v20.8h, v5.8b, v31.8b //// a0 + a5 + 20a2 + 20a3 - 5a1 (column1,row0)
312 umlsl v20.8h, v14.8b, v31.8b //// a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 (column1,row0)
313 mov v21.d[0], v20.d[1]
320 ext v30.8b, v20.8b , v21.8b , #4
325 ext v29.8b, v20.8b , v21.8b , #
    [all...]
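The comments here spell out H.264's 6-tap half-pel filter, (a0 - 5*a1 + 20*a2 + 20*a3 - 5*a4 + a5 + 16) >> 5, built from uaddl/umlal/umlsl with the constants 20 and 5. Scalar model of one output sample:

    #include <stdint.h>

    static uint8_t halfpel6(const uint8_t a[6]) {
        int v = a[0] + a[5] + 20 * (a[2] + a[3]) - 5 * (a[1] + a[4]);
        v = (v + 16) >> 5;                           /* sqrshrun ..., #5 */
        return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
    }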
ih264e_evaluate_intra16x16_modes_av8.s 135 dup v20.8h, w11
142 uqshl v0.8h, v0.8h, v20.8h
168 dup v20.8b, v9.b[15] ///HORIZONTAL VALUE ROW=0//
178 uabdl v26.8h, v0.8b, v20.8b
189 dup v20.8b, v9.b[14] ///HORIZONTAL VALUE ROW=1//
199 uabal v26.8h, v2.8b, v20.8b
206 dup v20.8b, v9.b[13] ///HORIZONTAL VALUE ROW=2//
215 uabal v26.8h, v4.8b, v20.8b
222 dup v20.8b, v9.b[12] ///HORIZONTAL VALUE ROW=3//
231 uabal v26.8h, v6.8b, v20.8b
    [all...]
  /external/libhevc/decoder/arm64/
ihevcd_fmt_conv_420sp_to_rgba8888.s 203 sMULL v20.4s, v6.4h, v0.h[0] ////(V-128)*C1 FOR R
217 sqshrn v7.4h, v20.4s,#13 ////D10 = (V-128)*C1>>13 4 16-BIT VALUES
230 UADDW v20.8h, v5.8h , v31.8b ////Q10 - HAS Y + B
239 sqxtun v20.8b, v20.8h
251 ZIP1 v27.8b, v20.8b, v21.8b
252 ZIP2 v21.8b, v20.8b, v21.8b
253 mov v20.d[0], v27.d[0]
259 mov v20.d[1], v21.d[0]
266 ZIP1 v25.8h, v20.8h, v22.8h
    [all...]
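This fmt_conv kernel is fixed-point BT.601-style YCbCr-to-RGB: (V-128) is scaled by a Q13 constant (hence the >>13), added to Y, saturated to 8 bits (sqxtun), and the channels are interleaved with zip. A scalar sketch for the red channel only; C1 here is an assumed Q13 coefficient (~1.402 * 8192), not necessarily the exact constant the file loads:

    #include <stdint.h>

    #define C1 11485  /* assumed: round(1.402 * 2^13) */

    static uint8_t yv_to_r(uint8_t y, uint8_t v) {
        int r = y + ((C1 * (v - 128)) >> 13);
        return (uint8_t)(r < 0 ? 0 : r > 255 ? 255 : r);  /* sqxtun clamp */
    }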
  /external/libxaac/decoder/armv8/
ixheaacd_pre_twiddle.s 201 uMULL v20.4s, v6.4h, v11.4h
206 ushR v20.4s, v20.4s, #16
213 sMLAL v20.4s, v7.4h, v11.4h
222 ADD v20.4s, v20.4s , v18.4s
224 NEG v20.4s, v20.4s
232 sshL v20.4s, v20.4s, v14.4s
    [all...]
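ixheaacd_pre_twiddle multiplies the MDCT input by complex twiddle factors in fixed point; the uMULL / ushR #16 / sMLAL sequence is a wide multiply split into low and high halves. For orientation, a plain Q15 complex rotation that computes the same kind of product (illustrative only, not the file's exact scaling):

    #include <stdint.h>

    static void cplx_twiddle_q15(int32_t *re, int32_t *im,
                                 int16_t cos_w, int16_t sin_w) {
        int32_t r = *re, i = *im;
        *re = (int32_t)(((int64_t)r * cos_w - (int64_t)i * sin_w) >> 15);
        *im = (int32_t)(((int64_t)r * sin_w + (int64_t)i * cos_w) >> 15);
    }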
  /external/valgrind/none/tests/arm64/
memory.stdout.exp 133 0000000000000000 v20.d[0] (xor, xfer vecreg #3)
134 0000000000000000 v20.d[1] (xor, xfer vecreg #3)
163 0000000000000000 v20.d[0] (xor, xfer vecreg #3)
164 0000000000000000 v20.d[1] (xor, xfer vecreg #3)
193 0000000000000000 v20.d[0] (xor, xfer vecreg #3)
194 0000000000000000 v20.d[1] (xor, xfer vecreg #3)
223 0000000000000000 v20.d[0] (xor, xfer vecreg #3)
224 0000000000000000 v20.d[1] (xor, xfer vecreg #3)
253 0000000000000000 v20.d[0] (xor, xfer vecreg #3)
254 0000000000000000 v20.d[1] (xor, xfer vecreg #3)
    [all...]
  /frameworks/rs/cpu_ref/
rsCpuIntrinsics_advsimd_3DLUT.S 93 .ifc \dst, v20.16b
134 1: st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [x0], #32
189 lanepair dst=v20.8b, src0=v6.s[0], src1=v6.s[1], xr0=v0.h[0], xr1=v0.h[1], yr0=v1.b[0], yr1=v1.b[1], zr0=v2.h[0], zr1=v2.h[1]
192 lanepair dst=v20.16b, src0=v6.s[2], src1=v6.s[3], xr0=v0.h[2], xr1=v0.h[3], yr0=v1.b[2], yr1=v1.b[3], zr0=v2.h[2], zr1=v2.h[3]
200 uzp1 v6.16b, v20.16b, v21.16b
201 uzp2 v7.16b, v20.16b, v21.16b
202 uzp1 v20.16b, v6.16b, v7.16b
204 mov v21.d[0], v20.d[1]
214 st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [x0], #32
233 st4 {v20.b-v23.b}[0], [x0], #4
    [all...]
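The lanepair macro gathers two lattice cells of the 3D LUT and blends them with the x/y/z fractions (the xr0/yr0/zr0 operands). The underlying operation is ordinary trilinear interpolation; a scalar sketch for one channel, with illustrative indexing:

    /* c[z][y][x] holds the 8 corners of one lattice cell;
     * fx, fy, fz are the fractional coordinates in [0,1). */
    static float trilerp(const float c[2][2][2],
                         float fx, float fy, float fz) {
        float c00 = c[0][0][0] + fx * (c[0][0][1] - c[0][0][0]);
        float c01 = c[0][1][0] + fx * (c[0][1][1] - c[0][1][0]);
        float c10 = c[1][0][0] + fx * (c[1][0][1] - c[1][0][0]);
        float c11 = c[1][1][0] + fx * (c[1][1][1] - c[1][1][0]);
        float c0 = c00 + fy * (c01 - c00);
        float c1 = c10 + fy * (c11 - c10);
        return c0 + fz * (c1 - c0);
    }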
rsCpuIntrinsics_advsimd_ColorMatrix.S 221 vmxx_f32 \i, 1, v16.4s, v20.4s, v0.s[0]
235 vmxx_f32 \i^31, 1, v16.4s, v20.4s, v0.s[0]
249 vmxx_f32 \i, 1, v17.4s, v20.4s, v0.s[1]
263 vmxx_f32 \i^31, 1, v17.4s, v20.4s, v0.s[1]
277 vmxx_f32 \i, 1, v18.4s, v20.4s, v0.s[2]
291 vmxx_f32 \i^31, 1, v18.4s, v20.4s, v0.s[2]
305 vmxx_f32 \i, 1, v19.4s, v20.4s, v0.s[3]
319 vmxx_f32 \i^31, 1, v19.4s, v20.4s, v0.s[3]
330 ld4 {v20.8b,v21.8b,v22.8b,v23.8b}, [x1], #32
331 uxtl v20.8h, v20.8b
    [all...]
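Here vmxx_f32 builds out = M * in one coefficient at a time: each input channel vector (v20..v23 after the uxtl widening) is scaled by a matrix coefficient broadcast from a lane such as v0.s[0] and accumulated into the output rows. The scalar equivalent is a plain 4x4 matrix-vector product:

    static void color_matrix_4x4(const float m[4][4],
                                 const float in[4], float out[4]) {
        for (int r = 0; r < 4; r++) {
            out[r] = 0.0f;
            for (int c = 0; c < 4; c++)
                out[r] += m[r][c] * in[c];  /* one vmxx_f32 step */
        }
    }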

