/external/llvm/test/MC/AArch64/
neon-2velem.s
    10  mla v0.2s, v1.2s, v22.s[2]
    12  mla v3.4s, v8.4s, v22.s[3]
    15  // CHECK: mla v0.2s, v1.2s, v22.s[2] // encoding: [0x20,0x08,0x96,0x2f]
    17  // CHECK: mla v3.4s, v8.4s, v22.s[3] // encoding: [0x03,0x09,0xb6,0x6f]
    30  mls v0.2s, v1.2s, v22.s[2]
    32  mls v3.4s, v8.4s, v22.s[3]
    35  // CHECK: mls v0.2s, v1.2s, v22.s[2] // encoding: [0x20,0x48,0x96,0x2f]
    37  // CHECK: mls v3.4s, v8.4s, v22.s[3] // encoding: [0x03,0x49,0xb6,0x6f]
    52  fmla v0.2s, v1.2s, v22.s[2]
    54  fmla v3.4s, v8.4s, v22.s[3 [all...]
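The by-element forms tested here multiply every lane of the second source by a single scalar lane of v22 and accumulate. A minimal ACLE-intrinsics sketch of the first instruction, with illustrative names (the lane index must be a compile-time constant):

    #include <arm_neon.h>

    /* mla v0.2s, v1.2s, v22.s[2]  ->  acc[i] += src[i] * v22[2] */
    int32x2_t mla_by_element(int32x2_t acc, int32x2_t src, int32x4_t v22)
    {
        return vmla_laneq_s32(acc, src, v22, 2);
    }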
neon-max-min.s
    85  fmin v10.4h, v15.4h, v22.4h
    86  fmin v10.8h, v15.8h, v22.8h
    87  fmin v10.2s, v15.2s, v22.2s
    91  // CHECK: fmin v10.4h, v15.4h, v22.4h // encoding: [0xea,0x35,0xd6,0x0e]
    92  // CHECK: fmin v10.8h, v15.8h, v22.8h // encoding: [0xea,0x35,0xd6,0x4e]
    93  // CHECK: fmin v10.2s, v15.2s, v22.2s // encoding: [0xea,0xf5,0xb6,0x0e]
    115  fminnm v10.4h, v15.4h, v22.4h
    116  fminnm v10.8h, v15.8h, v22.8h
    117  fminnm v10.2s, v15.2s, v22.2s
    121  // CHECK: fminnm v10.4h, v15.4h, v22.4h // encoding: [0xea,0x05,0xd6,0x0e [all...]
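fmin and fminnm differ only in NaN handling: fmin propagates a NaN operand, while fminnm implements IEEE 754-2008 minNum and prefers the numeric operand. A sketch on 32-bit lanes (the .4h/.8h forms above are the fp16 variants):

    #include <arm_neon.h>

    float32x4_t min_propagate_nan(float32x4_t a, float32x4_t b)
    {
        return vminq_f32(a, b);     /* fmin: NaN in -> NaN out */
    }

    float32x4_t min_prefer_number(float32x4_t a, float32x4_t b)
    {
        return vminnmq_f32(a, b);   /* fminnm: NaN loses to a number */
    }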
/cts/tools/dasm/test/
all_opcodes.d
    77  cmpl-float v11, v22, v33
    78  cmpg-float v11, v22, v33
    79  cmpl-double v11, v22, v33
    80  cmpg-double v11, v22, v33
    81  cmp-long v11, v22, v33
    93  aget v11, v22, v33
    94  aget-wide v11, v22, v33
    95  aget-object v11, v22, v33
    96  aget-boolean v11, v22, v33
    97  aget-byte v11, v22, v33 [all...]
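The cmpl/cmpg pairs differ only in their NaN bias: cmpl-float yields -1 when either operand is NaN, cmpg-float yields +1. A plain-C sketch of that contract:

    /* Dalvik float comparison semantics, sketched in C. */
    int cmpl_float(float a, float b)
    {
        if (a > b) return 1;
        if (a == b) return 0;
        return -1;              /* a < b, or unordered (NaN) */
    }

    int cmpg_float(float a, float b)
    {
        if (a < b) return -1;
        if (a == b) return 0;
        return 1;               /* a > b, or unordered (NaN) */
    }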
/external/libhevc/common/arm64/
ihevc_intra_pred_chroma_ver.s
    121  ld2 {v22.8b, v23.8b}, [x6] //16 loads (col 16:31)
    132  st2 {v22.8b, v23.8b}, [x2], x11
    133  st2 {v22.8b, v23.8b}, [x5], x11
    134  st2 {v22.8b, v23.8b}, [x8], x11
    135  st2 {v22.8b, v23.8b}, [x10], x11
    145  st2 {v22.8b, v23.8b}, [x2], x11
    146  st2 {v22.8b, v23.8b}, [x5], x11
    147  st2 {v22.8b, v23.8b}, [x8], x11
    148  st2 {v22.8b, v23.8b}, [x10], x11
    158  st2 {v22.8b, v23.8b}, [x2], x1 [all...]
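Vertical chroma prediction loads the row above the block once and replicates it down every row; ld2/st2 keep the Cb/Cr interleaving intact. A sketch for a 16-row block, with illustrative names:

    #include <arm_neon.h>

    void pred_chroma_ver_16(uint8_t *dst, long stride, const uint8_t *top)
    {
        uint8x8x2_t cbcr = vld2_u8(top);          /* 16 bytes: 8 Cb + 8 Cr */
        for (int row = 0; row < 16; row++)
            vst2_u8(dst + row * stride, cbcr);    /* re-interleave on store */
    }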
ihevc_sao_edge_offset_class1.s
    161  cmhi v22.16b, v18.16b , v30.16b //II vcgtq_u8(pu1_cur_row, pu1_top_row)
    167  SUB v1.16b, v24.16b , v22.16b //II sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    169  ADD v22.16b, v0.16b , v16.16b //II edge_idx = vaddq_s8(const_2, sign_up)
    174  ADD v22.16b, v22.16b , v1.16b //II edge_idx = vaddq_s8(edge_idx, sign_down)
    178  TBL v22.16b, {v6.16b},v22.16b //II vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
    191  TBL v24.16b, {v7.16b},v22.16b //II offset = vtbl1_s8(offset_tbl, vget_low_s8(edge_idx))
    228  ADD v22.16b, v0.16b , v16.16b //edge_idx = vaddq_s8(const_2, sign_up)
    229  ADD v22.16b, v22.16b , v20.16b //edge_idx = vaddq_s8(edge_idx, sign_down [all...]
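The comments name the SAO class-1 (vertical) kernel directly: edge_idx = 2 + sign(cur - above) + sign(cur - below), then a table lookup. Because compare masks are 0xFF (-1 as s8), cmp_lt - cmp_gt yields the sign in {-1, 0, +1}. An 8-lane sketch with illustrative names:

    #include <arm_neon.h>

    int8x8_t sao_class1_edge_idx(uint8x8_t cur, uint8x8_t above,
                                 uint8x8_t below, int8x8_t edge_idx_tbl)
    {
        int8x8_t sign_up = vreinterpret_s8_u8(
            vsub_u8(vclt_u8(cur, above), vcgt_u8(cur, above)));
        int8x8_t sign_down = vreinterpret_s8_u8(
            vsub_u8(vclt_u8(cur, below), vcgt_u8(cur, below)));

        int8x8_t edge_idx = vadd_s8(vdup_n_s8(2), sign_up);  /* const_2 + sign_up */
        edge_idx = vadd_s8(edge_idx, sign_down);             /* range 0..4 */
        return vtbl1_s8(edge_idx_tbl, edge_idx);             /* remap via table */
    }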
ihevc_sao_edge_offset_class1_chroma.s
    190  cmhi v22.16b, v18.16b , v30.16b //II vcgtq_u8(pu1_cur_row, pu1_top_row)
    197  SUB v28.16b, v24.16b , v22.16b //II sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    199  ADD v22.16b, v0.16b , v16.16b //II edge_idx = vaddq_s8(const_2, sign_up)
    207  ADD v22.16b, v22.16b , v28.16b //II edge_idx = vaddq_s8(edge_idx, sign_down)
    216  TBL v22.16b, {v6.16b},v22.16b //II vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
    222  mov v23.d[0], v22.d[1]
    223  UZP1 v27.8b, v22.8b, v23.8b
    224  UZP2 v23.8b, v22.8b, v23.8 [all...]
ihevc_inter_pred_chroma_copy_w16out.s
    146  ld1 {v22.8b},[x5],x2 //vld1_u8(pu1_src_tmp)
    150  uxtl v22.8h, v22.8b //vmovl_u8(vld1_u8(pu1_src_tmp)
    152  shl v22.2d, v22.2d,#6 //vshlq_n_s64(temp, 6)
    154  st1 {v22.1d},[x10],x6 //vst1q_lane_s64(pi2_dst_tmp, temp, 0)
    190  ld1 {v22.8b},[x5],x2 //vld1_u8(pu1_src_tmp)
    194  uxtl v22.8h, v22.8b //vmovl_u8(vld1_u8(pu1_src_tmp)
    196  shl v22.2d, v22.2d,#6 //vshlq_n_s64(temp, 6 [all...]
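The w16out copy widens 8-bit pixels to 16 bits and pre-scales them by 64 (shift left 6), the intermediate format the rest of the HEVC pipeline expects. A sketch of one 8-pixel step, names illustrative:

    #include <arm_neon.h>

    void copy_w16out_8(const uint8_t *src, int16_t *dst)
    {
        uint16x8_t wide = vmovl_u8(vld1_u8(src));                     /* uxtl   */
        vst1q_s16(dst, vshlq_n_s16(vreinterpretq_s16_u16(wide), 6));  /* shl #6 */
    }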
ihevc_inter_pred_chroma_horz.s
    240  umull v22.8h, v10.8b, v25.8b //mul_res = vmull_u8(src[0_3], coeffabs_3)//
    247  umlsl v22.8h, v29.8b, v24.8b //mul_res = vmlsl_u8(src[0_2], coeffabs_2)//
    265  umlal v22.8h, v12.8b, v26.8b //mul_res = vmlsl_u8(src[0_0], coeffabs_0)//
    272  umlsl v22.8h, v14.8b, v27.8b //mul_res = vmlal_u8(src[0_1], coeffabs_1)//
    308  sqrshrun v22.8b, v22.8h,#6
    320  //mov v22.s[1],v23.s[0]
    321  st1 { v22.4h},[x1],x6 //store the result pu1_dst
    354  umull v22.8h, v10.8b, v25.8b //mul_res = vmull_u8(src[0_3], coeffabs_3)//
    359  umlsl v22.8h, v29.8b, v24.8b //mul_res = vmlsl_u8(src[0_2], coeffabs_2)/ [all...]
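The chroma filter keeps absolute coefficient values (coeffabs_*) and applies each tap with umlal or umlsl according to its sign, then sqrshrun rounds, shifts by 6 and saturates back to 8 bits. A sketch assuming the usual (-, +, +, -) HEVC chroma tap signs; which tap anchors the initial vmull varies by phase:

    #include <arm_neon.h>

    uint8x8_t chroma_horz_8(uint8x8_t s0, uint8x8_t s1, uint8x8_t s2, uint8x8_t s3,
                            uint8x8_t c0, uint8x8_t c1, uint8x8_t c2, uint8x8_t c3)
    {
        uint16x8_t acc = vmull_u8(s1, c1);   /* large positive tap */
        acc = vmlsl_u8(acc, s0, c0);         /* negative tap       */
        acc = vmlal_u8(acc, s2, c2);         /* positive tap       */
        acc = vmlsl_u8(acc, s3, c3);         /* negative tap       */
        return vqrshrun_n_s16(vreinterpretq_s16_u16(acc), 6);  /* round, >>6 */
    }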
ihevc_inter_pred_luma_copy_w16out.s
    115  ld1 {v22.8b},[x5],x2 //vld1_u8(pu1_src_tmp)
    119  uxtl v22.8h, v22.8b //vmovl_u8(vld1_u8(pu1_src_tmp)
    121  shl v22.2d, v22.2d,#6 //vshlq_n_s64(temp, 6)
    123  st1 {v22.d}[0],[x10],x6 //vst1q_lane_s64(pi2_dst_tmp, temp, 0)
    169  uxtl v22.8h, v7.8b //vmovl_u8(vld1_u8(pu1_src_tmp)
    174  shl v6.8h, v22.8h,#6 //vshlq_n_s16(tmp, 6)
    207  uxtl v22.8h, v7.8b //vmovl_u8(vld1_u8(pu1_src_tmp)
    227  shl v6.8h, v22.8h,#6 //vshlq_n_s16(tmp, 6 [all...]
ihevc_intra_pred_luma_horz.s
    215  sqadd v22.8h, v26.8h , v24.8h
    218  sqxtun v22.8b, v22.8h
    220  st1 {v22.8b},[x2],#8
    229  sqadd v22.8h, v26.8h , v24.8h
    232  sqxtun v22.8b, v22.8h
    234  st1 {v22.8b},[x2],x3
    297  sqadd v22.8h, v26.8h , v24.8h
    300  sqxtun v22.8b, v22.8 [all...]
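sqadd/sqxtun is the standard clamp-to-pixel idiom: a saturating 16-bit add, then a narrowing with unsigned saturation so results land in [0, 255]. A sketch, names illustrative:

    #include <arm_neon.h>

    void store_clamped(uint8_t *dst, int16x8_t base, int16x8_t delta)
    {
        int16x8_t sum = vqaddq_s16(base, delta);   /* sqadd  */
        vst1_u8(dst, vqmovun_s16(sum));            /* sqxtun + st1 */
    }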
ihevc_intra_pred_luma_mode_3_to_9.s
    139  smull v22.8h, v30.8b, v31.8b //(col+1)*intra_pred_angle [0:7](col)
    157  xtn v6.8b, v22.8h
    164  sshr v22.8h, v22.8h,#5
    170  sqxtn v1.8b, v22.8h
    200  umull v22.8h, v16.8b, v7.8b //mul (row 1)
    201  umlal v22.8h, v17.8b, v6.8b //mul (row 1)
    208  rshrn v22.8b, v22.8h,#5 //round shft (row 1)
    218  st1 {v22.8b},[x2], x3 //st (row 1 [all...]
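(col+1)*intra_pred_angle is split into an integer reference step (sshr #5) and a 5-bit fraction; each output pixel then blends two neighbouring reference pixels with weights (32-frac) and frac, rounded and shifted by 5. A sketch of the per-row blend, names illustrative:

    #include <arm_neon.h>

    /* out = (w0*ref[i] + w1*ref[i+1] + 16) >> 5, with w0 = 32 - w1 */
    uint8x8_t angular_blend(uint8x8_t ref0, uint8x8_t ref1,
                            uint8x8_t w0, uint8x8_t w1)
    {
        uint16x8_t acc = vmull_u8(ref0, w0);   /* mul (row)  */
        acc = vmlal_u8(acc, ref1, w1);
        return vrshrn_n_u16(acc, 5);           /* round shft */
    }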
ihevc_intra_pred_luma_vert.s
    122  ld1 {v22.8b, v23.8b}, [x6] //16 loads (col 16:31)
    131  st1 {v22.8b, v23.8b}, [x2], x11
    132  st1 {v22.8b, v23.8b}, [x5], x11
    133  st1 {v22.8b, v23.8b}, [x8], x11
    134  st1 {v22.8b, v23.8b}, [x10], x11
    144  st1 {v22.8b, v23.8b}, [x2], x11
    145  st1 {v22.8b, v23.8b}, [x5], x11
    146  st1 {v22.8b, v23.8b}, [x8], x11
    147  st1 {v22.8b, v23.8b}, [x10], x11
    156  st1 {v22.8b, v23.8b}, [x2], x1 [all...]
ihevc_itrans_recon_8x8.s
    208  smull v22.4s, v10.4h, v0.h[0] //// y4 * cos4(part of c0 and c1)
    242  add v10.4s, v20.4s , v22.4s //// c0 = y0 * cos4 + y4 * cos4(part of a0 and a1)
    243  sub v20.4s, v20.4s , v22.4s //// c1 = y0 * cos4 - y4 * cos4(part of a0 and a1)
    252  sub v22.4s, v20.4s , v18.4s //// a2 = c1 - d1(part of x2,x5)
    258  add v24.4s, v22.4s , v28.4s //// a2 + b2(part of x2)
    259  sub v22.4s, v22.4s , v28.4s //// a2 - b2(part of x5)
    270  sqrshrn v14.4h, v22.4s,#shift_stage1_idct //// x5 = (a2 - b2 + rnd) >> 7(shift_stage1_idct)
    322  sub v22.4s, v20.4s , v18.4s //// a2 = c1 - d1(part of x2,x5)
    328  add v24.4s, v22.4s , v28.4s //// a2 + b2(part of x2 [all...]
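The comments trace a classic even/odd IDCT butterfly: c1 = y0*cos4 - y4*cos4, a2 = c1 - d1, then x2 = a2 + b2 and x5 = a2 - b2, each narrowed with rounding saturation by 7 (shift_stage1_idct). A sketch of that stage-1 arithmetic on one column group, inputs being the 32-bit partial sums:

    #include <arm_neon.h>

    #define SHIFT_STAGE1_IDCT 7   /* per the ">> 7" in the comments above */

    void idct_butterfly(int32x4_t t0, int32x4_t t4, int32x4_t d1, int32x4_t b2,
                        int16x4_t *x2, int16x4_t *x5)
    {
        int32x4_t c1 = vsubq_s32(t0, t4);   /* y0*cos4 - y4*cos4 */
        int32x4_t a2 = vsubq_s32(c1, d1);   /* a2 = c1 - d1      */
        *x2 = vqrshrn_n_s32(vaddq_s32(a2, b2), SHIFT_STAGE1_IDCT);
        *x5 = vqrshrn_n_s32(vsubq_s32(a2, b2), SHIFT_STAGE1_IDCT);
    }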
ihevc_inter_pred_chroma_horz_w16out.s
    252  umull v22.8h, v10.8b, v25.8b //mul_res = vmull_u8(src[0_3], coeffabs_3)//
    267  umlsl v22.8h, v29.8b, v24.8b //mul_res = vmlsl_u8(src[0_2], coeffabs_2)//
    275  umlal v22.8h, v12.8b, v26.8b //mul_res = vmlsl_u8(src[0_0], coeffabs_0)//
    282  umlsl v22.8h, v14.8b, v27.8b //mul_res = vmlal_u8(src[0_1], coeffabs_1)//
    314  st1 { v22.8h},[x1],#16 //store the result pu1_dst
    347  umull v22.8h, v10.8b, v25.8b //mul_res = vmull_u8(src[0_3], coeffabs_3)//
    352  umlsl v22.8h, v29.8b, v24.8b //mul_res = vmlsl_u8(src[0_2], coeffabs_2)//
    354  umlal v22.8h, v12.8b, v26.8b //mul_res = vmlsl_u8(src[0_0], coeffabs_0)//
    359  umlsl v22.8h, v14.8b, v27.8b //mul_res = vmlal_u8(src[0_1], coeffabs_1)//
    405  st1 { v22.8h},[x1],#16 //store the result pu1_ds [all...]
ihevc_intra_pred_filters_luma_mode_11_to_17.s
    255  smull v22.8h, v30.8b, v31.8b //(col+1)*intra_pred_angle [0:7](col)
    277  xtn v6.8b, v22.8h
    284  sshr v22.8h, v22.8h,#5
    292  sqxtn v19.8b, v22.8h
    320  umull v22.8h, v16.8b, v7.8b //mul (row 1)
    321  umlal v22.8h, v17.8b, v6.8b //mul (row 1)
    328  rshrn v22.8b, v22.8h,#5 //round shft (row 1)
    338  st1 {v22.8b},[x2], x3 //st (row 1 [all...]
/external/libavc/common/armv8/
ih264_inter_pred_luma_horz_hpel_vert_hpel_av8.s
    112  uaddl v22.8h, v2.8b, v8.8b
    119  mls v18.8h, v22.8h , v30.8h
    120  uaddl v22.8h, v12.8b, v17.8b
    123  mla v22.8h, v24.8h , v28.8h
    124  mls v22.8h, v26.8h , v30.8h
    151  ext v24.16b, v20.16b , v22.16b , #4
    152  ext v26.16b, v20.16b , v22.16b , #6
    153  ext v0.16b, v20.16b , v22.16b , #10
    156  ext v24.16b, v20.16b , v22.16b , #2
    157  ext v26.16b, v20.16b , v22.16b , # [all...]
ih264_inter_pred_luma_horz_qpel_vert_hpel_av8.s
    174  uaddl v22.8h, v2.8b, v8.8b
    181  mls v18.8h, v22.8h , v30.8h
    182  uaddl v22.8h, v12.8b, v17.8b
    185  mla v22.8h, v24.8h , v28.8h
    186  mls v22.8h, v26.8h , v30.8h
    191  st1 {v22.4s}, [x9]
    192  ext v22.16b, v18.16b , v20.16b , #10
    198  saddl v26.4s, v18.4h, v22.4h
    202  saddl2 v22.4s, v18.8h, v22.8 [all...]
ih264_inter_pred_filters_luma_vert_av8.s
    115  movi v22.8h, #20 // Filter coeff 0x14 into Q11
    138  mla v14.8h, v12.8h, v22.8h // temp += temp1 * 20
    141  mla v20.8h, v18.8h , v22.8h // temp4 += temp3 * 20
    148  mla v16.8h, v12.8h , v22.8h
    155  mla v14.8h, v12.8h , v22.8h
    162  mla v18.8h, v12.8h , v22.8h
    169  mla v16.8h, v12.8h , v22.8h
    177  mla v14.8h, v12.8h , v22.8h
    183  mla v18.8h, v12.8h , v22.8h
    197  mla v14.8h, v12.8h , v22.8h // temp += temp1 * 2 [all...]
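The vertical 6-tap filter works on 16-bit pair sums: movi loads the constant 20 once, then mla/mls fold in 20*(b+c) and -5*(a+d). A sketch with illustrative row names:

    #include <arm_neon.h>

    int16x8_t six_tap_vert(uint8x8_t r0, uint8x8_t r1, uint8x8_t r2,
                           uint8x8_t r3, uint8x8_t r4, uint8x8_t r5)
    {
        int16x8_t s05 = vreinterpretq_s16_u16(vaddl_u8(r0, r5));
        int16x8_t s14 = vreinterpretq_s16_u16(vaddl_u8(r1, r4));
        int16x8_t s23 = vreinterpretq_s16_u16(vaddl_u8(r2, r3));
        int16x8_t acc = vmlaq_n_s16(s05, s23, 20);  /* temp += temp1 * 20 */
        return vmlsq_n_s16(acc, s14, 5);            /* temp -= temp3 * 5  */
    }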
ih264_inter_pred_chroma_av8.s
    161  umull v22.8h, v1.8b, v28.8b
    163  umlal v22.8h, v4.8b, v29.8b
    165  umlal v22.8h, v6.8b, v30.8b
    167  umlal v22.8h, v9.8b, v31.8b
    168  sqrshrun v27.8b, v22.8h, #6
    202  umull v22.8h, v1.8b, v28.8b
    203  umlal v22.8h, v4.8b, v29.8b
    204  umlal v22.8h, v6.8b, v30.8b
    206  umlal v22.8h, v9.8b, v31.8b
    208  sqrshrun v27.8b, v22.8h, # [all...]
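H.264 chroma motion compensation is a bilinear blend of four neighbours: out = (A*s00 + B*s01 + C*s10 + D*s11 + 32) >> 6, with the weights derived from the fractional offsets (dx, dy). A sketch of the umull/umlal/sqrshrun chain, names illustrative:

    #include <arm_neon.h>

    uint8x8_t chroma_mc_8(uint8x8_t s00, uint8x8_t s01,
                          uint8x8_t s10, uint8x8_t s11,
                          uint8x8_t wA, uint8x8_t wB,
                          uint8x8_t wC, uint8x8_t wD)
    {
        uint16x8_t acc = vmull_u8(s00, wA);
        acc = vmlal_u8(acc, s01, wB);
        acc = vmlal_u8(acc, s10, wC);
        acc = vmlal_u8(acc, s11, wD);
        return vqrshrun_n_s16(vreinterpretq_s16_u16(acc), 6);  /* +32, >>6 */
    }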
ih264_resi_trans_quant_av8.s
    167  shl v22.4h, v20.4h, #1 //u_shift(x2,1,shft)
    175  sub v27.4h, v21.4h , v22.4h //x8 = x3 - u_shift(x2,1,shft);
    200  add v22.4s, v2.4s, v23.4s
    207  sshl v22.4s, v22.4s, v24.4s //shift row 3
    212  xtn v22.4h, v22.4s //narrow row 3
    217  neg v26.8h, v22.8h //get negative
    223  cmeq v2.4h, v22.4h, #0
    228  bsl v6.8b, v22.8b, v26.8b //restore sign of row 1 and [all...]
ih264_inter_pred_luma_horz_hpel_vert_qpel_av8.s
    150  movi v22.8h, #20 // Filter coeff 0x14 into Q11
    166  mla v6.8h, v8.8h , v22.8h
    180  mla v8.8h, v10.8h , v22.8h
    194  mla v10.8h, v12.8h , v22.8h
    208  mla v12.8h, v14.8h , v22.8h
    222  mla v14.8h, v16.8h , v22.8h
    239  mla v16.8h, v18.8h , v22.8h
    261  smlal v18.4s, v30.4h, v22.4h
    263  smlal2 v6.4s, v30.8h, v22.8h
    267  mla v20.8h, v2.8h , v22.8 [all...]
/external/libavc/encoder/armv8/
ih264e_half_pel_av8.s
    174  sqrshrun v22.8b, v12.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column3,row0)
    180  st1 {v22.h}[0], [x1], x3
    311  uaddl v22.8h, v3.8b, v18.8b //// a0 + a5 (column2,row0)
    312  umlal v22.8h, v9.8b, v1.8b //// a0 + a5 + 20a2 (column2,row0)
    313  umlal v22.8h, v12.8b, v1.8b //// a0 + a5 + 20a2 + 20a3 (column2,row0)
    314  umlsl v22.8h, v6.8b, v31.8b //// a0 + a5 + 20a2 + 20a3 - 5a1 (column2,row0)
    315  umlsl v22.8h, v15.8b, v31.8b //// a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 (column2,row0)
    317  mov v23.d[0], v22.d[1]
    329  ext v31.8b, v21.8b , v22.8b , #2
    330  sqrshrun v3.8b, v22.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2,row0 [all...]
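The comments spell out the half-pel formula exactly: (a0 + a5 + 20*(a2 + a3) - 5*(a1 + a4) + 16) >> 5, saturated to 8 bits. A direct intrinsics sketch of that uaddl/umlal/umlsl/sqrshrun chain:

    #include <arm_neon.h>

    uint8x8_t six_tap_hpel(uint8x8_t a0, uint8x8_t a1, uint8x8_t a2,
                           uint8x8_t a3, uint8x8_t a4, uint8x8_t a5)
    {
        uint16x8_t acc = vaddl_u8(a0, a5);          /* a0 + a5 */
        acc = vmlal_u8(acc, a2, vdup_n_u8(20));     /* + 20a2  */
        acc = vmlal_u8(acc, a3, vdup_n_u8(20));     /* + 20a3  */
        acc = vmlsl_u8(acc, a1, vdup_n_u8(5));      /* - 5a1   */
        acc = vmlsl_u8(acc, a4, vdup_n_u8(5));      /* - 5a4   */
        return vqrshrun_n_s16(vreinterpretq_s16_u16(acc), 5);  /* +16, >>5 */
    }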
/external/libhevc/decoder/arm64/
ihevcd_fmt_conv_420sp_to_rgba8888.s
    204  sMULL2 v22.4s, v6.8h, v0.h[0] ////(V-128)*C1 FOR R
    218  sqshrn2 v7.8h, v22.4s,#13 ////D11 = (V-128)*C1>>13 4 16-BIT VALUES
    231  UADDW v22.8h, v7.8h , v31.8b ////Q11 - HAS Y + R
    241  sqxtun v22.8b, v22.8h
    254  ZIP1 v27.8b, v22.8b, v23.8b
    255  ZIP2 v23.8b, v22.8b, v23.8b
    256  mov v22.d[0], v27.d[0]
    261  mov v22.d[1], v23.d[0]
    266  ZIP1 v25.8h, v20.8h, v22.8 [all...]
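Each colour channel follows the same fixed-point pattern: a Q13 coefficient times (V-128) or (U-128), narrowed by 13, added to Y, then saturated to 8 bits. A sketch of the red term; C1 here is an assumed coefficient parameter, not the library's exact constant:

    #include <arm_neon.h>

    uint8x8_t red_channel(uint8x8_t y, int16x4_t v_lo_m128,
                          int16x4_t v_hi_m128, int16_t C1)
    {
        int16x4_t lo = vqshrn_n_s32(vmull_n_s16(v_lo_m128, C1), 13);
        int16x4_t hi = vqshrn_n_s32(vmull_n_s16(v_hi_m128, C1), 13);
        int16x8_t term = vcombine_s16(lo, hi);        /* (V-128)*C1 >> 13 */
        int16x8_t y16 = vreinterpretq_s16_u16(vmovl_u8(y));
        return vqmovun_s16(vaddq_s16(y16, term));     /* Y + R, clamped */
    }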
/external/libmpeg2/common/armv8/
icv_variance_av8.s
    87  umull v22.8h, v1.8b, v1.8b
    91  uaddl v21.4s, v20.4h, v22.4h
    93  uaddl2 v20.4s, v20.8h, v22.8h
    97  add v22.4s, v24.4s, v25.4s
    98  add v20.4s, v20.4s, v22.4s
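The variance kernel squares pixels with umull and folds the 16-bit products into 32-bit accumulators; the final reduction is variance = (N*sum_sq - sum^2) / N^2. A per-row accumulation sketch, with the horizontal folds left out for brevity:

    #include <arm_neon.h>

    void var_accumulate(const uint8_t *row, uint32x4_t *sum, uint32x4_t *sum_sq)
    {
        uint8x8_t x = vld1_u8(row);
        *sum_sq = vpadalq_u16(*sum_sq, vmull_u8(x, x));  /* accumulate x^2 */
        *sum    = vpadalq_u16(*sum,    vmovl_u8(x));     /* accumulate x   */
    }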
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/ppc/
filter_bilinear_altivec.asm
    70  ;# v22 tmp
    97  lvx v22, r10, r3
    102  vperm v21, v21, v22, v17
    114  lvx v22, r10, r3
    120  vperm \V, v21, v22, v17
    132  vmuleub v22, \P0, v20 ;# 64 + 4 positive taps
    133  vadduhm v22, v18, v22
    138  vadduhm v22, v22, v24 ;# Re = evens, saturation unnecessar [all...]
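VP8's bilinear predictors use 7-bit filter pairs (f0 + f1 == 128) with a rounding bias of 64, which matches the "64 + 4 positive taps" comment. A scalar C sketch of one tap pair the AltiVec code vectorises; names are illustrative:

    enum { VP8_FILTER_SHIFT = 7, VP8_ROUND = 1 << (VP8_FILTER_SHIFT - 1) };

    static unsigned char bilinear_tap(unsigned char a, unsigned char b,
                                      int f0, int f1)
    {
        return (unsigned char)((a * f0 + b * f1 + VP8_ROUND) >> VP8_FILTER_SHIFT);
    }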