/external/libhevc/common/arm/ |
ihevc_inter_pred_luma_copy_w16out.s | 104 vmovl.u8 q0,d0 @vmovl_u8(vld1_u8(pu1_src_tmp) 112 vmovl.u8 q11,d22 @vmovl_u8(vld1_u8(pu1_src_tmp) 115 vmovl.u8 q12,d24 @vmovl_u8(vld1_u8(pu1_src_tmp) 120 vmovl.u8 q13,d26 @vmovl_u8(vld1_u8(pu1_src_tmp) 152 vmovl.u8 q8,d8 @vmovl_u8(vld1_u8(pu1_src_tmp)) 153 vmovl.u8 q9,d10 @vmovl_u8(vld1_u8(pu1_src_tmp) 154 vmovl.u8 q10,d12 @vmovl_u8(vld1_u8(pu1_src_tmp) 155 vmovl.u8 q11,d14 @vmovl_u8(vld1_u8(pu1_src_tmp) 182 vmovl.u8 q8,d8 @vmovl_u8(vld1_u8(pu1_src_tmp)) 185 vmovl.u8 q9,d10 @vmovl_u8(vld1_u8(pu1_src_tmp [all...] |
ihevc_inter_pred_chroma_copy_w16out.s | 134 vmovl.u8 q0,d0 @vmovl_u8(vld1_u8(pu1_src_tmp) 142 vmovl.u8 q11,d22 @vmovl_u8(vld1_u8(pu1_src_tmp) 145 vmovl.u8 q12,d24 @vmovl_u8(vld1_u8(pu1_src_tmp) 150 vmovl.u8 q13,d26 @vmovl_u8(vld1_u8(pu1_src_tmp) 175 vmovl.u8 q0,d0 @vmovl_u8(vld1_u8(pu1_src_tmp) 183 vmovl.u8 q11,d22 @vmovl_u8(vld1_u8(pu1_src_tmp) 186 vmovl.u8 q12,d24 @vmovl_u8(vld1_u8(pu1_src_tmp) 212 vmovl.u8 q8,d8 @vmovl_u8(vld1_u8(pu1_src_tmp)) 213 vmovl.u8 q9,d10 @vmovl_u8(vld1_u8(pu1_src_tmp) 214 vmovl.u8 q10,d12 @vmovl_u8(vld1_u8(pu1_src_tmp [all...] |
ihevc_sao_edge_offset_class1_chroma.s | 161 VMOVL.U8 Q13,D18 @II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 164 VMOVL.U8 Q14,D19 @II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 183 VMOVL.U8 Q10,D10 @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 195 VMOVL.U8 Q14,D11 @pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 214 VMOVL.U8 Q14,D19 @II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 255 VMOVL.U8 Q13,D10 @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 261 VMOVL.U8 Q14,D11 @pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 316 VMOVL.U8 Q13,D18 @II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 340 VMOVL.U8 Q10,D10 @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 391 VMOVL.U8 Q13,D10 @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)) [all...] |
ihevc_sao_edge_offset_class1.s | 156 VMOVL.U8 Q13,D18 @II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 159 VMOVL.U8 Q14,D19 @II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 178 VMOVL.U8 Q10,D10 @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 187 VMOVL.U8 Q4,D11 @pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 237 VMOVL.U8 Q13,D10 @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 243 VMOVL.U8 Q14,D11 @pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 298 VMOVL.U8 Q13,D18 @II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 315 VMOVL.U8 Q10,D10 @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 355 VMOVL.U8 Q13,D10 @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
|
ihevc_sao_edge_offset_class0.s | 199 VMOVL.U8 Q9,D12 @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 211 VMOVL.U8 Q0,D26 @II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 226 VMOVL.U8 Q7,D13 @pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 240 VMOVL.U8 Q14,D27 @II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 321 VMOVL.U8 Q14,D12 @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
|
ihevc_sao_edge_offset_class2.s | 309 VMOVL.U8 Q10,D12 @I pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 314 VMOVL.U8 Q11,D13 @I pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 396 VMOVL.U8 Q13,D12 @II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 405 VMOVL.U8 Q10,D16 @III pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 415 VMOVL.U8 Q14,D13 @II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 423 VMOVL.U8 Q9,D17 @III pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 483 VMOVL.U8 Q10,D12 @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 488 VMOVL.U8 Q6,D13 @pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) [all...] |
ihevc_sao_edge_offset_class2_chroma.s | 416 VMOVL.U8 Q10,D12 @I pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 419 VMOVL.U8 Q9,D13 @I pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 536 VMOVL.U8 Q14,D12 @II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 552 VMOVL.U8 Q13,D13 @II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 563 VMOVL.U8 Q10,D16 @III pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 572 VMOVL.U8 Q9,D17 @III pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) [all...] |
ihevc_sao_edge_offset_class3.s | 320 VMOVL.U8 Q10,D12 @I pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 325 VMOVL.U8 Q11,D13 @I pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 416 VMOVL.U8 Q14,D12 @II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 435 VMOVL.U8 Q13,D13 @II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 442 VMOVL.U8 Q10,D16 @III pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 449 VMOVL.U8 Q11,D17 @III pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 507 VMOVL.U8 Q10,D12 @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 510 VMOVL.U8 Q11,D13 @pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) [all...] |
ihevc_sao_edge_offset_class3_chroma.s | 407 VMOVL.U8 Q10,D12 @I pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 415 VMOVL.U8 Q9,D13 @I pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 536 VMOVL.U8 Q14,D12 @II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 544 VMOVL.U8 Q13,D13 @II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 547 VMOVL.U8 Q10,D16 @III pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 558 VMOVL.U8 Q9,D17 @III pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) [all...] |
ihevc_sao_edge_offset_class0_chroma.s | 217 VMOVL.U8 Q9,D12 @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 223 VMOVL.U8 Q6,D13 @pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 245 VMOVL.U8 Q14,D30 @II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 258 VMOVL.U8 Q15,D31 @II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 382 VMOVL.U8 Q9,D12 @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 384 VMOVL.U8 Q12,D30 @II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
|
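The arm/ kernels above widen 8-bit pixels to 16 bits with vmovl.u8, and their @-comments spell out the equivalent C intrinsic expressions. A minimal standalone sketch of those two idioms follows (illustrative function names, not libhevc's; the real w16out and SAO kernels do more work than this widening step):

```c
/* Illustrative sketch only -- not libhevc code.  Shows the two widening
 * idioms that the vmovl.u8 comments above describe. */
#include <arm_neon.h>
#include <stdint.h>

/* copy_w16out style: load 8 pixels and widen them to 16 bits,
 * stored through a signed 16-bit destination pointer. */
static inline void widen8(const uint8_t *pu1_src_tmp, int16_t *pi2_dst)
{
    uint16x8_t w = vmovl_u8(vld1_u8(pu1_src_tmp));   /* vmovl_u8(vld1_u8(...)) */
    vst1q_s16(pi2_dst, vreinterpretq_s16_u16(w));
}

/* SAO edge-offset style: split a 16-pixel row into low/high halves and
 * widen each half, as the pi2_tmp_cur_row comments read. */
static inline void widen16(uint8x16_t pu1_cur_row, int16x8_t tmp[2])
{
    tmp[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)));
    tmp[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)));
}
```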
/external/libhevc/common/arm64/ |
ihevc_inter_pred_chroma_copy_w16out.s | 142 uxtl v0.8h, v0.8b //vmovl_u8(vld1_u8(pu1_src_tmp) 150 uxtl v22.8h, v22.8b //vmovl_u8(vld1_u8(pu1_src_tmp) 153 uxtl v24.8h, v24.8b //vmovl_u8(vld1_u8(pu1_src_tmp) 158 uxtl v26.8h, v26.8b //vmovl_u8(vld1_u8(pu1_src_tmp) 186 uxtl v0.8h, v0.8b //vmovl_u8(vld1_u8(pu1_src_tmp) 194 uxtl v22.8h, v22.8b //vmovl_u8(vld1_u8(pu1_src_tmp) 197 uxtl v24.8h, v24.8b //vmovl_u8(vld1_u8(pu1_src_tmp) 226 uxtl v16.8h, v1.8b //vmovl_u8(vld1_u8(pu1_src_tmp)) 227 uxtl v18.8h, v3.8b //vmovl_u8(vld1_u8(pu1_src_tmp) 228 uxtl v20.8h, v5.8b //vmovl_u8(vld1_u8(pu1_src_tmp [all...] |
ihevc_inter_pred_luma_copy_w16out.s | 111 uxtl v0.8h, v0.8b //vmovl_u8(vld1_u8(pu1_src_tmp) 119 uxtl v22.8h, v22.8b //vmovl_u8(vld1_u8(pu1_src_tmp) 122 uxtl v24.8h, v24.8b //vmovl_u8(vld1_u8(pu1_src_tmp) 127 uxtl v26.8h, v26.8b //vmovl_u8(vld1_u8(pu1_src_tmp) 166 uxtl v16.8h, v1.8b //vmovl_u8(vld1_u8(pu1_src_tmp)) 167 uxtl v18.8h, v3.8b //vmovl_u8(vld1_u8(pu1_src_tmp) 168 uxtl v20.8h, v5.8b //vmovl_u8(vld1_u8(pu1_src_tmp) 169 uxtl v22.8h, v7.8b //vmovl_u8(vld1_u8(pu1_src_tmp) 199 uxtl v16.8h, v1.8b //vmovl_u8(vld1_u8(pu1_src_tmp)) 202 uxtl v18.8h, v3.8b //vmovl_u8(vld1_u8(pu1_src_tmp [all...] |
ihevc_sao_edge_offset_class1_chroma.s | 184 Uxtl v26.8h, v18.8b //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 187 Uxtl2 v28.8h, v18.16b //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 209 Uxtl v20.8h, v3.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 227 Uxtl2 v28.8h, v3.16b //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 248 Uxtl2 v28.8h, v18.16b //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 294 Uxtl v26.8h, v3.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 300 Uxtl2 v28.8h, v3.16b //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 356 Uxtl v26.8h, v18.8b //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 384 Uxtl v20.8h, v3.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 444 Uxtl v26.8h, v3.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)) [all...] |
ihevc_sao_edge_offset_class1.s | 155 Uxtl v26.8h, v18.8b //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 158 Uxtl2 v28.8h, v18.16b //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 177 Uxtl v20.8h, v3.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 186 Uxtl2 v1.8h, v3.16b //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 234 Uxtl v26.8h, v3.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 240 Uxtl2 v28.8h, v3.16b //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 290 Uxtl v26.8h, v18.8b //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 307 Uxtl v20.8h, v3.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 345 Uxtl v26.8h, v3.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
|
ihevc_sao_edge_offset_class2.s | 329 Uxtl v20.8h, v5.8b //I pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 334 Uxtl2 v22.8h, v5.16b //I pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 418 Uxtl v26.8h, v5.8b //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 427 Uxtl v20.8h, v16.8b //III pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 437 Uxtl2 v28.8h, v5.16b //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 445 Uxtl2 v18.8h, v16.16b //III pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 507 Uxtl v20.8h, v5.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 512 Uxtl2 v5.8h, v5.16b //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) [all...] |
ihevc_sao_edge_offset_class3.s | 338 Uxtl v20.8h, v5.8b //I pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 343 Uxtl2 v22.8h, v5.16b //I pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 435 Uxtl v28.8h, v5.8b //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 454 Uxtl2 v26.8h, v5.16b //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 461 Uxtl v20.8h, v16.8b //III pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 468 Uxtl2 v22.8h, v16.16b //III pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 527 Uxtl v20.8h, v5.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 530 Uxtl2 v22.8h, v5.16b //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) [all...] |
ihevc_sao_edge_offset_class0_chroma.s | 241 Uxtl v18.8h, v19.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 250 Uxtl2 v19.8h, v19.16b //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 281 Uxtl v28.8h, v30.8b //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 295 Uxtl2 v30.8h, v30.16b //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 424 Uxtl v18.8h, v19.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 426 Uxtl v24.8h, v30.8b //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
|
ihevc_sao_edge_offset_class0.s | 199 Uxtl v18.8h, v17.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 211 Uxtl v0.8h, v26.8b //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) 226 Uxtl2 v21.8h, v17.16b //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 240 Uxtl2 v28.8h, v26.16b //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) 319 Uxtl v28.8h, v17.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
|
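The arm64 ports express the same widening with uxtl/uxtl2: uxtl vD.8h, vS.8b widens the low 8 bytes (vmovl_u8) and uxtl2 vD.8h, vS.16b widens the high 8 bytes (vmovl_high_u8 in the AArch64 ACLE). A hedged intrinsics sketch, assuming an AArch64 compiler:

```c
/* Illustrative AArch64 sketch -- not libhevc code.  vmovl_u8 maps to uxtl
 * (an alias of ushll #0) and vmovl_high_u8 to uxtl2, matching the
 * Uxtl/Uxtl2 pairs listed above. */
#include <arm_neon.h>

static inline void widen_row_a64(uint8x16_t pu1_cur_row, int16x8_t tmp[2])
{
    tmp[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)));  /* uxtl  vD.8h, vS.8b  */
    tmp[1] = vreinterpretq_s16_u16(vmovl_high_u8(pu1_cur_row));          /* uxtl2 vD.8h, vS.16b */
}
```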
/external/skia/src/opts/ |
SkColor_opts_neon.h | 26 return vshrn_n_u16(vmovl_u8(color) * scale, 8); 82 src_wide = vreinterpretq_s16_u16(vmovl_u8(src)); 83 dst_wide = vreinterpretq_s16_u16(vmovl_u8(dst)); 99 vsrc_wide = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(vdup_n_u32(src)))); 100 vdst_wide = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(vdup_n_u32(dst))));
|
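SkColor_opts_neon.h (line 26 above) uses a widen-multiply-narrow idiom to scale packed 8-bit channels by a 16-bit factor. A minimal sketch of that pattern, with vmulq_n_u16 standing in for the original's vector operator*:

```c
/* Illustrative sketch of the widen-multiply-narrow idiom from line 26
 * above; not Skia's code. */
#include <arm_neon.h>
#include <stdint.h>

static inline uint8x8_t scale_8x8(uint8x8_t color, uint16_t scale)
{
    uint16x8_t wide = vmulq_n_u16(vmovl_u8(color), scale); /* u8 -> u16, times scale */
    return vshrn_n_u16(wide, 8);                           /* >> 8, narrow back to u8 */
}
```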
SkBitmapProcState_arm_neon.cpp | 121 int16x8_t p01_16 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pixels))); 122 int16x8_t p23_16 = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pixels))); 161 int16x8_t p01_16 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pixels))); 162 int16x8_t p23_16 = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pixels))); 218 int16x8_t src16_01 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(src8))); 219 int16x8_t src16_23 = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(src8))); 286 int16x8_t src16_01 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(src8))); 287 int16x8_t src16_23 = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(src8))); 418 p01_16 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pixels))); \ 419 p23_16 = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pixels))); [all...] |
Sk4px_NEON.h | 34 return Sk16h(vmovl_u8(vget_low_u8 (this->fVec)), 35 vmovl_u8(vget_high_u8(this->fVec)));
|
SkBlitRow_opts_arm_neon.cpp | 155 vres_r = vmovl_u8(vsrc.val[NEON_R]) - vdst_r; 156 vres_g = vmovl_u8(vsrc.val[NEON_G]) - vdst_g; 157 vres_b = vmovl_u8(vsrc.val[NEON_B]) - vdst_b; 534 wide_sr = vshlq_n_u16(vmovl_u8(vdup_n_u8(sr)), 2); // widen and src_red shift 538 wide_sg = vshlq_n_u16(vmovl_u8(vdup_n_u8(sg)), 3); // widen and src_grn shift 542 wide_sb = vshlq_n_u16(vmovl_u8(vdup_n_u8(sb)), 2); // widen and src blu shift [all...] |
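SkBlitRow_opts_arm_neon.cpp also widens a splatted source channel before pre-shifting it for the 565 blend (the wide_sr/wide_sg/wide_sb lines above). A minimal sketch of that step for one channel, illustrative only:

```c
/* Illustrative sketch of the widen-and-preshift step from the 565 blend
 * code above (wide_sr, shift 2 for the red channel); not Skia's code. */
#include <arm_neon.h>
#include <stdint.h>

static inline uint16x8_t widen_and_shift(uint8_t sr)
{
    return vshlq_n_u16(vmovl_u8(vdup_n_u8(sr)), 2);  /* splat, widen u8 -> u16, << 2 */
}
```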
/external/libvpx/libvpx/vpx_dsp/arm/ |
vpx_convolve8_avg_neon.c | 105 q8u16 = vmovl_u8(d0x2u8.val[0]); 106 q9u16 = vmovl_u8(d0x2u8.val[1]); 107 q10u16 = vmovl_u8(d1x2u8.val[0]); 108 q11u16 = vmovl_u8(d1x2u8.val[1]); 152 q12u16 = vmovl_u8(d28u8); 153 q13u16 = vmovl_u8(d29u8); 280 q8u16 = vmovl_u8(vreinterpret_u8_u32(d16u32)); 281 q9u16 = vmovl_u8(vreinterpret_u8_u32(d18u32)); 282 q10u16 = vmovl_u8(vreinterpret_u8_u32(d20u32)); 283 q11u16 = vmovl_u8(vreinterpret_u8_u32(d22u32)) [all...] |
vpx_convolve8_neon.c | 108 q8u16 = vmovl_u8(d0x2u8.val[0]); 109 q9u16 = vmovl_u8(d0x2u8.val[1]); 110 q10u16 = vmovl_u8(d1x2u8.val[0]); 111 q11u16 = vmovl_u8(d1x2u8.val[1]); 154 q12u16 = vmovl_u8(d28u8); 155 q13u16 = vmovl_u8(d29u8); 264 q8u16 = vmovl_u8(vreinterpret_u8_u32(d16u32)); 265 q9u16 = vmovl_u8(vreinterpret_u8_u32(d18u32)); 266 q10u16 = vmovl_u8(vreinterpret_u8_u32(d20u32)); 267 q11u16 = vmovl_u8(vreinterpret_u8_u32(d22u32)) [all...] |
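The vpx_convolve8 kernels widen pixels with vmovl_u8 before the filter multiply-accumulates, since 8-bit samples times signed taps need at least 16-bit intermediates. A hedged two-tap sketch of that pattern (not libvpx's full 8-tap kernel):

```c
/* Illustrative two-tap sketch -- not libvpx's 8-tap kernel.  Shows why the
 * convolve code widens with vmovl_u8 before multiplying by filter taps. */
#include <arm_neon.h>
#include <stdint.h>

static inline int32x4_t two_taps(uint8x8_t row0, uint8x8_t row1,
                                 int16_t k0, int16_t k1)
{
    int16x8_t p0 = vreinterpretq_s16_u16(vmovl_u8(row0)); /* widen to s16 */
    int16x8_t p1 = vreinterpretq_s16_u16(vmovl_u8(row1));
    int32x4_t acc = vmull_n_s16(vget_low_s16(p0), k0);    /* s16 * s16 -> s32 */
    return vmlal_n_s16(acc, vget_low_s16(p1), k1);        /* multiply-accumulate */
}
```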
/external/webp/src/dsp/ |
dec_neon.c | 394 return vreinterpretq_s16_u16(vmovl_u8(v)); [all...] |