Home | Sort by relevance | Sort by last modified time
    Searched full:sqrshrun (Results 1 - 25 of 50) sorted by null

1 2

  /external/libavc/common/armv8/
ih264_intra_pred_luma_16x16_av8.s 522 sqrshrun v20.8b, v26.8h, #5
523 sqrshrun v21.8b, v28.8h, #5
526 sqrshrun v22.8b, v26.8h, #5
528 sqrshrun v23.8b, v28.8h, #5
531 sqrshrun v20.8b, v26.8h, #5
533 sqrshrun v21.8b, v28.8h, #5
536 sqrshrun v22.8b, v26.8h, #5
538 sqrshrun v23.8b, v28.8h, #5
541 sqrshrun v20.8b, v26.8h, #5
543 sqrshrun v21.8b, v28.8h, #
    [all...]
ih264_inter_pred_chroma_av8.s 160 sqrshrun v26.8b, v20.8h, #6
168 sqrshrun v27.8b, v22.8h, #6
178 sqrshrun v18.8b, v24.8h, #6
182 sqrshrun v19.8b, v16.8h, #6
188 sqrshrun v26.8b, v20.8h, #6
197 sqrshrun v27.8b, v24.8h, #6
205 sqrshrun v26.8b, v20.8h, #6
208 sqrshrun v27.8b, v22.8h, #6
223 sqrshrun v18.8b, v24.8h, #6
228 sqrshrun v19.8b, v16.8h, #
    [all...]
ih264_inter_pred_luma_horz_qpel_vert_qpel_av8.s 163 sqrshrun v26.8b, v24.8h, #5
175 sqrshrun v28.8b, v28.8h, #5
182 sqrshrun v27.8b, v24.8h, #5
197 sqrshrun v29.8b, v24.8h, #5
207 sqrshrun v26.8b, v16.8h, #5
215 sqrshrun v27.8b, v24.8h, #5
232 sqrshrun v28.8b, v28.8h, #5
246 sqrshrun v29.8b, v24.8h, #5
254 sqrshrun v26.8b, v16.8h, #5
271 sqrshrun v27.8b, v24.8h, #
    [all...]
ih264_inter_pred_filters_luma_vert_av8.s 152 sqrshrun v30.8b, v14.8h, #5 // dst[0_0] = CLIP_U8((temp +16) >> 5)
157 sqrshrun v31.8b, v20.8h, #5 // dst[0_8] = CLIP_U8((temp4 +16) >> 5)
165 sqrshrun v30.8b, v16.8h, #5
173 sqrshrun v31.8b, v14.8h, #5
180 sqrshrun v30.8b, v18.8h, #5
187 sqrshrun v31.8b, v16.8h, #5
193 sqrshrun v30.8b, v14.8h, #5
196 sqrshrun v31.8b, v18.8h, #5
212 sqrshrun v30.8b, v14.8h, #5 // dst[0_0] = CLIP_U8((temp +16) >> 5)
215 sqrshrun v31.8b, v20.8h, #5 // dst[0_8] = CLIP_U8((temp4 +16) >> 5
    [all...]
ih264_inter_pred_luma_vert_qpel_av8.s 159 sqrshrun v30.8b, v14.8h, #5 // dst[0_0] = CLIP_U8((temp +16) >> 5)
164 sqrshrun v31.8b, v20.8h, #5 // dst[0_8] = CLIP_U8((temp4 +16) >> 5)
174 sqrshrun v30.8b, v16.8h, #5
181 sqrshrun v31.8b, v14.8h, #5
191 sqrshrun v30.8b, v18.8h, #5
198 sqrshrun v31.8b, v16.8h, #5
207 sqrshrun v30.8b, v14.8h, #5
210 sqrshrun v31.8b, v18.8h, #5
229 sqrshrun v30.8b, v14.8h, #5 // dst[0_0] = CLIP_U8((temp +16) >> 5)
231 sqrshrun v31.8b, v20.8h, #5 // dst[0_8] = CLIP_U8((temp4 +16) >> 5
    [all...]
ih264_inter_pred_luma_horz_qpel_vert_hpel_av8.s 206 sqrshrun v18.4h, v26.4s, #10
207 sqrshrun v19.4h, v22.4s, #10
231 sqrshrun v19.4h, v26.4s, #10
232 sqrshrun v18.4h, v22.4s, #10
244 sqrshrun v20.8b, v20.8h, #5
245 sqrshrun v21.8b, v22.8h, #5
293 sqrshrun v18.4h, v26.4s, #10
294 sqrshrun v19.4h, v22.4s, #10
319 sqrshrun v19.4h, v26.4s, #10
320 sqrshrun v18.4h, v22.4s, #1
    [all...]
ih264_inter_pred_luma_horz_hpel_vert_hpel_av8.s 143 sqrshrun v18.4h, v26.4s, #10
144 sqrshrun v19.4h, v23.4s, #10
168 sqrshrun v19.4h, v26.4s, #10
169 sqrshrun v25.4h, v22.4s, #10
224 sqrshrun v18.4h, v26.4s, #10
225 sqrshrun v19.4h, v23.4s, #10
250 sqrshrun v19.4h, v26.4s, #10
251 sqrshrun v25.4h, v22.4s, #10
303 sqrshrun v18.4h, v26.4s, #10
304 sqrshrun v19.4h, v23.4s, #1
    [all...]
ih264_inter_pred_luma_horz_hpel_vert_qpel_av8.s 259 sqrshrun v26.8b, v26.8h, #5
268 sqrshrun v18.4h, v18.4s, #10
270 sqrshrun v19.4h, v6.4s, #10
303 sqrshrun v28.8b, v8.8h, #5
307 sqrshrun v18.4h, v18.4s, #10
309 sqrshrun v19.4h, v6.4s, #10
344 sqrshrun v26.8b, v10.8h, #5
349 sqrshrun v18.4h, v18.4s, #10
351 sqrshrun v19.4h, v6.4s, #10
374 sqrshrun v18.4h, v18.4s, #1
    [all...]
ih264_intra_pred_luma_8x8_av8.s 342 sqrshrun v31.8b, v12.8h, #4
486 sqrshrun v4.8b, v24.8h, #2
487 sqrshrun v5.8b, v26.8h, #2
586 sqrshrun v4.8b, v24.8h, #2
587 sqrshrun v5.8b, v26.8h, #2
686 sqrshrun v4.8b, v20.8h, #1
687 sqrshrun v5.8b, v22.8h, #1
689 sqrshrun v6.8b, v24.8h, #2
690 sqrshrun v7.8b, v26.8h, #2
811 sqrshrun v4.8b, v20.8h, #
    [all...]
ih264_intra_pred_chroma_av8.s 511 sqrshrun v28.8b, v24.8h, #5
513 sqrshrun v29.8b, v0.8h, #5
516 sqrshrun v28.8b, v2.8h, #5
517 sqrshrun v29.8b, v26.8h, #5
524 sqrshrun v28.8b, v24.8h, #5
525 sqrshrun v29.8b, v0.8h, #5
532 sqrshrun v28.8b, v2.8h, #5
533 sqrshrun v29.8b, v26.8h, #5
540 sqrshrun v28.8b, v24.8h, #5
541 sqrshrun v29.8b, v0.8h, #
    [all...]
ih264_inter_pred_filters_luma_horz_av8.s 171 sqrshrun v20.8b, v8.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,row0)
173 sqrshrun v21.8b, v10.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2,row0)
176 sqrshrun v23.8b, v14.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,row1)
178 sqrshrun v24.8b, v16.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2,row1)
224 sqrshrun v20.8b, v8.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,row2)
226 sqrshrun v21.8b, v10.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2,row2)
229 sqrshrun v23.8b, v14.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,row3)
231 sqrshrun v24.8b, v16.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2,row3)
276 sqrshrun v20.8b, v8.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,row4)
278 sqrshrun v21.8b, v10.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2,row4
    [all...]
ih264_inter_pred_luma_horz_qpel_av8.s 180 sqrshrun v20.8b, v8.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,row0)
182 sqrshrun v21.8b, v10.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2,row0)
187 sqrshrun v18.8b, v14.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,row1)
190 sqrshrun v19.8b, v16.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2,row1)
241 sqrshrun v20.8b, v8.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,row2)
243 sqrshrun v21.8b, v10.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2,row2)
248 sqrshrun v18.8b, v14.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,row3)
251 sqrshrun v19.8b, v16.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2,row3)
299 sqrshrun v20.8b, v8.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,row4)
301 sqrshrun v21.8b, v10.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2,row4
    [all...]
ih264_intra_pred_luma_4x4_av8.s 427 sqrshrun v3.8b, v24.8h, #2
511 sqrshrun v3.8b, v24.8h, #2
594 sqrshrun v4.8b, v20.8h, #1
595 sqrshrun v3.8b, v24.8h, #2
678 sqrshrun v4.8b, v20.8h, #1
679 sqrshrun v5.8b, v24.8h, #2
765 sqrshrun v4.8b, v20.8h, #1
766 sqrshrun v5.8b, v24.8h, #2
849 sqrshrun v4.8b, v20.8h, #1
850 sqrshrun v5.8b, v24.8h, #
    [all...]
  /external/libhevc/common/arm64/
ihevc_inter_pred_chroma_vert.s 158 sqrshrun v6.8b, v6.8h,#6 //shifts right
161 sqrshrun v4.8b, v4.8h,#6 //shifts right
202 sqrshrun v4.8b, v4.8h,#6 //vrshrq_n_s16(vreinterpretq_s16_u16(mul_res1),6)
254 sqrshrun v30.8b, v30.8h,#6
264 sqrshrun v28.8b, v28.8h,#6
279 sqrshrun v26.8b, v26.8h,#6
300 sqrshrun v24.8b, v24.8h,#6
313 sqrshrun v30.8b, v30.8h,#6
335 sqrshrun v28.8b, v28.8h,#6
356 sqrshrun v26.8b, v26.8h,#
    [all...]
ihevc_inter_pred_chroma_vert_w16inp.s 160 sqrshrun v0.8b, v0.8h,#6 //rounding shift
161 sqrshrun v30.8b, v30.8h,#6 //rounding shift
220 sqrshrun v30.8b, v30.8h,#6 //rounding shift
236 sqrshrun v28.8b, v28.8h,#6 //rounding shift
249 sqrshrun v26.8b, v26.8h,#6 //rounding shift
263 sqrshrun v24.8b, v24.8h,#6 //rounding shift
276 sqrshrun v30.8b, v30.8h,#6 //rounding shift
290 sqrshrun v28.8b, v28.8h,#6 //rounding shift
306 sqrshrun v26.8b, v26.8h,#6 //rounding shift
316 sqrshrun v24.8b, v24.8h,#6 //rounding shif
    [all...]
ihevc_inter_pred_chroma_horz.s 253 sqrshrun v30.8b, v30.8h,#6
259 sqrshrun v31.8b, v28.8h,#6
308 sqrshrun v22.8b, v22.8h,#6
309 sqrshrun v23.8b, v20.8h,#6
344 sqrshrun v30.8b, v30.8h,#6
345 sqrshrun v31.8b, v28.8h,#6
406 sqrshrun v22.8b, v22.8h,#6
407 sqrshrun v23.8b, v20.8h,#6
421 sqrshrun v30.8b, v30.8h,#6
422 sqrshrun v31.8b, v28.8h,#
    [all...]
ihevc_inter_pred_filters_luma_vert.s 209 sqrshrun v19.8b, v19.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)//
224 sqrshrun v20.8b, v20.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)//
244 sqrshrun v21.8b, v21.8h,#6
278 sqrshrun v30.8b, v30.8h,#6
304 sqrshrun v19.8b, v19.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)//
333 sqrshrun v20.8b, v20.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)//
356 sqrshrun v21.8b, v21.8h,#6
375 sqrshrun v30.8b, v30.8h,#6
388 sqrshrun v19.8b, v19.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)//
401 sqrshrun v20.8b, v20.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)/
    [all...]
ihevc_inter_pred_filters_luma_vert_w16inp.s 200 sqrshrun v19.8b, v19.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)//
218 sqrshrun v20.8b, v20.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)//
243 sqrshrun v21.8b, v21.8h,#6
260 sqrshrun v30.8b, v30.8h,#6
286 sqrshrun v19.8b, v19.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)//
309 sqrshrun v20.8b, v20.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)//
327 sqrshrun v21.8b, v21.8h,#6
341 sqrshrun v30.8b, v30.8h,#6
354 sqrshrun v19.8b, v19.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)//
366 sqrshrun v20.8b, v20.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)/
    [all...]
ihevc_inter_pred_filters_luma_horz.s 253 sqrshrun v20.8b, v8.8h,#6 //right shift and saturating narrow result 1
262 sqrshrun v8.8b, v10.8h,#6 //right shift and saturating narrow result 2
372 sqrshrun v8.8b, v8.8h,#6 //right shift and saturating narrow result 1
391 sqrshrun v9.8b, v20.8h,#6
412 sqrshrun v10.8b, v10.8h,#6 //right shift and saturating narrow result 2
442 sqrshrun v11.8b, v22.8h,#6
467 sqrshrun v11.8b, v22.8h,#6
581 sqrshrun v8.8b, v8.8h,#6 //narrow right shift and saturating the result
  /external/libavc/encoder/armv8/
ih264e_half_pel_av8.s 172 sqrshrun v20.8b, v8.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,row0)
173 sqrshrun v21.8b, v10.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2,row0)
174 sqrshrun v22.8b, v12.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column3,row0)
175 sqrshrun v23.8b, v14.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,row1)
176 sqrshrun v24.8b, v16.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2,row1)
177 sqrshrun v25.8b, v18.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column3,row1)
328 sqrshrun v2.8b, v20.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,row0)
330 sqrshrun v3.8b, v22.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2,row0)
341 sqrshrun v4.8b, v24.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column3,row0)
397 sqrshrun v26.8b, v20.8h, #2 //// half,half gird set1,
    [all...]
  /frameworks/rs/cpu_ref/
rsCpuIntrinsics_advsimd_ColorMatrix.S 431 sqrshrun v24.4h, v24.4s, #1
432 sqrshrun v25.4h, v25.4s, #1
433 sqrshrun v26.4h, v26.4s, #1
434 sqrshrun v27.4h, v27.4s, #1
467 sqrshrun v24.4h, v24.4s, #1
468 sqrshrun v25.4h, v25.4s, #1
469 sqrshrun v26.4h, v26.4s, #1
505 sqrshrun v24.4h, v24.4s, #1
506 sqrshrun v25.4h, v25.4s, #1
542 sqrshrun v24.4h, v24.4s, #
    [all...]
  /external/llvm/test/MC/AArch64/
neon-scalar-shift-imm.s 180 sqrshrun b17, h10, #6
181 sqrshrun h10, s13, #15
182 sqrshrun s22, d16, #31
184 // CHECK: sqrshrun b17, h10, #6 // encoding: [0x51,0x8d,0x0a,0x7f]
185 // CHECK: sqrshrun h10, s13, #15 // encoding: [0xaa,0x8d,0x11,0x7f]
186 // CHECK: sqrshrun s22, d16, #31 // encoding: [0x16,0x8e,0x21,0x7f]
arm64-advsimd.s     [all...]
  /external/llvm/test/CodeGen/AArch64/
arm64-neon-simd-shift.ll 402 %vqrshrun = tail call <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16> %b, i32 3)
413 %vqrshrun = tail call <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32> %b, i32 9)
425 %vqrshrun = tail call <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64> %b, i32 19)
578 declare <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16>, i32)
580 declare <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32>, i32)
582 declare <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64>, i32)
arm64-vshift.ll     [all...]

Completed in 458 milliseconds

1 2