
Lines Matching refs:u8

217     @vext.u8    d2,d0,d1,#2     @vector extract of src[0_2]
218     @vext.u8    d3,d0,d1,#3     @vector extract of src[0_3]
219     @vext.u8    d4,d0,d1,#4     @vector extract of src[0_4]
221     @vext.u8    d5,d0,d1,#5     @vector extract of src[0_5]
222     @vext.u8    d6,d0,d1,#6     @vector extract of src[0_6]
223     @vext.u8    d7,d0,d1,#7     @vector extract of src[0_7]
224     @vext.u8    d1,d0,d1,#1     @vector extract of src[0_1]
241     @vext.u8    d14,d12,d13,#2  @vector extract of src[0_2]
242     @vext.u8    d15,d12,d13,#3  @vector extract of src[0_3]
243     @vext.u8    d16,d12,d13,#4  @vector extract of src[0_4]
244     @vext.u8    d17,d12,d13,#5  @vector extract of src[0_5]
245     @vext.u8    d18,d12,d13,#6  @vector extract of src[0_6]
246     @vext.u8    d19,d12,d13,#7  @vector extract of src[0_7]
247     @vext.u8    d13,d12,d13,#1  @vector extract of src[0_1]
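
The vext.u8 lines above (the live one at 217 and the commented-out groups) build byte-shifted views of a 16-byte source window: lane i of the #k extract holds src[i + k], which is exactly the sliding window an 8-tap horizontal filter needs. Below is a minimal C sketch of the same idea using the NEON intrinsics the later comments reference; the function and variable names are illustrative, not from this file.

    #include <arm_neon.h>

    /* Build the eight shifted source vectors that the vext.u8 lines
       produce; row_lo/row_hi play the roles of d0/d1 (or d12/d13). */
    static inline void make_shifted_src(const uint8_t *src, uint8x8_t out[8])
    {
        uint8x8_t row_lo = vld1_u8(src);      /* src[0..7],  like d0 */
        uint8x8_t row_hi = vld1_u8(src + 8);  /* src[8..15], like d1 */

        out[0] = row_lo;                      /* src[0_0] */
        out[1] = vext_u8(row_lo, row_hi, 1);  /* src[0_1] */
        out[2] = vext_u8(row_lo, row_hi, 2);  /* src[0_2] */
        out[3] = vext_u8(row_lo, row_hi, 3);  /* src[0_3] */
        out[4] = vext_u8(row_lo, row_hi, 4);  /* src[0_4] */
        out[5] = vext_u8(row_lo, row_hi, 5);  /* src[0_5] */
        out[6] = vext_u8(row_lo, row_hi, 6);  /* src[0_6] */
        out[7] = vext_u8(row_lo, row_hi, 7);  /* src[0_7] */
    }

Note that the assembly performs the #1 extract last (lines 224 and 247): d1 and d13 double as the upper halves of their windows, so they can only be overwritten once the wider shifts are done.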
258     vmull.u8    q4,d1,d25       @arithmetic operations for the ii iteration at the same time (src[0_1] * coeffabs_1)
259     vmlsl.u8    q4,d0,d24       @src[0_0] * coeffabs_0
260     vmlsl.u8    q4,d2,d26       @src[0_2] * coeffabs_2
261     vmlal.u8    q4,d3,d27       @src[0_3] * coeffabs_3
262     vmlal.u8    q4,d4,d28       @src[0_4] * coeffabs_4
263     vmlsl.u8    q4,d5,d29       @src[0_5] * coeffabs_5
264     vmlal.u8    q4,d6,d30       @src[0_6] * coeffabs_6
265     vmlsl.u8    q4,d7,d31       @src[0_7] * coeffabs_7
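
This q4 chain is the filter kernel itself: vmull.u8 widens the first u8 x u8 product to 16 bits, and each following vmlal.u8/vmlsl.u8 adds or subtracts another widened product. d24..d31 hold the absolute values of the eight taps (coeffabs_0..coeffabs_7), and the mlal/mlsl choice re-applies their signs (-, +, -, +, +, -, +, -). A hedged intrinsics equivalent, reusing make_shifted_src from the sketch above (names assumed):

    /* One 8-tap accumulation mirroring the q4 chain: src_k[k] is the
       vector whose lane i holds src[i + k]; cabs[k] is |coeff[k]|
       splatted across a uint8x8_t, i.e. the d24+k register. */
    static inline uint16x8_t filter8_acc(const uint8x8_t src_k[8],
                                         const uint8x8_t cabs[8])
    {
        uint16x8_t acc = vmull_u8(src_k[1], cabs[1]);  /* +src[0_1]*c1 */
        acc = vmlsl_u8(acc, src_k[0], cabs[0]);        /* -src[0_0]*c0 */
        acc = vmlsl_u8(acc, src_k[2], cabs[2]);        /* -src[0_2]*c2 */
        acc = vmlal_u8(acc, src_k[3], cabs[3]);        /* +src[0_3]*c3 */
        acc = vmlal_u8(acc, src_k[4], cabs[4]);        /* +src[0_4]*c4 */
        acc = vmlsl_u8(acc, src_k[5], cabs[5]);        /* -src[0_5]*c5 */
        acc = vmlal_u8(acc, src_k[6], cabs[6]);        /* +src[0_6]*c6 */
        acc = vmlsl_u8(acc, src_k[7], cabs[7]);        /* -src[0_7]*c7 */
        return acc;
    }

The unsigned subtractions in vmlsl_u8 wrap modulo 2^16, which is exactly two's-complement behaviour, so reinterpreting the accumulator as signed 16-bit lanes yields the correct filter output even though the operations are unsigned.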
302     @vext.u8    d2,d0,d1,#2     @vector extract of src[0_2]
303     @vext.u8    d3,d0,d1,#3     @vector extract of src[0_3]
304     @vext.u8    d4,d0,d1,#4     @vector extract of src[0_4]
309     @vext.u8    d5,d0,d1,#5     @vector extract of src[0_5]
310     @vext.u8    d6,d0,d1,#6     @vector extract of src[0_6]
311     @vext.u8    d7,d0,d1,#7     @vector extract of src[0_7]
312     @vext.u8    d1,d0,d1,#1     @vector extract of src[0_1]
314     vmull.u8    q4,d1,d25       @arithmetic operations for the ii iteration at the same time (src[0_1] * coeffabs_1)
316     vmlsl.u8    q4,d0,d24       @src[0_0] * coeffabs_0
318     vmlsl.u8    q4,d2,d26       @src[0_2] * coeffabs_2
320     vmlal.u8    q4,d3,d27       @src[0_3] * coeffabs_3
322     vmlal.u8    q4,d4,d28       @src[0_4] * coeffabs_4
324     vmlsl.u8    q4,d5,d29       @src[0_5] * coeffabs_5
326     vmlal.u8    q4,d6,d30       @src[0_6] * coeffabs_6
327     vmlsl.u8    q4,d7,d31       @src[0_7] * coeffabs_7; store the i iteration result, which is in the upper part of the register
369     @vext.u8    d2,d0,d1,#2     @vector extract of src[0_2]
370     @vext.u8    d3,d0,d1,#3     @vector extract of src[0_3]
371     @vext.u8    d4,d0,d1,#4     @vector extract of src[0_4]
372     @vext.u8    d5,d0,d1,#5     @vector extract of src[0_5]
373     @vext.u8    d6,d0,d1,#6     @vector extract of src[0_6]
374     @vext.u8    d7,d0,d1,#7     @vector extract of src[0_7]
375     @vext.u8    d1,d0,d1,#1     @vector extract of src[0_1]
376     @vext.u8    d14,d12,d13,#2  @vector extract of src[0_2]
378     @vext.u8    d15,d12,d13,#3  @vector extract of src[0_3]
379     @vext.u8    d16,d12,d13,#4  @vector extract of src[0_4]
380     @vext.u8    d17,d12,d13,#5  @vector extract of src[0_5]
381     @vext.u8    d18,d12,d13,#6  @vector extract of src[0_6]
382     @vext.u8    d19,d12,d13,#7  @vector extract of src[0_7]
383     @vext.u8    d13,d12,d13,#1  @vector extract of src[0_1]
385     vmull.u8    q4,d1,d25       @mul_res = vmull_u8(src[0_1], coeffabs_1)
387     vmlal.u8    q4,d3,d27       @mul_res = vmlal_u8(src[0_3], coeffabs_3)
389     vmlsl.u8    q4,d0,d24       @mul_res = vmlsl_u8(src[0_0], coeffabs_0)
391     vmlsl.u8    q4,d2,d26       @mul_res = vmlsl_u8(src[0_2], coeffabs_2)
393     vmlal.u8    q4,d4,d28       @mul_res = vmlal_u8(src[0_4], coeffabs_4)
395     vmlsl.u8    q4,d5,d29       @mul_res = vmlsl_u8(src[0_5], coeffabs_5)
397     vmlal.u8    q4,d6,d30       @mul_res = vmlal_u8(src[0_6], coeffabs_6)
399     vmlsl.u8    q4,d7,d31       @mul_res = vmlsl_u8(src[0_7], coeffabs_7)
402     vmull.u8    q5,d15,d27      @mul_res = vmull_u8(src[0_3], coeffabs_3)
404     vmlsl.u8    q5,d14,d26      @mul_res = vmlsl_u8(src[0_2], coeffabs_2)
406     vmlal.u8    q5,d16,d28      @mul_res = vmlal_u8(src[0_4], coeffabs_4)
408     vmlsl.u8    q5,d17,d29      @mul_res = vmlsl_u8(src[0_5], coeffabs_5)
410     vmlal.u8    q5,d18,d30      @mul_res = vmlal_u8(src[0_6], coeffabs_6)
411     vmlsl.u8    q5,d19,d31      @mul_res = vmlsl_u8(src[0_7], coeffabs_7)
413     vmlsl.u8    q5,d12,d24      @mul_res = vmlsl_u8(src[0_0], coeffabs_0)
414     vmlal.u8    q5,d13,d25      @mul_res = vmlal_u8(src[0_1], coeffabs_1)
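
From line 385 onward, two such chains run in flight: q4 accumulates one source block (d0..d7) while q5 accumulates a second (d12..d19, plausibly the next row, per the i/ii iteration comments), and the interleaved dependency chains hide the multiply-accumulate latency. The q5 chain also shows that tap order within a chain is free; it starts from coeffabs_3 rather than coeffabs_1. A sketch of the paired version, again with assumed names:

    /* Two independent accumulators mirroring the interleaved q4/q5
       chains; r0/r1 are the shifted vectors for the two source blocks. */
    static inline void filter8_two_blocks(const uint8x8_t r0[8],
                                          const uint8x8_t r1[8],
                                          const uint8x8_t cabs[8],
                                          uint16x8_t *acc0, uint16x8_t *acc1)
    {
        uint16x8_t a0 = vmull_u8(r0[1], cabs[1]);  /* q4 chain, starts at tap 1 */
        a0 = vmlal_u8(a0, r0[3], cabs[3]);
        a0 = vmlsl_u8(a0, r0[0], cabs[0]);
        a0 = vmlsl_u8(a0, r0[2], cabs[2]);
        a0 = vmlal_u8(a0, r0[4], cabs[4]);
        a0 = vmlsl_u8(a0, r0[5], cabs[5]);
        a0 = vmlal_u8(a0, r0[6], cabs[6]);
        a0 = vmlsl_u8(a0, r0[7], cabs[7]);

        uint16x8_t a1 = vmull_u8(r1[3], cabs[3]);  /* q5 chain, starts at tap 3 */
        a1 = vmlsl_u8(a1, r1[2], cabs[2]);
        a1 = vmlal_u8(a1, r1[4], cabs[4]);
        a1 = vmlsl_u8(a1, r1[5], cabs[5]);
        a1 = vmlal_u8(a1, r1[6], cabs[6]);
        a1 = vmlsl_u8(a1, r1[7], cabs[7]);
        a1 = vmlsl_u8(a1, r1[0], cabs[0]);
        a1 = vmlal_u8(a1, r1[1], cabs[1]);

        *acc0 = a0;  /* q4 */
        *acc1 = a1;  /* q5 */
    }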
466     vmull.u8    q4,d2,d25       @mul_res = vmull_u8(src[0_1], coeffabs_1)
468     vmlal.u8    q4,d6,d27       @mul_res = vmlal_u8(src[0_3], coeffabs_3)
470     vmlsl.u8    q4,d0,d24       @mul_res = vmlsl_u8(src[0_0], coeffabs_0)
472     vmlsl.u8    q4,d4,d26       @mul_res = vmlsl_u8(src[0_2], coeffabs_2)
473     vmlal.u8    q4,d12,d28      @mul_res = vmlal_u8(src[0_4], coeffabs_4)
474     vmlsl.u8    q4,d14,d29      @mul_res = vmlsl_u8(src[0_5], coeffabs_5)
475     vmlal.u8    q4,d16,d30      @mul_res = vmlal_u8(src[0_6], coeffabs_6)
476     vmlsl.u8    q4,d18,d31      @mul_res = vmlsl_u8(src[0_7], coeffabs_7)
483     vmull.u8    q10,d3,d25      @second accumulator (odd d-registers): src[0_1] * coeffabs_1
486     vmlsl.u8    q10,d1,d24      @src[0_0] * coeffabs_0
489     vmlal.u8    q10,d7,d27      @src[0_3] * coeffabs_3
492     vmlsl.u8    q10,d5,d26      @src[0_2] * coeffabs_2
495     vmlal.u8    q10,d13,d28     @src[0_4] * coeffabs_4
498     vmlal.u8    q10,d17,d30     @src[0_6] * coeffabs_6
501     vmlsl.u8    q10,d15,d29     @src[0_5] * coeffabs_5
504     vmlsl.u8    q10,d19,d31     @src[0_7] * coeffabs_7
507     vmull.u8    q5,d2,d25       @mul_res = vmull_u8(src[0_1], coeffabs_1)
510     vmlal.u8    q5,d6,d27       @mul_res = vmlal_u8(src[0_3], coeffabs_3)
513     vmlsl.u8    q5,d0,d24       @mul_res = vmlsl_u8(src[0_0], coeffabs_0)
517     vmlsl.u8    q5,d4,d26       @mul_res = vmlsl_u8(src[0_2], coeffabs_2)
520     vmlal.u8    q5,d12,d28      @mul_res = vmlal_u8(src[0_4], coeffabs_4)
523     vmlsl.u8    q5,d14,d29      @mul_res = vmlsl_u8(src[0_5], coeffabs_5)
526     vmlal.u8    q5,d16,d30      @mul_res = vmlal_u8(src[0_6], coeffabs_6)
529     vmlsl.u8    q5,d18,d31      @mul_res = vmlsl_u8(src[0_7], coeffabs_7)
532     vmull.u8    q11,d3,d25      @second accumulator (odd d-registers): src[0_1] * coeffabs_1
535     vmlsl.u8    q11,d1,d24      @src[0_0] * coeffabs_0
538     vmlal.u8    q11,d7,d27      @src[0_3] * coeffabs_3
541     vmlsl.u8    q11,d5,d26      @src[0_2] * coeffabs_2
544     vmlal.u8    q11,d13,d28     @src[0_4] * coeffabs_4
547     vmlal.u8    q11,d17,d30     @src[0_6] * coeffabs_6
550     vmlsl.u8    q11,d15,d29     @src[0_5] * coeffabs_5
552     vmlsl.u8    q11,d19,d31     @src[0_7] * coeffabs_7
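
From line 466 on, the pairing changes: the even d-registers feed one accumulator (q4, then q5) and the odd ones a twin (q10, then q11), so each pass produces two adjacent 8-lane output groups of a wider block. How the even/odd sets are loaded falls outside the matched lines, so the following wrapper is only a guess at the shape, reusing filter8_acc from the earlier sketch:

    /* Hypothetical wrapper: one 16-lane-wide step as two 8-lane groups.
       even_k/odd_k are assumed to hold the shifted vectors kept in the
       even/odd d-registers; the tap signs are identical for both. */
    static inline void filter8_wide(const uint8x8_t even_k[8],
                                    const uint8x8_t odd_k[8],
                                    const uint8x8_t cabs[8],
                                    uint16x8_t *lo, uint16x8_t *hi)
    {
        *lo = filter8_acc(even_k, cabs);  /* q4 / q5   */
        *hi = filter8_acc(odd_k,  cabs);  /* q10 / q11 */
    }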
561     vmull.u8    q4,d2,d25       @mul_res = vmull_u8(src[0_1], coeffabs_1)
563     vmlal.u8    q4,d6,d27       @mul_res = vmlal_u8(src[0_3], coeffabs_3)
565     vmlsl.u8    q4,d0,d24       @mul_res = vmlsl_u8(src[0_0], coeffabs_0)
567     vmlsl.u8    q4,d4,d26       @mul_res = vmlsl_u8(src[0_2], coeffabs_2)
568     vmlal.u8    q4,d12,d28      @mul_res = vmlal_u8(src[0_4], coeffabs_4)
570     vmlsl.u8    q4,d14,d29      @mul_res = vmlsl_u8(src[0_5], coeffabs_5)
572     vmlal.u8    q4,d16,d30      @mul_res = vmlal_u8(src[0_6], coeffabs_6)
574     vmlsl.u8    q4,d18,d31      @mul_res = vmlsl_u8(src[0_7], coeffabs_7)
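
Taken together, the matched lines implement an 8-tap horizontal FIR filter over unsigned 8-bit pixels. For a checkable reference, here is a plain scalar version of one output sample under the same convention; the signed taps coeff[k] are an assumption, with magnitudes equal to the coeffabs values in d24..d31 and the sign pattern observed above.

    #include <stdint.h>

    /* Scalar reference: one output sample of the 8-tap filter.
       Assumed: coeff[k] = sign_k * coeffabs_k, sign = (-,+,-,+,+,-,+,-). */
    static int16_t filter8_scalar(const uint8_t *src, const int8_t coeff[8])
    {
        int32_t sum = 0;
        for (int k = 0; k < 8; k++)
            sum += (int32_t)coeff[k] * (int32_t)src[k];
        return (int16_t)sum;  /* matches the mod-2^16 wrap of the u16 NEON accumulator */
    }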