
Lines Matching refs:temp

30 SIMD_ALIGNED(uint8 temp[64 * 5]); \
31 memset(temp, 0, 64 * 4); /* for msan */ \
37 memcpy(temp, y_buf + n, r); \
38 memcpy(temp + 64, u_buf + (n >> UVSHIFT), SS(r, UVSHIFT)); \
39 memcpy(temp + 128, v_buf + (n >> UVSHIFT), SS(r, UVSHIFT)); \
40 memcpy(temp + 192, a_buf + n, r); \
41 ANY_SIMD(temp, temp + 64, temp + 128, temp + 192, temp + 256, \
43 memcpy(dst_ptr + (n >> DUVSHIFT) * BPP, temp + 256, \
62 SIMD_ALIGNED(uint8 temp[64 * 4]); \
63 memset(temp, 0, 64 * 3); /* for YUY2 and msan */ \
69 memcpy(temp, y_buf + n, r); \
70 memcpy(temp + 64, u_buf + (n >> UVSHIFT), SS(r, UVSHIFT)); \
71 memcpy(temp + 128, v_buf + (n >> UVSHIFT), SS(r, UVSHIFT)); \
72 ANY_SIMD(temp, temp + 64, temp + 128, temp + 192, MASK + 1); \
73 memcpy(dst_ptr + (n >> DUVSHIFT) * BPP, temp + 192, \
101 SIMD_ALIGNED(uint8 temp[64 * 4]); \
102 memset(temp, 0, 64 * 3); /* for YUY2 and msan */ \
108 memcpy(temp, y_buf + n, r); \
109 memcpy(temp + 64, u_buf + (n >> UVSHIFT), SS(r, UVSHIFT)); \
110 memcpy(temp + 128, v_buf + (n >> UVSHIFT), SS(r, UVSHIFT)); \
112 temp[64 + SS(r, UVSHIFT)] = temp[64 + SS(r, UVSHIFT) - 1]; \
113 temp[128 + SS(r, UVSHIFT)] = temp[128 + SS(r, UVSHIFT) - 1]; \
115 ANY_SIMD(temp, temp + 64, temp + 128, temp + 192, \
117 memcpy(dst_ptr + (n >> DUVSHIFT) * BPP, temp + 192, \
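
The SS() helper used by the chroma copies above does not itself match temp, so it is not listed; a minimal sketch, assuming it is the usual round-up subsample macro:

    #define SS(width, shift) (((width) + (1 << (shift)) - 1) >> (shift))

For an odd remainder such as r = 5 with UVSHIFT = 1, SS(5, 1) == 3: three U and three V bytes are staged, and lines 112-113 then replicate the last sample into the next slot so a kernel that reads chroma in pairs never sees the zeroed padding.
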
175 SIMD_ALIGNED(uint8 temp[64 * 3]); \
176 memset(temp, 0, 64 * 2); /* for msan */ \
182 memcpy(temp, y_buf + n * SBPP, r * SBPP); \
183 memcpy(temp + 64, uv_buf + (n >> UVSHIFT) * SBPP2, \
185 ANY_SIMD(temp, temp + 64, temp + 128, MASK + 1); \
186 memcpy(dst_ptr + n * BPP, temp + 128, r * BPP); \
253 SIMD_ALIGNED(uint8 temp[64 * 3]); \
254 memset(temp, 0, 64 * 2); /* for msan */ \
260 memcpy(temp, y_buf + n * SBPP, r * SBPP); \
261 memcpy(temp + 64, uv_buf + (n >> UVSHIFT) * SBPP2, \
263 ANY_SIMD(temp, temp + 64, temp + 128, yuvconstants, MASK + 1); \
264 memcpy(dst_ptr + n * BPP, temp + 128, r * BPP); \
300 SIMD_ALIGNED(uint8 temp[128 * 2]); \
301 memset(temp, 0, 128); /* for YUY2 and msan */ \
307 memcpy(temp, src_ptr + (n >> UVSHIFT) * SBPP, SS(r, UVSHIFT) * SBPP); \
308 ANY_SIMD(temp, temp + 128, MASK + 1); \
309 memcpy(dst_ptr + n * BPP, temp + 128, r * BPP); \
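
Every fragment in this listing uses the same remainder-handling idea; a minimal standalone sketch of one expanded 1-to-1 wrapper, with hypothetical names (Kernel_SIMD, Kernel_Any) and an assumed 16-pixel kernel:

    #include <stdint.h>
    #include <string.h>

    #define MASK 15   /* assumed: the SIMD kernel handles 16 pixels per call */
    #define SBPP 1    /* source bytes per pixel */
    #define BPP  1    /* destination bytes per pixel */

    /* assumed SIMD kernel that only handles multiples of MASK + 1 pixels */
    void Kernel_SIMD(const uint8_t* src, uint8_t* dst, int width);

    void Kernel_Any(const uint8_t* src_ptr, uint8_t* dst_ptr, int width) {
      uint8_t temp[128 * 2] __attribute__((aligned(32)));
      memset(temp, 0, 128);             /* zero the staging area for msan */
      int r = width & MASK;             /* leftover pixels */
      int n = width & ~MASK;            /* largest width the kernel can do */
      if (n > 0) {
        Kernel_SIMD(src_ptr, dst_ptr, n);              /* bulk of the row */
      }
      memcpy(temp, src_ptr + n * SBPP, r * SBPP);      /* stage the remainder */
      Kernel_SIMD(temp, temp + 128, MASK + 1);         /* one full padded block */
      memcpy(dst_ptr + n * BPP, temp + 128, r * BPP);  /* keep only r pixels */
    }

The variants above differ only in how many planes are staged and at which 64- or 128-byte offsets inside temp they are placed.
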
480 SIMD_ALIGNED(uint8 temp[128 * 2]); \
481 memset(temp, 0, 128 * 2); /* for YUY2 and msan */ \
487 memcpy(temp, src_ptr + (n >> UVSHIFT) * SBPP, SS(r, UVSHIFT) * SBPP); \
488 memcpy(temp + 128, dst_ptr + n * BPP, r * BPP); \
489 ANY_SIMD(temp, temp + 128, MASK + 1); \
490 memcpy(dst_ptr + n * BPP, temp + 128, r * BPP); \
511 SIMD_ALIGNED(uint8 temp[64 * 2]); \
512 memset(temp, 0, 64); /* for msan */ \
518 memcpy(temp, src_ptr + n * SBPP, r * SBPP); \
519 ANY_SIMD(temp, temp + 64, shuffler, MASK + 1); \
520 memcpy(dst_ptr + n * BPP, temp + 64, r * BPP); \
553 SIMD_ALIGNED(uint8 temp[128 * 2]); \
554 memset(temp, 0, 128); /* for YUY2 and msan */ \
560 memcpy(temp, src_ptr + (n >> UVSHIFT) * SBPP, SS(r, UVSHIFT) * SBPP); \
561 ANY_SIMD(temp, temp + 128, yuvconstants, MASK + 1); \
562 memcpy(dst_ptr + n * BPP, temp + 128, r * BPP); \
583 SIMD_ALIGNED(uint8 temp[64 * 3]); \
584 memset(temp, 0, 64 * 2); /* for msan */ \
590 memcpy(temp, src_ptr + n * SBPP, r * SBPP); \
591 memcpy(temp + 64, src_ptr + src_stride_ptr + n * SBPP, r * SBPP); \
592 ANY_SIMD(temp + 128, temp, 64, MASK + 1, source_y_fraction); \
593 memcpy(dst_ptr + n * BPP, temp + 128, r * BPP); \
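
In the two-row interpolation fragment above (lines 590-593), both source rows are staged 64 bytes apart in the same scratch buffer, so the literal 64 passed at line 592 acts as the row stride: the kernel treats temp and temp + 64 as vertically adjacent rows and writes the blended result to temp + 128, from which only r * BPP bytes are copied out. The assumed kernel shape, for illustration only:

    #include <stddef.h>
    #include <stdint.h>

    /* assumed shape of the interpolation kernel invoked at line 592 */
    void Interp_SIMD(uint8_t* dst_ptr, const uint8_t* src_ptr,
                     ptrdiff_t src_stride, int width, int source_y_fraction);
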
613 SIMD_ALIGNED(uint8 temp[64 * 2]); \
614 memset(temp, 0, 64); /* for msan */ \
620 memcpy(temp, src_ptr, r * BPP); \
621 ANY_SIMD(temp, temp + 64, MASK + 1); \
622 memcpy(dst_ptr + n * BPP, temp + 64 + (MASK + 1 - r) * BPP, r * BPP); \
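
The mirror fragment above (lines 613-622) stages the r leftover pixels from the start of the source row (a mirrored row is consumed from the opposite end, so the remainder presumably sits at the front), mirrors one full block of MASK + 1 pixels into temp + 64, and then copies the valid output from the tail of that block. A worked example of the offset at line 622, with illustrative numbers:

    /* Offset of the r valid mirrored pixels inside the scratch output,
       matching line 622 above; e.g. a 16-pixel kernel (MASK 15), r = 3
       leftover pixels, BPP = 1 byte per pixel gives 64 + 13 * 1 == 77. */
    static int MirroredTailOffset(int mask, int r, int bpp) {
      return 64 + (mask + 1 - r) * bpp;
    }
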
648 SIMD_ALIGNED(uint8 temp[64]); \
654 ANY_SIMD(temp, v32, MASK + 1); \
655 memcpy(dst_ptr + n * BPP, temp, r * BPP); \
672 SIMD_ALIGNED(uint8 temp[128 * 3]); \
673 memset(temp, 0, 128); /* for msan */ \
679 memcpy(temp, src_ptr + (n >> UVSHIFT) * BPP, SS(r, UVSHIFT) * BPP); \
682 memcpy(temp + SS(r, UVSHIFT) * BPP, \
683 temp + SS(r, UVSHIFT) * BPP - BPP, BPP); \
687 memcpy(temp + SS(r, UVSHIFT) * BPP, \
688 temp + SS(r, UVSHIFT) * BPP - BPP, BPP); \
689 memcpy(temp + SS(r, UVSHIFT) * BPP + BPP, \
690 temp + SS(r, UVSHIFT) * BPP - BPP, BPP * 2); \
693 memcpy(temp + SS(r, UVSHIFT) * BPP, \
694 temp + SS(r, UVSHIFT) * BPP - BPP * 2, BPP * 2); \
697 memcpy(temp + SS(r, UVSHIFT) * BPP, \
698 temp + SS(r, UVSHIFT) * BPP - BPP, BPP); \
700 ANY_SIMD(temp, temp + 128, temp + 256, MASK + 1); \
701 memcpy(dst_u + (n >> DUVSHIFT), temp + 128, SS(r, DUVSHIFT)); \
702 memcpy(dst_v + (n >> DUVSHIFT), temp + 256, SS(r, DUVSHIFT)); \
741 SIMD_ALIGNED(uint8 temp[128 * 4]); \
742 memset(temp, 0, 128 * 2); /* for msan */ \
748 memcpy(temp, src_ptr + (n >> UVSHIFT) * BPP, SS(r, UVSHIFT) * BPP); \
749 memcpy(temp + 128, src_ptr + src_stride_ptr + (n >> UVSHIFT) * BPP, \
752 memcpy(temp + SS(r, UVSHIFT) * BPP, \
753 temp + SS(r, UVSHIFT) * BPP - BPP, BPP); \
754 memcpy(temp + 128 + SS(r, UVSHIFT) * BPP, \
755 temp + 128 + SS(r, UVSHIFT) * BPP - BPP, BPP); \
757 ANY_SIMD(temp, 128, temp + 256, temp + 384, MASK + 1); \
758 memcpy(dst_u + (n >> 1), temp + 256, SS(r, 1)); \
759 memcpy(dst_v + (n >> 1), temp + 384, SS(r, 1)); \