/external/libvpx/libvpx/vp8/common/mips/msa/
  mfqe_msa.c
    16:  uint8_t *dst_ptr, int32_t dst_stride,
    33:  LD2(dst_ptr, dst_stride, dst0_d, dst1_d);
    39:  LD2((dst_ptr + 2 * dst_stride), dst_stride, dst0_d, dst1_d);
    51:  ST8x2_UB(dst0, dst_ptr, dst_stride);
    52:  dst_ptr += (2 * dst_stride);
    62:  ST8x2_UB(dst1, dst_ptr, dst_stride);
    63:  dst_ptr += (2 * dst_stride);
    68:  uint8_t *dst_ptr, int32_t dst_stride,
    84:  LD_SB4(dst_ptr, dst_stride, dst0, dst1, dst2, dst3);
    93:  PCKEV_ST_SB(res_h_r, res_h_l, dst_ptr);
    [all...]
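
The MSA hits above all follow one pattern: load rows at dst_ptr, combine them with the co-located source rows, store the result back, then advance dst_ptr by a multiple of dst_stride. A minimal plain-C sketch of that stride-walking load/blend/store loop; the 8x8 block size is taken from the ST8x2_UB pairs above, but the equal-weight blend is only a placeholder for libvpx's actual MFQE weighting:

    #include <stdint.h>

    /* Hypothetical helper: blend an 8x8 block of src into dst in place,
     * walking both pointers row by row.  The 50/50 weight is a placeholder;
     * the real MFQE filter derives its weight from frame quality. */
    static void blend_block_8x8_c(const uint8_t *src_ptr, int32_t src_stride,
                                  uint8_t *dst_ptr, int32_t dst_stride) {
      for (int row = 0; row < 8; ++row) {
        for (int col = 0; col < 8; ++col) {
          /* Average source and destination pixels with rounding. */
          dst_ptr[col] = (uint8_t)((src_ptr[col] + dst_ptr[col] + 1) >> 1);
        }
        src_ptr += src_stride;
        dst_ptr += dst_stride;  /* the vector code steps two rows at a time,
                                   hence "dst_ptr += (2 * dst_stride)" above */
      }
    }
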
/external/libvpx/libvpx/third_party/libyuv/include/libyuv/
  scale_row.h
    179:  void ScaleCols_C(uint8* dst_ptr, const uint8* src_ptr,
    181:  void ScaleCols_16_C(uint16* dst_ptr, const uint16* src_ptr,
    183:  void ScaleColsUp2_C(uint8* dst_ptr, const uint8* src_ptr,
    185:  void ScaleColsUp2_16_C(uint16* dst_ptr, const uint16* src_ptr,
    187:  void ScaleFilterCols_C(uint8* dst_ptr, const uint8* src_ptr,
    189:  void ScaleFilterCols_16_C(uint16* dst_ptr, const uint16* src_ptr,
    191:  void ScaleFilterCols64_C(uint8* dst_ptr, const uint8* src_ptr,
    193:  void ScaleFilterCols64_16_C(uint16* dst_ptr, const uint16* src_ptr,
    201:  uint8* dst_ptr, int dst_width);
    204:  uint16* dst_ptr, int dst_width)
    [all...]
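
These prototypes share the (dst_ptr, src_ptr, dst_width, x, dx) shape also visible in scale_any.cc further down, where x and dx are fixed-point source positions. A minimal sketch of such a column-scaling loop, written against the assumption of 16.16 fixed point rather than copied from libyuv:

    #include <stdint.h>

    /* Hypothetical nearest-neighbour column scaler: x and dx are assumed to be
     * 16.16 fixed-point source positions, matching the parameter shape of the
     * ScaleCols_C-style prototypes above. */
    static void scale_cols_sketch(uint8_t *dst_ptr, const uint8_t *src_ptr,
                                  int dst_width, int x, int dx) {
      for (int j = 0; j < dst_width; ++j) {
        dst_ptr[j] = src_ptr[x >> 16];  /* integer part selects the source column */
        x += dx;                        /* fractional step per output pixel */
      }
    }
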
/external/libvpx/libvpx/vp8/common/x86/
  vp8_asm_stubs.c
    80:  int xoffset, int yoffset, unsigned char *dst_ptr,
    89:  vp8_filter_block1dc_v6_mmx(FData2 + 8, dst_ptr, dst_pitch, 8, 4, 4, 4,
    97:  int yoffset, unsigned char *dst_ptr,
    112:  vp8_filter_block1d16_v6_sse2(FData2 + 32, dst_ptr, dst_pitch, 32, 16, 16,
    117:  vp8_filter_block1d16_h6_only_sse2(src_ptr, src_pixels_per_line, dst_ptr,
    125:  vp8_filter_block1d16_v6_sse2(FData2 + 32, dst_ptr, dst_pitch, 32, 16, 16,
    132:  unsigned char *dst_ptr, int dst_pitch) {
    143:  vp8_filter_block1d8_v6_sse2(FData2 + 16, dst_ptr, dst_pitch, 16, 8, 8,
    148:  vp8_filter_block1d8_h6_only_sse2(src_ptr, src_pixels_per_line, dst_ptr,
    155:  src_pixels_per_line, dst_ptr, dst_pitch, 8
    [all...]
/external/libvpx/libvpx/vp9/common/mips/msa/
  vp9_mfqe_msa.c
    16:  uint8_t *dst_ptr, int32_t dst_stride,
    33:  LD2(dst_ptr, dst_stride, dst0_d, dst1_d);
    39:  LD2((dst_ptr + 2 * dst_stride), dst_stride, dst0_d, dst1_d);
    51:  ST8x2_UB(dst0, dst_ptr, dst_stride);
    52:  dst_ptr += (2 * dst_stride);
    62:  ST8x2_UB(dst1, dst_ptr, dst_stride);
    63:  dst_ptr += (2 * dst_stride);
    68:  int32_t src_stride, uint8_t *dst_ptr,
    81:  LD_SB4(dst_ptr, dst_stride, dst0, dst1, dst2, dst3);
    90:  PCKEV_ST_SB(res_h_r, res_h_l, dst_ptr);
    [all...]
/external/tensorflow/tensorflow/core/common_runtime/sycl/
  sycl_device_context.cc
    32:  void *dst_ptr = DMAHelper::base(device_tensor);
    36:  static_cast<float *>(dst_ptr), static_cast<const float *>(src_ptr),
    41:  static_cast<double *>(dst_ptr),
    46:  static_cast<int32 *>(dst_ptr), static_cast<const int32 *>(src_ptr),
    51:  static_cast<int64 *>(dst_ptr), static_cast<const int64 *>(src_ptr),
    56:  static_cast<Eigen::half *>(dst_ptr),
    61:  static_cast<std::complex<float> *>(dst_ptr),
    66:  static_cast<std::complex<double> *>(dst_ptr),
    71:  static_cast<int8 *>(dst_ptr), static_cast<const int8 *>(src_ptr),
    76:  static_cast<int16 *>(dst_ptr), static_cast<const int16 *>(src_ptr)
    110:  void *dst_ptr = DMAHelper::base(cpu_tensor);
    [all...]
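
The copy routine casts dst_ptr and src_ptr to the tensor's element type before handing them to the typed device-copy helper. A sketch of that switch-on-dtype dispatch; the enum and the plain memcpy stand in for TensorFlow's DataType and the SYCL copy call, so treat both as illustrative assumptions:

    #include <stdint.h>
    #include <string.h>

    /* Illustrative element-type enum; not TensorFlow's DataType. */
    typedef enum { DT_F32, DT_F64, DT_I32 } dtype_t;

    /* Copy n elements from src_ptr to dst_ptr, dispatching on element type the
     * way the sycl_device_context.cc hits cast dst_ptr per type.  With a typed
     * device-copy routine the casts pick the right overload; with plain memcpy
     * they only document the element type. */
    static void copy_tensor_sketch(void *dst_ptr, const void *src_ptr,
                                   size_t n, dtype_t t) {
      switch (t) {
        case DT_F32:
          memcpy((float *)dst_ptr, (const float *)src_ptr, n * sizeof(float));
          break;
        case DT_F64:
          memcpy((double *)dst_ptr, (const double *)src_ptr, n * sizeof(double));
          break;
        case DT_I32:
          memcpy((int32_t *)dst_ptr, (const int32_t *)src_ptr, n * sizeof(int32_t));
          break;
      }
    }
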
/external/libyuv/files/include/libyuv/
  scale_row.h
    243:  void ScaleCols_C(uint8* dst_ptr,
    248:  void ScaleCols_16_C(uint16* dst_ptr,
    253:  void ScaleColsUp2_C(uint8* dst_ptr,
    258:  void ScaleColsUp2_16_C(uint16* dst_ptr,
    263:  void ScaleFilterCols_C(uint8* dst_ptr,
    268:  void ScaleFilterCols_16_C(uint16* dst_ptr,
    273:  void ScaleFilterCols64_C(uint8* dst_ptr,
    278:  void ScaleFilterCols64_16_C(uint16* dst_ptr,
    293:  uint8* dst_ptr,
    297:  uint16* dst_ptr,
    [all...]
/external/libvpx/libvpx/vp8/common/arm/neon/
  dc_only_idct_add_neon.c
    16:  int pred_stride, unsigned char *dst_ptr,
    36:  vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d2u8), 0);
    37:  dst_ptr += dst_stride;
    38:  vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d2u8), 1);
    39:  dst_ptr += dst_stride;
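
The NEON stores above write a 4x4 block one 32-bit lane at a time, bumping dst_ptr by dst_stride between rows. A plain-C sketch of what a DC-only IDCT add does with that block; the (input_dc + 4) >> 3 rounding is taken from vp8's C reference, so treat it as an assumption here:

    #include <stdint.h>

    static uint8_t clip_u8(int v) {
      return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
    }

    /* Add a rounded DC term to a 4x4 predictor block and write the clipped
     * result to dst_ptr, advancing by dst_stride per row (the idiom the
     * vst1_lane_u32 stores above implement with NEON). */
    static void dc_only_idct_add_sketch(int16_t input_dc, const uint8_t *pred_ptr,
                                        int pred_stride, uint8_t *dst_ptr,
                                        int dst_stride) {
      const int a1 = (input_dc + 4) >> 3;
      for (int r = 0; r < 4; ++r) {
        for (int c = 0; c < 4; ++c) dst_ptr[c] = clip_u8(pred_ptr[c] + a1);
        pred_ptr += pred_stride;
        dst_ptr += dst_stride;
      }
    }
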
  bilinearpredict_neon.c
    27:  int yoffset, unsigned char *dst_ptr,
    97:  store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(e0, e1));
    115:  store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(f0, f1));
    121:  int yoffset, unsigned char *dst_ptr,
    180:  vst1_u8((uint8_t *)dst_ptr, d22u8);
    181:  dst_ptr += dst_pitch;
    182:  vst1_u8((uint8_t *)dst_ptr, d23u8);
    183:  dst_ptr += dst_pitch;
    184:  vst1_u8((uint8_t *)dst_ptr, d24u8);
    185:  dst_ptr += dst_pitch
    [all...]
/external/libvpx/libvpx/vp8/common/
  copy_c.c
    18:  unsigned char *dst_ptr, int dst_stride, int height) {
    22:  memcpy(dst_ptr, src_ptr, 32);
    25:  dst_ptr += dst_stride;
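
copy_c.c shows the simplest form of the dst_ptr idiom in this listing: copy one row, advance both pointers by their strides, repeat for height rows. A generic sketch with the row width as a parameter (the hit above hard-codes 32 bytes per row):

    #include <string.h>

    /* Copy a width x height block between two strided buffers. */
    static void copy_block_sketch(const unsigned char *src_ptr, int src_stride,
                                  unsigned char *dst_ptr, int dst_stride,
                                  int width, int height) {
      for (int r = 0; r < height; ++r) {
        memcpy(dst_ptr, src_ptr, (size_t)width);
        src_ptr += src_stride;
        dst_ptr += dst_stride;
      }
    }
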
  filter.c
    126:  int xoffset, int yoffset, unsigned char *dst_ptr,
    134:  filter_block2d(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter,
    138:  int xoffset, int yoffset, unsigned char *dst_ptr,
    152:  filter_block2d_second_pass(FData + 16, dst_ptr, dst_pitch, 8, 8, 8, 8,
    157:  int xoffset, int yoffset, unsigned char *dst_ptr,
    171:  filter_block2d_second_pass(FData + 16, dst_ptr, dst_pitch, 8, 8, 4, 8,
    176:  int xoffset, int yoffset, unsigned char *dst_ptr,
    190:  filter_block2d_second_pass(FData + 32, dst_ptr, dst_pitch, 16, 16, 16, 16,
    204:  * OUTPUTS : INT32 *dst_ptr : Pointer to filtered block.
    217:  unsigned char *src_ptr, unsigned short *dst_ptr, unsigned int src_stride
    [all...]
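
filter.c builds its sixtap predictors from two passes: a horizontal pass writes widened intermediate values into FData, and filter_block2d_second_pass filters that buffer vertically into dst_ptr. A compact sketch of that two-pass structure, with a 2-tap kernel standing in for the 6-tap filters and the 7-bit rounding assumed from vp8's filter precision:

    #include <stdint.h>

    /* First pass: filter horizontally into an intermediate buffer that is
     * (height + taps - 1) rows tall so the second pass has every row it needs.
     * Taps f0 + f1 are assumed to sum to 128 (7-bit precision). */
    static void first_pass_sketch(const uint8_t *src_ptr, int src_stride,
                                  uint16_t *mid, int width, int rows,
                                  int f0, int f1) {
      for (int r = 0; r < rows; ++r) {
        for (int c = 0; c < width; ++c)
          mid[c] = (uint16_t)((src_ptr[c] * f0 + src_ptr[c + 1] * f1 + 64) >> 7);
        src_ptr += src_stride;
        mid += width;
      }
    }

    /* Second pass: filter the intermediate buffer vertically into dst_ptr. */
    static void second_pass_sketch(const uint16_t *mid, uint8_t *dst_ptr,
                                   int dst_pitch, int width, int height,
                                   int f0, int f1) {
      for (int r = 0; r < height; ++r) {
        for (int c = 0; c < width; ++c) {
          int v = (mid[c] * f0 + mid[c + width] * f1 + 64) >> 7;
          dst_ptr[c] = (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
        }
        mid += width;
        dst_ptr += dst_pitch;
      }
    }
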
/external/libvpx/libvpx/third_party/libyuv/source/
  scale.cc
    39:  const uint8* src_ptr, uint8* dst_ptr,
    43:  uint8* dst_ptr, int dst_width) =
    91:  IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) {
    102:  ScaleRowDown2(src_ptr, src_stride, dst_ptr, dst_width);
    104:  dst_ptr += dst_stride;
    111:  const uint16* src_ptr, uint16* dst_ptr,
    115:  uint16* dst_ptr, int dst_width) =
    141:  IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) {
    152:  ScaleRowDown2(src_ptr, src_stride, dst_ptr, dst_width);
    154:  dst_ptr += dst_stride
    [all...]
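
ScalePlaneDown2 in scale.cc picks a row kernel once (the C version, or a SIMD variant when checks such as IS_ALIGNED(dst_ptr, 4) pass) and then loops over output rows, advancing src_ptr and dst_ptr each iteration. A sketch of that dispatch-then-loop shape using a function pointer; the placeholder kernel here averages vertical pairs and is not libyuv's ScaleRowDown2_C:

    #include <stddef.h>
    #include <stdint.h>

    typedef void (*scale_row_fn)(const uint8_t *src_ptr, ptrdiff_t src_stride,
                                 uint8_t *dst_ptr, int dst_width);

    /* Placeholder C kernel: average each vertical pair, keep every other column. */
    static void scale_row_down2_sketch(const uint8_t *src_ptr, ptrdiff_t src_stride,
                                       uint8_t *dst_ptr, int dst_width) {
      const uint8_t *s1 = src_ptr + src_stride;
      for (int x = 0; x < dst_width; ++x)
        dst_ptr[x] = (uint8_t)((src_ptr[2 * x] + s1[2 * x] + 1) >> 1);
    }

    static void scale_plane_down2_sketch(const uint8_t *src_ptr, int src_stride,
                                         uint8_t *dst_ptr, int dst_stride,
                                         int dst_width, int dst_height) {
      scale_row_fn row = scale_row_down2_sketch;  /* SIMD variant chosen here */
      for (int y = 0; y < dst_height; ++y) {
        row(src_ptr, src_stride, dst_ptr, dst_width);
        src_ptr += 2 * src_stride;  /* two source rows per output row */
        dst_ptr += dst_stride;
      }
    }
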
  scale_any.cc
    23:  void NAMEANY(uint8* dst_ptr, const uint8* src_ptr, \
    27:  TERP_SIMD(dst_ptr, src_ptr, n, x, dx); \
    29:  TERP_C(dst_ptr + n * BPP, src_ptr, \
    48:  uint8* dst_ptr, int dst_width) { \
    52:  SCALEROWDOWN_SIMD(src_ptr, src_stride, dst_ptr, n); \
    55:  dst_ptr + n * BPP, r); \
    63:  uint8* dst_ptr, int dst_width) { \
    67:  SCALEROWDOWN_SIMD(src_ptr, src_stride, dst_ptr, n); \
    70:  dst_ptr + n * BPP, r); \
    169:  uint8* dst_ptr, int dst_width) {
    [all...]
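
The SDANY-style macros above all expand to the same wrapper: run the SIMD kernel on the largest multiple of the vector width, then finish the ragged tail with the C kernel at dst_ptr + n * BPP. A de-macro'd sketch of that split, with placeholder kernel types and an explicit src_step parameter standing in for the macro's source-step factor:

    #include <stddef.h>
    #include <stdint.h>

    typedef void (*row_fn)(const uint8_t *src_ptr, ptrdiff_t src_stride,
                           uint8_t *dst_ptr, int dst_width);

    /* simd_fn handles widths that are a multiple of (mask + 1); c_fn handles
     * the remainder r, starting at output column n and the matching source
     * offset n * src_step. */
    static void scale_row_any_sketch(const uint8_t *src_ptr, ptrdiff_t src_stride,
                                     uint8_t *dst_ptr, int dst_width,
                                     row_fn simd_fn, row_fn c_fn,
                                     int mask, int bpp, int src_step) {
      const int r = dst_width & mask;   /* leftover output pixels */
      const int n = dst_width & ~mask;  /* SIMD-aligned portion */
      if (n > 0) simd_fn(src_ptr, src_stride, dst_ptr, n);
      if (r > 0)
        c_fn(src_ptr + n * src_step, src_stride, dst_ptr + n * bpp, r);
    }
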
/external/libyuv/files/source/
  scale.cc
    43:  uint8* dst_ptr,
    47:  uint8* dst_ptr, int dst_width) =
    109:  IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) {
    134:  ScaleRowDown2(src_ptr, src_stride, dst_ptr, dst_width);
    136:  dst_ptr += dst_stride;
    147:  uint16* dst_ptr,
    151:  uint16* dst_ptr, int dst_width) =
    182:  IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) {
    193:  ScaleRowDown2(src_ptr, src_stride, dst_ptr, dst_width);
    195:  dst_ptr += dst_stride
    [all...]
  scale_any.cc
    23:  void NAMEANY(uint8* dst_ptr, const uint8* src_ptr, int dst_width, int x, \
    27:  TERP_SIMD(dst_ptr, src_ptr, n, x, dx); \
    29:  TERP_C(dst_ptr + n * BPP, src_ptr, dst_width & MASK, x + n * dx, dx); \
    49:  void NAMEANY(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, \
    54:  SCALEROWDOWN_SIMD(src_ptr, src_stride, dst_ptr, n); \
    57:  dst_ptr + n * BPP, r); \
    64:  void NAMEANY(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, \
    69:  SCALEROWDOWN_SIMD(src_ptr, src_stride, dst_ptr, n); \
    72:  dst_ptr + n * BPP, r); \
    355:  uint8* dst_ptr, int dst_width) {
    [all...]
/frameworks/base/media/mca/filterfw/jni/
  jni_native_frame.cpp
    184:  uint8_t* dst_ptr = reinterpret_cast<uint8_t*>(frame->MutableData());
    185:  const uint8_t* end_ptr = dst_ptr + frame->Size();
    188:  while (dst_ptr < end_ptr) {
    190:  *(dst_ptr++) = (pixel.rgba[0] + pixel.rgba[1] + pixel.rgba[2]) / 3;
    195:  while (dst_ptr < end_ptr) {
    197:  *(dst_ptr++) = pixel.rgba[0];
    198:  *(dst_ptr++) = pixel.rgba[1];
    199:  *(dst_ptr++) = pixel.rgba[2];
    204:  memcpy(dst_ptr, src_ptr, frame->Size());
    224:  Pixel* dst_ptr;
    [all...]
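
jni_native_frame.cpp fills the frame's backing store by walking dst_ptr up to end_ptr, either averaging the three colour channels (grayscale target) or copying them (RGB target). A sketch of the grayscale branch, assuming a packed 4-byte RGBA pixel layout as the rgba[] indexing above suggests:

    #include <stddef.h>
    #include <stdint.h>

    /* Convert gray_size RGBA pixels to single-channel gray by averaging R, G
     * and B, in the style of the "(pixel.rgba[0] + pixel.rgba[1] +
     * pixel.rgba[2]) / 3" hit above.  Alpha is ignored. */
    static void rgba_to_gray_sketch(const uint8_t *src_ptr, uint8_t *dst_ptr,
                                    size_t gray_size) {
      const uint8_t *end_ptr = dst_ptr + gray_size;
      while (dst_ptr < end_ptr) {
        *(dst_ptr++) = (uint8_t)((src_ptr[0] + src_ptr[1] + src_ptr[2]) / 3);
        src_ptr += 4;  /* advance to the next RGBA pixel */
      }
    }
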
/toolchain/binutils/binutils-2.27/bfd/
  coff-z8k.c
    196:  unsigned int *dst_ptr)
    205:  data + *dst_ptr);
    206:  (*dst_ptr) += 1;
    217:  data + *dst_ptr);
    228:  bfd_put_32 (in_abfd, dst, data + *dst_ptr);
    230:  (*dst_ptr) += 4;
    236:  ((bfd_get_8 (in_abfd, data + *dst_ptr) & 0xf0)
    240:  data + *dst_ptr);
    241:  (*dst_ptr) += 1;
    248:  data + *dst_ptr);
    300:  data + *dst_ptr);
    326:  data + *dst_ptr);
    347:  bfd_put_16 (in_abfd, (bfd_vma) gap, data + *dst_ptr);
    [all...]
  coff-h8500.c
    167:  unsigned int *dst_ptr)
    169:  bfd_byte *d = data+*dst_ptr;
    178:  (*dst_ptr) += 1;
    187:  (*dst_ptr) += 1;
    195:  (*dst_ptr) += 2;
    204:  (*dst_ptr) += 2;
    214:  (*dst_ptr) += 2;
    221:  int o = bfd_get_32 (in_abfd, data+ *dst_ptr -1);
    223:  bfd_put_32 (in_abfd, (bfd_vma) v, data + *dst_ptr -1);
    224:  (*dst_ptr) += 3
    230:  bfd_put_32 (in_abfd, (bfd_vma) v, data + *dst_ptr);
    273:  bfd_put_16 (in_abfd, (bfd_vma) gap, data + *dst_ptr);
    [all...]
  coff-z80.c
    180:  unsigned int *dst_ptr)
    196:  bfd_put_8 (in_abfd, val, data + *dst_ptr);
    197:  (*dst_ptr) += 1;
    204:  bfd_put_8 (in_abfd, val, data + *dst_ptr);
    205:  (*dst_ptr) += 1;
    212:  bfd_put_16 (in_abfd, val, data + *dst_ptr);
    213:  (*dst_ptr) += 2;
    221:  bfd_put_16 (in_abfd, val, data + *dst_ptr);
    222:  bfd_put_8 (in_abfd, val >> 16, data + *dst_ptr+2);
    223:  (*dst_ptr) += 3
    [all...]
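
In these coff-*.c relocation writers, dst_ptr is an output cursor rather than a pixel pointer: the relocated value is stored at data + *dst_ptr with bfd_put_8/16/32 and the cursor is then advanced by the size of the field just written. A sketch of that pattern with plain byte stores standing in for the bfd_put_* helpers; the little-endian ordering is an assumption, since the real code lets BFD pick the target byte order:

    #include <stdint.h>

    /* Write one relocated field of 'size' bytes (1, 2 or 4) at the current
     * output cursor and advance the cursor, mirroring
     * "bfd_put_16 (..., data + *dst_ptr); (*dst_ptr) += 2;" in the hits above.
     * Value computation from the reloc is omitted. */
    static void emit_reloc_field_sketch(uint8_t *data, unsigned int *dst_ptr,
                                        uint32_t value, int size) {
      for (int i = 0; i < size; ++i)
        data[*dst_ptr + (unsigned)i] = (uint8_t)(value >> (8 * i));
      *dst_ptr += (unsigned)size;  /* cursor now points at the next field */
    }
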
/external/libvpx/libvpx/vpx_dsp/mips/
  convolve2_avg_dspr2.c
    27:  uint8_t *dst_ptr;
    46:  dst_ptr = dst + x;
    81:  "lbu %[scratch1], 0(%[dst_ptr]) \n\t"
    82:  "lbu %[scratch2], 1(%[dst_ptr]) \n\t"
    91:  "lbu %[scratch1], 2(%[dst_ptr]) \n\t"
    93:  "sb %[store1], 0(%[dst_ptr]) \n\t"
    94:  "sb %[store2], 1(%[dst_ptr]) \n\t"
    95:  "lbu %[scratch2], 3(%[dst_ptr]) \n\t"
    102:  "sb %[store1], 2(%[dst_ptr]) \n\t"
    103:  "sb %[store2], 3(%[dst_ptr]) \n\t
    126:  uint8_t *dst_ptr;
    [all...]
  convolve2_vert_dspr2.c
    27:  uint8_t *dst_ptr;
    46:  dst_ptr = dst + x;
    89:  "sb %[store1], 0(%[dst_ptr]) \n\t"
    90:  "sb %[store2], 1(%[dst_ptr]) \n\t"
    95:  "sb %[store1], 2(%[dst_ptr]) \n\t"
    96:  "sb %[store2], 3(%[dst_ptr]) \n\t"
    103:  [src_stride] "r"(src_stride), [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
    117:  uint8_t *dst_ptr;
    136:  dst_ptr = dst + x
    [all...]
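
The DSPr2 assembly above implements a 2-tap vertical convolution; the _avg_ variant also loads the existing pixels from dst_ptr (the lbu instructions) and rounds them together with the filtered result before the sb stores. A plain-C sketch of one output row of that averaging variant, assuming 7-bit filter taps as in vpx_dsp's C reference:

    #include <stdint.h>

    static uint8_t clip_u8(int v) {
      return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
    }

    /* One row of a 2-tap vertical convolution averaged into dst_ptr.
     * Filter taps f0 and f1 are assumed to sum to 128 (7-bit precision). */
    static void convolve2_avg_vert_row_sketch(const uint8_t *src_ptr,
                                              int src_stride, uint8_t *dst_ptr,
                                              int width, int f0, int f1) {
      for (int x = 0; x < width; ++x) {
        const int sum = src_ptr[x] * f0 + src_ptr[x + src_stride] * f1;
        const uint8_t filtered = clip_u8((sum + 64) >> 7);        /* round, shift */
        dst_ptr[x] = (uint8_t)((dst_ptr[x] + filtered + 1) >> 1); /* avg with dst */
      }
    }
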
/external/libvpx/libvpx/vp8/common/mips/mmi/
  idctllm_mmi.c
    41:  int pred_stride, unsigned char *dst_ptr,
    137:  "gsswlc1 %[ftmp1], 0x03(%[dst_ptr]) \n\t"
    138:  "gsswrc1 %[ftmp1], 0x00(%[dst_ptr]) \n\t"
    140:  MMI_ADDU(%[dst_ptr], %[dst_ptr], %[dst_stride])
    152:  "gsswlc1 %[ftmp2], 0x03(%[dst_ptr]) \n\t"
    153:  "gsswrc1 %[ftmp2], 0x00(%[dst_ptr]) \n\t"
    155:  MMI_ADDU(%[dst_ptr], %[dst_ptr], %[dst_stride])
    167:  "gsswlc1 %[ftmp3], 0x03(%[dst_ptr]) \n\t
    [all...]
/external/gemmlowp/internal/
  pack_msa.h
    49:  std::uint8_t* dst_ptr = dst->current_data();
    95:  __builtin_msa_st_b(reinterpret_cast<v16i8>(tmp), dst_ptr, 0);
    96:  dst_ptr += 16;
    104:  __builtin_msa_st_b(reinterpret_cast<v16i8>(tmp), dst_ptr, 0);
    105:  dst_ptr += 16;
    115:  __builtin_msa_st_b(reinterpret_cast<v16i8>(tmp), dst_ptr, 0);
    116:  dst_ptr += 16;
    125:  __builtin_msa_st_b(reinterpret_cast<v16i8>(tmp), dst_ptr, 0);
    126:  dst_ptr += 16;
    134:  __builtin_msa_st_b(reinterpret_cast<v16i8>(tmp), dst_ptr, 0)
    203:  std::uint8_t* dst_ptr = dst->current_data();
    258:  __builtin_msa_st_b(reinterpret_cast<v16i8>(tmp), dst_ptr, 0);
    267:  __builtin_msa_st_b(reinterpret_cast<v16i8>(tmp), dst_ptr, 0);
    278:  __builtin_msa_st_b(reinterpret_cast<v16i8>(tmp), dst_ptr, 0);
    288:  __builtin_msa_st_b(reinterpret_cast<v16i8>(tmp), dst_ptr, 0);
    297:  __builtin_msa_st_b(reinterpret_cast<v16i8>(tmp), dst_ptr, 0);
    [all...]
  pack_sse.h
    51:  std::uint8_t* dst_ptr = dst->current_data();
    83:  _mm_storel_epi64(reinterpret_cast<__m128i*>(&dst_ptr[0]), xmm9);
    85:  reinterpret_cast<__m128i*>(&dst_ptr[kCellSize * kCells]), xmm10);
    91:  reinterpret_cast<__m128i*>(&dst_ptr[2 * kCellSize * kCells]),
    94:  reinterpret_cast<__m128i*>(&dst_ptr[3 * kCellSize * kCells]),
    118:  dst_ptr += kCellSize;
    120:  dst_ptr += 3 * kCellSize * kCells;
  output_neon.h
    264:  std::int32_t* dst_ptr = dst->data(row, col);
    267:  vst1q_s32(dst_ptr + i * stride, block.buf.reg[i]);
    305:  std::int32_t* dst_ptr = dst->data(row, col);
    309:  vst1q_s32(dst_ptr + i * col_stride + 0, src.buf.reg[2 * i + 0]);
    310:  vst1q_s32(dst_ptr + i * col_stride + 4, src.buf.reg[2 * i + 1]);
    321:  vst1q_s32(dst_ptr + i * row_stride, transpose_top.buf.reg[i]);
    330:  vst1q_s32(dst_ptr + (i + 4) * row_stride, transpose_bottom.buf.reg[i]);
    376:  std::int32_t* dst_ptr = dst->data(row, col);
    380:  vst1q_s32(dst_ptr + i * col_stride, src.buf.reg[2 * i]);
    381:  vst1q_s32(dst_ptr + i * col_stride + 4, src.buf.reg[2 * i + 1])
    431:  std::int32_t* dst_ptr = dst->data(row, col);
    448:  std::int32_t* dst_ptr = dst->data(row, col);
    465:  std::int16_t* dst_ptr = dst->data(row, col);
    503:  std::uint8_t* dst_ptr = dst->data(row, col);
    524:  std::uint8_t* dst_ptr = dst->data(row, col);
    552:  std::uint8_t* dst_ptr = dst->data(row, col);
    617:  std::uint8_t* dst_ptr = dst->data(row, col);
    [all...]
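
Each gemmlowp NEON output stage stores one 4-lane register of the result block at dst_ptr plus a multiple of the destination stride: col_stride for the column-major case, row_stride after an in-register transpose for the row-major case. A plain-C sketch of the column-major 4x4 store that the vst1q_s32 calls above perform one register at a time:

    #include <stdint.h>

    /* Store a 4x4 block of int32 results into a column-major destination:
     * "register" i of the source block becomes column i, written starting at
     * dst_ptr + i * col_stride, matching the vst1q_s32 pattern above. */
    static void store_int32x4x4_colmajor_sketch(const int32_t src[4][4],
                                                int32_t *dst_ptr,
                                                int col_stride) {
      for (int i = 0; i < 4; ++i)        /* one column (register) at a time */
        for (int lane = 0; lane < 4; ++lane)
          dst_ptr[i * col_stride + lane] = src[i][lane];
    }
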
/external/libvpx/libvpx/vpx_dsp/x86/
  highbd_convolve_avx2.c
    339:  const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
    352:  store_8x2_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
    355:  dst_ptr += dst_pitch << 1;
    361:  store_8x1_pixels(&res0, &max, dst_ptr);
    366:  const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
    379:  store_16x1_pixels(&res0, &res1, &max, dst_ptr);
    382:  dst_ptr += dst_pitch;
    459:  const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
    471:  store_8x2_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
    474:  dst_ptr += dst_pitch << 1
    [all...]
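
The AVX2 high-bitdepth paths convolve uint16 pixels, clamp against a per-bit-depth maximum, and store one or two rows at a time through dst_ptr. A scalar sketch of an 8-tap horizontal row; the 7-bit rounding and the (1 << bd) - 1 clamp follow vpx_dsp's C reference, and the tap alignment relative to src_ptr is simplified here:

    #include <stdint.h>

    static uint16_t clip_pixel_bd(int v, int bd) {
      const int max = (1 << bd) - 1;
      return (uint16_t)(v < 0 ? 0 : (v > max ? max : v));
    }

    /* One output row of an 8-tap horizontal high-bitdepth convolution.
     * src_ptr must have 7 extra valid pixels to the right of each output x;
     * the real code centres the taps, which is omitted here. */
    static void highbd_convolve8_horiz_row_sketch(const uint16_t *src_ptr,
                                                  uint16_t *dst_ptr, int width,
                                                  const int16_t filter[8],
                                                  int bd) {
      for (int x = 0; x < width; ++x) {
        int sum = 0;
        for (int k = 0; k < 8; ++k) sum += src_ptr[x + k] * filter[k];
        dst_ptr[x] = clip_pixel_bd((sum + 64) >> 7, bd);  /* round and clamp */
      }
    }
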