    Searched refs:ref_stride (Results 26 - 50 of 107)

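All of the hits below pass ref_stride alongside a reference pointer: it is the pitch, in pixels, between vertically adjacent rows of the reference buffer, so ref_ptr += ref_stride steps one row down. A minimal scalar SAD showing the pattern (illustrative only; the function name and loop are assumptions, not code from any file in these results):

    #include <stdint.h>
    #include <stdlib.h>

    /* Assumed sketch: sum of absolute differences over a w x h block.
     * src and ref may have different row pitches, hence the two strides. */
    static unsigned int sad_c(const uint8_t *src_ptr, int src_stride,
                              const uint8_t *ref_ptr, int ref_stride,
                              int w, int h) {
      unsigned int sad = 0;
      for (int y = 0; y < h; ++y) {
        for (int x = 0; x < w; ++x) sad += abs(src_ptr[x] - ref_ptr[x]);
        src_ptr += src_stride;  /* next source row */
        ref_ptr += ref_stride;  /* next reference row */
      }
      return sad;
    }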

  /external/libvpx/libvpx/vpx_dsp/x86/
variance_sse2.c 104 const uint8_t *ref_ptr, const int ref_stride,
115 const __m128i r = load4x2_sse2(ref_ptr, ref_stride);
119 ref_ptr += 2 * ref_stride;
124 const uint8_t *ref_ptr, const int ref_stride,
142 ref_ptr += ref_stride;
163 const uint8_t *ref_ptr, const int ref_stride,
175 ref_ptr += ref_stride;
180 const uint8_t *ref_ptr, const int ref_stride,
193 ref_ptr += ref_stride;
198 const uint8_t *ref_ptr, const int ref_stride,
    [all...]
sad4d_avx2.c 25 const uint8_t *const ref_array[4], int ref_stride,
63 refs[0] += ref_stride;
64 refs[1] += ref_stride;
65 refs[2] += ref_stride;
66 refs[3] += ref_stride;
73 const uint8_t *const ref_array[4], int ref_stride,
123 refs[0] += ref_stride;
124 refs[1] += ref_stride;
125 refs[2] += ref_stride;
126 refs[3] += ref_stride;
    [all...]
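The sad4d kernels above amortize one source load across four reference candidates: all four pointers share the same ref_stride and advance in lockstep each row. A scalar sketch of that x4d shape (names and the plain loop body are assumptions, not the libvpx code):

    #include <stdint.h>
    #include <stdlib.h>

    /* Assumed sketch: four SADs of one w x h source block against four
     * reference blocks that live in buffers with a common ref_stride. */
    static void sad_x4d_c(const uint8_t *src, int src_stride,
                          const uint8_t *const ref_array[4], int ref_stride,
                          int w, int h, uint32_t sad_array[4]) {
      const uint8_t *refs[4] = { ref_array[0], ref_array[1],
                                 ref_array[2], ref_array[3] };
      for (int i = 0; i < 4; ++i) sad_array[i] = 0;
      for (int y = 0; y < h; ++y) {
        for (int i = 0; i < 4; ++i)
          for (int x = 0; x < w; ++x)
            sad_array[i] += abs(src[x] - refs[i][x]);
        src += src_stride;
        for (int i = 0; i < 4; ++i) refs[i] += ref_stride;  /* lockstep */
      }
    }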
sad4d_avx512.c 15 const uint8_t *const ref_array[4], int ref_stride,
50 ref0 += ref_stride;
51 ref1 += ref_stride;
52 ref2 += ref_stride;
53 ref3 += ref_stride;
sad_avx2.c 16 const uint8_t *ref_ptr, int ref_stride) { \
31 ref_ptr += ref_stride; \
44 const uint8_t *ref_ptr, int ref_stride) { \
50 int ref2_stride = ref_stride << 1; \
55 ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + ref_stride)); \
94 int ref_stride, const uint8_t *second_pred) { \
113 ref_ptr += ref_stride; \
128 int ref_stride, const uint8_t *second_pred) { \
134 int ref2_stride = ref_stride << 1; \
139 ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + ref_stride)); \
    [all...]
variance_avx2.c 103 const int ref_stride, __m256i *const sse, __m256i *const sum) {
106 const __m128i r0 = _mm_loadu_si128((__m128i const *)(ref + 0 * ref_stride));
107 const __m128i r1 = _mm_loadu_si128((__m128i const *)(ref + 1 * ref_stride));
123 const uint8_t *ref, const int ref_stride,
131 variance16_kernel_avx2(src, src_stride, ref, ref_stride, vsse, vsum);
133 ref += 2 * ref_stride;
138 const uint8_t *ref, const int ref_stride,
148 ref += ref_stride;
153 const uint8_t *ref, const int ref_stride,
163 ref += ref_stride;
    [all...]
  /external/libvpx/libvpx/vpx_dsp/mips/
variance_mmi.c 409 const uint8_t *ref_ptr, int ref_stride,
475 MMI_ADDU(%[ref_ptr], %[ref_ptr], %[ref_stride])
495 [ref_stride]"r"((mips_reg)ref_stride),
505 const uint8_t *ref_ptr, int ref_stride, \
507 return vpx_variance64x(src_ptr, src_stride, ref_ptr, ref_stride, sse, n); \
514 const uint8_t *ref_ptr, int ref_stride,
556 MMI_ADDU(%[ref_ptr], %[ref_ptr], %[ref_stride])
    [all...]
sad_msa.c 24 const uint8_t *ref_ptr, int32_t ref_stride,
36 LW4(ref_ptr, ref_stride, ref0, ref1, ref2, ref3);
37 ref_ptr += (4 * ref_stride);
50 const uint8_t *ref, int32_t ref_stride,
59 LD_UB4(ref, ref_stride, ref0, ref1, ref2, ref3);
60 ref += (4 * ref_stride);
71 const uint8_t *ref, int32_t ref_stride,
80 LD_UB2(ref, ref_stride, ref0, ref1);
81 ref += (2 * ref_stride);
86 LD_UB2(ref, ref_stride, ref0, ref1)
    [all...]
  /external/libvpx/libvpx/vpx_dsp/
variance.c 25 const uint8_t *ref_ptr, int ref_stride) {
36 ref_ptr += ref_stride;
53 const uint8_t *ref_ptr, int ref_stride, int w, int h,
68 ref_ptr += ref_stride;
130 const uint8_t *ref_ptr, int ref_stride, \
133 variance(src_ptr, src_stride, ref_ptr, ref_stride, W, H, sse, &sum); \
140 const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) { \
149 return vpx_variance##W##x##H##_c(temp2, W, ref_ptr, ref_stride, sse); \
155 const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, \
168 return vpx_variance##W##x##H##_c(temp3, W, ref_ptr, ref_stride, sse);
    [all...]
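variance.c is the generic C reference that the SIMD versions mirror: walk the block row by row, accumulate the sum and the sum of squares of (src - ref), then subtract the squared mean. A sketch under those assumptions (libvpx's exact types and rounding differ):

    #include <stdint.h>

    /* Assumed sketch: variance = SSE - sum^2 / (w * h); the strides again
     * let src and ref live in buffers with different pitches. */
    static uint32_t variance_c(const uint8_t *src_ptr, int src_stride,
                               const uint8_t *ref_ptr, int ref_stride,
                               int w, int h, uint32_t *sse) {
      int64_t sum = 0;
      uint64_t sq = 0;
      for (int y = 0; y < h; ++y) {
        for (int x = 0; x < w; ++x) {
          const int d = src_ptr[x] - ref_ptr[x];
          sum += d;
          sq += (uint64_t)(d * d);
        }
        src_ptr += src_stride;
        ref_ptr += ref_stride;
      }
      *sse = (uint32_t)sq;
      return (uint32_t)(sq - (uint64_t)((sum * sum) / (w * h)));
    }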
  /external/libvpx/libvpx/vp8/common/x86/
copy_sse3.asm 18 %define ref_stride rdx
34 movsxd rdx, dword ptr arg(3) ; ref_stride
41 %define ref_stride r9
51 %define ref_stride rcx
66 %define ref_stride
113 lea end_ptr, [ref_ptr+ref_stride*2]
117 movdqa XMMWORD PTR [ref_ptr + ref_stride], xmm2
118 movdqa XMMWORD PTR [ref_ptr + ref_stride + 16], xmm3
121 movdqa XMMWORD PTR [end_ptr + ref_stride], xmm6
122 movdqa XMMWORD PTR [end_ptr + ref_stride + 16], xmm
    [all...]
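Note that in copy_sse3.asm, unlike the SAD and variance hits, ref_ptr is the destination: the movdqa stores write to [ref_ptr + ref_stride], so ref_stride is the output row pitch. A plain-C equivalent of such a strided block copy, under that reading:

    #include <stdint.h>
    #include <string.h>

    /* Assumed sketch: copy a w x h block between buffers whose rows are
     * src_stride and ref_stride pixels apart, one memcpy per row. */
    static void copy_block_c(const uint8_t *src_ptr, int src_stride,
                             uint8_t *ref_ptr, int ref_stride,
                             int w, int h) {
      for (int y = 0; y < h; ++y) {
        memcpy(ref_ptr, src_ptr, (size_t)w);
        src_ptr += src_stride;
        ref_ptr += ref_stride;
      }
    }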
  /external/libaom/libaom/aom_dsp/arm/
sad_neon.c 19 unsigned char *ref_ptr, int ref_stride) {
30 ref_ptr += ref_stride;
37 ref_ptr += ref_stride;
50 unsigned char *ref_ptr, int ref_stride) {
60 ref_ptr += ref_stride;
67 ref_ptr += ref_stride;
78 unsigned char *ref_ptr, int ref_stride) {
89 ref_ptr += ref_stride;
97 ref_ptr += ref_stride;
132 const uint8_t *ref, int ref_stride) {
    [all...]
sad4d_neon.c 84 const uint8_t *const ref[4], int ref_stride,
117 ref0 += ref_stride;
118 ref1 += ref_stride;
119 ref2 += ref_stride;
120 ref3 += ref_stride;
130 const uint8_t *const ref[4], int ref_stride,
161 ref0 += ref_stride;
162 ref1 += ref_stride;
163 ref2 += ref_stride;
164 ref3 += ref_stride;
    [all...]
  /external/webp/src/enc/
picture_psnr_enc.c 26 const uint8_t* ref, int ref_stride,
38 const uint8_t* ref, int ref_stride,
49 const double value = (double)ref[y * ref_stride + x];
67 const uint8_t* ref, int ref_stride,
74 ref += ref_stride;
82 const uint8_t* ref, int ref_stride,
92 sum += VP8SSIMGetClipped(src, src_stride, ref, ref_stride, x, y, w, h);
97 sum += VP8SSIMGetClipped(src, src_stride, ref, ref_stride, x, y, w, h);
101 const int off2 = x - VP8_SSIM_KERNEL + (y - VP8_SSIM_KERNEL) * ref_stride;
102 sum += VP8SSIMGet(src + off1, src_stride, ref + off2, ref_stride);
    [all...]
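In picture_psnr_enc.c the strides serve plane-level quality metrics rather than motion search: the loops accumulate squared error (or SSIM terms) across a w x h plane. A hedged sketch of the SSE-to-PSNR step for 8-bit data (the helper name and the cap for identical planes are assumptions):

    #include <stdint.h>
    #include <math.h>

    /* Assumed sketch: PSNR of one plane, 255 peak for 8-bit samples. */
    static double plane_psnr(const uint8_t *src, int src_stride,
                             const uint8_t *ref, int ref_stride,
                             int w, int h) {
      double sse = 0.;
      for (int y = 0; y < h; ++y) {
        for (int x = 0; x < w; ++x) {
          const double d = (double)src[x] - (double)ref[x];
          sse += d * d;
        }
        src += src_stride;
        ref += ref_stride;
      }
      if (sse == 0.) return 99.;  /* assumed cap when planes are identical */
      return 10. * log10(255. * 255. * (double)w * (double)h / sse);
    }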
  /external/libaom/libaom/aom_dsp/x86/
variance_sse2.c 125 const uint8_t *ref, const int ref_stride,
133 const __m128i r = load4x2_sse2(ref, ref_stride);
137 ref += 2 * ref_stride;
142 const uint8_t *ref, const int ref_stride,
153 ref += ref_stride;
174 const uint8_t *ref, const int ref_stride,
183 ref += ref_stride;
188 const uint8_t *ref, const int ref_stride,
199 ref += ref_stride;
204 const uint8_t *ref, const int ref_stride,
    [all...]
jnt_variance_ssse3.c 50 int ref_stride,
76 ref += ref_stride - width;
83 __m128i p0_0 = xx_loadl_64(ref + 0 * ref_stride);
84 __m128i p0_1 = xx_loadl_64(ref + 1 * ref_stride);
92 ref += 2 * ref_stride;
99 const uint8_t *row0 = ref + 0 * ref_stride;
100 const uint8_t *row1 = ref + 1 * ref_stride;
101 const uint8_t *row2 = ref + 2 * ref_stride;
102 const uint8_t *row3 = ref + 3 * ref_stride;
114 ref += 4 * ref_stride;
    [all...]
highbd_variance_avx2.c 18 const uint16_t *ref, int ref_stride,
22 const uint16_t *ref, int ref_stride,
30 const __m128i v_p_b1 = _mm_loadu_si128((const __m128i *)(ref + ref_stride));
40 ref += ref_stride * 2;
57 const uint16_t *ref, int ref_stride,
70 ref += ref_stride;
85 const uint16_t *ref, int ref_stride, int w,
96 var_fn(src + src_stride * i + j, src_stride, ref + ref_stride * i + j,
97 ref_stride, &sse0, &sum0);
109 int ref_stride, uint32_t *sse) {
    [all...]
highbd_variance_sse2.c 28 const uint16_t *ref, int ref_stride,
32 const uint16_t *ref, int ref_stride,
36 const uint16_t *ref, int ref_stride,
40 const uint16_t *ref, int ref_stride, int w,
52 var_fn(src + src_stride * i + j, src_stride, ref + ref_stride * i + j,
53 ref_stride, &sse0, &sum0);
61 const uint16_t *ref, int ref_stride, int w,
72 var_fn(src + src_stride * i + j, src_stride, ref + ref_stride * i + j,
73 ref_stride, &sse0, &sum0);
83 const uint16_t *ref, int ref_stride, int w
    [all...]
sad_avx2.c 19 const uint8_t *ref_ptr, int ref_stride) { \
34 ref_ptr += ref_stride; \
48 const uint8_t *ref_ptr, int ref_stride) { \
54 int ref2_stride = ref_stride << 1; \
59 ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + ref_stride)); \
101 int ref_stride, const uint8_t *second_pred) { \
120 ref_ptr += ref_stride; \
136 int ref_stride, const uint8_t *second_pred) { \
142 int ref2_stride = ref_stride << 1; \
147 ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + ref_stride)); \
    [all...]
masked_sad_intrin_avx2.c 124 const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride,
131 sad = aom_masked_sad4xh_ssse3(src, src_stride, ref, ref_stride,
135 sad = aom_masked_sad8xh_ssse3(src, src_stride, ref, ref_stride,
139 sad = masked_sad16xh_avx2(src, src_stride, ref, ref_stride, second_pred,
143 sad = masked_sad32xh_avx2(src, src_stride, ref, ref_stride, second_pred,
151 ref_stride, msk, msk_stride, n);
155 ref_stride, msk, msk_stride, n);
159 ref_stride, msk, msk_stride, n);
163 ref_stride, msk, msk_stride, m, n);
172 const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
    [all...]
variance_avx2.c 102 const int ref_stride, __m256i *const sse, __m256i *const sum) {
105 const __m128i r0 = _mm_loadu_si128((__m128i const *)(ref + 0 * ref_stride));
106 const __m128i r1 = _mm_loadu_si128((__m128i const *)(ref + 1 * ref_stride));
122 const uint8_t *ref, const int ref_stride,
128 variance16_kernel_avx2(src, src_stride, ref, ref_stride, vsse, vsum);
130 ref += 2 * ref_stride;
135 const uint8_t *ref, const int ref_stride,
143 ref += ref_stride;
148 const uint8_t *ref, const int ref_stride,
157 ref += ref_stride;
    [all...]
  /external/libvpx/libvpx/vpx_dsp/ppc/
sad_vsx.c 116 const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
120 ref_stride); \
127 const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
131 ref_stride); \
138 const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
142 ref_stride); \
174 int ref_stride, uint32_t *sad_array) { \
187 PROCESS16_4D(y *ref_stride, ref_array[i], v_ah, v_al); \
198 int ref_stride, uint32_t *sad_array) { \
213 PROCESS16_4D(y *ref_stride, ref_array[i], v_ah1, v_al1);
    [all...]
  /external/libaom/libaom/aom_dsp/
sad_av1.c 44 const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
48 return masked_sad(src, src_stride, ref, ref_stride, second_pred, m, msk, \
51 return masked_sad(src, src_stride, second_pred, m, ref, ref_stride, msk, \
112 int ref_stride, const uint8_t *second_pred8, const uint8_t *msk, \
115 return highbd_masked_sad(src8, src_stride, ref8, ref_stride, \
119 ref_stride, msk, msk_stride, m, n); \
167 unsigned int aom_obmc_sad##m##x##n##_c(const uint8_t *ref, int ref_stride, \
170 return obmc_sad(ref, ref_stride, wsrc, mask, m, n); \
220 const uint8_t *ref, int ref_stride, const int32_t *wsrc, \
222 return highbd_obmc_sad(ref, ref_stride, wsrc, mask, m, n);
    [all...]
  /external/libvpx/libvpx/vp9/common/
vp9_reconintra.c 114 const MACROBLOCKD *xd, const uint8_t *ref8, int ref_stride, uint8_t *dst8,
158 for (i = 0; i < bs; ++i) left_col[i] = ref[i * ref_stride - 1];
162 left_col[i] = ref[i * ref_stride - 1];
164 left_col[i] = ref[(extend_bottom - 1) * ref_stride - 1];
168 for (i = 0; i < bs; ++i) left_col[i] = ref[i * ref_stride - 1];
178 const uint16_t *above_ref = ref - ref_stride;
206 const uint16_t *above_ref = ref - ref_stride;
264 int ref_stride, uint8_t *dst, int dst_stride,
305 for (i = 0; i < bs; ++i) left_col[i] = ref[i * ref_stride - 1];
309 left_col[i] = ref[i * ref_stride - 1]
    [all...]
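vp9_reconintra.c reads neighbors rather than whole blocks: ref[i * ref_stride - 1] is the pixel immediately left of row i, and ref - ref_stride points at the row above. A sketch of that neighbor gather (assumed helper, not the vp9 function itself):

    #include <stdint.h>

    /* Assumed sketch: collect the left column and above row of a bs x bs
     * block whose top-left pixel is ref[0], for intra prediction. */
    static void fill_neighbors(const uint8_t *ref, int ref_stride,
                               uint8_t *left_col, uint8_t *above_row, int bs) {
      const uint8_t *above_ref = ref - ref_stride;  /* row above the block */
      for (int i = 0; i < bs; ++i) {
        left_col[i] = ref[i * ref_stride - 1];  /* pixel left of row i */
        above_row[i] = above_ref[i];
      }
    }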
  /external/libaom/libaom/test/
masked_variance_test.cc 37 const uint8_t *ref, int ref_stride, const uint8_t *second_pred,
73 int ref_stride = (MAX_SB_SIZE + 16); local
93 ref_stride, second_pred_ptr, msk_ptr, msk_stride,
97 ref_ptr, ref_stride, second_pred_ptr, msk_ptr,
129 int ref_stride = (MAX_SB_SIZE + 16); local
146 ref_stride, second_pred_ptr, msk_ptr, msk_stride,
150 ref_ptr, ref_stride, second_pred_ptr, msk_ptr,
212 int ref_stride = (MAX_SB_SIZE + 8); local
227 ref_stride, second_pred8_ptr, msk_ptr, msk_stride,
231 ref8_ptr, ref_stride, second_pred8_ptr
271 int ref_stride = (MAX_SB_SIZE + 8); local
    [all...]
  /external/libvpx/libvpx/vpx_dsp/arm/
avg_neon.c 79 const int ref_stride, const int height) {
88 const uint8x16_t vec_row2 = vld1q_u8(ref + ref_stride);
89 const uint8x16_t vec_row3 = vld1q_u8(ref + ref_stride * 2);
90 const uint8x16_t vec_row4 = vld1q_u8(ref + ref_stride * 3);
91 const uint8x16_t vec_row5 = vld1q_u8(ref + ref_stride * 4);
92 const uint8x16_t vec_row6 = vld1q_u8(ref + ref_stride * 5);
93 const uint8x16_t vec_row7 = vld1q_u8(ref + ref_stride * 6);
94 const uint8x16_t vec_row8 = vld1q_u8(ref + ref_stride * 7);
120 ref += ref_stride * 8;
  /external/webrtc/webrtc/modules/video_processing/util/
denoiser_filter_sse2.cc 20 int ref_stride,
31 _mm_loadl_epi64((const __m128i*)(ref + i * ref_stride)), zero);
37 _mm_loadl_epi64((const __m128i*)(ref + (i + 1) * ref_stride)), zero);
61 int ref_stride,
76 ref + ref_stride * i + j, ref_stride, &sse0, &sum0);
128 int ref_stride,
131 VarianceSSE2(src, src_stride << 1, ref, ref_stride << 1, 16, 8, sse, &sum, 8);

