Home | Sort by relevance | Sort by last modified time
    Searched refs:rtcd (Results 1 - 25 of 36) sorted by null

1 2

  /external/libvpx/vp8/common/arm/
arm_systemdependent.c 25 VP8_COMMON_RTCD *rtcd = &ctx->rtcd; local
30 rtcd->flags = flags;
36 rtcd->subpix.sixtap16x16 = vp8_sixtap_predict16x16_armv6;
37 rtcd->subpix.sixtap8x8 = vp8_sixtap_predict8x8_armv6;
38 rtcd->subpix.sixtap8x4 = vp8_sixtap_predict8x4_armv6;
39 rtcd->subpix.sixtap4x4 = vp8_sixtap_predict_armv6;
40 rtcd->subpix.bilinear16x16 = vp8_bilinear_predict16x16_armv6;
41 rtcd->subpix.bilinear8x8 = vp8_bilinear_predict8x8_armv6;
42 rtcd->subpix.bilinear8x4 = vp8_bilinear_predict8x4_armv6
    [all...]
  /external/libvpx/vp8/common/generic/
systemdependent.c 26 VP8_COMMON_RTCD *rtcd = &ctx->rtcd; local
28 rtcd->idct.idct1 = vp8_short_idct4x4llm_1_c;
29 rtcd->idct.idct16 = vp8_short_idct4x4llm_c;
30 rtcd->idct.idct1_scalar_add = vp8_dc_only_idct_add_c;
31 rtcd->idct.iwalsh1 = vp8_short_inv_walsh4x4_1_c;
32 rtcd->idct.iwalsh16 = vp8_short_inv_walsh4x4_c;
34 rtcd->recon.copy16x16 = vp8_copy_mem16x16_c;
35 rtcd->recon.copy8x8 = vp8_copy_mem8x8_c;
36 rtcd->recon.copy8x4 = vp8_copy_mem8x4_c
    [all...]
  /external/libvpx/vp8/common/x86/
x86_systemdependent.c 25 VP8_COMMON_RTCD *rtcd = &ctx->rtcd; local
44 rtcd->idct.idct1 = vp8_short_idct4x4llm_1_mmx;
45 rtcd->idct.idct16 = vp8_short_idct4x4llm_mmx;
46 rtcd->idct.idct1_scalar_add = vp8_dc_only_idct_add_mmx;
47 rtcd->idct.iwalsh16 = vp8_short_inv_walsh4x4_mmx;
48 rtcd->idct.iwalsh1 = vp8_short_inv_walsh4x4_1_mmx;
52 rtcd->recon.recon = vp8_recon_b_mmx;
53 rtcd->recon.copy8x8 = vp8_copy_mem8x8_mmx;
54 rtcd->recon.copy8x4 = vp8_copy_mem8x4_mmx
    [all...]
  /external/libvpx/vp8/encoder/arm/
arm_csystemdependent.c 24 int flags = cpi->common.rtcd.flags;
32 cpi->rtcd.variance.sad16x16 = vp8_sad16x16_armv6;
33 /*cpi->rtcd.variance.sad16x8 = vp8_sad16x8_c;
34 cpi->rtcd.variance.sad8x16 = vp8_sad8x16_c;
35 cpi->rtcd.variance.sad8x8 = vp8_sad8x8_c;
36 cpi->rtcd.variance.sad4x4 = vp8_sad4x4_c;*/
38 /*cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;*/
39 cpi->rtcd.variance.var8x8 = vp8_variance8x8_armv6;
40 /*cpi->rtcd.variance.var8x16 = vp8_variance8x16_c;
41 cpi->rtcd.variance.var16x8 = vp8_variance16x8_c;*
    [all...]
  /external/libvpx/vp8/common/
invtrans.h 18 extern void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch);
19 extern void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
20 extern void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
21 extern void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
invtrans.c 28 void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch)
31 IDCT_INVOKE(rtcd, idct16)(b->dqcoeff, b->diff, pitch);
33 IDCT_INVOKE(rtcd, idct1)(b->dqcoeff, b->diff, pitch);
37 void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
42 IDCT_INVOKE(rtcd, iwalsh16)(x->block[24].dqcoeff, x->block[24].diff);
48 vp8_inverse_transform_b(rtcd, &x->block[i], 32);
52 void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
58 vp8_inverse_transform_b(rtcd, &x->block[i], 16);
64 void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
73 IDCT_INVOKE(rtcd, iwalsh16)(&x->block[24].dqcoeff[0], x->block[24].diff)
    [all...]
recon.c 109 void vp8_recon_mby_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
113 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
117 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
121 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
125 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
133 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
138 void vp8_recon_mb_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
143 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
145 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
147 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride)
    [all...]
recon.h 24 void sym(const struct vp8_recon_rtcd_vtable *rtcd, MACROBLOCKD *x)
116 void vp8_recon_intra_mbuv(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
postproc.h 122 vp8_postproc_rtcd_vtable_t *rtcd);
129 vp8_postproc_rtcd_vtable_t *rtcd);
postproc.c 309 vp8_postproc_rtcd_vtable_t *rtcd)
316 POSTPROC_INVOKE(rtcd, downacross)(source->y_buffer, post->y_buffer, source->y_stride, post->y_stride, source->y_height, source->y_width, ppl);
317 POSTPROC_INVOKE(rtcd, across)(post->y_buffer, post->y_stride, post->y_height, post->y_width, q2mbl(q));
318 POSTPROC_INVOKE(rtcd, down)(post->y_buffer, post->y_stride, post->y_height, post->y_width, q2mbl(q));
320 POSTPROC_INVOKE(rtcd, downacross)(source->u_buffer, post->u_buffer, source->uv_stride, post->uv_stride, source->uv_height, source->uv_width, ppl);
321 POSTPROC_INVOKE(rtcd, downacross)(source->v_buffer, post->v_buffer, source->uv_stride, post->uv_stride, source->uv_height, source->uv_width, ppl);
330 vp8_postproc_rtcd_vtable_t *rtcd)
337 POSTPROC_INVOKE(rtcd, downacross)(source->y_buffer, post->y_buffer, source->y_stride, post->y_stride, source->y_height, source->y_width, ppl);
338 POSTPROC_INVOKE(rtcd, downacross)(source->u_buffer, post->u_buffer, source->uv_stride, post->uv_stride, source->uv_height, source->uv_width, ppl);
339 POSTPROC_INVOKE(rtcd, downacross)(source->v_buffer, post->v_buffer, source->uv_stride, post->uv_stride, source->uv_heig (…)
    [all...]
reconinter.c 186 RECON_INVOKE(&x->rtcd->recon, copy8x8)(ptr, d->pre_stride, pred_ptr, pitch);
205 RECON_INVOKE(&x->rtcd->recon, copy8x4)(ptr, d->pre_stride, pred_ptr, pitch);
237 RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, upred_ptr, 8);
238 RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, vpred_ptr, 8);
282 RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, pred_ptr, 16);
345 RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, pred_ptr, 16);
362 RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, upred_ptr, 8);
363 RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, vpred_ptr, 8);
570 RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, dst_ptr, x->dst.y_stride); /*x->block[0].dst_stride);*/
587 RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, udst_ptr, x->dst.uv_stride)
    [all...]
  /external/libvpx/vp8/encoder/generic/
csystemdependent.c 28 cpi->rtcd.common = &cpi->common.rtcd;
29 cpi->rtcd.variance.sad16x16 = vp8_sad16x16_c;
30 cpi->rtcd.variance.sad16x8 = vp8_sad16x8_c;
31 cpi->rtcd.variance.sad8x16 = vp8_sad8x16_c;
32 cpi->rtcd.variance.sad8x8 = vp8_sad8x8_c;
33 cpi->rtcd.variance.sad4x4 = vp8_sad4x4_c;
35 cpi->rtcd.variance.sad16x16x3 = vp8_sad16x16x3_c;
36 cpi->rtcd.variance.sad16x8x3 = vp8_sad16x8x3_c;
37 cpi->rtcd.variance.sad8x16x3 = vp8_sad8x16x3_c
    [all...]
  /external/libvpx/vp8/encoder/x86/
x86_csystemdependent.c 199 cpi->rtcd.variance.sad16x16 = vp8_sad16x16_mmx;
200 cpi->rtcd.variance.sad16x8 = vp8_sad16x8_mmx;
201 cpi->rtcd.variance.sad8x16 = vp8_sad8x16_mmx;
202 cpi->rtcd.variance.sad8x8 = vp8_sad8x8_mmx;
203 cpi->rtcd.variance.sad4x4 = vp8_sad4x4_mmx;
205 cpi->rtcd.variance.var4x4 = vp8_variance4x4_mmx;
206 cpi->rtcd.variance.var8x8 = vp8_variance8x8_mmx;
207 cpi->rtcd.variance.var8x16 = vp8_variance8x16_mmx;
208 cpi->rtcd.variance.var16x8 = vp8_variance16x8_mmx;
209 cpi->rtcd.variance.var16x16 = vp8_variance16x16_mmx
    [all...]
  /external/libvpx/vp8/encoder/
encodeintra.c 33 void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x, BLOCK *be, BLOCKD *b, int best_mode)
37 ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
43 vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32);
45 RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
48 void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb)
60 vp8_encode_intra4x4block(rtcd, mb, be, b, b->bmi.mode);
66 void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
70 RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_mby)(&x->e_mbd);
72 ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, x->src.y_buffer, x->e_mbd.predictor, x->src.y_stride);
81 vp8_optimize_mby(x, rtcd);
    [all...]
encodemb.h 96 void vp8_encode_inter16x16(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
104 void vp8_encode_inter16x16uvrd(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
105 void vp8_optimize_mby(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
106 void vp8_optimize_mbuv(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
107 void vp8_encode_inter16x16y(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
encodemb.c 101 static void vp8_subtract_mb(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
103 ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, x->src.y_buffer, x->e_mbd.predictor, x->src.y_stride);
104 ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
261 const VP8_ENCODER_RTCD *rtcd)
505 static void optimize_mb(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
527 ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
533 ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
540 ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
545 void vp8_optimize_mby(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
574 ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
    [all...]
picklpf.c 25 extern int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd);
68 static int vp8_calc_partial_ssl_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, int Fraction, const vp8_variance_rtcd_vtable_t *rtcd)
77 (void)rtcd;
97 Total += VARIANCE_INVOKE(rtcd, mse16x16)(src + j, source->y_stride, dst + j, dest->y_stride, &sse);
195 best_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));
212 filt_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));
249 filt_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));
312 if (cm->rtcd.flags & HAS_NEON)
354 best_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
360 if (cm->rtcd.flags & HAS_NEON
    [all...]
ssim.c 315 const vp8_variance_rtcd_vtable_t *rtcd)
318 rtcd->ssimpf(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
322 const vp8_variance_rtcd_vtable_t *rtcd)
325 rtcd->ssimpf_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
332 const vp8_variance_rtcd_vtable_t *rtcd)
339 rtcd->ssimpf(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
360 const vp8_variance_rtcd_vtable_t *rtcd
372 ssim_total += ssim_8x8(img1, stride_img1, img2, stride_img2, rtcd);
385 const vp8_variance_rtcd_vtable_t *rtcd
393 source->y_height, rtcd);
    [all...]
onyx_if.c 43 #define RTCD(x) &cpi->common.rtcd.x
46 #define RTCD(x) NULL
72 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd);
89 const vp8_variance_rtcd_vtable_t *rtcd
    [all...]
temporal_filter.c 66 RECON_INVOKE(&x->rtcd->recon, copy16x16)(yptr, stride, &pred[0], 16);
86 RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, stride, &pred[256], 8);
87 RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, stride, &pred[320], 8);
376 TEMPORAL_INVOKE(&cpi->rtcd.temporal, apply)
386 TEMPORAL_INVOKE(&cpi->rtcd.temporal, apply)
396 TEMPORAL_INVOKE(&cpi->rtcd.temporal, apply)
pickinter.c 36 extern int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd);
152 static int get_prediction_error(BLOCK *be, BLOCKD *b, const vp8_variance_rtcd_vtable_t *rtcd)
159 return VARIANCE_INVOKE(rtcd, get4x4sse_cs)(sptr, be->src_stride, dptr, 16, 0x7fffffff);
164 const VP8_ENCODER_RTCD *rtcd,
196 distortion = get_prediction_error(be, b, &rtcd->variance);
209 vp8_encode_intra4x4block(rtcd, x, be, b, b->bmi.mode);
215 int vp8_pick_intra4x4mby_modes(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb, int *Rate, int *best_dist)
234 pick_intra4x4block(rtcd, mb, mb->block + i, xd->block + i,
637 vp8_pick_intra4x4mby_modes(IF_RTCD(&cpi->rtcd), x, &rate, &distortion2);
639 distortion2 = VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16prederror)(x->src.y_buffer, x->src.y_stride, x->e_mbd.predictor, 16, 0x7fffffff)
    [all...]
  /external/libvpx/vp8/decoder/generic/
dsystemdependent.c 23 pbi->mb.rtcd = &pbi->common.rtcd;
  /external/libvpx/vp8/common/arm/neon/
recon_neon.c 18 void vp8_recon_mb_neon(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
  /external/libvpx/vp8/decoder/arm/
arm_dsystemdependent.c 23 int flags = pbi->common.rtcd.flags;
  /external/libvpx/vp8/decoder/
onyxd_if.c 286 if (cm->rtcd.flags & HAS_NEON)
299 if (cm->rtcd.flags & HAS_NEON)
332 if (cm->rtcd.flags & HAS_NEON)
352 if (cm->rtcd.flags & HAS_NEON)
369 if (cm->rtcd.flags & HAS_NEON)
450 if (cm->rtcd.flags & HAS_NEON)

Completed in 156 milliseconds

1 2