    Searched refs:rtcd (Results 1 - 25 of 36)

  /external/libvpx/vp8/common/generic/
systemdependent.c 31 VP8_COMMON_RTCD *rtcd = &ctx->rtcd; local
33 rtcd->idct.idct1 = vp8_short_idct4x4llm_1_c;
34 rtcd->idct.idct16 = vp8_short_idct4x4llm_c;
35 rtcd->idct.idct1_scalar_add = vp8_dc_only_idct_add_c;
36 rtcd->idct.iwalsh1 = vp8_short_inv_walsh4x4_1_c;
37 rtcd->idct.iwalsh16 = vp8_short_inv_walsh4x4_c;
39 rtcd->recon.copy16x16 = vp8_copy_mem16x16_c;
40 rtcd->recon.copy8x8 = vp8_copy_mem8x8_c;
41 rtcd->recon.copy8x4 = vp8_copy_mem8x4_c
    [all...]
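
The systemdependent.c hits above (and the x86/ARM variants below) populate a per-context table of function pointers, the RTCD ("run-time CPU detection") vtable, with either generic C routines or platform-optimized ones. The following is a minimal, self-contained C sketch of that pattern; the type and function names are hypothetical stand-ins, not the real libvpx declarations.

#include <stdio.h>

typedef void (*idct_fn)(const short *input, short *output, int pitch);

/* Generic C fallback, analogous to vp8_short_idct4x4llm_c in the hits. */
static void idct4x4_c(const short *input, short *output, int pitch)
{
    (void)input; (void)pitch;
    output[0] = 0; /* a real implementation would compute the inverse transform */
}

/* Stand-in for an MMX/NEON-optimized variant. */
static void idct4x4_simd(const short *input, short *output, int pitch)
{
    idct4x4_c(input, output, pitch); /* placeholder body */
}

typedef struct {
    struct { idct_fn idct16; } idct;
} common_rtcd_sketch; /* analogous to VP8_COMMON_RTCD */

/* Each systemdependent.c fills the table for its target. */
static void rtcd_init_generic(common_rtcd_sketch *rtcd) { rtcd->idct.idct16 = idct4x4_c; }
static void rtcd_init_simd(common_rtcd_sketch *rtcd)    { rtcd->idct.idct16 = idct4x4_simd; }

int main(void)
{
    common_rtcd_sketch rtcd;
    short in[16] = {0}, out[16] = {0};

    rtcd_init_generic(&rtcd);      /* a platform init would call rtcd_init_simd() instead */
    rtcd.idct.idct16(in, out, 8);  /* callers always dispatch through the table */
    (void)rtcd_init_simd;          /* keep the sketch warning-free */
    printf("dispatched through the RTCD table\n");
    return 0;
}
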
  /external/libvpx/vp8/common/x86/
x86_systemdependent.c 25 VP8_COMMON_RTCD *rtcd = &ctx->rtcd; local
44 rtcd->idct.idct1 = vp8_short_idct4x4llm_1_mmx;
45 rtcd->idct.idct16 = vp8_short_idct4x4llm_mmx;
46 rtcd->idct.idct1_scalar_add = vp8_dc_only_idct_add_mmx;
47 rtcd->idct.iwalsh16 = vp8_short_inv_walsh4x4_mmx;
48 rtcd->idct.iwalsh1 = vp8_short_inv_walsh4x4_1_mmx;
52 rtcd->recon.recon = vp8_recon_b_mmx;
53 rtcd->recon.copy8x8 = vp8_copy_mem8x8_mmx;
54 rtcd->recon.copy8x4 = vp8_copy_mem8x4_mmx
    [all...]
  /external/libvpx/vp8/common/arm/
systemdependent.c 32 VP8_COMMON_RTCD *rtcd = &ctx->rtcd; local
35 rtcd->subpix.sixtap16x16 = vp8_sixtap_predict16x16_neon;
36 rtcd->subpix.sixtap8x8 = vp8_sixtap_predict8x8_neon;
37 rtcd->subpix.sixtap8x4 = vp8_sixtap_predict8x4_neon;
38 rtcd->subpix.sixtap4x4 = vp8_sixtap_predict_neon;
39 rtcd->subpix.bilinear16x16 = vp8_bilinear_predict16x16_neon;
40 rtcd->subpix.bilinear8x8 = vp8_bilinear_predict8x8_neon;
41 rtcd->subpix.bilinear8x4 = vp8_bilinear_predict8x4_neon;
42 rtcd->subpix.bilinear4x4 = vp8_bilinear_predict4x4_neon
    [all...]
recon_arm.c 30 void vp8_recon16x16mby(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
33 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
37 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
41 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
45 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
49 void vp8_recon16x16mb(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
83 void vp8_recon16x16mb(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
87 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
89 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
91 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride)
    [all...]
reconintra4x4_arm.c 317 void vp8_recon_intra4x4mb(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
328 RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
335 void vp8_recon_intra4x4mb(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
344 RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
348 RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
352 RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
356 RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
360 RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
364 RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
368 RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride)
    [all...]
  /external/libvpx/vp8/encoder/arm/
csystemdependent.c 23 cpi->rtcd.common = &cpi->common.rtcd;
26 cpi->rtcd.variance.sad16x16 = vp8_sad16x16_neon;
27 cpi->rtcd.variance.sad16x8 = vp8_sad16x8_neon;
28 cpi->rtcd.variance.sad8x16 = vp8_sad8x16_neon;
29 cpi->rtcd.variance.sad8x8 = vp8_sad8x8_neon;
30 cpi->rtcd.variance.sad4x4 = vp8_sad4x4_neon;
32 cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;
33 cpi->rtcd.variance.var8x8 = vp8_variance8x8_neon;
34 cpi->rtcd.variance.var8x16 = vp8_variance8x16_neon
    [all...]
  /external/libvpx/vp8/encoder/generic/
csystemdependent.c 29 cpi->rtcd.common = &cpi->common.rtcd;
30 cpi->rtcd.variance.sad16x16 = vp8_sad16x16_c;
31 cpi->rtcd.variance.sad16x8 = vp8_sad16x8_c;
32 cpi->rtcd.variance.sad8x16 = vp8_sad8x16_c;
33 cpi->rtcd.variance.sad8x8 = vp8_sad8x8_c;
34 cpi->rtcd.variance.sad4x4 = vp8_sad4x4_c;
36 cpi->rtcd.variance.sad16x16x3 = vp8_sad16x16x3_c;
37 cpi->rtcd.variance.sad16x8x3 = vp8_sad16x8x3_c;
38 cpi->rtcd.variance.sad8x16x3 = vp8_sad8x16x3_c
    [all...]
  /external/libvpx/vp8/common/
invtrans.h 18 extern void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch);
19 extern void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
20 extern void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
21 extern void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
invtrans.c 28 void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch)
31 IDCT_INVOKE(rtcd, idct16)(b->dqcoeff, b->diff, pitch);
33 IDCT_INVOKE(rtcd, idct1)(b->dqcoeff, b->diff, pitch);
37 void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
42 IDCT_INVOKE(rtcd, iwalsh16)(x->block[24].dqcoeff, x->block[24].diff);
48 vp8_inverse_transform_b(rtcd, &x->block[i], 32);
52 void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
58 vp8_inverse_transform_b(rtcd, &x->block[i], 16);
64 void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
73 IDCT_INVOKE(rtcd, iwalsh16)(&x->block[24].dqcoeff[0], x->block[24].diff)
    [all...]
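
The invtrans.c hits show vp8_inverse_transform_b choosing between the full 4x4 inverse DCT (idct16) and a DC-only shortcut (idct1) through the IDCT vtable. The sketch below assumes, as in upstream libvpx of this vintage, that the choice is driven by the block's end-of-block count; the struct layouts are simplified stand-ins and IDCT_INVOKE is shown already expanded to a vtable lookup.

typedef struct {
    int eob;        /* end-of-block: number of nonzero dequantized coefficients */
    short *dqcoeff; /* dequantized coefficient block */
    short *diff;    /* residual output of the inverse transform */
} blockd_sketch;

typedef struct {
    void (*idct16)(short *input, short *output, int pitch); /* full 4x4 inverse DCT */
    void (*idct1)(short *input, short *output, int pitch);  /* DC-only fast path */
} idct_vtable_sketch;

/* With runtime CPU detection enabled, IDCT_INVOKE(rtcd, fn) reduces to rtcd->fn. */
static void inverse_transform_b_sketch(const idct_vtable_sketch *rtcd,
                                       blockd_sketch *b, int pitch)
{
    if (b->eob > 1)
        rtcd->idct16(b->dqcoeff, b->diff, pitch);  /* several nonzero coefficients */
    else
        rtcd->idct1(b->dqcoeff, b->diff, pitch);   /* DC-only block */
}
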
recon.h 78 void vp8_recon16x16mby(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
79 void vp8_recon16x16mb(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
80 void vp8_recon_intra4x4mb(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
81 void vp8_recon_intra_mbuv(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
recon.c 109 void vp8_recon16x16mby(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
117 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
121 void vp8_recon16x16mb(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
129 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
136 RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
postproc.h 90 vp8_postproc_rtcd_vtable_t *rtcd);
97 vp8_postproc_rtcd_vtable_t *rtcd);
postproc.c 262 vp8_postproc_rtcd_vtable_t *rtcd)
269 POSTPROC_INVOKE(rtcd, downacross)(source->y_buffer, post->y_buffer, source->y_stride, post->y_stride, source->y_height, source->y_width, ppl);
270 POSTPROC_INVOKE(rtcd, across)(post->y_buffer, post->y_stride, post->y_height, post->y_width, vp8_q2mbl(q));
271 POSTPROC_INVOKE(rtcd, down)(post->y_buffer, post->y_stride, post->y_height, post->y_width, vp8_q2mbl(q));
273 POSTPROC_INVOKE(rtcd, downacross)(source->u_buffer, post->u_buffer, source->uv_stride, post->uv_stride, source->uv_height, source->uv_width, ppl);
274 POSTPROC_INVOKE(rtcd, downacross)(source->v_buffer, post->v_buffer, source->uv_stride, post->uv_stride, source->uv_height, source->uv_width, ppl);
283 vp8_postproc_rtcd_vtable_t *rtcd)
290 POSTPROC_INVOKE(rtcd, downacross)(source->y_buffer, post->y_buffer, source->y_stride, post->y_stride, source->y_height, source->y_width, ppl);
291 POSTPROC_INVOKE(rtcd, downacross)(source->u_buffer, post->u_buffer, source->uv_stride, post->uv_stride, source->uv_height, source->uv_width, ppl);
292 POSTPROC_INVOKE(rtcd, downacross)(source->v_buffer, post->v_buffer, source->uv_stride, post->uv_stride, source->uv_heig (…)
    [all...]
reconinter.c 185 RECON_INVOKE(&x->rtcd->recon, copy8x8)(ptr, d->pre_stride, pred_ptr, pitch);
204 RECON_INVOKE(&x->rtcd->recon, copy8x4)(ptr, d->pre_stride, pred_ptr, pitch);
236 RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, upred_ptr, 8);
237 RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, vpred_ptr, 8);
281 RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, pred_ptr, 16);
344 RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, pred_ptr, 16);
361 RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, upred_ptr, 8);
362 RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, vpred_ptr, 8);
568 RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, dst_ptr, x->dst.y_stride); //x->block[0].dst_stride);
585 RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, udst_ptr, x->dst.uv_stride)
    [all...]
loopfilter.c 201 cm->lf_mbv = LF_INVOKE(&cm->rtcd.loopfilter, normal_mb_v);
202 cm->lf_bv = LF_INVOKE(&cm->rtcd.loopfilter, normal_b_v);
203 cm->lf_mbh = LF_INVOKE(&cm->rtcd.loopfilter, normal_mb_h);
204 cm->lf_bh = LF_INVOKE(&cm->rtcd.loopfilter, normal_b_h);
208 cm->lf_mbv = LF_INVOKE(&cm->rtcd.loopfilter, simple_mb_v);
209 cm->lf_bv = LF_INVOKE(&cm->rtcd.loopfilter, simple_b_v);
210 cm->lf_mbh = LF_INVOKE(&cm->rtcd.loopfilter, simple_mb_h);
211 cm->lf_bh = LF_INVOKE(&cm->rtcd.loopfilter, simple_b_h);
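
The loopfilter.c hits resolve the filter entry points through LF_INVOKE, one of the family of *_INVOKE macros (RECON_INVOKE, IDCT_INVOKE, VARIANCE_INVOKE, ...) seen throughout these results. In this version of libvpx those macros typically expand to a vtable member lookup when runtime CPU detection is compiled in, and to a direct call to the default implementation otherwise; the sketch below reproduces that shape with invented names, so the exact expansion and prefixes are assumptions rather than verbatim header contents.

#include <stdio.h>

static void normal_mb_v_default(void) { puts("direct call to the default filter"); }

typedef struct {
    void (*normal_mb_v)(void); /* one of the loop-filter slots in the vtable */
} loopfilter_vtable_sketch;

#define SKETCH_RUNTIME_CPU_DETECT 1

#if SKETCH_RUNTIME_CPU_DETECT
/* Indirect: read whatever pointer the systemdependent init installed. */
#define LF_INVOKE_SKETCH(ctx, fn) ((ctx)->fn)
#else
/* Direct: bind at compile time to the default implementation. */
#define LF_INVOKE_SKETCH(ctx, fn) fn##_default
#endif

int main(void)
{
    loopfilter_vtable_sketch lf = { normal_mb_v_default };
    /* mirrors: cm->lf_mbv = LF_INVOKE(&cm->rtcd.loopfilter, normal_mb_v); */
    void (*lf_mbv)(void) = LF_INVOKE_SKETCH(&lf, normal_mb_v);
    lf_mbv();
    return 0;
}
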
  /external/libvpx/vp8/encoder/x86/
x86_csystemdependent.c 193 cpi->rtcd.variance.sad16x16 = vp8_sad16x16_mmx;
194 cpi->rtcd.variance.sad16x8 = vp8_sad16x8_mmx;
195 cpi->rtcd.variance.sad8x16 = vp8_sad8x16_mmx;
196 cpi->rtcd.variance.sad8x8 = vp8_sad8x8_mmx;
197 cpi->rtcd.variance.sad4x4 = vp8_sad4x4_mmx;
199 cpi->rtcd.variance.var4x4 = vp8_variance4x4_mmx;
200 cpi->rtcd.variance.var8x8 = vp8_variance8x8_mmx;
201 cpi->rtcd.variance.var8x16 = vp8_variance8x16_mmx;
202 cpi->rtcd.variance.var16x8 = vp8_variance16x8_mmx;
203 cpi->rtcd.variance.var16x16 = vp8_variance16x16_mmx
    [all...]
  /external/libvpx/vp8/encoder/
encodeintra.c 46 void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x, BLOCK *be, BLOCKD *b, int best_mode)
50 ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
58 vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32);
60 RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
63 void vp8_encode_intra4x4block_rd(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x, BLOCK *be, BLOCKD *b, int best_mode)
67 ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
75 IDCT_INVOKE(&rtcd->common->idct, idct16)(b->dqcoeff, b->diff, 32);
77 RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
80 void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb)
92 vp8_encode_intra4x4block(rtcd, mb, be, b, b->bmi.mode)
    [all...]
encodemb.h 96 void vp8_encode_inter16x16(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
106 void vp8_encode_inter16x16uv(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
107 void vp8_encode_inter16x16uvrd(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
108 void vp8_optimize_mby(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
109 void vp8_optimize_mbuv(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
110 void vp8_encode_inter16x16y(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
encodemb.c 101 static void vp8_subtract_mb(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
103 ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, x->src.y_buffer, x->e_mbd.predictor, x->src.y_stride);
104 ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
247 const VP8_ENCODER_RTCD *rtcd)
488 void vp8_optimize_mb(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
510 ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
516 ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
522 ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
531 vp8_optimize_b(x, 24, 1, t.a, t.l, rtcd);
566 void vp8_optimize_mby(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
    [all...]
picklpf.c 22 extern int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd);
65 static int vp8_calc_partial_ssl_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, int Fraction, const vp8_variance_rtcd_vtable_t *rtcd)
74 (void)rtcd;
94 Total += VARIANCE_INVOKE(rtcd, mse16x16)(src + j, source->y_stride, dst + j, dest->y_stride, &sse);
192 best_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));
209 filt_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));
246 filt_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));
341 best_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
371 filt_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
400 filt_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance))
    [all...]
rdopt.c 541 int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd)
565 VARIANCE_INVOKE(rtcd, subpixvar8x8)(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr, uv_stride, &sse2);
566 VARIANCE_INVOKE(rtcd, subpixvar8x8)(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr, uv_stride, &sse1);
571 VARIANCE_INVOKE(rtcd, subpixvar8x8)(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr, uv_stride, &sse2);
572 VARIANCE_INVOKE(rtcd, subpixvar8x8)(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr, uv_stride, &sse1);
684 vp8_encode_intra4x4block_rd(IF_RTCD(&cpi->rtcd), x, be, b, mode);
691 distortion = ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), berr)(be->coeff, b->dqcoeff) >> 2;
708 vp8_encode_intra4x4block_rd(IF_RTCD(&cpi->rtcd), x, be, b, b->bmi.mode);
779 vp8_encode_intra16x16mbyrd(IF_RTCD(&cpi->rtcd), x);
785 VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16var)(x->src.y_buffer, x->src.y_stride, x->e_mbd.dst.y_buffer, x->e_mbd.dst.y_str (…)
    [all...]
pickinter.c 36 extern int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd);
153 static int get_prediction_error(BLOCK *be, BLOCKD *b, const vp8_variance_rtcd_vtable_t *rtcd)
160 return VARIANCE_INVOKE(rtcd, get4x4sse_cs)(sptr, be->src_stride, dptr, 16, 0x7fffffff);
165 const VP8_ENCODER_RTCD *rtcd,
201 distortion = get_prediction_error(be, b, &rtcd->variance);
214 vp8_encode_intra4x4block(rtcd, x, be, b, b->bmi.mode);
219 int vp8_pick_intra4x4mby_modes(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb, int *Rate, int *best_dist)
247 error += pick_intra4x4block(rtcd,
623 vp8_pick_intra4x4mby_modes(IF_RTCD(&cpi->rtcd), x, &rate, &distortion2);
625 distortion2 = VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16prederror)(x->src.y_buffer, x->src.y_stride, x->e_mbd.predictor, 16, 0x7fffffff)
    [all...]
  /external/libvpx/vp8/decoder/arm/
dsystemdependent.c 23 pbi->mb.rtcd = &pbi->common.rtcd;
  /external/libvpx/vp8/decoder/
dboolhuff.h 40 struct vp8_dboolhuff_rtcd_vtable *rtcd; member in struct:__anon5271
88 // functions right now. Disable RTCD to avoid using
100 /* wrapper functions to hide RTCD. static means inline means hopefully no
104 struct vp8_dboolhuff_rtcd_vtable *rtcd,
107 br->rtcd = rtcd;
109 return DBOOLHUFF_INVOKE(rtcd, start)(br, source, source_sz);
112 DBOOLHUFF_INVOKE(br->rtcd, fill)(br);
144 *return DBOOLHUFF_INVOKE(br->rtcd, debool)(br, probability);
196 *return DBOOLHUFF_INVOKE(br->rtcd, devalue)(br, bits)
    [all...]
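
The dboolhuff.h hits illustrate the decoder-side convention of hiding the RTCD dispatch behind small static wrapper functions: the bool decoder stores the vtable pointer once at start-up and every later wrapper calls through it. A hedged sketch of that idea, with illustrative struct layouts and names rather than the real declarations:

#include <stddef.h>

typedef struct bool_decoder_sketch bool_decoder_sketch;

typedef struct {
    int (*start)(bool_decoder_sketch *br, const unsigned char *src, size_t sz);
    void (*fill)(bool_decoder_sketch *br);
} dboolhuff_vtable_sketch;

struct bool_decoder_sketch {
    const unsigned char *buf;
    size_t buf_sz;
    const dboolhuff_vtable_sketch *rtcd; /* stored once, as in the struct-member hit */
};

/* Wrapper analogous to the one around DBOOLHUFF_INVOKE(rtcd, start). */
static int dboolhuff_start_sketch(bool_decoder_sketch *br,
                                  const dboolhuff_vtable_sketch *rtcd,
                                  const unsigned char *src, size_t sz)
{
    br->rtcd = rtcd;                 /* remember the table for later calls */
    return rtcd->start(br, src, sz); /* then dispatch through it */
}

/* Later wrappers reuse the stored pointer, keeping the vtable out of callers' sight. */
static void dboolhuff_fill_sketch(bool_decoder_sketch *br)
{
    br->rtcd->fill(br);
}
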
  /external/libvpx/vp8/decoder/generic/
dsystemdependent.c 22 pbi->mb.rtcd = &pbi->common.rtcd;
31 #if 0 //For use with RTCD, when implemented
