    Searched refs:rtcd (Results 1 - 25 of 37)


  /external/libvpx/vp8/common/x86/
x86_systemdependent.c 25 VP8_COMMON_RTCD *rtcd = &ctx->rtcd; local
44 rtcd->idct.idct1 = vp8_short_idct4x4llm_1_mmx;
45 rtcd->idct.idct16 = vp8_short_idct4x4llm_mmx;
46 rtcd->idct.idct1_scalar_add = vp8_dc_only_idct_add_mmx;
47 rtcd->idct.iwalsh16 = vp8_short_inv_walsh4x4_mmx;
48 rtcd->idct.iwalsh1 = vp8_short_inv_walsh4x4_1_mmx;
52 rtcd->recon.recon = vp8_recon_b_mmx;
53 rtcd->recon.copy8x8 = vp8_copy_mem8x8_mmx;
54 rtcd->recon.copy8x4 = vp8_copy_mem8x4_mmx
    [all...]
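
The excerpt above shows only the MMX assignments; a minimal sketch of the surrounding dispatch pattern follows. The function name, feature query, and flag (vp8_arch_x86_common_init, x86_simd_caps, HAS_MMX) are assumptions not visible in this excerpt.

    void vp8_arch_x86_common_init(VP8_COMMON *ctx)
    {
        VP8_COMMON_RTCD *rtcd = &ctx->rtcd;   /* per-context function table */
        int flags = x86_simd_caps();          /* assumed CPU-feature query */

        /* The generic C entries are assumed to be installed first (see
         * vp8/common/generic/systemdependent.c below); the MMX variants
         * simply overwrite them when the CPU reports MMX support. */
        if (flags & HAS_MMX)
        {
            rtcd->idct.idct1            = vp8_short_idct4x4llm_1_mmx;
            rtcd->idct.idct16           = vp8_short_idct4x4llm_mmx;
            rtcd->idct.idct1_scalar_add = vp8_dc_only_idct_add_mmx;
            rtcd->recon.copy8x8         = vp8_copy_mem8x8_mmx;
        }
    }
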
  /external/libvpx/vp8/encoder/generic/
csystemdependent.c 30 cpi->rtcd.common = &cpi->common.rtcd;
31 cpi->rtcd.variance.sad16x16 = vp8_sad16x16_c;
32 cpi->rtcd.variance.sad16x8 = vp8_sad16x8_c;
33 cpi->rtcd.variance.sad8x16 = vp8_sad8x16_c;
34 cpi->rtcd.variance.sad8x8 = vp8_sad8x8_c;
35 cpi->rtcd.variance.sad4x4 = vp8_sad4x4_c;
37 cpi->rtcd.variance.sad16x16x3 = vp8_sad16x16x3_c;
38 cpi->rtcd.variance.sad16x8x3 = vp8_sad16x8x3_c;
39 cpi->rtcd.variance.sad8x16x3 = vp8_sad8x16x3_c
    [all...]
  /external/libvpx/vp8/common/arm/
arm_systemdependent.c 33 VP8_COMMON_RTCD *rtcd = &ctx->rtcd; local
38 rtcd->flags = flags;
44 rtcd->subpix.sixtap16x16 = vp8_sixtap_predict16x16_armv6;
45 rtcd->subpix.sixtap8x8 = vp8_sixtap_predict8x8_armv6;
46 rtcd->subpix.sixtap8x4 = vp8_sixtap_predict8x4_armv6;
47 rtcd->subpix.sixtap4x4 = vp8_sixtap_predict_armv6;
48 rtcd->subpix.bilinear16x16 = vp8_bilinear_predict16x16_armv6;
49 rtcd->subpix.bilinear8x8 = vp8_bilinear_predict8x8_armv6;
50 rtcd->subpix.bilinear8x4 = vp8_bilinear_predict8x4_armv6
    [all...]
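
The ARM path additionally stores the detected flags on the table itself (rtcd->flags = flags above) so later code can test them (see the HAS_NEON checks in picklpf.c and onyxd_if.c below). A sketch under the assumption that the ARMv6 media-extension bit is named HAS_MEDIA and that the feature query is arm_cpu_caps(); only HAS_NEON appears in these results.

    void vp8_arch_arm_common_init(VP8_COMMON *ctx)   /* assumed name */
    {
        VP8_COMMON_RTCD *rtcd = &ctx->rtcd;
        int flags = arm_cpu_caps();                  /* assumed feature query */

        rtcd->flags = flags;   /* consumers later test e.g. HAS_NEON */

        if (flags & HAS_MEDIA)                       /* assumed ARMv6 bit */
        {
            rtcd->subpix.sixtap16x16   = vp8_sixtap_predict16x16_armv6;
            rtcd->subpix.bilinear16x16 = vp8_bilinear_predict16x16_armv6;
        }
    }
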
  /external/libvpx/vp8/common/generic/
systemdependent.c 32 VP8_COMMON_RTCD *rtcd = &ctx->rtcd; local
34 rtcd->idct.idct1 = vp8_short_idct4x4llm_1_c;
35 rtcd->idct.idct16 = vp8_short_idct4x4llm_c;
36 rtcd->idct.idct1_scalar_add = vp8_dc_only_idct_add_c;
37 rtcd->idct.iwalsh1 = vp8_short_inv_walsh4x4_1_c;
38 rtcd->idct.iwalsh16 = vp8_short_inv_walsh4x4_c;
40 rtcd->recon.copy16x16 = vp8_copy_mem16x16_c;
41 rtcd->recon.copy8x8 = vp8_copy_mem8x8_c;
42 rtcd->recon.copy8x4 = vp8_copy_mem8x4_c
    [all...]
  /external/libvpx/vp8/common/
invtrans.h 18 extern void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch);
19 extern void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
20 extern void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
21 extern void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
invtrans.c 28 void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch)
31 IDCT_INVOKE(rtcd, idct16)(b->dqcoeff, b->diff, pitch);
33 IDCT_INVOKE(rtcd, idct1)(b->dqcoeff, b->diff, pitch);
37 void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
42 IDCT_INVOKE(rtcd, iwalsh16)(x->block[24].dqcoeff, x->block[24].diff);
48 vp8_inverse_transform_b(rtcd, &x->block[i], 32);
52 void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
58 vp8_inverse_transform_b(rtcd, &x->block[i], 16);
64 void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
73 IDCT_INVOKE(rtcd, iwalsh16)(&x->block[24].dqcoeff[0], x->block[24].diff)
    [all...]
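
IDCT_INVOKE (like the other *_INVOKE macros in these results) is what makes the vtable optional. A sketch of how it is assumed to resolve, modelled on the two-way #if pattern visible in encodeframe.c and onyx_if.c further down; the exact token pasted in the static branch is an assumption.

    #if CONFIG_RUNTIME_CPU_DETECT
    /* runtime dispatch: index the per-context table */
    #define IDCT_INVOKE(ctx, fn) (ctx)->fn
    #else
    /* static dispatch: collapse to a direct call; the rtcd argument is unused */
    #define IDCT_INVOKE(ctx, fn) vp8_idct_##fn
    #endif

    /* usage, as in vp8_inverse_transform_b() above:
     *   IDCT_INVOKE(rtcd, idct16)(b->dqcoeff, b->diff, pitch);
     */
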
recon.c 109 void vp8_recon_mby_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
113 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
117 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
121 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
125 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
133 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
138 void vp8_recon_mb_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
143 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
145 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
147 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride)
    [all...]
recon.h 24 void sym(const struct vp8_recon_rtcd_vtable *rtcd, MACROBLOCKD *x)
97 void vp8_recon_intra4x4mb(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
98 void vp8_recon_intra_mbuv(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
reconintra4x4.c 316 void vp8_recon_intra4x4mb(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
327 RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
331 RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
335 RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
339 RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
343 RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
347 RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
351 RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
355 RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
359 RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride)
    [all...]
postproc.h 122 vp8_postproc_rtcd_vtable_t *rtcd);
129 vp8_postproc_rtcd_vtable_t *rtcd);
postproc.c 308 vp8_postproc_rtcd_vtable_t *rtcd)
315 POSTPROC_INVOKE(rtcd, downacross)(source->y_buffer, post->y_buffer, source->y_stride, post->y_stride, source->y_height, source->y_width, ppl);
316 POSTPROC_INVOKE(rtcd, across)(post->y_buffer, post->y_stride, post->y_height, post->y_width, vp8_q2mbl(q));
317 POSTPROC_INVOKE(rtcd, down)(post->y_buffer, post->y_stride, post->y_height, post->y_width, vp8_q2mbl(q));
319 POSTPROC_INVOKE(rtcd, downacross)(source->u_buffer, post->u_buffer, source->uv_stride, post->uv_stride, source->uv_height, source->uv_width, ppl);
320 POSTPROC_INVOKE(rtcd, downacross)(source->v_buffer, post->v_buffer, source->uv_stride, post->uv_stride, source->uv_height, source->uv_width, ppl);
329 vp8_postproc_rtcd_vtable_t *rtcd)
336 POSTPROC_INVOKE(rtcd, downacross)(source->y_buffer, post->y_buffer, source->y_stride, post->y_stride, source->y_height, source->y_width, ppl);
337 POSTPROC_INVOKE(rtcd, downacross)(source->u_buffer, post->u_buffer, source->uv_stride, post->uv_stride, source->uv_height, source->uv_width, ppl);
338 POSTPROC_INVOKE(rtcd, downacross)(source->v_buffer, post->v_buffer, source->uv_stride, post->uv_stride, source->uv_heig (…)
    [all...]
reconinter.c 186 RECON_INVOKE(&x->rtcd->recon, copy8x8)(ptr, d->pre_stride, pred_ptr, pitch);
205 RECON_INVOKE(&x->rtcd->recon, copy8x4)(ptr, d->pre_stride, pred_ptr, pitch);
237 RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, upred_ptr, 8);
238 RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, vpred_ptr, 8);
282 RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, pred_ptr, 16);
345 RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, pred_ptr, 16);
362 RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, upred_ptr, 8);
363 RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, vpred_ptr, 8);
570 RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, dst_ptr, x->dst.y_stride); /*x->block[0].dst_stride);*/
587 RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, udst_ptr, x->dst.uv_stride)
    [all...]
  /external/libvpx/vp8/encoder/arm/
arm_csystemdependent.c 24 int flags = cpi->common.rtcd.flags;
32 /*cpi->rtcd.variance.sad16x16 = vp8_sad16x16_c;
33 cpi->rtcd.variance.sad16x8 = vp8_sad16x8_c;
34 cpi->rtcd.variance.sad8x16 = vp8_sad8x16_c;
35 cpi->rtcd.variance.sad8x8 = vp8_sad8x8_c;
36 cpi->rtcd.variance.sad4x4 = vp8_sad4x4_c;*/
38 /*cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;
39 cpi->rtcd.variance.var8x8 = vp8_variance8x8_c;
40 cpi->rtcd.variance.var8x16 = vp8_variance8x16_c;
41 cpi->rtcd.variance.var16x8 = vp8_variance16x8_c
    [all...]
  /external/libvpx/vp8/encoder/x86/
x86_csystemdependent.c 223 cpi->rtcd.variance.sad16x16 = vp8_sad16x16_mmx;
224 cpi->rtcd.variance.sad16x8 = vp8_sad16x8_mmx;
225 cpi->rtcd.variance.sad8x16 = vp8_sad8x16_mmx;
226 cpi->rtcd.variance.sad8x8 = vp8_sad8x8_mmx;
227 cpi->rtcd.variance.sad4x4 = vp8_sad4x4_mmx;
229 cpi->rtcd.variance.var4x4 = vp8_variance4x4_mmx;
230 cpi->rtcd.variance.var8x8 = vp8_variance8x8_mmx;
231 cpi->rtcd.variance.var8x16 = vp8_variance8x16_mmx;
232 cpi->rtcd.variance.var16x8 = vp8_variance16x8_mmx;
233 cpi->rtcd.variance.var16x16 = vp8_variance16x16_mmx
    [all...]
  /external/libvpx/vp8/encoder/
encodeintra.c 46 void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x, BLOCK *be, BLOCKD *b, int best_mode)
50 ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
56 vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32);
58 RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
61 void vp8_encode_intra4x4block_rd(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x, BLOCK *be, BLOCKD *b, int best_mode)
65 ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
71 IDCT_INVOKE(&rtcd->common->idct, idct16)(b->dqcoeff, b->diff, 32);
73 RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
76 void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb)
88 vp8_encode_intra4x4block(rtcd, mb, be, b, b->bmi.mode)
    [all...]
encodemb.h 96 void vp8_encode_inter16x16(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
106 void vp8_encode_inter16x16uv(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
107 void vp8_encode_inter16x16uvrd(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
108 void vp8_optimize_mby(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
109 void vp8_optimize_mbuv(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
110 void vp8_encode_inter16x16y(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
encodemb.c 101 static void vp8_subtract_mb(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
103 ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, x->src.y_buffer, x->e_mbd.predictor, x->src.y_stride);
104 ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
260 const VP8_ENCODER_RTCD *rtcd)
503 void vp8_optimize_mb(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
525 ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
531 ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
537 ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
545 ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
550 void vp8_optimize_mby(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
    [all...]
picklpf.c 25 extern int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd);
68 static int vp8_calc_partial_ssl_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, int Fraction, const vp8_variance_rtcd_vtable_t *rtcd)
77 (void)rtcd;
97 Total += VARIANCE_INVOKE(rtcd, mse16x16)(src + j, source->y_stride, dst + j, dest->y_stride, &sse);
195 best_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));
212 filt_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));
249 filt_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));
313 if (cm->rtcd.flags & HAS_NEON)
355 best_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
361 if (cm->rtcd.flags & HAS_NEON
    [all...]
encodeframe.c 34 #define RTCD(x) &cpi->common.rtcd.x
37 #define RTCD(x) NULL
556 &cpi->common.rtcd.subpix, sixtap4x4);
558 &cpi->common.rtcd.subpix, sixtap8x4);
560 &cpi->common.rtcd.subpix, sixtap8x8);
562 &cpi->common.rtcd.subpix, sixtap16x16);
567 &cpi->common.rtcd.subpix, bilinear4x4);
569 &cpi->common.rtcd.subpix, bilinear8x4);
571 &cpi->common.rtcd.subpix, bilinear8x8)
    [all...]
onyx_if.c 44 #define RTCD(x) &cpi->common.rtcd.x
47 #define RTCD(x) NULL
73 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd);
74 int vp8_calc_low_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd);
    [all...]
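
encodeframe.c and onyx_if.c show both halves of the convention: RTCD(x) yields a pointer into the common table when runtime CPU detection is compiled in, and NULL otherwise. A sketch of it together with the paired IF_RTCD helper used by the picklpf.c and encodeintra.c callers above, assuming IF_RTCD follows the same #if:

    #if CONFIG_RUNTIME_CPU_DETECT
    #define RTCD(x)    &cpi->common.rtcd.x   /* as at encodeframe.c:34 / onyx_if.c:44 */
    #define IF_RTCD(x) (x)                   /* pass the table pointer through */
    #else
    #define RTCD(x)    NULL                  /* as at encodeframe.c:37 / onyx_if.c:47 */
    #define IF_RTCD(x) NULL                  /* callees ignore the argument */
    #endif
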
  /external/libvpx/vp8/decoder/
dboolhuff.h 40 struct vp8_dboolhuff_rtcd_vtable *rtcd; member in struct:__anon5107
88 * functions right now. Disable RTCD to avoid using
101 /* wrapper functions to hide RTCD. static means inline means hopefully no
105 struct vp8_dboolhuff_rtcd_vtable *rtcd,
108 br->rtcd = rtcd;
110 return DBOOLHUFF_INVOKE(rtcd, start)(br, source, source_sz);
113 DBOOLHUFF_INVOKE(br->rtcd, fill)(br);
145 *return DBOOLHUFF_INVOKE(br->rtcd, debool)(br, probability);
197 *return DBOOLHUFF_INVOKE(br->rtcd, devalue)(br, bits)
    [all...]
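
dboolhuff.h hides the vtable behind static inline wrappers so callers never use DBOOLHUFF_INVOKE directly. A sketch reconstructed from lines 105-110 of the excerpt; the wrapper name, return type, and reader type (vp8dx_start_decode, BOOL_DECODER) are assumptions.

    static int vp8dx_start_decode(BOOL_DECODER *br,
                                  struct vp8_dboolhuff_rtcd_vtable *rtcd,
                                  const unsigned char *source,
                                  unsigned int source_sz)
    {
        br->rtcd = rtcd;   /* remember the table for later fill/debool calls */
        return DBOOLHUFF_INVOKE(rtcd, start)(br, source, source_sz);
    }
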
onyxd_if.c 339 if (cm->rtcd.flags & HAS_NEON)
352 if (cm->rtcd.flags & HAS_NEON)
378 if (cm->rtcd.flags & HAS_NEON)
397 if (cm->rtcd.flags & HAS_NEON)
413 if (cm->rtcd.flags & HAS_NEON)
500 if (cm->rtcd.flags & HAS_NEON)
  /external/libvpx/vp8/decoder/generic/
dsystemdependent.c 23 pbi->mb.rtcd = &pbi->common.rtcd;
32 #if 0 /*For use with RTCD, when implemented*/
  /external/libvpx/vp8/common/arm/neon/
recon_neon.c 18 void vp8_recon_mb_neon(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
  /external/libvpx/vp8/decoder/arm/
arm_dsystemdependent.c 24 int flags = pbi->common.rtcd.flags;
38 #if 0 /*For use with RTCD, when implemented*/
57 #if 0 /*For use with RTCD, when implemented*/
