/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */


#include "vpx_ports/config.h"
#include "encodemb.h"
#include "reconinter.h"
#include "quantize.h"
#include "tokenize.h"
#include "invtrans.h"
#include "recon.h"
#include "reconintra.h"
#include "dct.h"
#include "vpx_mem/vpx_mem.h"

#if CONFIG_RUNTIME_CPU_DETECT
#define IF_RTCD(x) (x)
#else
#define IF_RTCD(x) NULL
#endif
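
/* Compute the 4x4 residual for a single block: subtract the predictor from
 * the source pixels and store the result in the block's src_diff buffer.
 */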
void vp8_subtract_b_c(BLOCK *be, BLOCKD *bd, int pitch)
{
    unsigned char *src_ptr = (*(be->base_src) + be->src);
    short *diff_ptr = be->src_diff;
    unsigned char *pred_ptr = bd->predictor;
    int src_stride = be->src_stride;

    int r, c;

    for (r = 0; r < 4; r++)
    {
        for (c = 0; c < 4; c++)
        {
            diff_ptr[c] = src_ptr[c] - pred_ptr[c];
        }

        diff_ptr += pitch;
        pred_ptr += pitch;
        src_ptr  += src_stride;
    }
}

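/* Compute the 8x8 chroma residuals for a macroblock.  The U and V residuals
 * live at offsets 256 and 320 of the diff buffer, mirroring the layout of the
 * predictor buffer.
 */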
void vp8_subtract_mbuv_c(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride)
{
    short *udiff = diff + 256;
    short *vdiff = diff + 320;
    unsigned char *upred = pred + 256;
    unsigned char *vpred = pred + 320;

    int r, c;

    for (r = 0; r < 8; r++)
    {
        for (c = 0; c < 8; c++)
        {
            udiff[c] = usrc[c] - upred[c];
        }

        udiff += 8;
        upred += 8;
        usrc  += stride;
    }

    for (r = 0; r < 8; r++)
    {
        for (c = 0; c < 8; c++)
        {
            vdiff[c] = vsrc[c] - vpred[c];
        }

        vdiff += 8;
        vpred += 8;
        vsrc  += stride;
    }
}

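/* Compute the 16x16 luma residual for a macroblock. */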
void vp8_subtract_mby_c(short *diff, unsigned char *src, unsigned char *pred, int stride)
{
    int r, c;

    for (r = 0; r < 16; r++)
    {
        for (c = 0; c < 16; c++)
        {
            diff[c] = src[c] - pred[c];
        }

        diff += 16;
        pred += 16;
        src  += stride;
    }
}

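/* Subtract the prediction from the source for all planes of a macroblock. */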
static void vp8_subtract_mb(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
    ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, x->src.y_buffer, x->e_mbd.predictor, x->src.y_stride);
    ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
}

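/* Gather the DC coefficient of each of the 16 luma blocks into the second
 * order (Y2) block, which starts at offset 384 of src_diff.
 */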
void vp8_build_dcblock(MACROBLOCK *x)
{
    short *src_diff_ptr = &x->src_diff[384];
    int i;

    for (i = 0; i < 16; i++)
    {
        src_diff_ptr[i] = x->coeff[i * 16];
    }
}

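/* Forward DCT of the eight chroma sub-blocks, two 4x4 blocks per 8x4 call. */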
void vp8_transform_mbuv(MACROBLOCK *x)
{
    int i;

    for (i = 16; i < 24; i += 2)
    {
        x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
            &x->block[i].coeff[0], 16);
    }
}


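/* Forward DCT of the 16 luma sub-blocks of an intra macroblock, followed by
 * the 4x4 Walsh-Hadamard transform of the collected DC coefficients (block 24).
 */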
void vp8_transform_intra_mby(MACROBLOCK *x)
{
    int i;

    for (i = 0; i < 16; i += 2)
    {
        x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
            &x->block[i].coeff[0], 32);
    }

    /* build dc block from 16 y dc values */
    vp8_build_dcblock(x);

    /* do 2nd order transform on the dc block */
    x->short_walsh4x4(&x->block[24].src_diff[0],
        &x->block[24].coeff[0], 8);
}


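/* Forward transform of a whole macroblock: luma, the second order (Y2) block
 * and chroma.  SPLITMV macroblocks carry no Y2 block, so the DC gathering and
 * Walsh transform are skipped for that mode.
 */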
void vp8_transform_mb(MACROBLOCK *x)
{
    int i;

    for (i = 0; i < 16; i += 2)
    {
        x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
            &x->block[i].coeff[0], 32);
    }

    /* build dc block from 16 y dc values */
    if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
        vp8_build_dcblock(x);

    for (i = 16; i < 24; i += 2)
    {
        x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
            &x->block[i].coeff[0], 16);
    }

    /* do 2nd order transform on the dc block */
    if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
        x->short_walsh4x4(&x->block[24].src_diff[0],
            &x->block[24].coeff[0], 8);
}

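/* Forward transform of the luma plane only (plus the Y2 block when the mode
 * has one).
 */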
void vp8_transform_mby(MACROBLOCK *x)
{
    int i;

    for (i = 0; i < 16; i += 2)
    {
        x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
            &x->block[i].coeff[0], 32);
    }

    /* build dc block from 16 y dc values */
    if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
    {
        vp8_build_dcblock(x);
        x->short_walsh4x4(&x->block[24].src_diff[0],
            &x->block[24].coeff[0], 8);
    }
}


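/* Reconstruct a macroblock whose residual is not coded by building the inter
 * predictors straight into the destination buffer; the copy loop it replaces
 * is kept below for reference.
 */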
void vp8_stuff_inter16x16(MACROBLOCK *x)
{
    vp8_build_inter_predictors_mb_s(&x->e_mbd);
    /*
        // recon = copy from predictors to destination
        {
            BLOCKD *b = &x->e_mbd.block[0];
            unsigned char *pred_ptr = b->predictor;
            unsigned char *dst_ptr = *(b->base_dst) + b->dst;
            int stride = b->dst_stride;

            int i;
            for(i=0;i<16;i++)
                vpx_memcpy(dst_ptr+i*stride,pred_ptr+16*i,16);

            b = &x->e_mbd.block[16];
            pred_ptr = b->predictor;
            dst_ptr = *(b->base_dst) + b->dst;
            stride = b->dst_stride;

            for(i=0;i<8;i++)
                vpx_memcpy(dst_ptr+i*stride,pred_ptr+8*i,8);

            b = &x->e_mbd.block[20];
            pred_ptr = b->predictor;
            dst_ptr = *(b->base_dst) + b->dst;
            stride = b->dst_stride;

            for(i=0;i<8;i++)
                vpx_memcpy(dst_ptr+i*stride,pred_ptr+8*i,8);
        }
    */
}

#if !(CONFIG_REALTIME_ONLY)
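/* RDCOST combines a bit cost R, weighted by the rate multiplier RM and scaled
 * down by 256 with rounding, with a distortion D weighted by DM.  RDTRUNC is
 * the fraction discarded by that scaling; it is used to break ties.
 */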
#define RDCOST(RM,DM,R,D) ( ((128+(R)*(RM)) >> 8) + (DM)*(D) )
#define RDTRUNC(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF )

typedef struct vp8_token_state vp8_token_state;

struct vp8_token_state{
  int           rate;
  int           error;
  signed char   next;
  signed char   token;
  short         qc;
};

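/* Trellis-quantize a single block: run a Viterbi search over alternative
 * roundings of each quantized coefficient (keeping it, or lowering its
 * magnitude by one when the dequantized value overshoots the original
 * coefficient), pick the sequence with the lowest rate-distortion cost, and
 * rewrite qcoeff, dqcoeff and the block's eob accordingly.
 */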
void vp8_optimize_b(MACROBLOCK *mb, int i, int type,
                    ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
                    const VP8_ENCODER_RTCD *rtcd)
{
    BLOCK *b;
    BLOCKD *d;
    vp8_token_state tokens[17][2];
    unsigned best_mask[2];
    const short *dequant_ptr;
    const short *coeff_ptr;
    short *qcoeff_ptr;
    short *dqcoeff_ptr;
    int eob;
    int i0;
    int rc;
    int x;
    int sz;
    int next;
    int path;
    int rdmult;
    int rddiv;
    int final_eob;
    int rd_cost0;
    int rd_cost1;
    int rate0;
    int rate1;
    int error0;
    int error1;
    int t0;
    int t1;
    int best;
    int band;
    int pt;

    b = &mb->block[i];
    d = &mb->e_mbd.block[i];

    /* Enable this to test the effect of RDO as a replacement for the dynamic
     *  zero bin instead of an augmentation of it.
     */
#if 0
    vp8_strict_quantize_b(b, d);
#endif

    dequant_ptr = &d->dequant[0][0];
    coeff_ptr = &b->coeff[0];
    qcoeff_ptr = d->qcoeff;
    dqcoeff_ptr = d->dqcoeff;
    i0 = !type;
    eob = d->eob;

    /* Now set up a Viterbi trellis to evaluate alternative roundings. */
    /* TODO: These should vary with the block type, since the quantizer does. */
    rdmult = mb->rdmult << 2;
    rddiv = mb->rddiv;
    best_mask[0] = best_mask[1] = 0;
    /* Initialize the sentinel node of the trellis. */
    tokens[eob][0].rate = 0;
    tokens[eob][0].error = 0;
    tokens[eob][0].next = 16;
    tokens[eob][0].token = DCT_EOB_TOKEN;
    tokens[eob][0].qc = 0;
    *(tokens[eob] + 1) = *(tokens[eob] + 0);
    next = eob;
    for (i = eob; i-- > i0;)
    {
        int base_bits;
        int d2;
        int dx;

        rc = vp8_default_zig_zag1d[i];
        x = qcoeff_ptr[rc];
        /* Only add a trellis state for non-zero coefficients. */
        if (x)
        {
            int shortcut = 0;
            error0 = tokens[next][0].error;
            error1 = tokens[next][1].error;
            /* Evaluate the first possibility for this state. */
            rate0 = tokens[next][0].rate;
            rate1 = tokens[next][1].rate;
            t0 = (vp8_dct_value_tokens_ptr + x)->Token;
            /* Consider both possible successor states. */
            if (next < 16)
            {
                band = vp8_coef_bands[i + 1];
                pt = vp8_prev_token_class[t0];
                rate0 +=
                    mb->token_costs[type][band][pt][tokens[next][0].token];
                rate1 +=
                    mb->token_costs[type][band][pt][tokens[next][1].token];
            }
            rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);
            rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);
            if (rd_cost0 == rd_cost1)
            {
                rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0);
                rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1);
            }
            /* And pick the best. */
            best = rd_cost1 < rd_cost0;
            base_bits = *(vp8_dct_value_cost_ptr + x);
            dx = dqcoeff_ptr[rc] - coeff_ptr[rc];
            d2 = dx * dx;
            tokens[i][0].rate = base_bits + (best ? rate1 : rate0);
            tokens[i][0].error = d2 + (best ? error1 : error0);
            tokens[i][0].next = next;
            tokens[i][0].token = t0;
            tokens[i][0].qc = x;
            best_mask[0] |= best << i;
            /* Evaluate the second possibility for this state. */
            rate0 = tokens[next][0].rate;
            rate1 = tokens[next][1].rate;

            if ((abs(x) * dequant_ptr[rc] > abs(coeff_ptr[rc])) &&
                (abs(x) * dequant_ptr[rc] < abs(coeff_ptr[rc]) + dequant_ptr[rc]))
                shortcut = 1;
            else
                shortcut = 0;

            if (shortcut)
            {
                sz = -(x < 0);
                x -= 2 * sz + 1;
            }

            /* Consider both possible successor states. */
            if (!x)
            {
                /* If we reduced this coefficient to zero, check to see if
                 *  we need to move the EOB back here.
                 */
                t0 = tokens[next][0].token == DCT_EOB_TOKEN ?
                    DCT_EOB_TOKEN : ZERO_TOKEN;
                t1 = tokens[next][1].token == DCT_EOB_TOKEN ?
                    DCT_EOB_TOKEN : ZERO_TOKEN;
            }
            else
            {
                t0 = t1 = (vp8_dct_value_tokens_ptr + x)->Token;
            }
            if (next < 16)
            {
                band = vp8_coef_bands[i + 1];
                if (t0 != DCT_EOB_TOKEN)
                {
                    pt = vp8_prev_token_class[t0];
                    rate0 += mb->token_costs[type][band][pt][
                        tokens[next][0].token];
                }
                if (t1 != DCT_EOB_TOKEN)
                {
                    pt = vp8_prev_token_class[t1];
                    rate1 += mb->token_costs[type][band][pt][
                        tokens[next][1].token];
                }
            }

            rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);
            rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);
            if (rd_cost0 == rd_cost1)
            {
                rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0);
                rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1);
            }
            /* And pick the best. */
            best = rd_cost1 < rd_cost0;
            base_bits = *(vp8_dct_value_cost_ptr + x);

            if (shortcut)
            {
                dx -= (dequant_ptr[rc] + sz) ^ sz;
                d2 = dx * dx;
            }
            tokens[i][1].rate = base_bits + (best ? rate1 : rate0);
            tokens[i][1].error = d2 + (best ? error1 : error0);
            tokens[i][1].next = next;
            tokens[i][1].token = best ? t1 : t0;
            tokens[i][1].qc = x;
            best_mask[1] |= best << i;
            /* Finally, make this the new head of the trellis. */
            next = i;
        }
        /* There's no choice to make for a zero coefficient, so we don't
         *  add a new trellis node, but we do need to update the costs.
         */
        else
        {
            band = vp8_coef_bands[i + 1];
            t0 = tokens[next][0].token;
            t1 = tokens[next][1].token;
            /* Update the cost of each path if we're past the EOB token. */
            if (t0 != DCT_EOB_TOKEN)
            {
                tokens[next][0].rate += mb->token_costs[type][band][0][t0];
                tokens[next][0].token = ZERO_TOKEN;
            }
            if (t1 != DCT_EOB_TOKEN)
            {
                tokens[next][1].rate += mb->token_costs[type][band][0][t1];
                tokens[next][1].token = ZERO_TOKEN;
            }
            /* Don't update next, because we didn't add a new node. */
        }
    }

    /* Now pick the best path through the whole trellis. */
    band = vp8_coef_bands[i + 1];
    VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
    rate0 = tokens[next][0].rate;
    rate1 = tokens[next][1].rate;
    error0 = tokens[next][0].error;
    error1 = tokens[next][1].error;
    t0 = tokens[next][0].token;
    t1 = tokens[next][1].token;
    rate0 += mb->token_costs[type][band][pt][t0];
    rate1 += mb->token_costs[type][band][pt][t1];
    rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);
    rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);
    if (rd_cost0 == rd_cost1)
    {
        rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0);
        rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1);
    }
    best = rd_cost1 < rd_cost0;
    final_eob = i0 - 1;
    for (i = next; i < eob; i = next)
    {
        x = tokens[i][best].qc;
        if (x)
            final_eob = i;
        rc = vp8_default_zig_zag1d[i];
        qcoeff_ptr[rc] = x;
        dqcoeff_ptr[rc] = x * dequant_ptr[rc];
        next = tokens[i][best].next;
        best = (best_mask[best] >> i) & 1;
    }
    final_eob++;

    d->eob = final_eob;
    *a = *l = (d->eob != !type);
}

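/* Run the trellis optimization over the 16 luma and 8 chroma blocks of a
 * macroblock, using local copies of the above/left entropy contexts.
 */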
void vp8_optimize_mb(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
{
    int b;
    int type;
    int has_2nd_order;
    ENTROPY_CONTEXT_PLANES t_above, t_left;
    ENTROPY_CONTEXT *ta;
    ENTROPY_CONTEXT *tl;

    vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
    vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));

    ta = (ENTROPY_CONTEXT *)&t_above;
    tl = (ENTROPY_CONTEXT *)&t_left;

    has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
        && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
    type = has_2nd_order ? 0 : 3;

    for (b = 0; b < 16; b++)
    {
        vp8_optimize_b(x, b, type,
            ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
    }

    for (b = 16; b < 20; b++)
    {
        vp8_optimize_b(x, b, vp8_block2type[b],
            ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
    }

    for (b = 20; b < 24; b++)
    {
        vp8_optimize_b(x, b, vp8_block2type[b],
            ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
    }


    /*
    if (has_2nd_order)
    {
        vp8_setup_temp_context(&t, x->e_mbd.above_context[Y2CONTEXT],
            x->e_mbd.left_context[Y2CONTEXT], 1);
        vp8_optimize_b(x, 24, 1, t.a, t.l, rtcd);
    }
    */
}


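/* Set mb_skip_coeff if the macroblock codes no coefficients.  When a second
 * order block is present a luma block's DC is carried by the Y2 block, so a
 * luma eob below 2 still counts as empty; for B_PRED and SPLITMV every block
 * must have a zero eob.
 */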
static void vp8_find_mb_skip_coef(MACROBLOCK *x)
{
    int i;

    x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = 1;

    if (x->e_mbd.mode_info_context->mbmi.mode != B_PRED && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
    {
        for (i = 0; i < 16; i++)
        {
            x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &= (x->e_mbd.block[i].eob < 2);
        }

        for (i = 16; i < 25; i++)
        {
            x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &= (!x->e_mbd.block[i].eob);
        }
    }
    else
    {
        for (i = 0; i < 24; i++)
        {
            x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &= (!x->e_mbd.block[i].eob);
        }
    }
}


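/* Trellis-optimize the luma blocks only; requires valid above/left contexts. */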
void vp8_optimize_mby(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
{
    int b;
    int type;
    int has_2nd_order;

    ENTROPY_CONTEXT_PLANES t_above, t_left;
    ENTROPY_CONTEXT *ta;
    ENTROPY_CONTEXT *tl;

    if (!x->e_mbd.above_context)
        return;

    if (!x->e_mbd.left_context)
        return;

    vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
    vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));

    ta = (ENTROPY_CONTEXT *)&t_above;
    tl = (ENTROPY_CONTEXT *)&t_left;

    has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
        && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
    type = has_2nd_order ? 0 : 3;

    for (b = 0; b < 16; b++)
    {
        vp8_optimize_b(x, b, type,
            ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
    }

    /*
    if (has_2nd_order)
    {
        vp8_setup_temp_context(&t, x->e_mbd.above_context[Y2CONTEXT],
            x->e_mbd.left_context[Y2CONTEXT], 1);
        vp8_optimize_b(x, 24, 1, t.a, t.l, rtcd);
    }
    */
}

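/* Trellis-optimize the chroma blocks only; requires valid above/left contexts. */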
void vp8_optimize_mbuv(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
{
    int b;
    ENTROPY_CONTEXT_PLANES t_above, t_left;
    ENTROPY_CONTEXT *ta;
    ENTROPY_CONTEXT *tl;

    if (!x->e_mbd.above_context)
        return;

    if (!x->e_mbd.left_context)
        return;

    vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
    vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));

    ta = (ENTROPY_CONTEXT *)&t_above;
    tl = (ENTROPY_CONTEXT *)&t_left;

    for (b = 16; b < 20; b++)
    {
        vp8_optimize_b(x, b, vp8_block2type[b],
            ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
    }

    for (b = 20; b < 24; b++)
    {
        vp8_optimize_b(x, b, vp8_block2type[b],
            ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
    }
}
#endif

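/* Encode a complete inter macroblock: build the prediction, compute the
 * residual, transform and quantize it, optionally trellis-optimize the
 * coefficients, then inverse transform and reconstruct.
 */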
void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
    vp8_build_inter_predictors_mb(&x->e_mbd);

    vp8_subtract_mb(rtcd, x);

    vp8_transform_mb(x);

    vp8_quantize_mb(x);

#if !(CONFIG_REALTIME_ONLY)
    if (x->optimize && x->rddiv > 1)
    {
        vp8_optimize_mb(x, rtcd);
        vp8_find_mb_skip_coef(x);
    }
#endif

    vp8_inverse_transform_mb(IF_RTCD(&rtcd->common->idct), &x->e_mbd);

    vp8_recon16x16mb(IF_RTCD(&rtcd->common->recon), &x->e_mbd);
}


/* This function is used by the first pass only; it encodes just the luma plane. */
void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
    vp8_build_inter_predictors_mby(&x->e_mbd);

    ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, x->src.y_buffer, x->e_mbd.predictor, x->src.y_stride);

    vp8_transform_mby(x);

    vp8_quantize_mby(x);

    vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);

    vp8_recon16x16mby(IF_RTCD(&rtcd->common->recon), &x->e_mbd);
}


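/* Encode the chroma planes of an inter macroblock: predict, subtract,
 * transform, quantize, inverse transform and reconstruct.
 */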
void vp8_encode_inter16x16uv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
    vp8_build_inter_predictors_mbuv(&x->e_mbd);

    ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);

    vp8_transform_mbuv(x);

    vp8_quantize_mbuv(x);

    vp8_inverse_transform_mbuv(IF_RTCD(&rtcd->common->idct), &x->e_mbd);

    vp8_recon_intra_mbuv(IF_RTCD(&rtcd->common->recon), &x->e_mbd);
}


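/* Chroma-only encode that stops after quantization; no inverse transform or
 * reconstruction is performed.
 */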
void vp8_encode_inter16x16uvrd(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
    vp8_build_inter_predictors_mbuv(&x->e_mbd);
    ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);

    vp8_transform_mbuv(x);

    vp8_quantize_mbuv(x);
}
    709