/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */


#include "vpx_ports/config.h"
#include "encodemb.h"
#include "reconinter.h"
#include "quantize.h"
#include "tokenize.h"
#include "invtrans.h"
#include "recon.h"
#include "reconintra.h"
#include "dct.h"
#include "vpx_mem/vpx_mem.h"

#if CONFIG_RUNTIME_CPU_DETECT
#define IF_RTCD(x) (x)
#else
#define IF_RTCD(x) NULL
#endif
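
/* Subtract the 4x4 prediction for one block from the corresponding source
 * pixels and store the signed residual in the block's src_diff buffer.
 */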
void vp8_subtract_b_c(BLOCK *be, BLOCKD *bd, int pitch)
{
    unsigned char *src_ptr = (*(be->base_src) + be->src);
    short *diff_ptr = be->src_diff;
    unsigned char *pred_ptr = bd->predictor;
    int src_stride = be->src_stride;

    int r, c;

    for (r = 0; r < 4; r++)
    {
        for (c = 0; c < 4; c++)
        {
            diff_ptr[c] = src_ptr[c] - pred_ptr[c];
        }

        diff_ptr += pitch;
        pred_ptr += pitch;
        src_ptr  += src_stride;
    }
}

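/* Subtract the 8x8 U and V predictions from the source chroma planes.
 * The shared diff buffer holds 256 luma entries followed by 64 U entries
 * (offset 256) and 64 V entries (offset 320), matching the offsets used
 * into the predictor buffer.
 */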
void vp8_subtract_mbuv_c(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride)
{
    short *udiff = diff + 256;
    short *vdiff = diff + 320;
    unsigned char *upred = pred + 256;
    unsigned char *vpred = pred + 320;

    int r, c;

    for (r = 0; r < 8; r++)
    {
        for (c = 0; c < 8; c++)
        {
            udiff[c] = usrc[c] - upred[c];
        }

        udiff += 8;
        upred += 8;
        usrc  += stride;
    }

    for (r = 0; r < 8; r++)
    {
        for (c = 0; c < 8; c++)
        {
            vdiff[c] = vsrc[c] - vpred[c];
        }

        vdiff += 8;
        vpred += 8;
        vsrc  += stride;
    }
}

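/* Subtract the 16x16 luma prediction from the source Y plane, writing the
 * residual row by row into the first 256 entries of the diff buffer.
 */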
void vp8_subtract_mby_c(short *diff, unsigned char *src, unsigned char *pred, int stride)
{
    int r, c;

    for (r = 0; r < 16; r++)
    {
        for (c = 0; c < 16; c++)
        {
            diff[c] = src[c] - pred[c];
        }

        diff += 16;
        pred += 16;
        src  += stride;
    }
}

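/* Compute the full macroblock residual (Y, then U and V) through the RTCD
 * dispatch table so that platform-specific subtract routines can be used.
 */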
static void vp8_subtract_mb(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
    ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, x->src.y_buffer, x->e_mbd.predictor, x->src.y_stride);
    ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
}

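/* Gather the 16 luma DC coefficients (one from each 4x4 block) into the
 * second-order block's src_diff area at offset 384, ready for the
 * Walsh-Hadamard transform of block 24.
 */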
void vp8_build_dcblock(MACROBLOCK *x)
{
    short *src_diff_ptr = &x->src_diff[384];
    int i;

    for (i = 0; i < 16; i++)
    {
        src_diff_ptr[i] = x->coeff[i * 16];
    }
}

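/* Forward DCT of the eight 4x4 chroma blocks (blocks 16-23), processed two
 * at a time by the 8x4 transform with a pitch of 16 (8 shorts per chroma
 * residual row).
 */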
void vp8_transform_mbuv(MACROBLOCK *x)
{
    int i;

    for (i = 16; i < 24; i += 2)
    {
        x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
            &x->block[i].coeff[0], 16);
    }
}


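/* Transform the 16 luma blocks of an intra-coded macroblock, then build and
 * Walsh-Hadamard transform the second-order DC block.  The SPLITMV check of
 * the inter path is not needed here.
 */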
void vp8_transform_intra_mby(MACROBLOCK *x)
{
    int i;

    for (i = 0; i < 16; i += 2)
    {
        x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
            &x->block[i].coeff[0], 32);
    }

    // build dc block from 16 y dc values
    vp8_build_dcblock(x);

    // do 2nd order transform on the dc block
    x->short_walsh4x4(&x->block[24].src_diff[0],
        &x->block[24].coeff[0], 8);

}


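/* Transform an inter-coded macroblock: luma 4x4 blocks, the optional
 * second-order DC block, and the chroma blocks.  SPLITMV partitions carry
 * their luma DC coefficients in the first-order blocks, so the DC block and
 * its Walsh-Hadamard transform are skipped in that mode.
 */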
void vp8_transform_mb(MACROBLOCK *x)
{
    int i;

    for (i = 0; i < 16; i += 2)
    {
        x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
            &x->block[i].coeff[0], 32);
    }

    // build dc block from 16 y dc values
    if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
        vp8_build_dcblock(x);

    for (i = 16; i < 24; i += 2)
    {
        x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
            &x->block[i].coeff[0], 16);
    }

    // do 2nd order transform on the dc block
    if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
        x->short_walsh4x4(&x->block[24].src_diff[0],
            &x->block[24].coeff[0], 8);

}

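/* Luma-only variant of vp8_transform_mb, used when only the Y plane is being
 * coded.  The second-order block is again skipped for SPLITMV.
 */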
void vp8_transform_mby(MACROBLOCK *x)
{
    int i;

    for (i = 0; i < 16; i += 2)
    {
        x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
            &x->block[i].coeff[0], 32);
    }

    // build dc block from 16 y dc values
    if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
    {
        vp8_build_dcblock(x);
        x->short_walsh4x4(&x->block[24].src_diff[0],
            &x->block[24].coeff[0], 8);
    }
}


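/* "Stuff" an inter macroblock: build the inter prediction directly into the
 * reconstruction buffer (the _s variant) without coding any residual.  The
 * commented-out block below is an older per-plane copy of the predictors,
 * kept for reference.
 */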
void vp8_stuff_inter16x16(MACROBLOCK *x)
{
    vp8_build_inter_predictors_mb_s(&x->e_mbd);
    /*
        // recon = copy from predictors to destination
        {
            BLOCKD *b = &x->e_mbd.block[0];
            unsigned char *pred_ptr = b->predictor;
            unsigned char *dst_ptr = *(b->base_dst) + b->dst;
            int stride = b->dst_stride;

            int i;
            for(i=0;i<16;i++)
                vpx_memcpy(dst_ptr+i*stride,pred_ptr+16*i,16);

            b = &x->e_mbd.block[16];
            pred_ptr = b->predictor;
            dst_ptr = *(b->base_dst) + b->dst;
            stride = b->dst_stride;

            for(i=0;i<8;i++)
                vpx_memcpy(dst_ptr+i*stride,pred_ptr+8*i,8);

            b = &x->e_mbd.block[20];
            pred_ptr = b->predictor;
            dst_ptr = *(b->base_dst) + b->dst;
            stride = b->dst_stride;

            for(i=0;i<8;i++)
                vpx_memcpy(dst_ptr+i*stride,pred_ptr+8*i,8);
        }
    */
}

#if !(CONFIG_REALTIME_ONLY)
#define RDCOST(RM,DM,R,D) ( ((128+(R)*(RM)) >> 8) + (DM)*(D) )
#define RDTRUNC(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF )
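
/* RDCOST combines rate and distortion in fixed point: the rate R is scaled
 * by the multiplier RM with rounding (+128) and a >>8 shift, then added to
 * the distortion D scaled by DM.  RDTRUNC returns the fraction discarded by
 * that shift and is used below to break ties between equal RD costs.
 * For example (illustrative values), with RM=256, DM=1, R=10, D=4:
 *   RDCOST  = ((128 + 10*256) >> 8) + 1*4 = 10 + 4 = 14
 *   RDTRUNC = (128 + 10*256) & 0xFF       = 128
 */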

typedef struct vp8_token_state vp8_token_state;

struct vp8_token_state{
  int           rate;
  int           error;
  signed char   next;
  signed char   token;
  short         qc;
};

// TODO: run experiments to find the optimal per-plane multiplier values
#define Y1_RD_MULT 1
#define UV_RD_MULT 1
#define Y2_RD_MULT 4

static const int plane_rd_mult[4] =
{
    Y1_RD_MULT,
    Y2_RD_MULT,
    UV_RD_MULT,
    Y1_RD_MULT
};

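/* Trellis (Viterbi) coefficient optimization for a single block.  Starting
 * from the quantizer's output, each non-zero coefficient is either kept or,
 * where the dequantized value allows, reduced in magnitude by one; the
 * trellis tracks the rate and squared error of both choices, working
 * backwards from the EOB, and the cheapest path is written back to
 * qcoeff/dqcoeff with an updated eob.  type selects the coefficient plane
 * (0 = Y with its DC carried in the Y2 block, 1 = Y2, 2 = UV, 3 = Y
 * including DC); a and l are the above/left entropy contexts, updated on
 * exit.
 */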
void vp8_optimize_b(MACROBLOCK *mb, int ib, int type,
                    ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
                    const VP8_ENCODER_RTCD *rtcd)
{
    BLOCK *b;
    BLOCKD *d;
    vp8_token_state tokens[17][2];
    unsigned best_mask[2];
    const short *dequant_ptr;
    const short *coeff_ptr;
    short *qcoeff_ptr;
    short *dqcoeff_ptr;
    int eob;
    int i0;
    int rc;
    int x;
    int sz;
    int next;
    int path;
    int rdmult;
    int rddiv;
    int final_eob;
    int rd_cost0;
    int rd_cost1;
    int rate0;
    int rate1;
    int error0;
    int error1;
    int t0;
    int t1;
    int best;
    int band;
    int pt;
    int i;
    int err_mult = plane_rd_mult[type];

    b = &mb->block[ib];
    d = &mb->e_mbd.block[ib];

    /* Enable this to test the effect of RDO as a replacement for the dynamic
     *  zero bin instead of an augmentation of it.
     */
#if 0
    vp8_strict_quantize_b(b, d);
#endif

    dequant_ptr = d->dequant;
    coeff_ptr = b->coeff;
    qcoeff_ptr = d->qcoeff;
    dqcoeff_ptr = d->dqcoeff;
    i0 = !type;
    eob = d->eob;

    /* Now set up a Viterbi trellis to evaluate alternative roundings. */
    /* TODO: These should vary with the block type, since the quantizer does. */
    rdmult = (mb->rdmult << 2) * err_mult;
    rddiv = mb->rddiv;
    best_mask[0] = best_mask[1] = 0;
    /* Initialize the sentinel node of the trellis. */
    tokens[eob][0].rate = 0;
    tokens[eob][0].error = 0;
    tokens[eob][0].next = 16;
    tokens[eob][0].token = DCT_EOB_TOKEN;
    tokens[eob][0].qc = 0;
    *(tokens[eob] + 1) = *(tokens[eob] + 0);
    next = eob;
    for (i = eob; i-- > i0;)
    {
        int base_bits;
        int d2;
        int dx;

        rc = vp8_default_zig_zag1d[i];
        x = qcoeff_ptr[rc];
        /* Only add a trellis state for non-zero coefficients. */
        if (x)
        {
            int shortcut = 0;
            error0 = tokens[next][0].error;
            error1 = tokens[next][1].error;
            /* Evaluate the first possibility for this state. */
            rate0 = tokens[next][0].rate;
            rate1 = tokens[next][1].rate;
            t0 = (vp8_dct_value_tokens_ptr + x)->Token;
            /* Consider both possible successor states. */
            if (next < 16)
            {
                band = vp8_coef_bands[i + 1];
                pt = vp8_prev_token_class[t0];
                rate0 +=
                    mb->token_costs[type][band][pt][tokens[next][0].token];
                rate1 +=
                    mb->token_costs[type][band][pt][tokens[next][1].token];
            }
            rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);
            rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);
            if (rd_cost0 == rd_cost1)
            {
                rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0);
                rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1);
            }
            /* And pick the best. */
            best = rd_cost1 < rd_cost0;
            base_bits = *(vp8_dct_value_cost_ptr + x);
            dx = dqcoeff_ptr[rc] - coeff_ptr[rc];
            d2 = dx * dx;
            tokens[i][0].rate = base_bits + (best ? rate1 : rate0);
            tokens[i][0].error = d2 + (best ? error1 : error0);
            tokens[i][0].next = next;
            tokens[i][0].token = t0;
            tokens[i][0].qc = x;
            best_mask[0] |= best << i;
            /* Evaluate the second possibility for this state. */
            rate0 = tokens[next][0].rate;
            rate1 = tokens[next][1].rate;

            if ((abs(x) * dequant_ptr[rc] > abs(coeff_ptr[rc])) &&
                (abs(x) * dequant_ptr[rc] < abs(coeff_ptr[rc]) + dequant_ptr[rc]))
                shortcut = 1;
            else
                shortcut = 0;

            if (shortcut)
            {
                sz = -(x < 0);
                x -= 2 * sz + 1;
            }

            /* Consider both possible successor states. */
            if (!x)
            {
                /* If we reduced this coefficient to zero, check to see if
                 *  we need to move the EOB back here.
                 */
                t0 = tokens[next][0].token == DCT_EOB_TOKEN ?
                    DCT_EOB_TOKEN : ZERO_TOKEN;
                t1 = tokens[next][1].token == DCT_EOB_TOKEN ?
                    DCT_EOB_TOKEN : ZERO_TOKEN;
            }
            else
            {
                t0 = t1 = (vp8_dct_value_tokens_ptr + x)->Token;
            }
            if (next < 16)
            {
                band = vp8_coef_bands[i + 1];
                if (t0 != DCT_EOB_TOKEN)
                {
                    pt = vp8_prev_token_class[t0];
                    rate0 += mb->token_costs[type][band][pt][
                        tokens[next][0].token];
                }
                if (t1 != DCT_EOB_TOKEN)
                {
                    pt = vp8_prev_token_class[t1];
                    rate1 += mb->token_costs[type][band][pt][
                        tokens[next][1].token];
                }
            }

            rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);
            rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);
            if (rd_cost0 == rd_cost1)
            {
                rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0);
                rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1);
            }
            /* And pick the best. */
            best = rd_cost1 < rd_cost0;
            base_bits = *(vp8_dct_value_cost_ptr + x);

            if (shortcut)
            {
                dx -= (dequant_ptr[rc] + sz) ^ sz;
                d2 = dx * dx;
            }
            tokens[i][1].rate = base_bits + (best ? rate1 : rate0);
            tokens[i][1].error = d2 + (best ? error1 : error0);
            tokens[i][1].next = next;
            tokens[i][1].token = best ? t1 : t0;
            tokens[i][1].qc = x;
            best_mask[1] |= best << i;
            /* Finally, make this the new head of the trellis. */
            next = i;
        }
        /* There's no choice to make for a zero coefficient, so we don't
         *  add a new trellis node, but we do need to update the costs.
         */
        else
        {
            band = vp8_coef_bands[i + 1];
            t0 = tokens[next][0].token;
            t1 = tokens[next][1].token;
            /* Update the cost of each path if we're past the EOB token. */
            if (t0 != DCT_EOB_TOKEN)
            {
                tokens[next][0].rate += mb->token_costs[type][band][0][t0];
                tokens[next][0].token = ZERO_TOKEN;
            }
            if (t1 != DCT_EOB_TOKEN)
            {
                tokens[next][1].rate += mb->token_costs[type][band][0][t1];
                tokens[next][1].token = ZERO_TOKEN;
            }
            /* Don't update next, because we didn't add a new node. */
        }
    }

    /* Now pick the best path through the whole trellis. */
    band = vp8_coef_bands[i + 1];
    VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
    rate0 = tokens[next][0].rate;
    rate1 = tokens[next][1].rate;
    error0 = tokens[next][0].error;
    error1 = tokens[next][1].error;
    t0 = tokens[next][0].token;
    t1 = tokens[next][1].token;
    rate0 += mb->token_costs[type][band][pt][t0];
    rate1 += mb->token_costs[type][band][pt][t1];
    rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);
    rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);
    if (rd_cost0 == rd_cost1)
    {
        rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0);
        rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1);
    }
    best = rd_cost1 < rd_cost0;
    final_eob = i0 - 1;
    for (i = next; i < eob; i = next)
    {
        x = tokens[i][best].qc;
        if (x)
            final_eob = i;
        rc = vp8_default_zig_zag1d[i];
        qcoeff_ptr[rc] = x;
        dqcoeff_ptr[rc] = x * dequant_ptr[rc];
        next = tokens[i][best].next;
        best = (best_mask[best] >> i) & 1;
    }
    final_eob++;

    d->eob = final_eob;
    *a = *l = (d->eob != !type);
}

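/* Run trellis optimization over every block of a macroblock, using local
 * copies of the above/left entropy contexts so the real contexts are not
 * modified.  The second-order block is optimized only when it is coded.
 */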
void vp8_optimize_mb(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
{
    int b;
    int type;
    int has_2nd_order;
    ENTROPY_CONTEXT_PLANES t_above, t_left;
    ENTROPY_CONTEXT *ta;
    ENTROPY_CONTEXT *tl;

    vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
    vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));

    ta = (ENTROPY_CONTEXT *)&t_above;
    tl = (ENTROPY_CONTEXT *)&t_left;

    has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
        && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
    type = has_2nd_order ? 0 : 3;

    for (b = 0; b < 16; b++)
    {
        vp8_optimize_b(x, b, type,
            ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
    }

    for (b = 16; b < 20; b++)
    {
        vp8_optimize_b(x, b, vp8_block2type[b],
            ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
    }

    for (b = 20; b < 24; b++)
    {
        vp8_optimize_b(x, b, vp8_block2type[b],
            ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
    }


    if (has_2nd_order)
    {
        b = 24;
        vp8_optimize_b(x, b, vp8_block2type[b],
            ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
    }
}


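/* Luma-only trellis optimization: the 16 Y blocks plus the second-order
 * block when it is present.  Bails out if the entropy contexts are not set.
 */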
void vp8_optimize_mby(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
{
    int b;
    int type;
    int has_2nd_order;

    ENTROPY_CONTEXT_PLANES t_above, t_left;
    ENTROPY_CONTEXT *ta;
    ENTROPY_CONTEXT *tl;

    if (!x->e_mbd.above_context)
        return;

    if (!x->e_mbd.left_context)
        return;

    vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
    vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));

    ta = (ENTROPY_CONTEXT *)&t_above;
    tl = (ENTROPY_CONTEXT *)&t_left;

    has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
        && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
    type = has_2nd_order ? 0 : 3;

    for (b = 0; b < 16; b++)
    {
        vp8_optimize_b(x, b, type,
            ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
    }


    if (has_2nd_order)
    {
        b = 24;
        vp8_optimize_b(x, b, vp8_block2type[b],
            ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
    }
}

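/* Trellis optimization of the eight chroma blocks (16-23) only. */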
void vp8_optimize_mbuv(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
{
    int b;
    ENTROPY_CONTEXT_PLANES t_above, t_left;
    ENTROPY_CONTEXT *ta;
    ENTROPY_CONTEXT *tl;

    if (!x->e_mbd.above_context)
        return;

    if (!x->e_mbd.left_context)
        return;

    vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
    vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));

    ta = (ENTROPY_CONTEXT *)&t_above;
    tl = (ENTROPY_CONTEXT *)&t_left;

    for (b = 16; b < 20; b++)
    {
        vp8_optimize_b(x, b, vp8_block2type[b],
            ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
    }

    for (b = 20; b < 24; b++)
    {
        vp8_optimize_b(x, b, vp8_block2type[b],
            ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
    }

}
#endif

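/* Full inter macroblock encode: motion-compensated prediction, residual
 * computation, forward transform, quantization, optional trellis coefficient
 * optimization, inverse transform and reconstruction.
 */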
void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
    vp8_build_inter_predictors_mb(&x->e_mbd);

    vp8_subtract_mb(rtcd, x);

    vp8_transform_mb(x);

    vp8_quantize_mb(x);

#if !(CONFIG_REALTIME_ONLY)
    if (x->optimize == 2 || (x->optimize && x->rddiv > 1))
        vp8_optimize_mb(x, rtcd);
#endif

    vp8_inverse_transform_mb(IF_RTCD(&rtcd->common->idct), &x->e_mbd);

    RECON_INVOKE(&rtcd->common->recon, recon_mb)
        (IF_RTCD(&rtcd->common->recon), &x->e_mbd);
}


/* This function is used by the first pass only. */
void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
    vp8_build_inter_predictors_mby(&x->e_mbd);

    ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, x->src.y_buffer, x->e_mbd.predictor, x->src.y_stride);

    vp8_transform_mby(x);

    vp8_quantize_mby(x);

    vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);

    RECON_INVOKE(&rtcd->common->recon, recon_mby)
        (IF_RTCD(&rtcd->common->recon), &x->e_mbd);
}


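/* Chroma-only inter encode: predict, subtract, transform, quantize, then
 * inverse transform and reconstruct the U and V planes.
 */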
void vp8_encode_inter16x16uv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
    vp8_build_inter_predictors_mbuv(&x->e_mbd);

    ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);

    vp8_transform_mbuv(x);

    vp8_quantize_mbuv(x);

    vp8_inverse_transform_mbuv(IF_RTCD(&rtcd->common->idct), &x->e_mbd);

    vp8_recon_intra_mbuv(IF_RTCD(&rtcd->common->recon), &x->e_mbd);
}


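/* Variant used when only the quantized chroma coefficients are needed
 * (e.g. during the rate-distortion search): no inverse transform or
 * reconstruction is performed.
 */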
void vp8_encode_inter16x16uvrd(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
    vp8_build_inter_predictors_mbuv(&x->e_mbd);
    ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);

    vp8_transform_mbuv(x);

    vp8_quantize_mbuv(x);

}
    691