/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */


#include "treereader.h"
#include "vp8/common/entropymv.h"
#include "vp8/common/entropymode.h"
#include "onyxd_int.h"
#include "vp8/common/findnearmv.h"

#if CONFIG_DEBUG
#include <assert.h>
#endif
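/* The following helpers read a prediction mode from the boolean decoder by
 * walking the corresponding entropy-coded tree with the supplied probability
 * table (see vp8/common/entropymode.h).
 */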
static B_PREDICTION_MODE read_bmode(vp8_reader *bc, const vp8_prob *p)
{
    const int i = vp8_treed_read(bc, vp8_bmode_tree, p);

    return (B_PREDICTION_MODE)i;
}

static MB_PREDICTION_MODE read_ymode(vp8_reader *bc, const vp8_prob *p)
{
    const int i = vp8_treed_read(bc, vp8_ymode_tree, p);

    return (MB_PREDICTION_MODE)i;
}

static MB_PREDICTION_MODE read_kf_ymode(vp8_reader *bc, const vp8_prob *p)
{
    const int i = vp8_treed_read(bc, vp8_kf_ymode_tree, p);

    return (MB_PREDICTION_MODE)i;
}

static MB_PREDICTION_MODE read_uv_mode(vp8_reader *bc, const vp8_prob *p)
{
    const int i = vp8_treed_read(bc, vp8_uv_mode_tree, p);

    return (MB_PREDICTION_MODE)i;
}

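/* Decode the intra prediction modes of a key frame macroblock: the 16x16
 * luma mode, the sixteen 4x4 block modes when B_PRED is selected (each
 * conditioned on the modes of the blocks above and to the left), and the
 * chroma mode.
 */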
static void read_kf_modes(VP8D_COMP *pbi, MODE_INFO *mi)
{
    vp8_reader *const bc = & pbi->mbc[8];
    const int mis = pbi->common.mode_info_stride;

    mi->mbmi.ref_frame = INTRA_FRAME;
    mi->mbmi.mode = read_kf_ymode(bc, vp8_kf_ymode_prob);

    if (mi->mbmi.mode == B_PRED)
    {
        int i = 0;
        mi->mbmi.is_4x4 = 1;

        do
        {
            const B_PREDICTION_MODE A = above_block_mode(mi, i, mis);
            const B_PREDICTION_MODE L = left_block_mode(mi, i);

            mi->bmi[i].as_mode =
                read_bmode(bc, vp8_kf_bmode_prob [A] [L]);
        }
        while (++i < 16);
    }

    mi->mbmi.uv_mode = read_uv_mode(bc, vp8_kf_uv_mode_prob);
}

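/* Read one motion vector component (row or column). Short magnitudes are
 * coded with a small tree; long magnitudes are coded bit by bit, with bit 3
 * implicit when all higher bits are zero, followed by a sign bit for any
 * non-zero value.
 */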
static int read_mvcomponent(vp8_reader *r, const MV_CONTEXT *mvc)
{
    const vp8_prob *const p = (const vp8_prob *) mvc;
    int x = 0;

    if (vp8_read(r, p [mvpis_short]))  /* Large */
    {
        int i = 0;

        do
        {
            x += vp8_read(r, p [MVPbits + i]) << i;
        }
        while (++i < 3);

        i = mvlong_width - 1;  /* Skip bit 3, which is sometimes implicit */

        do
        {
            x += vp8_read(r, p [MVPbits + i]) << i;
        }
        while (--i > 3);

        if (!(x & 0xFFF0)  ||  vp8_read(r, p [MVPbits + 3]))
            x += 8;
    }
    else   /* small */
        x = vp8_treed_read(r, vp8_small_mvtree, p + MVPshort);

    if (x  &&  vp8_read(r, p [MVPsign]))
        x = -x;

    return x;
}

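/* Read a full motion vector: the row component uses the first MV context
 * and the column component the next one. Coded values are doubled to match
 * the finer-grained units used for MVs inside the decoder.
 */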
static void read_mv(vp8_reader *r, MV *mv, const MV_CONTEXT *mvc)
{
    mv->row = (short)(read_mvcomponent(r,   mvc) * 2);
    mv->col = (short)(read_mvcomponent(r, ++mvc) * 2);
}

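/* Conditionally update the two MV probability contexts (row, column). For
 * each probability, a flag coded with the matching entry from
 * vp8_mv_update_probs says whether a new 7-bit value follows; it is stored
 * doubled, and zero is mapped to 1 since probabilities must be non-zero.
 */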
static void read_mvcontexts(vp8_reader *bc, MV_CONTEXT *mvc)
{
    int i = 0;

    do
    {
        const vp8_prob *up = vp8_mv_update_probs[i].prob;
        vp8_prob *p = (vp8_prob *)(mvc + i);
        vp8_prob *const pstop = p + MVPcount;

        do
        {
            if (vp8_read(bc, *up++))
            {
                const vp8_prob x = (vp8_prob)vp8_read_literal(bc, 7);

                *p = x ? x << 1 : 1;
            }
        }
        while (++p < pstop);
    }
    while (++i < 2);
}

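/* For each of the four split configurations (16x8, 8x16, 8x8, 4x4): the
 * number of 4x4 blocks each partition covers, and the order in which those
 * blocks are filled with the partition's motion vector.
 */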
static const unsigned char mbsplit_fill_count[4] = {8, 8, 4, 1};
static const unsigned char mbsplit_fill_offset[4][16] = {
    { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15},
    { 0,  1,  4,  5,  8,  9, 12, 13,  2,  3,   6,  7, 10, 11, 14, 15},
    { 0,  1,  4,  5,  2,  3,  6,  7,  8,  9,  12, 13, 10, 11, 14, 15},
    { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15}
};


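/* Read the per-frame mode/MV header from the first partition: the
 * mb_no_coeff_skip flag and its probability, and, on inter frames, the
 * intra/last/golden reference probabilities, optional updates to the luma
 * and chroma intra mode probabilities, and the MV context updates.
 */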
static void mb_mode_mv_init(VP8D_COMP *pbi)
{
    vp8_reader *const bc = & pbi->mbc[8];
    MV_CONTEXT *const mvc = pbi->common.fc.mvc;

#if CONFIG_ERROR_CONCEALMENT
    /* Default is that no macroblock is corrupt, therefore we initialize
     * mvs_corrupt_from_mb to something very big, which we can be sure is
     * outside the frame. */
    pbi->mvs_corrupt_from_mb = UINT_MAX;
#endif
    /* Read the mb_no_coeff_skip flag */
    pbi->common.mb_no_coeff_skip = (int)vp8_read_bit(bc);

    pbi->prob_skip_false = 0;
    if (pbi->common.mb_no_coeff_skip)
        pbi->prob_skip_false = (vp8_prob)vp8_read_literal(bc, 8);

    if(pbi->common.frame_type != KEY_FRAME)
    {
        pbi->prob_intra = (vp8_prob)vp8_read_literal(bc, 8);
        pbi->prob_last  = (vp8_prob)vp8_read_literal(bc, 8);
        pbi->prob_gf    = (vp8_prob)vp8_read_literal(bc, 8);

        if (vp8_read_bit(bc))
        {
            int i = 0;

            do
            {
                pbi->common.fc.ymode_prob[i] =
                    (vp8_prob) vp8_read_literal(bc, 8);
            }
            while (++i < 4);
        }

        if (vp8_read_bit(bc))
        {
            int i = 0;

            do
            {
                pbi->common.fc.uv_mode_prob[i] =
                    (vp8_prob) vp8_read_literal(bc, 8);
            }
            while (++i < 3);
        }

        read_mvcontexts(bc, mvc);
    }
}

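/* Probabilities for the sub-block MV reference tree, indexed by the
 * relationship between the left and above sub-block MVs (zero / non-zero /
 * equal). Only five distinct contexts exist; the table is padded to eight
 * entries so it can be indexed directly with the packed flags below.
 */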
const vp8_prob vp8_sub_mv_ref_prob3 [8][VP8_SUBMVREFS-1] =
{
    { 147, 136, 18 },   /* SUBMVREF_NORMAL          */
    { 223, 1  , 34 },   /* SUBMVREF_LEFT_ABOVE_SAME */
    { 106, 145, 1  },   /* SUBMVREF_LEFT_ZED        */
    { 208, 1  , 1  },   /* SUBMVREF_LEFT_ABOVE_ZED  */
    { 179, 121, 1  },   /* SUBMVREF_ABOVE_ZED       */
    { 223, 1  , 34 },   /* SUBMVREF_LEFT_ABOVE_SAME */
    { 179, 121, 1  },   /* SUBMVREF_ABOVE_ZED       */
    { 208, 1  , 1  }    /* SUBMVREF_LEFT_ABOVE_ZED  */
};

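/* Select the sub-block MV reference probabilities for a partition, based on
 * whether the left and above neighbouring MVs are zero and whether they are
 * equal to each other.
 */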
static
const vp8_prob * get_sub_mv_ref_prob(const int left, const int above)
{
    int lez = (left == 0);
    int aez = (above == 0);
    int lea = (left == above);
    const vp8_prob * prob;

    prob = vp8_sub_mv_ref_prob3[(aez << 2) |
                                (lez << 1) |
                                (lea)];

    return prob;
}

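/* Decode a SPLITMV macroblock: read the split configuration, then for each
 * partition read its sub-MV reference (left / above / zero / new) and, for a
 * new MV, a motion vector relative to best_mv. The resulting MV is bounds
 * checked and propagated to every 4x4 block belonging to the partition so
 * that later partitions can use it as their left/above predictor.
 */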
static void decode_split_mv(vp8_reader *const bc, MODE_INFO *mi,
                        const MODE_INFO *left_mb, const MODE_INFO *above_mb,
                        MB_MODE_INFO *mbmi, int_mv best_mv,
                        MV_CONTEXT *const mvc, int mb_to_left_edge,
                        int mb_to_right_edge, int mb_to_top_edge,
                        int mb_to_bottom_edge)
{
    int s;      /* split configuration (16x8, 8x16, 8x8, 4x4) */
    int num_p;  /* number of partitions in the split configuration
                  (see vp8_mbsplit_count) */
    int j = 0;

    s = 3;
    num_p = 16;
    if( vp8_read(bc, 110) )
    {
        s = 2;
        num_p = 4;
        if( vp8_read(bc, 111) )
        {
            s = vp8_read(bc, 150);
            num_p = 2;
        }
    }

    do  /* for each subset j */
    {
        int_mv leftmv, abovemv;
        int_mv blockmv;
        int k;  /* first block in subset j */

        const vp8_prob *prob;
        k = vp8_mbsplit_offset[s][j];

        if (!(k & 3))
        {
            /* On L edge, get from MB to left of us */
            if(left_mb->mbmi.mode != SPLITMV)
                leftmv.as_int =  left_mb->mbmi.mv.as_int;
            else
                leftmv.as_int =  (left_mb->bmi + k + 4 - 1)->mv.as_int;
        }
        else
            leftmv.as_int =  (mi->bmi + k - 1)->mv.as_int;

        if (!(k >> 2))
        {
            /* On top edge, get from MB above us */
            if(above_mb->mbmi.mode != SPLITMV)
                abovemv.as_int =  above_mb->mbmi.mv.as_int;
            else
                abovemv.as_int =  (above_mb->bmi + k + 16 - 4)->mv.as_int;
        }
        else
            abovemv.as_int = (mi->bmi + k - 4)->mv.as_int;

        prob = get_sub_mv_ref_prob(leftmv.as_int, abovemv.as_int);

        if( vp8_read(bc, prob[0]) )
        {
            if( vp8_read(bc, prob[1]) )
            {
                blockmv.as_int = 0;
                if( vp8_read(bc, prob[2]) )
                {
                    blockmv.as_mv.row = read_mvcomponent(bc, &mvc[0]) * 2;
                    blockmv.as_mv.row += best_mv.as_mv.row;
                    blockmv.as_mv.col = read_mvcomponent(bc, &mvc[1]) * 2;
                    blockmv.as_mv.col += best_mv.as_mv.col;
                }
            }
            else
            {
                blockmv.as_int = abovemv.as_int;
            }
        }
        else
        {
            blockmv.as_int = leftmv.as_int;
        }

        mbmi->need_to_clamp_mvs |= vp8_check_mv_bounds(&blockmv,
                                                  mb_to_left_edge,
                                                  mb_to_right_edge,
                                                  mb_to_top_edge,
                                                  mb_to_bottom_edge);

        {
            /* Fill (uniform) modes, mvs of jth subset.
             Must do it here because ensuing subsets can
             refer back to us via "left" or "above". */
            const unsigned char *fill_offset;
            unsigned int fill_count = mbsplit_fill_count[s];

            fill_offset = &mbsplit_fill_offset[s]
                             [(unsigned char)j * mbsplit_fill_count[s]];

            do {
                mi->bmi[ *fill_offset].mv.as_int = blockmv.as_int;
                fill_offset++;
            }while (--fill_count);
        }

    }
    while (++j < num_p);

    mbmi->partitioning = s;
}

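/* Decode the mode and motion information for one macroblock of an inter
 * frame. For inter macroblocks this gathers the near MV candidates from the
 * above, left and above-left neighbours (with reference-frame sign-bias
 * correction), then reads one of ZEROMV / NEARESTMV / NEARMV / NEWMV /
 * SPLITMV using the candidate counts as context. Intra macroblocks fall
 * through to the regular y/uv (and optional 4x4) mode reads.
 */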
static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi)
{
    vp8_reader *const bc = & pbi->mbc[8];
    mbmi->ref_frame = (MV_REFERENCE_FRAME) vp8_read(bc, pbi->prob_intra);
    if (mbmi->ref_frame)    /* inter MB */
    {
        enum {CNT_INTRA, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV};
        int cnt[4];
        int *cntx = cnt;
        int_mv near_mvs[4];
        int_mv *nmv = near_mvs;
        const int mis = pbi->mb.mode_info_stride;
        const MODE_INFO *above = mi - mis;
        const MODE_INFO *left = mi - 1;
        const MODE_INFO *aboveleft = above - 1;
        int *ref_frame_sign_bias = pbi->common.ref_frame_sign_bias;

        mbmi->need_to_clamp_mvs = 0;

        if (vp8_read(bc, pbi->prob_last))
        {
            mbmi->ref_frame =
                (MV_REFERENCE_FRAME)((int)(2 + vp8_read(bc, pbi->prob_gf)));
        }

        /* Zero accumulators */
        nmv[0].as_int = nmv[1].as_int = nmv[2].as_int = 0;
        cnt[0] = cnt[1] = cnt[2] = cnt[3] = 0;

        /* Process above */
        if (above->mbmi.ref_frame != INTRA_FRAME)
        {
            if (above->mbmi.mv.as_int)
            {
                (++nmv)->as_int = above->mbmi.mv.as_int;
                mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame],
                        mbmi->ref_frame, nmv, ref_frame_sign_bias);
                ++cntx;
            }

            *cntx += 2;
        }

        /* Process left */
        if (left->mbmi.ref_frame != INTRA_FRAME)
        {
            if (left->mbmi.mv.as_int)
            {
                int_mv this_mv;

                this_mv.as_int = left->mbmi.mv.as_int;
                mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame],
                        mbmi->ref_frame, &this_mv, ref_frame_sign_bias);

                if (this_mv.as_int != nmv->as_int)
                {
                    (++nmv)->as_int = this_mv.as_int;
                    ++cntx;
                }

                *cntx += 2;
            }
            else
                cnt[CNT_INTRA] += 2;
        }

        /* Process above left */
        if (aboveleft->mbmi.ref_frame != INTRA_FRAME)
        {
            if (aboveleft->mbmi.mv.as_int)
            {
                int_mv this_mv;

                this_mv.as_int = aboveleft->mbmi.mv.as_int;
                mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame],
                        mbmi->ref_frame, &this_mv, ref_frame_sign_bias);

                if (this_mv.as_int != nmv->as_int)
                {
                    (++nmv)->as_int = this_mv.as_int;
                    ++cntx;
                }

                *cntx += 1;
            }
            else
                cnt[CNT_INTRA] += 1;
        }

        if( vp8_read(bc, vp8_mode_contexts [cnt[CNT_INTRA]] [0]) )
        {

            /* If we have three distinct MV's ... */
            /* See if above-left MV can be merged with NEAREST */
            cnt[CNT_NEAREST] += ( (cnt[CNT_SPLITMV] > 0) &
                (nmv->as_int == near_mvs[CNT_NEAREST].as_int));

            /* Swap near and nearest if necessary */
            if (cnt[CNT_NEAR] > cnt[CNT_NEAREST])
            {
                int tmp;
                tmp = cnt[CNT_NEAREST];
                cnt[CNT_NEAREST] = cnt[CNT_NEAR];
                cnt[CNT_NEAR] = tmp;
                tmp = near_mvs[CNT_NEAREST].as_int;
                near_mvs[CNT_NEAREST].as_int = near_mvs[CNT_NEAR].as_int;
                near_mvs[CNT_NEAR].as_int = tmp;
            }

            if( vp8_read(bc, vp8_mode_contexts [cnt[CNT_NEAREST]] [1]) )
            {

                if( vp8_read(bc, vp8_mode_contexts [cnt[CNT_NEAR]] [2]) )
                {
                    int mb_to_top_edge;
                    int mb_to_bottom_edge;
                    int mb_to_left_edge;
                    int mb_to_right_edge;
                    MV_CONTEXT *const mvc = pbi->common.fc.mvc;
                    int near_index;

                    mb_to_top_edge = pbi->mb.mb_to_top_edge;
                    mb_to_bottom_edge = pbi->mb.mb_to_bottom_edge;
                    mb_to_top_edge -= LEFT_TOP_MARGIN;
                    mb_to_bottom_edge += RIGHT_BOTTOM_MARGIN;
                    mb_to_right_edge = pbi->mb.mb_to_right_edge;
                    mb_to_right_edge += RIGHT_BOTTOM_MARGIN;
                    mb_to_left_edge = pbi->mb.mb_to_left_edge;
                    mb_to_left_edge -= LEFT_TOP_MARGIN;

                    /* Use near_mvs[0] to store the "best" MV */
                    near_index = CNT_INTRA +
                        (cnt[CNT_NEAREST] >= cnt[CNT_INTRA]);

                    vp8_clamp_mv2(&near_mvs[near_index], &pbi->mb);

                    cnt[CNT_SPLITMV] = ((above->mbmi.mode == SPLITMV)
                                        + (left->mbmi.mode == SPLITMV)) * 2
                                       + (aboveleft->mbmi.mode == SPLITMV);

                    if( vp8_read(bc, vp8_mode_contexts [cnt[CNT_SPLITMV]] [3]) )
                    {
                        decode_split_mv(bc, mi, left, above,
                                                    mbmi,
                                                    near_mvs[near_index],
                                                    mvc, mb_to_left_edge,
                                                    mb_to_right_edge,
                                                    mb_to_top_edge,
                                                    mb_to_bottom_edge);
                        mbmi->mv.as_int = mi->bmi[15].mv.as_int;
                        mbmi->mode =  SPLITMV;
                        mbmi->is_4x4 = 1;
                    }
                    else
                    {
                        int_mv *const mbmi_mv = & mbmi->mv;
                        read_mv(bc, &mbmi_mv->as_mv, (const MV_CONTEXT *) mvc);
                        mbmi_mv->as_mv.row += near_mvs[near_index].as_mv.row;
                        mbmi_mv->as_mv.col += near_mvs[near_index].as_mv.col;

                        /* Don't need to check this on NEARMV and NEARESTMV
                         * modes since those modes clamp the MV. The NEWMV mode
                         * does not, so signal to the prediction stage whether
                         * special handling may be required.
                         */
                        mbmi->need_to_clamp_mvs =
                            vp8_check_mv_bounds(mbmi_mv, mb_to_left_edge,
                                                mb_to_right_edge,
                                                mb_to_top_edge,
                                                mb_to_bottom_edge);
                        mbmi->mode =  NEWMV;
                    }
                }
                else
                {
                    mbmi->mode =  NEARMV;
                    mbmi->mv.as_int = near_mvs[CNT_NEAR].as_int;
                    vp8_clamp_mv2(&mbmi->mv, &pbi->mb);
                }
            }
            else
            {
                mbmi->mode =  NEARESTMV;
                mbmi->mv.as_int = near_mvs[CNT_NEAREST].as_int;
                vp8_clamp_mv2(&mbmi->mv, &pbi->mb);
            }
        }
        else
        {
            mbmi->mode =  ZEROMV;
            mbmi->mv.as_int = 0;
        }

#if CONFIG_ERROR_CONCEALMENT
        if(pbi->ec_enabled && (mbmi->mode != SPLITMV))
        {
            mi->bmi[ 0].mv.as_int =
            mi->bmi[ 1].mv.as_int =
            mi->bmi[ 2].mv.as_int =
            mi->bmi[ 3].mv.as_int =
            mi->bmi[ 4].mv.as_int =
            mi->bmi[ 5].mv.as_int =
            mi->bmi[ 6].mv.as_int =
            mi->bmi[ 7].mv.as_int =
            mi->bmi[ 8].mv.as_int =
            mi->bmi[ 9].mv.as_int =
            mi->bmi[10].mv.as_int =
            mi->bmi[11].mv.as_int =
            mi->bmi[12].mv.as_int =
            mi->bmi[13].mv.as_int =
            mi->bmi[14].mv.as_int =
            mi->bmi[15].mv.as_int = mbmi->mv.as_int;
        }
#endif
    }
    else
    {
        /* required for left and above block mv */
        mbmi->mv.as_int = 0;

        /* MB is intra coded */
        if ((mbmi->mode = read_ymode(bc, pbi->common.fc.ymode_prob)) == B_PRED)
        {
            int j = 0;
            mbmi->is_4x4 = 1;
            do
            {
                mi->bmi[j].as_mode = read_bmode(bc, pbi->common.fc.bmode_prob);
            }
            while (++j < 16);
        }

        mbmi->uv_mode = read_uv_mode(bc, pbi->common.fc.uv_mode_prob);
    }

}

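/* If the segmentation map is being updated, read the per-macroblock segment
 * id (a two-level tree coded with mb_segment_tree_probs).
 */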
static void read_mb_features(vp8_reader *r, MB_MODE_INFO *mi, MACROBLOCKD *x)
{
    /* Is segmentation enabled */
    if (x->segmentation_enabled && x->update_mb_segmentation_map)
    {
        /* If so then read the segment id. */
        if (vp8_read(r, x->mb_segment_tree_probs[0]))
            mi->segment_id =
                (unsigned char)(2 + vp8_read(r, x->mb_segment_tree_probs[2]));
        else
            mi->segment_id =
                (unsigned char)(vp8_read(r, x->mb_segment_tree_probs[1]));
    }
}

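/* Per-macroblock entry point: read the segment id and coefficient skip flag
 * where applicable, then dispatch to the key frame or inter frame mode/MV
 * reader.
 */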
static void decode_mb_mode_mvs(VP8D_COMP *pbi, MODE_INFO *mi)
{
    /* Read the Macroblock segmentation map if it is being updated explicitly
     * this frame (reset to 0 above by default)
     * By default on a key frame reset all MBs to segment 0
     */
    if (pbi->mb.update_mb_segmentation_map)
        read_mb_features(&pbi->mbc[8], &mi->mbmi, &pbi->mb);
    else if(pbi->common.frame_type == KEY_FRAME)
        mi->mbmi.segment_id = 0;

    /* Read the macroblock coeff skip flag if this feature is in use,
     * else default to 0 */
    if (pbi->common.mb_no_coeff_skip)
        mi->mbmi.mb_skip_coeff = vp8_read(&pbi->mbc[8], pbi->prob_skip_false);
    else
        mi->mbmi.mb_skip_coeff = 0;

    mi->mbmi.is_4x4 = 0;
    if(pbi->common.frame_type == KEY_FRAME)
        read_kf_modes(pbi, mi);
    else
        read_mb_modes_mv(pbi, mi, &mi->mbmi);

}

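/* Decode mode and motion vector information for every macroblock in the
 * frame, maintaining the distance-to-frame-edge values used for MV clamping
 * as the scan proceeds in raster order.
 */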
void vp8_decode_mode_mvs(VP8D_COMP *pbi)
{
    MODE_INFO *mi = pbi->common.mi;
    int mb_row = -1;
    int mb_to_right_edge_start;

    mb_mode_mv_init(pbi);

    pbi->mb.mb_to_top_edge = 0;
    pbi->mb.mb_to_bottom_edge = ((pbi->common.mb_rows - 1) * 16) << 3;
    mb_to_right_edge_start = ((pbi->common.mb_cols - 1) * 16) << 3;

    while (++mb_row < pbi->common.mb_rows)
    {
        int mb_col = -1;

        pbi->mb.mb_to_left_edge =  0;
        pbi->mb.mb_to_right_edge = mb_to_right_edge_start;

        while (++mb_col < pbi->common.mb_cols)
        {
#if CONFIG_ERROR_CONCEALMENT
            int mb_num = mb_row * pbi->common.mb_cols + mb_col;
#endif

            decode_mb_mode_mvs(pbi, mi);

#if CONFIG_ERROR_CONCEALMENT
            /* look for corruption. set mvs_corrupt_from_mb to the current
             * mb_num if the frame is corrupt from this macroblock. */
            if (vp8dx_bool_error(&pbi->mbc[8]) && mb_num <
                (int)pbi->mvs_corrupt_from_mb)
            {
                pbi->mvs_corrupt_from_mb = mb_num;
                /* no need to continue since the partition is corrupt from
                 * here on.
                 */
                return;
            }
#endif

            pbi->mb.mb_to_left_edge -= (16 << 3);
            pbi->mb.mb_to_right_edge -= (16 << 3);
            mi++;       /* next macroblock */
        }
        pbi->mb.mb_to_top_edge -= (16 << 3);
        pbi->mb.mb_to_bottom_edge -= (16 << 3);

        mi++;           /* skip left predictor each row */
    }
}