Home | History | Annotate | Download | only in decoder
      1 /*
      2  *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
      3  *
      4  *  Use of this source code is governed by a BSD-style license
      5  *  that can be found in the LICENSE file in the root of the source
      6  *  tree. An additional intellectual property rights grant can be found
      7  *  in the file PATENTS.  All contributing project authors may
      8  *  be found in the AUTHORS file in the root of the source tree.
      9  */
     10 
     11 #include "treereader.h"
     12 #include "vp8/common/entropymv.h"
     13 #include "vp8/common/entropymode.h"
     14 #include "onyxd_int.h"
     15 #include "vp8/common/findnearmv.h"
     16 
     17 static B_PREDICTION_MODE read_bmode(vp8_reader *bc, const vp8_prob *p) {
     18   const int i = vp8_treed_read(bc, vp8_bmode_tree, p);
     19 
     20   return (B_PREDICTION_MODE)i;
     21 }
     22 
     23 static MB_PREDICTION_MODE read_ymode(vp8_reader *bc, const vp8_prob *p) {
     24   const int i = vp8_treed_read(bc, vp8_ymode_tree, p);
     25 
     26   return (MB_PREDICTION_MODE)i;
     27 }
     28 
     29 static MB_PREDICTION_MODE read_kf_ymode(vp8_reader *bc, const vp8_prob *p) {
     30   const int i = vp8_treed_read(bc, vp8_kf_ymode_tree, p);
     31 
     32   return (MB_PREDICTION_MODE)i;
     33 }
     34 
     35 static MB_PREDICTION_MODE read_uv_mode(vp8_reader *bc, const vp8_prob *p) {
     36   const int i = vp8_treed_read(bc, vp8_uv_mode_tree, p);
     37 
     38   return (MB_PREDICTION_MODE)i;
     39 }
     40 
     41 static void read_kf_modes(VP8D_COMP *pbi, MODE_INFO *mi) {
     42   vp8_reader *const bc = &pbi->mbc[8];
     43   const int mis = pbi->common.mode_info_stride;
     44 
     45   mi->mbmi.ref_frame = INTRA_FRAME;
     46   mi->mbmi.mode = read_kf_ymode(bc, vp8_kf_ymode_prob);
     47 
     48   if (mi->mbmi.mode == B_PRED) {
     49     int i = 0;
     50     mi->mbmi.is_4x4 = 1;
     51 
     52     do {
     53       const B_PREDICTION_MODE A = above_block_mode(mi, i, mis);
     54       const B_PREDICTION_MODE L = left_block_mode(mi, i);
     55 
     56       mi->bmi[i].as_mode = read_bmode(bc, vp8_kf_bmode_prob[A][L]);
     57     } while (++i < 16);
     58   }
     59 
     60   mi->mbmi.uv_mode = read_uv_mode(bc, vp8_kf_uv_mode_prob);
     61 }
     62 
/* Decode one motion vector component (sign-magnitude).  Magnitudes >= 8
 * ("large") are coded bitwise: bits 0..2 least-significant first, then
 * the high bits most-significant first down to bit 4; bit 3 is only
 * coded explicitly when at least one higher bit is set, and is implied
 * to be 1 otherwise.  Magnitudes 0..7 ("small") use a tree.  A sign bit
 * follows any nonzero magnitude.  Returns the signed component value. */
static int read_mvcomponent(vp8_reader *r, const MV_CONTEXT *mvc) {
  const vp8_prob *const p = (const vp8_prob *)mvc;
  int x = 0;

  if (vp8_read(r, p[mvpis_short])) /* Large */
  {
    int i = 0;

    /* Low-order bits 0..2, least significant first. */
    do {
      x += vp8_read(r, p[MVPbits + i]) << i;
    } while (++i < 3);

    i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */

    /* High-order bits, most significant first, down to bit 4. */
    do {
      x += vp8_read(r, p[MVPbits + i]) << i;
    } while (--i > 3);

    /* If all of bits 4..9 are zero, bit 3 must be 1 (values < 8 would
     * have used the short tree), so it is added without reading. */
    if (!(x & 0xFFF0) || vp8_read(r, p[MVPbits + 3])) x += 8;
  } else { /* small */
    x = vp8_treed_read(r, vp8_small_mvtree, p + MVPshort);
  }

  /* Sign bit is only present for nonzero magnitudes. */
  if (x && vp8_read(r, p[MVPsign])) x = -x;

  return x;
}
     90 
     91 static void read_mv(vp8_reader *r, MV *mv, const MV_CONTEXT *mvc) {
     92   mv->row = (short)(read_mvcomponent(r, mvc) * 2);
     93   mv->col = (short)(read_mvcomponent(r, ++mvc) * 2);
     94 }
     95 
     96 static void read_mvcontexts(vp8_reader *bc, MV_CONTEXT *mvc) {
     97   int i = 0;
     98 
     99   do {
    100     const vp8_prob *up = vp8_mv_update_probs[i].prob;
    101     vp8_prob *p = (vp8_prob *)(mvc + i);
    102     vp8_prob *const pstop = p + MVPcount;
    103 
    104     do {
    105       if (vp8_read(bc, *up++)) {
    106         const vp8_prob x = (vp8_prob)vp8_read_literal(bc, 7);
    107 
    108         *p = x ? x << 1 : 1;
    109       }
    110     } while (++p < pstop);
    111   } while (++i < 2);
    112 }
    113 
/* For each split configuration s (16x8, 8x16, 8x8, 4x4): the number of
 * 4x4 blocks covered by a single partition. */
static const unsigned char mbsplit_fill_count[4] = { 8, 8, 4, 1 };
/* For each split configuration: 4x4 block indices grouped by partition,
 * so partition j occupies entries [j * fill_count, (j + 1) * fill_count).
 * Used by decode_split_mv() to propagate one MV to all blocks of a
 * partition. */
static const unsigned char mbsplit_fill_offset[4][16] = {
  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
  { 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15 },
  { 0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15 },
  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }
};
    121 
/* Read the per-frame mode/MV header fields that precede the per-MB data:
 * the coefficient-skip flag (and its probability when enabled), and on
 * inter frames the intra/last/golden reference probabilities, optional
 * ymode and uv_mode probability updates, and the MV context updates. */
static void mb_mode_mv_init(VP8D_COMP *pbi) {
  vp8_reader *const bc = &pbi->mbc[8];
  MV_CONTEXT *const mvc = pbi->common.fc.mvc;

#if CONFIG_ERROR_CONCEALMENT
  /* Default is that no macroblock is corrupt, therefore we initialize
   * mvs_corrupt_from_mb to something very big, which we can be sure is
   * outside the frame. */
  pbi->mvs_corrupt_from_mb = UINT_MAX;
#endif
  /* Read the mb_no_coeff_skip flag */
  pbi->common.mb_no_coeff_skip = (int)vp8_read_bit(bc);

  pbi->prob_skip_false = 0;
  if (pbi->common.mb_no_coeff_skip) {
    pbi->prob_skip_false = (vp8_prob)vp8_read_literal(bc, 8);
  }

  if (pbi->common.frame_type != KEY_FRAME) {
    /* Probabilities for the intra / last-frame / golden-frame decisions. */
    pbi->prob_intra = (vp8_prob)vp8_read_literal(bc, 8);
    pbi->prob_last = (vp8_prob)vp8_read_literal(bc, 8);
    pbi->prob_gf = (vp8_prob)vp8_read_literal(bc, 8);

    /* Optional replacement of the 4 y-mode tree probabilities. */
    if (vp8_read_bit(bc)) {
      int i = 0;

      do {
        pbi->common.fc.ymode_prob[i] = (vp8_prob)vp8_read_literal(bc, 8);
      } while (++i < 4);
    }

    /* Optional replacement of the 3 uv-mode tree probabilities. */
    if (vp8_read_bit(bc)) {
      int i = 0;

      do {
        pbi->common.fc.uv_mode_prob[i] = (vp8_prob)vp8_read_literal(bc, 8);
      } while (++i < 3);
    }

    read_mvcontexts(bc, mvc);
  }
}
    164 
/* Sub-MV reference probabilities, indexed by
 * (above == 0) << 2 | (left == 0) << 1 | (left == above);
 * see get_sub_mv_ref_prob(). */
const vp8_prob vp8_sub_mv_ref_prob3[8][VP8_SUBMVREFS - 1] = {
  { 147, 136, 18 }, /* SUBMVREF_NORMAL          */
  { 223, 1, 34 },   /* SUBMVREF_LEFT_ABOVE_SAME */
  { 106, 145, 1 },  /* SUBMVREF_LEFT_ZED        */
  { 208, 1, 1 },    /* SUBMVREF_LEFT_ABOVE_ZED  */
  { 179, 121, 1 },  /* SUBMVREF_ABOVE_ZED       */
  { 223, 1, 34 },   /* SUBMVREF_LEFT_ABOVE_SAME */
  { 179, 121, 1 },  /* SUBMVREF_ABOVE_ZED       */
  { 208, 1, 1 }     /* SUBMVREF_LEFT_ABOVE_ZED  */
};
    175 
    176 static const vp8_prob *get_sub_mv_ref_prob(const int left, const int above) {
    177   int lez = (left == 0);
    178   int aez = (above == 0);
    179   int lea = (left == above);
    180   const vp8_prob *prob;
    181 
    182   prob = vp8_sub_mv_ref_prob3[(aez << 2) | (lez << 1) | (lea)];
    183 
    184   return prob;
    185 }
    186 
/* Decode the motion data of a SPLITMV macroblock: read the split
 * configuration, then for each partition read a sub-MV reference
 * (left / above / zero / new) and, for "new", an explicit MV delta
 * relative to best_mv.  The decoded MV is copied into every 4x4 block
 * of the partition, and the chosen partitioning is stored in mbmi. */
static void decode_split_mv(vp8_reader *const bc, MODE_INFO *mi,
                            const MODE_INFO *left_mb, const MODE_INFO *above_mb,
                            MB_MODE_INFO *mbmi, int_mv best_mv,
                            MV_CONTEXT *const mvc, int mb_to_left_edge,
                            int mb_to_right_edge, int mb_to_top_edge,
                            int mb_to_bottom_edge) {
  int s; /* split configuration (16x8, 8x16, 8x8, 4x4) */
  /* number of partitions in the split configuration (see vp8_mbsplit_count) */
  int num_p;
  int j = 0;

  /* Decode the split configuration with fixed probabilities: default is
   * 4x4 (16 partitions), else 8x8 (4), else 16x8 or 8x16 (2). */
  s = 3;
  num_p = 16;
  if (vp8_read(bc, 110)) {
    s = 2;
    num_p = 4;
    if (vp8_read(bc, 111)) {
      s = vp8_read(bc, 150);
      num_p = 2;
    }
  }

  do /* for each subset j */
  {
    int_mv leftmv, abovemv;
    int_mv blockmv;
    int k; /* first block in subset j */

    const vp8_prob *prob;
    k = vp8_mbsplit_offset[s][j];

    /* Left predictor: from the left MB when k is in the leftmost 4x4
     * column (k % 4 == 0), otherwise from the block to the left in mi. */
    if (!(k & 3)) {
      /* On L edge, get from MB to left of us */
      if (left_mb->mbmi.mode != SPLITMV) {
        leftmv.as_int = left_mb->mbmi.mv.as_int;
      } else {
        leftmv.as_int = (left_mb->bmi + k + 4 - 1)->mv.as_int;
      }
    } else {
      leftmv.as_int = (mi->bmi + k - 1)->mv.as_int;
    }

    /* Above predictor: from the MB above when k is in the top 4x4 row
     * (k < 4), otherwise from the block above in mi. */
    if (!(k >> 2)) {
      /* On top edge, get from MB above us */
      if (above_mb->mbmi.mode != SPLITMV) {
        abovemv.as_int = above_mb->mbmi.mv.as_int;
      } else {
        abovemv.as_int = (above_mb->bmi + k + 16 - 4)->mv.as_int;
      }
    } else {
      abovemv.as_int = (mi->bmi + k - 4)->mv.as_int;
    }

    prob = get_sub_mv_ref_prob(leftmv.as_int, abovemv.as_int);

    /* Sub-MV reference tree: 0 -> copy left, 10 -> copy above,
     * 110 -> zero MV, 111 -> new MV (explicit delta from best_mv). */
    if (vp8_read(bc, prob[0])) {
      if (vp8_read(bc, prob[1])) {
        blockmv.as_int = 0;
        if (vp8_read(bc, prob[2])) {
          blockmv.as_mv.row = read_mvcomponent(bc, &mvc[0]) * 2;
          blockmv.as_mv.row += best_mv.as_mv.row;
          blockmv.as_mv.col = read_mvcomponent(bc, &mvc[1]) * 2;
          blockmv.as_mv.col += best_mv.as_mv.col;
        }
      } else {
        blockmv.as_int = abovemv.as_int;
      }
    } else {
      blockmv.as_int = leftmv.as_int;
    }

    mbmi->need_to_clamp_mvs |=
        vp8_check_mv_bounds(&blockmv, mb_to_left_edge, mb_to_right_edge,
                            mb_to_top_edge, mb_to_bottom_edge);

    {
      /* Fill (uniform) modes, mvs of jth subset.
       Must do it here because ensuing subsets can
       refer back to us via "left" or "above". */
      const unsigned char *fill_offset;
      unsigned int fill_count = mbsplit_fill_count[s];

      fill_offset =
          &mbsplit_fill_offset[s][(unsigned char)j * mbsplit_fill_count[s]];

      do {
        mi->bmi[*fill_offset].mv.as_int = blockmv.as_int;
        fill_offset++;
      } while (--fill_count);
    }

  } while (++j < num_p);

  mbmi->partitioning = s;
}
    282 
/* Decode the prediction mode and motion data for one macroblock of an
 * inter frame.  For inter-coded MBs, surveys the above, left, and
 * above-left neighbors to build near/nearest MV candidates and their
 * weighted counts, then decodes the mode (ZEROMV / NEARESTMV / NEARMV /
 * NEWMV / SPLITMV) and any explicit motion vectors.  Intra-coded MBs
 * read their y (and possibly per-subblock B) and uv modes instead. */
static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi,
                             MB_MODE_INFO *mbmi) {
  vp8_reader *const bc = &pbi->mbc[8];
  mbmi->ref_frame = (MV_REFERENCE_FRAME)vp8_read(bc, pbi->prob_intra);
  if (mbmi->ref_frame) /* inter MB */
  {
    enum { CNT_INTRA, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
    int cnt[4];
    int *cntx = cnt;       /* walks cnt in step with nmv */
    int_mv near_mvs[4];
    int_mv *nmv = near_mvs; /* last distinct candidate appended */
    const int mis = pbi->mb.mode_info_stride;
    const MODE_INFO *above = mi - mis;
    const MODE_INFO *left = mi - 1;
    const MODE_INFO *aboveleft = above - 1;
    int *ref_frame_sign_bias = pbi->common.ref_frame_sign_bias;

    mbmi->need_to_clamp_mvs = 0;

    /* Resolve the reference frame: LAST, else GOLDEN vs ALTREF (2 + bit). */
    if (vp8_read(bc, pbi->prob_last)) {
      mbmi->ref_frame =
          (MV_REFERENCE_FRAME)((int)(2 + vp8_read(bc, pbi->prob_gf)));
    }

    /* Zero accumulators */
    nmv[0].as_int = nmv[1].as_int = nmv[2].as_int = 0;
    cnt[0] = cnt[1] = cnt[2] = cnt[3] = 0;

    /* Process above: weight 2; a nonzero MV (sign-corrected for the
     * reference frame's sign bias) becomes a candidate. */
    if (above->mbmi.ref_frame != INTRA_FRAME) {
      if (above->mbmi.mv.as_int) {
        (++nmv)->as_int = above->mbmi.mv.as_int;
        mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame], mbmi->ref_frame,
                nmv, ref_frame_sign_bias);
        ++cntx;
      }

      *cntx += 2;
    }

    /* Process left: weight 2; merged with the previous candidate when
     * the bias-corrected MV matches it. */
    if (left->mbmi.ref_frame != INTRA_FRAME) {
      if (left->mbmi.mv.as_int) {
        int_mv this_mv;

        this_mv.as_int = left->mbmi.mv.as_int;
        mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame], mbmi->ref_frame,
                &this_mv, ref_frame_sign_bias);

        if (this_mv.as_int != nmv->as_int) {
          (++nmv)->as_int = this_mv.as_int;
          ++cntx;
        }

        *cntx += 2;
      } else {
        cnt[CNT_INTRA] += 2;
      }
    }

    /* Process above left: weight 1, same merge rule. */
    if (aboveleft->mbmi.ref_frame != INTRA_FRAME) {
      if (aboveleft->mbmi.mv.as_int) {
        int_mv this_mv;

        this_mv.as_int = aboveleft->mbmi.mv.as_int;
        mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame], mbmi->ref_frame,
                &this_mv, ref_frame_sign_bias);

        if (this_mv.as_int != nmv->as_int) {
          (++nmv)->as_int = this_mv.as_int;
          ++cntx;
        }

        *cntx += 1;
      } else {
        cnt[CNT_INTRA] += 1;
      }
    }

    /* First mode bit: anything-but-ZEROMV, conditioned on the intra count. */
    if (vp8_read(bc, vp8_mode_contexts[cnt[CNT_INTRA]][0])) {
      /* If we have three distinct MV's ... */
      /* See if above-left MV can be merged with NEAREST */
      cnt[CNT_NEAREST] += ((cnt[CNT_SPLITMV] > 0) &
                           (nmv->as_int == near_mvs[CNT_NEAREST].as_int));

      /* Swap near and nearest if necessary */
      if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
        int tmp;
        tmp = cnt[CNT_NEAREST];
        cnt[CNT_NEAREST] = cnt[CNT_NEAR];
        cnt[CNT_NEAR] = tmp;
        tmp = near_mvs[CNT_NEAREST].as_int;
        near_mvs[CNT_NEAREST].as_int = near_mvs[CNT_NEAR].as_int;
        near_mvs[CNT_NEAR].as_int = tmp;
      }

      if (vp8_read(bc, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
        if (vp8_read(bc, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {
          /* NEWMV or SPLITMV: need clamping bounds and a "best" MV. */
          int mb_to_top_edge;
          int mb_to_bottom_edge;
          int mb_to_left_edge;
          int mb_to_right_edge;
          MV_CONTEXT *const mvc = pbi->common.fc.mvc;
          int near_index;

          /* Widen the frame-edge bounds by the prediction margins. */
          mb_to_top_edge = pbi->mb.mb_to_top_edge;
          mb_to_bottom_edge = pbi->mb.mb_to_bottom_edge;
          mb_to_top_edge -= LEFT_TOP_MARGIN;
          mb_to_bottom_edge += RIGHT_BOTTOM_MARGIN;
          mb_to_right_edge = pbi->mb.mb_to_right_edge;
          mb_to_right_edge += RIGHT_BOTTOM_MARGIN;
          mb_to_left_edge = pbi->mb.mb_to_left_edge;
          mb_to_left_edge -= LEFT_TOP_MARGIN;

          /* Use near_mvs[0] to store the "best" MV */
          near_index = CNT_INTRA + (cnt[CNT_NEAREST] >= cnt[CNT_INTRA]);

          vp8_clamp_mv2(&near_mvs[near_index], &pbi->mb);

          /* SPLITMV context: 2 per above/left split neighbor, 1 for
           * above-left. */
          cnt[CNT_SPLITMV] =
              ((above->mbmi.mode == SPLITMV) + (left->mbmi.mode == SPLITMV)) *
                  2 +
              (aboveleft->mbmi.mode == SPLITMV);

          if (vp8_read(bc, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) {
            decode_split_mv(bc, mi, left, above, mbmi, near_mvs[near_index],
                            mvc, mb_to_left_edge, mb_to_right_edge,
                            mb_to_top_edge, mb_to_bottom_edge);
            /* The MB-level MV of a split MB is that of its last block. */
            mbmi->mv.as_int = mi->bmi[15].mv.as_int;
            mbmi->mode = SPLITMV;
            mbmi->is_4x4 = 1;
          } else {
            int_mv *const mbmi_mv = &mbmi->mv;
            read_mv(bc, &mbmi_mv->as_mv, (const MV_CONTEXT *)mvc);
            mbmi_mv->as_mv.row += near_mvs[near_index].as_mv.row;
            mbmi_mv->as_mv.col += near_mvs[near_index].as_mv.col;

            /* Don't need to check this on NEARMV and NEARESTMV
             * modes since those modes clamp the MV. The NEWMV mode
             * does not, so signal to the prediction stage whether
             * special handling may be required.
             */
            mbmi->need_to_clamp_mvs =
                vp8_check_mv_bounds(mbmi_mv, mb_to_left_edge, mb_to_right_edge,
                                    mb_to_top_edge, mb_to_bottom_edge);
            mbmi->mode = NEWMV;
          }
        } else {
          mbmi->mode = NEARMV;
          mbmi->mv.as_int = near_mvs[CNT_NEAR].as_int;
          vp8_clamp_mv2(&mbmi->mv, &pbi->mb);
        }
      } else {
        mbmi->mode = NEARESTMV;
        mbmi->mv.as_int = near_mvs[CNT_NEAREST].as_int;
        vp8_clamp_mv2(&mbmi->mv, &pbi->mb);
      }
    } else {
      mbmi->mode = ZEROMV;
      mbmi->mv.as_int = 0;
    }

#if CONFIG_ERROR_CONCEALMENT
    /* For error concealment every 4x4 block needs an MV, so replicate
     * the MB-level MV when the mode is not SPLITMV. */
    if (pbi->ec_enabled && (mbmi->mode != SPLITMV)) {
      mi->bmi[0].mv.as_int = mi->bmi[1].mv.as_int = mi->bmi[2].mv.as_int =
          mi->bmi[3].mv.as_int = mi->bmi[4].mv.as_int = mi->bmi[5].mv.as_int =
              mi->bmi[6].mv.as_int = mi->bmi[7].mv.as_int =
                  mi->bmi[8].mv.as_int = mi->bmi[9].mv.as_int =
                      mi->bmi[10].mv.as_int = mi->bmi[11].mv.as_int =
                          mi->bmi[12].mv.as_int = mi->bmi[13].mv.as_int =
                              mi->bmi[14].mv.as_int = mi->bmi[15].mv.as_int =
                                  mbmi->mv.as_int;
    }
#endif
  } else {
    /* required for left and above block mv */
    mbmi->mv.as_int = 0;

    /* MB is intra coded */
    if ((mbmi->mode = read_ymode(bc, pbi->common.fc.ymode_prob)) == B_PRED) {
      int j = 0;
      mbmi->is_4x4 = 1;
      do {
        mi->bmi[j].as_mode = read_bmode(bc, pbi->common.fc.bmode_prob);
      } while (++j < 16);
    }

    mbmi->uv_mode = read_uv_mode(bc, pbi->common.fc.uv_mode_prob);
  }
}
    474 
    475 static void read_mb_features(vp8_reader *r, MB_MODE_INFO *mi, MACROBLOCKD *x) {
    476   /* Is segmentation enabled */
    477   if (x->segmentation_enabled && x->update_mb_segmentation_map) {
    478     /* If so then read the segment id. */
    479     if (vp8_read(r, x->mb_segment_tree_probs[0])) {
    480       mi->segment_id =
    481           (unsigned char)(2 + vp8_read(r, x->mb_segment_tree_probs[2]));
    482     } else {
    483       mi->segment_id =
    484           (unsigned char)(vp8_read(r, x->mb_segment_tree_probs[1]));
    485     }
    486   }
    487 }
    488 
    489 static void decode_mb_mode_mvs(VP8D_COMP *pbi, MODE_INFO *mi,
    490                                MB_MODE_INFO *mbmi) {
    491   (void)mbmi;
    492 
    493   /* Read the Macroblock segmentation map if it is being updated explicitly
    494    * this frame (reset to 0 above by default)
    495    * By default on a key frame reset all MBs to segment 0
    496    */
    497   if (pbi->mb.update_mb_segmentation_map) {
    498     read_mb_features(&pbi->mbc[8], &mi->mbmi, &pbi->mb);
    499   } else if (pbi->common.frame_type == KEY_FRAME) {
    500     mi->mbmi.segment_id = 0;
    501   }
    502 
    503   /* Read the macroblock coeff skip flag if this feature is in use,
    504    * else default to 0 */
    505   if (pbi->common.mb_no_coeff_skip) {
    506     mi->mbmi.mb_skip_coeff = vp8_read(&pbi->mbc[8], pbi->prob_skip_false);
    507   } else {
    508     mi->mbmi.mb_skip_coeff = 0;
    509   }
    510 
    511   mi->mbmi.is_4x4 = 0;
    512   if (pbi->common.frame_type == KEY_FRAME) {
    513     read_kf_modes(pbi, mi);
    514   } else {
    515     read_mb_modes_mv(pbi, mi, &mi->mbmi);
    516   }
    517 }
    518 
/* Decode modes and motion vectors for every macroblock in the frame.
 * Maintains the distances from the current MB to the frame edges (in
 * 1/8-pel units; one MB is 16 << 3) as the scan advances, for use in
 * MV clamping. */
void vp8_decode_mode_mvs(VP8D_COMP *pbi) {
  MODE_INFO *mi = pbi->common.mi;
  int mb_row = -1;
  int mb_to_right_edge_start;

  mb_mode_mv_init(pbi);

  pbi->mb.mb_to_top_edge = 0;
  pbi->mb.mb_to_bottom_edge = ((pbi->common.mb_rows - 1) * 16) << 3;
  mb_to_right_edge_start = ((pbi->common.mb_cols - 1) * 16) << 3;

  while (++mb_row < pbi->common.mb_rows) {
    int mb_col = -1;

    pbi->mb.mb_to_left_edge = 0;
    pbi->mb.mb_to_right_edge = mb_to_right_edge_start;

    while (++mb_col < pbi->common.mb_cols) {
#if CONFIG_ERROR_CONCEALMENT
      int mb_num = mb_row * pbi->common.mb_cols + mb_col;
#endif

      decode_mb_mode_mvs(pbi, mi, &mi->mbmi);

#if CONFIG_ERROR_CONCEALMENT
      /* look for corruption. set mvs_corrupt_from_mb to the current
       * mb_num if the frame is corrupt from this macroblock. */
      if (vp8dx_bool_error(&pbi->mbc[8]) &&
          mb_num < (int)pbi->mvs_corrupt_from_mb) {
        pbi->mvs_corrupt_from_mb = mb_num;
        /* no need to continue since the partition is corrupt from
         * here on.
         */
        return;
      }
#endif

      pbi->mb.mb_to_left_edge -= (16 << 3);
      pbi->mb.mb_to_right_edge -= (16 << 3);
      mi++; /* next macroblock */
    }
    pbi->mb.mb_to_top_edge -= (16 << 3);
    pbi->mb.mb_to_bottom_edge -= (16 << 3);

    mi++; /* skip left predictor each row */
  }
}
    566