/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */


#include <limits.h>
#include "vpx_config.h"
#include "vp8_rtcd.h"
#include "vpx/vpx_integer.h"
#include "blockd.h"
#include "reconinter.h"
#if CONFIG_RUNTIME_CPU_DETECT
#include "onyxc_int.h"
#endif

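/* Plain block-copy helpers used for full-pel motion compensation. Each
 * copies a block row by row from src (spaced src_stride apart) into dst
 * (spaced dst_stride apart). When CONFIG_FAST_UNALIGNED is set, each row
 * is copied as 32-bit words instead of bytes, which assumes the target
 * handles unaligned word accesses.
 */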
void vp8_copy_mem16x16_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 16; r++)
    {
#if !(CONFIG_FAST_UNALIGNED)
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        dst[4] = src[4];
        dst[5] = src[5];
        dst[6] = src[6];
        dst[7] = src[7];
        dst[8] = src[8];
        dst[9] = src[9];
        dst[10] = src[10];
        dst[11] = src[11];
        dst[12] = src[12];
        dst[13] = src[13];
        dst[14] = src[14];
        dst[15] = src[15];
#else
        ((uint32_t *)dst)[0] = ((uint32_t *)src)[0];
        ((uint32_t *)dst)[1] = ((uint32_t *)src)[1];
        ((uint32_t *)dst)[2] = ((uint32_t *)src)[2];
        ((uint32_t *)dst)[3] = ((uint32_t *)src)[3];
#endif
        src += src_stride;
        dst += dst_stride;
    }
}

void vp8_copy_mem8x8_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 8; r++)
    {
#if !(CONFIG_FAST_UNALIGNED)
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        dst[4] = src[4];
        dst[5] = src[5];
        dst[6] = src[6];
        dst[7] = src[7];
#else
        ((uint32_t *)dst)[0] = ((uint32_t *)src)[0];
        ((uint32_t *)dst)[1] = ((uint32_t *)src)[1];
#endif
        src += src_stride;
        dst += dst_stride;
    }
}

void vp8_copy_mem8x4_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 4; r++)
    {
#if !(CONFIG_FAST_UNALIGNED)
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        dst[4] = src[4];
        dst[5] = src[5];
        dst[6] = src[6];
        dst[7] = src[7];
#else
        ((uint32_t *)dst)[0] = ((uint32_t *)src)[0];
        ((uint32_t *)dst)[1] = ((uint32_t *)src)[1];
#endif
        src += src_stride;
        dst += dst_stride;
    }
}

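/* Build the inter prediction for a single 4x4 block into d->predictor.
 * The low three bits of each MV component hold the sub-pel offset and the
 * remaining bits the full-pel displacement into the reference. Sub-pel MVs
 * go through the supplied sub-pixel filter; full-pel MVs are a straight
 * 4x4 copy.
 */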
void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, unsigned char *base_pre, int pre_stride, vp8_subpix_fn_t sppf)
{
    int r;
    unsigned char *pred_ptr = d->predictor;
    unsigned char *ptr;
    ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
        sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch);
    }
    else
    {
        for (r = 0; r < 4; r++)
        {
            pred_ptr[0] = ptr[0];
            pred_ptr[1] = ptr[1];
            pred_ptr[2] = ptr[2];
            pred_ptr[3] = ptr[3];
            pred_ptr += pitch;
            ptr += pre_stride;
        }
    }
}

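/* Static helpers used when a macroblock is predicted in pieces:
 * build_inter_predictors4b handles an 8x8 block, build_inter_predictors2b
 * handles two horizontally adjacent 4x4 blocks that share one MV (an 8x4
 * region), and build_inter_predictors_b handles a lone 4x4 block, writing
 * straight into the destination buffer rather than d->predictor.
 */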
static void build_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, unsigned char *dst, int dst_stride, unsigned char *base_pre, int pre_stride)
{
    unsigned char *ptr;
    ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
        x->subpixel_predict8x8(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride);
    }
    else
    {
        vp8_copy_mem8x8(ptr, pre_stride, dst, dst_stride);
    }
}

static void build_inter_predictors2b(MACROBLOCKD *x, BLOCKD *d, unsigned char *dst, int dst_stride, unsigned char *base_pre, int pre_stride)
{
    unsigned char *ptr;
    ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
        x->subpixel_predict8x4(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride);
    }
    else
    {
        vp8_copy_mem8x4(ptr, pre_stride, dst, dst_stride);
    }
}

static void build_inter_predictors_b(BLOCKD *d, unsigned char *dst, int dst_stride, unsigned char *base_pre, int pre_stride, vp8_subpix_fn_t sppf)
{
    int r;
    unsigned char *ptr;
    ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
        sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride);
    }
    else
    {
        for (r = 0; r < 4; r++)
        {
            dst[0] = ptr[0];
            dst[1] = ptr[1];
            dst[2] = ptr[2];
            dst[3] = ptr[3];
            dst += dst_stride;
            ptr += pre_stride;
        }
    }
}


/*encoder only*/
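/* The chroma MV for a 16x16 prediction is the luma MV halved with rounding
 * away from zero, then masked by fullpixel_mask, which drops the sub-pel
 * bits when the stream restricts motion to full-pel. The 8x8 U and V
 * predictions land at offsets 256 and 320 of x->predictor.
 */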
void vp8_build_inter16x16_predictors_mbuv(MACROBLOCKD *x)
{
    unsigned char *uptr, *vptr;
    unsigned char *upred_ptr = &x->predictor[256];
    unsigned char *vpred_ptr = &x->predictor[320];

    int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
    int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
    int offset;
    int pre_stride = x->pre.uv_stride;

    /* calc uv motion vectors */
    mv_row += 1 | (mv_row >> (sizeof(int) * CHAR_BIT - 1));
    mv_col += 1 | (mv_col >> (sizeof(int) * CHAR_BIT - 1));
    mv_row /= 2;
    mv_col /= 2;
    mv_row &= x->fullpixel_mask;
    mv_col &= x->fullpixel_mask;

    offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
    uptr = x->pre.u_buffer + offset;
    vptr = x->pre.v_buffer + offset;

    if ((mv_row | mv_col) & 7)
    {
        x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr, 8);
        x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr, 8);
    }
    else
    {
        vp8_copy_mem8x8(uptr, pre_stride, upred_ptr, 8);
        vp8_copy_mem8x8(vptr, pre_stride, vpred_ptr, 8);
    }
}

/*encoder only*/
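/* SPLITMV variant: each 4x4 chroma block gets the four luma sub-block MVs
 * that cover it summed and divided by 8 (averaged, then halved for chroma)
 * with rounding, masked to full-pel when required; the V blocks reuse the
 * U MVs. Chroma blocks are then predicted in 8x4 pairs whenever both
 * halves share the same MV.
 */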
void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x)
{
    int i, j;
    int pre_stride = x->pre.uv_stride;
    unsigned char *base_pre;

    /* build uv mvs */
    for (i = 0; i < 2; i++)
    {
        for (j = 0; j < 2; j++)
        {
            int yoffset = i * 8 + j * 2;
            int uoffset = 16 + i * 2 + j;
            int voffset = 20 + i * 2 + j;

            int temp;

            temp = x->block[yoffset  ].bmi.mv.as_mv.row
                   + x->block[yoffset+1].bmi.mv.as_mv.row
                   + x->block[yoffset+4].bmi.mv.as_mv.row
                   + x->block[yoffset+5].bmi.mv.as_mv.row;

            temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);

            x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask;

            temp = x->block[yoffset  ].bmi.mv.as_mv.col
                   + x->block[yoffset+1].bmi.mv.as_mv.col
                   + x->block[yoffset+4].bmi.mv.as_mv.col
                   + x->block[yoffset+5].bmi.mv.as_mv.col;

            temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);

            x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;

            x->block[voffset].bmi.mv.as_int = x->block[uoffset].bmi.mv.as_int;
        }
    }

    base_pre = x->pre.u_buffer;
    for (i = 16; i < 20; i += 2)
    {
        BLOCKD *d0 = &x->block[i];
        BLOCKD *d1 = &x->block[i+1];

        if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
            build_inter_predictors2b(x, d0, d0->predictor, 8, base_pre, pre_stride);
        else
        {
            vp8_build_inter_predictors_b(d0, 8, base_pre, pre_stride, x->subpixel_predict);
            vp8_build_inter_predictors_b(d1, 8, base_pre, pre_stride, x->subpixel_predict);
        }
    }

    base_pre = x->pre.v_buffer;
    for (i = 20; i < 24; i += 2)
    {
        BLOCKD *d0 = &x->block[i];
        BLOCKD *d1 = &x->block[i+1];

        if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
            build_inter_predictors2b(x, d0, d0->predictor, 8, base_pre, pre_stride);
        else
        {
            vp8_build_inter_predictors_b(d0, 8, base_pre, pre_stride, x->subpixel_predict);
            vp8_build_inter_predictors_b(d1, 8, base_pre, pre_stride, x->subpixel_predict);
        }
    }
}


/*encoder only*/
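/* 16x16 luma-only prediction into dst_y; same full-pel/sub-pel split as
 * the functions above, but leaves chroma untouched.
 */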
void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *x,
                                         unsigned char *dst_y,
                                         int dst_ystride)
{
    unsigned char *ptr_base;
    unsigned char *ptr;
    int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
    int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
    int pre_stride = x->pre.y_stride;

    ptr_base = x->pre.y_buffer;
    ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);

    if ((mv_row | mv_col) & 7)
    {
        x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7,
                                 dst_y, dst_ystride);
    }
    else
    {
        vp8_copy_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
    }
}

static void clamp_mv_to_umv_border(MV *mv, const MACROBLOCKD *xd)
{
    /* If the MV points so far into the UMV border that no visible pixels
     * are used for reconstruction, the subpel part of the MV can be
     * discarded and the MV limited to 16 pixels with equivalent results.
     *
     * This limit kicks in at 19 pixels for the top and left edges, for
     * the 16 pixels plus 3 taps right of the central pixel when subpel
     * filtering. The bottom and right edges use 16 pixels plus 2 pixels
     * left of the central pixel when filtering.
     */
    if (mv->col < (xd->mb_to_left_edge - (19 << 3)))
        mv->col = xd->mb_to_left_edge - (16 << 3);
    else if (mv->col > xd->mb_to_right_edge + (18 << 3))
        mv->col = xd->mb_to_right_edge + (16 << 3);

    if (mv->row < (xd->mb_to_top_edge - (19 << 3)))
        mv->row = xd->mb_to_top_edge - (16 << 3);
    else if (mv->row > xd->mb_to_bottom_edge + (18 << 3))
        mv->row = xd->mb_to_bottom_edge + (16 << 3);
}

/* A version of the above function for chroma block MVs. */
static void clamp_uvmv_to_umv_border(MV *mv, const MACROBLOCKD *xd)
{
    mv->col = (2*mv->col < (xd->mb_to_left_edge - (19 << 3))) ?
        (xd->mb_to_left_edge - (16 << 3)) >> 1 : mv->col;
    mv->col = (2*mv->col > xd->mb_to_right_edge + (18 << 3)) ?
        (xd->mb_to_right_edge + (16 << 3)) >> 1 : mv->col;

    mv->row = (2*mv->row < (xd->mb_to_top_edge - (19 << 3))) ?
        (xd->mb_to_top_edge - (16 << 3)) >> 1 : mv->row;
    mv->row = (2*mv->row > xd->mb_to_bottom_edge + (18 << 3)) ?
        (xd->mb_to_bottom_edge + (16 << 3)) >> 1 : mv->row;
}

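/* Decode-side 16x16 prediction: builds Y, U and V for a macroblock from a
 * single MV, clamping it against the extended (UMV) border when flagged.
 * pre_stride is simply halved for chroma, relying on the frame buffers
 * being allocated with uv_stride == y_stride / 2, and the chroma MV is
 * the halved, rounded luma MV as above.
 */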
void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x,
                                        unsigned char *dst_y,
                                        unsigned char *dst_u,
                                        unsigned char *dst_v,
                                        int dst_ystride,
                                        int dst_uvstride)
{
    int offset;
    unsigned char *ptr;
    unsigned char *uptr, *vptr;

    int_mv _16x16mv;

    unsigned char *ptr_base = x->pre.y_buffer;
    int pre_stride = x->pre.y_stride;

    _16x16mv.as_int = x->mode_info_context->mbmi.mv.as_int;

    if (x->mode_info_context->mbmi.need_to_clamp_mvs)
    {
        clamp_mv_to_umv_border(&_16x16mv.as_mv, x);
    }

    ptr = ptr_base + (_16x16mv.as_mv.row >> 3) * pre_stride + (_16x16mv.as_mv.col >> 3);

    if (_16x16mv.as_int & 0x00070007)
    {
        x->subpixel_predict16x16(ptr, pre_stride, _16x16mv.as_mv.col & 7, _16x16mv.as_mv.row & 7, dst_y, dst_ystride);
    }
    else
    {
        vp8_copy_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
    }

    /* calc uv motion vectors */
    _16x16mv.as_mv.row += 1 | (_16x16mv.as_mv.row >> (sizeof(int) * CHAR_BIT - 1));
    _16x16mv.as_mv.col += 1 | (_16x16mv.as_mv.col >> (sizeof(int) * CHAR_BIT - 1));
    _16x16mv.as_mv.row /= 2;
    _16x16mv.as_mv.col /= 2;
    _16x16mv.as_mv.row &= x->fullpixel_mask;
    _16x16mv.as_mv.col &= x->fullpixel_mask;

    pre_stride >>= 1;
    offset = (_16x16mv.as_mv.row >> 3) * pre_stride + (_16x16mv.as_mv.col >> 3);
    uptr = x->pre.u_buffer + offset;
    vptr = x->pre.v_buffer + offset;

    if (_16x16mv.as_int & 0x00070007)
    {
        x->subpixel_predict8x8(uptr, pre_stride, _16x16mv.as_mv.col & 7, _16x16mv.as_mv.row & 7, dst_u, dst_uvstride);
        x->subpixel_predict8x8(vptr, pre_stride, _16x16mv.as_mv.col & 7, _16x16mv.as_mv.row & 7, dst_v, dst_uvstride);
    }
    else
    {
        vp8_copy_mem8x8(uptr, pre_stride, dst_u, dst_uvstride);
        vp8_copy_mem8x8(vptr, pre_stride, dst_v, dst_uvstride);
    }
}

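/* SPLITMV prediction for a macroblock. For partitionings coarser than 4x4
 * (partitioning < 3) the luma plane is built from four 8x8 predictions at
 * blocks 0, 2, 8 and 10; otherwise 4x4 blocks are handled in pairs,
 * sharing an 8x4 prediction when both MVs match. dst_stride is passed for
 * both the destination and the reference stride, which assumes the pre
 * and dst frame buffers are laid out identically.
 */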
static void build_inter4x4_predictors_mb(MACROBLOCKD *x)
{
    int i;
    unsigned char *base_dst = x->dst.y_buffer;
    unsigned char *base_pre = x->pre.y_buffer;

    if (x->mode_info_context->mbmi.partitioning < 3)
    {
        BLOCKD *b;
        int dst_stride = x->dst.y_stride;

        x->block[ 0].bmi = x->mode_info_context->bmi[ 0];
        x->block[ 2].bmi = x->mode_info_context->bmi[ 2];
        x->block[ 8].bmi = x->mode_info_context->bmi[ 8];
        x->block[10].bmi = x->mode_info_context->bmi[10];
        if (x->mode_info_context->mbmi.need_to_clamp_mvs)
        {
            clamp_mv_to_umv_border(&x->block[ 0].bmi.mv.as_mv, x);
            clamp_mv_to_umv_border(&x->block[ 2].bmi.mv.as_mv, x);
            clamp_mv_to_umv_border(&x->block[ 8].bmi.mv.as_mv, x);
            clamp_mv_to_umv_border(&x->block[10].bmi.mv.as_mv, x);
        }

        b = &x->block[ 0];
        build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride);
        b = &x->block[ 2];
        build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride);
        b = &x->block[ 8];
        build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride);
        b = &x->block[10];
        build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride);
    }
    else
    {
        for (i = 0; i < 16; i += 2)
        {
            BLOCKD *d0 = &x->block[i];
            BLOCKD *d1 = &x->block[i+1];
            int dst_stride = x->dst.y_stride;

            x->block[i+0].bmi = x->mode_info_context->bmi[i+0];
            x->block[i+1].bmi = x->mode_info_context->bmi[i+1];
            if (x->mode_info_context->mbmi.need_to_clamp_mvs)
            {
                clamp_mv_to_umv_border(&x->block[i+0].bmi.mv.as_mv, x);
                clamp_mv_to_umv_border(&x->block[i+1].bmi.mv.as_mv, x);
            }

            if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
                build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride);
            else
            {
                build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
                build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
            }
        }
    }

    base_dst = x->dst.u_buffer;
    base_pre = x->pre.u_buffer;
    for (i = 16; i < 20; i += 2)
    {
        BLOCKD *d0 = &x->block[i];
        BLOCKD *d1 = &x->block[i+1];
        int dst_stride = x->dst.uv_stride;

        /* Note: uv mvs already clamped in build_4x4uvmvs() */

        if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
            build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride);
        else
        {
            build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
            build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
        }
    }

    base_dst = x->dst.v_buffer;
    base_pre = x->pre.v_buffer;
    for (i = 20; i < 24; i += 2)
    {
        BLOCKD *d0 = &x->block[i];
        BLOCKD *d1 = &x->block[i+1];
        int dst_stride = x->dst.uv_stride;

        /* Note: uv mvs already clamped in build_4x4uvmvs() */

        if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
            build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride);
        else
        {
            build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
            build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
        }
    }
}

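/* Derive the chroma block MVs for SPLITMV from the luma sub-block MVs in
 * mode_info_context, using the same sum-and-divide-by-8 rounding as
 * vp8_build_inter4x4_predictors_mbuv, and clamp them to the UMV border
 * when the macroblock requires it.
 */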
static void build_4x4uvmvs(MACROBLOCKD *x)
{
    int i, j;

    for (i = 0; i < 2; i++)
    {
        for (j = 0; j < 2; j++)
        {
            int yoffset = i * 8 + j * 2;
            int uoffset = 16 + i * 2 + j;
            int voffset = 20 + i * 2 + j;

            int temp;

            temp = x->mode_info_context->bmi[yoffset + 0].mv.as_mv.row
                 + x->mode_info_context->bmi[yoffset + 1].mv.as_mv.row
                 + x->mode_info_context->bmi[yoffset + 4].mv.as_mv.row
                 + x->mode_info_context->bmi[yoffset + 5].mv.as_mv.row;

            temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);

            x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask;

            temp = x->mode_info_context->bmi[yoffset + 0].mv.as_mv.col
                 + x->mode_info_context->bmi[yoffset + 1].mv.as_mv.col
                 + x->mode_info_context->bmi[yoffset + 4].mv.as_mv.col
                 + x->mode_info_context->bmi[yoffset + 5].mv.as_mv.col;

            temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);

            x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;

            if (x->mode_info_context->mbmi.need_to_clamp_mvs)
                clamp_uvmv_to_umv_border(&x->block[uoffset].bmi.mv.as_mv, x);

            x->block[voffset].bmi.mv.as_int = x->block[uoffset].bmi.mv.as_int;
        }
    }
}

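/* Top-level entry point: whole-macroblock prediction for non-SPLITMV
 * modes, otherwise per-block prediction after the chroma MVs have been
 * derived.
 */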
void vp8_build_inter_predictors_mb(MACROBLOCKD *xd)
{
    if (xd->mode_info_context->mbmi.mode != SPLITMV)
    {
        vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
                                           xd->dst.u_buffer, xd->dst.v_buffer,
                                           xd->dst.y_stride, xd->dst.uv_stride);
    }
    else
    {
        build_4x4uvmvs(xd);
        build_inter4x4_predictors_mb(xd);
    }
}