/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */


#include <limits.h>
#include "vpx_config.h"
#include "vp8_rtcd.h"
#include "vpx/vpx_integer.h"
#include "blockd.h"
#include "reconinter.h"
#if CONFIG_RUNTIME_CPU_DETECT
#include "onyxc_int.h"
#endif

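/* Plain block-copy helpers.  When CONFIG_FAST_UNALIGNED is set, each row is
 * copied as 32-bit words, which assumes the target tolerates unaligned
 * word-sized loads and stores.
 */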
void vp8_copy_mem16x16_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 16; r++)
    {
#if !(CONFIG_FAST_UNALIGNED)
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        dst[4] = src[4];
        dst[5] = src[5];
        dst[6] = src[6];
        dst[7] = src[7];
        dst[8] = src[8];
        dst[9] = src[9];
        dst[10] = src[10];
        dst[11] = src[11];
        dst[12] = src[12];
        dst[13] = src[13];
        dst[14] = src[14];
        dst[15] = src[15];
#else
        ((uint32_t *)dst)[0] = ((uint32_t *)src)[0];
        ((uint32_t *)dst)[1] = ((uint32_t *)src)[1];
        ((uint32_t *)dst)[2] = ((uint32_t *)src)[2];
        ((uint32_t *)dst)[3] = ((uint32_t *)src)[3];
#endif
        src += src_stride;
        dst += dst_stride;
    }
}

void vp8_copy_mem8x8_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 8; r++)
    {
#if !(CONFIG_FAST_UNALIGNED)
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        dst[4] = src[4];
        dst[5] = src[5];
        dst[6] = src[6];
        dst[7] = src[7];
#else
        ((uint32_t *)dst)[0] = ((uint32_t *)src)[0];
        ((uint32_t *)dst)[1] = ((uint32_t *)src)[1];
#endif
        src += src_stride;
        dst += dst_stride;
    }
}

void vp8_copy_mem8x4_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 4; r++)
    {
#if !(CONFIG_FAST_UNALIGNED)
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        dst[4] = src[4];
        dst[5] = src[5];
        dst[6] = src[6];
        dst[7] = src[7];
#else
        ((uint32_t *)dst)[0] = ((uint32_t *)src)[0];
        ((uint32_t *)dst)[1] = ((uint32_t *)src)[1];
#endif
        src += src_stride;
        dst += dst_stride;
    }
}

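/* Build the prediction for a single 4x4 block.  Motion vectors carry three
 * fractional bits (1/8-pel units): >> 3 gives the full-pel offset into the
 * reference buffer and & 7 the sub-pel phase.  The sub-pixel filter is only
 * applied when either component has a non-zero fraction; otherwise the
 * reference rows are copied directly.
 */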
void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, unsigned char *base_pre, int pre_stride, vp8_subpix_fn_t sppf)
{
    int r;
    unsigned char *pred_ptr = d->predictor;
    unsigned char *ptr;
    ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
        sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch);
    }
    else
    {
        for (r = 0; r < 4; r++)
        {
#if !(CONFIG_FAST_UNALIGNED)
            pred_ptr[0] = ptr[0];
            pred_ptr[1] = ptr[1];
            pred_ptr[2] = ptr[2];
            pred_ptr[3] = ptr[3];
#else
            *(uint32_t *)pred_ptr = *(uint32_t *)ptr;
#endif
            pred_ptr += pitch;
            ptr += pre_stride;
        }
    }
}

static void build_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, unsigned char *dst, int dst_stride, unsigned char *base_pre, int pre_stride)
{
    unsigned char *ptr;
    ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
        x->subpixel_predict8x8(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride);
    }
    else
    {
        vp8_copy_mem8x8(ptr, pre_stride, dst, dst_stride);
    }
}

static void build_inter_predictors2b(MACROBLOCKD *x, BLOCKD *d, unsigned char *dst, int dst_stride, unsigned char *base_pre, int pre_stride)
{
    unsigned char *ptr;
    ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
        x->subpixel_predict8x4(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride);
    }
    else
    {
        vp8_copy_mem8x4(ptr, pre_stride, dst, dst_stride);
    }
}

static void build_inter_predictors_b(BLOCKD *d, unsigned char *dst, int dst_stride, unsigned char *base_pre, int pre_stride, vp8_subpix_fn_t sppf)
{
    int r;
    unsigned char *ptr;
    ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
        sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride);
    }
    else
    {
        for (r = 0; r < 4; r++)
        {
#if !(CONFIG_FAST_UNALIGNED)
            dst[0] = ptr[0];
            dst[1] = ptr[1];
            dst[2] = ptr[2];
            dst[3] = ptr[3];
#else
            *(uint32_t *)dst = *(uint32_t *)ptr;
#endif
            dst += dst_stride;
            ptr += pre_stride;
        }
    }
}


/*encoder only*/
void vp8_build_inter16x16_predictors_mbuv(MACROBLOCKD *x)
{
    unsigned char *uptr, *vptr;
    unsigned char *upred_ptr = &x->predictor[256];
    unsigned char *vpred_ptr = &x->predictor[320];

    int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
    int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
    int offset;
    int pre_stride = x->pre.uv_stride;

    /* calc uv motion vectors */
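    /* Halve the luma MV to chroma resolution, rounding away from zero:
     * 1 | (x >> (bits - 1)) evaluates to +1 for non-negative x and -1 for
     * negative x.  fullpixel_mask then clears the fractional bits when the
     * frame is coded with full-pel-only motion vectors.
     */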
    mv_row += 1 | (mv_row >> (sizeof(int) * CHAR_BIT - 1));
    mv_col += 1 | (mv_col >> (sizeof(int) * CHAR_BIT - 1));
    mv_row /= 2;
    mv_col /= 2;
    mv_row &= x->fullpixel_mask;
    mv_col &= x->fullpixel_mask;

    offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
    uptr = x->pre.u_buffer + offset;
    vptr = x->pre.v_buffer + offset;

    if ((mv_row | mv_col) & 7)
    {
        x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr, 8);
        x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr, 8);
    }
    else
    {
        vp8_copy_mem8x8(uptr, pre_stride, upred_ptr, 8);
        vp8_copy_mem8x8(vptr, pre_stride, vpred_ptr, 8);
    }
}

/*encoder only*/
void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x)
{
    int i, j;
    int pre_stride = x->pre.uv_stride;
    unsigned char *base_pre;

    /* build uv mvs */
    for (i = 0; i < 2; i++)
    {
        for (j = 0; j < 2; j++)
        {
            int yoffset = i * 8 + j * 2;
            int uoffset = 16 + i * 2 + j;
            int voffset = 20 + i * 2 + j;

            int temp;

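            /* The chroma MV for this 4x4 block is the sum of the four
             * co-located luma sub-block MVs divided by 8 (average, scaled to
             * chroma resolution).  Adding 4 for a non-negative sum and -4 for
             * a negative one rounds the result away from zero.
             */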
            temp = x->block[yoffset  ].bmi.mv.as_mv.row
                   + x->block[yoffset+1].bmi.mv.as_mv.row
                   + x->block[yoffset+4].bmi.mv.as_mv.row
                   + x->block[yoffset+5].bmi.mv.as_mv.row;

            temp += 4 + ((temp >> (sizeof(int) * CHAR_BIT - 1)) << 3);

            x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask;

            temp = x->block[yoffset  ].bmi.mv.as_mv.col
                   + x->block[yoffset+1].bmi.mv.as_mv.col
                   + x->block[yoffset+4].bmi.mv.as_mv.col
                   + x->block[yoffset+5].bmi.mv.as_mv.col;

            temp += 4 + ((temp >> (sizeof(int) * CHAR_BIT - 1)) << 3);

            x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;

            x->block[voffset].bmi.mv.as_int = x->block[uoffset].bmi.mv.as_int;
        }
    }

    base_pre = x->pre.u_buffer;
    for (i = 16; i < 20; i += 2)
    {
        BLOCKD *d0 = &x->block[i];
        BLOCKD *d1 = &x->block[i+1];

        if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
            build_inter_predictors2b(x, d0, d0->predictor, 8, base_pre, pre_stride);
        else
        {
            vp8_build_inter_predictors_b(d0, 8, base_pre, pre_stride, x->subpixel_predict);
            vp8_build_inter_predictors_b(d1, 8, base_pre, pre_stride, x->subpixel_predict);
        }
    }

    base_pre = x->pre.v_buffer;
    for (i = 20; i < 24; i += 2)
    {
        BLOCKD *d0 = &x->block[i];
        BLOCKD *d1 = &x->block[i+1];

        if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
            build_inter_predictors2b(x, d0, d0->predictor, 8, base_pre, pre_stride);
        else
        {
            vp8_build_inter_predictors_b(d0, 8, base_pre, pre_stride, x->subpixel_predict);
            vp8_build_inter_predictors_b(d1, 8, base_pre, pre_stride, x->subpixel_predict);
        }
    }
}


/*encoder only*/
void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *x,
                                         unsigned char *dst_y,
                                         int dst_ystride)
{
    unsigned char *ptr_base;
    unsigned char *ptr;
    int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
    int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
    int pre_stride = x->pre.y_stride;

    ptr_base = x->pre.y_buffer;
    ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);

    if ((mv_row | mv_col) & 7)
    {
        x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7,
                                 dst_y, dst_ystride);
    }
    else
    {
        vp8_copy_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
    }
}

static void clamp_mv_to_umv_border(MV *mv, const MACROBLOCKD *xd)
{
    /* If the MV points so far into the UMV border that no visible pixels
     * are used for reconstruction, the subpel part of the MV can be
     * discarded and the MV limited to 16 pixels with equivalent results.
     *
     * This limit kicks in at 19 pixels for the top and left edges, for
     * the 16 pixels plus 3 taps right of the central pixel when subpel
     * filtering. The bottom and right edges use 16 pixels plus 2 pixels
     * left of the central pixel when filtering.
     */
    if (mv->col < (xd->mb_to_left_edge - (19 << 3)))
        mv->col = xd->mb_to_left_edge - (16 << 3);
    else if (mv->col > xd->mb_to_right_edge + (18 << 3))
        mv->col = xd->mb_to_right_edge + (16 << 3);

    if (mv->row < (xd->mb_to_top_edge - (19 << 3)))
        mv->row = xd->mb_to_top_edge - (16 << 3);
    else if (mv->row > xd->mb_to_bottom_edge + (18 << 3))
        mv->row = xd->mb_to_bottom_edge + (16 << 3);
}

/* A version of the above function for chroma block MVs. */
static void clamp_uvmv_to_umv_border(MV *mv, const MACROBLOCKD *xd)
{
    mv->col = (2*mv->col < (xd->mb_to_left_edge - (19 << 3))) ?
        (xd->mb_to_left_edge - (16 << 3)) >> 1 : mv->col;
    mv->col = (2*mv->col > xd->mb_to_right_edge + (18 << 3)) ?
        (xd->mb_to_right_edge + (16 << 3)) >> 1 : mv->col;

    mv->row = (2*mv->row < (xd->mb_to_top_edge - (19 << 3))) ?
        (xd->mb_to_top_edge - (16 << 3)) >> 1 : mv->row;
    mv->row = (2*mv->row > xd->mb_to_bottom_edge + (18 << 3)) ?
        (xd->mb_to_bottom_edge + (16 << 3)) >> 1 : mv->row;
}

void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x,
                                        unsigned char *dst_y,
                                        unsigned char *dst_u,
                                        unsigned char *dst_v,
                                        int dst_ystride,
                                        int dst_uvstride)
{
    int offset;
    unsigned char *ptr;
    unsigned char *uptr, *vptr;

    int_mv _16x16mv;

    unsigned char *ptr_base = x->pre.y_buffer;
    int pre_stride = x->pre.y_stride;

    _16x16mv.as_int = x->mode_info_context->mbmi.mv.as_int;

    if (x->mode_info_context->mbmi.need_to_clamp_mvs)
    {
        clamp_mv_to_umv_border(&_16x16mv.as_mv, x);
    }

    ptr = ptr_base + (_16x16mv.as_mv.row >> 3) * pre_stride + (_16x16mv.as_mv.col >> 3);

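    /* 0x00070007 masks the three fractional bits of both packed 16-bit MV
     * components, so this tests whether either component needs sub-pixel
     * filtering.
     */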
    if (_16x16mv.as_int & 0x00070007)
    {
        x->subpixel_predict16x16(ptr, pre_stride, _16x16mv.as_mv.col & 7, _16x16mv.as_mv.row & 7, dst_y, dst_ystride);
    }
    else
    {
        vp8_copy_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
    }

    /* calc uv motion vectors */
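    /* Same rounding as in vp8_build_inter16x16_predictors_mbuv(): halve the
     * luma MV to chroma precision, rounding away from zero, then drop the
     * fractional bits if the frame is full-pel only.
     */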
    _16x16mv.as_mv.row += 1 | (_16x16mv.as_mv.row >> (sizeof(int) * CHAR_BIT - 1));
    _16x16mv.as_mv.col += 1 | (_16x16mv.as_mv.col >> (sizeof(int) * CHAR_BIT - 1));
    _16x16mv.as_mv.row /= 2;
    _16x16mv.as_mv.col /= 2;
    _16x16mv.as_mv.row &= x->fullpixel_mask;
    _16x16mv.as_mv.col &= x->fullpixel_mask;

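    /* pre_stride held the luma stride; the chroma planes of VP8's frame
     * buffers use half that stride, so halving it here gives uv_stride.
     */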
    pre_stride >>= 1;
    offset = (_16x16mv.as_mv.row >> 3) * pre_stride + (_16x16mv.as_mv.col >> 3);
    uptr = x->pre.u_buffer + offset;
    vptr = x->pre.v_buffer + offset;

    if (_16x16mv.as_int & 0x00070007)
    {
        x->subpixel_predict8x8(uptr, pre_stride, _16x16mv.as_mv.col & 7, _16x16mv.as_mv.row & 7, dst_u, dst_uvstride);
        x->subpixel_predict8x8(vptr, pre_stride, _16x16mv.as_mv.col & 7, _16x16mv.as_mv.row & 7, dst_v, dst_uvstride);
    }
    else
    {
        vp8_copy_mem8x8(uptr, pre_stride, dst_u, dst_uvstride);
        vp8_copy_mem8x8(vptr, pre_stride, dst_v, dst_uvstride);
    }
}

static void build_inter4x4_predictors_mb(MACROBLOCKD *x)
{
    int i;
    unsigned char *base_dst = x->dst.y_buffer;
    unsigned char *base_pre = x->pre.y_buffer;

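    /* For the 16x8, 8x16 and 8x8 partitionings (partitioning < 3), every 8x8
     * quadrant shares a single MV, so prediction can be done with four 8x8
     * calls using the MVs of corner blocks 0, 2, 8 and 10.  Only the 4x4
     * partitioning needs the per-block path below.
     */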
    if (x->mode_info_context->mbmi.partitioning < 3)
    {
        BLOCKD *b;
        int dst_stride = x->dst.y_stride;

        x->block[ 0].bmi = x->mode_info_context->bmi[ 0];
        x->block[ 2].bmi = x->mode_info_context->bmi[ 2];
        x->block[ 8].bmi = x->mode_info_context->bmi[ 8];
        x->block[10].bmi = x->mode_info_context->bmi[10];
        if (x->mode_info_context->mbmi.need_to_clamp_mvs)
        {
            clamp_mv_to_umv_border(&x->block[ 0].bmi.mv.as_mv, x);
            clamp_mv_to_umv_border(&x->block[ 2].bmi.mv.as_mv, x);
            clamp_mv_to_umv_border(&x->block[ 8].bmi.mv.as_mv, x);
            clamp_mv_to_umv_border(&x->block[10].bmi.mv.as_mv, x);
        }

        b = &x->block[ 0];
        build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride);
        b = &x->block[ 2];
        build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride);
        b = &x->block[ 8];
        build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride);
        b = &x->block[10];
        build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride);
    }
    else
    {
        for (i = 0; i < 16; i += 2)
        {
            BLOCKD *d0 = &x->block[i];
            BLOCKD *d1 = &x->block[i+1];
            int dst_stride = x->dst.y_stride;

            x->block[i+0].bmi = x->mode_info_context->bmi[i+0];
            x->block[i+1].bmi = x->mode_info_context->bmi[i+1];
            if (x->mode_info_context->mbmi.need_to_clamp_mvs)
            {
                clamp_mv_to_umv_border(&x->block[i+0].bmi.mv.as_mv, x);
                clamp_mv_to_umv_border(&x->block[i+1].bmi.mv.as_mv, x);
            }

            if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
                build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride);
            else
            {
                build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
                build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
            }
        }
    }
    base_dst = x->dst.u_buffer;
    base_pre = x->pre.u_buffer;
    for (i = 16; i < 20; i += 2)
    {
        BLOCKD *d0 = &x->block[i];
        BLOCKD *d1 = &x->block[i+1];
        int dst_stride = x->dst.uv_stride;

        /* Note: uv mvs already clamped in build_4x4uvmvs() */

        if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
            build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride);
        else
        {
            build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
            build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
        }
    }

    base_dst = x->dst.v_buffer;
    base_pre = x->pre.v_buffer;
    for (i = 20; i < 24; i += 2)
    {
        BLOCKD *d0 = &x->block[i];
        BLOCKD *d1 = &x->block[i+1];
        int dst_stride = x->dst.uv_stride;

        /* Note: uv mvs already clamped in build_4x4uvmvs() */

        if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
            build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride);
        else
        {
            build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
            build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
        }
    }
}

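/* Derive one chroma MV for each 4x4 chroma block by averaging the four
 * co-located luma sub-block MVs (scaled to chroma resolution, rounded away
 * from zero), clamping to the extended border when required.  The V blocks
 * reuse the U block MVs.
 */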
static void build_4x4uvmvs(MACROBLOCKD *x)
{
    int i, j;

    for (i = 0; i < 2; i++)
    {
        for (j = 0; j < 2; j++)
        {
            int yoffset = i * 8 + j * 2;
            int uoffset = 16 + i * 2 + j;
            int voffset = 20 + i * 2 + j;

            int temp;

            temp = x->mode_info_context->bmi[yoffset + 0].mv.as_mv.row
                 + x->mode_info_context->bmi[yoffset + 1].mv.as_mv.row
                 + x->mode_info_context->bmi[yoffset + 4].mv.as_mv.row
                 + x->mode_info_context->bmi[yoffset + 5].mv.as_mv.row;

            temp += 4 + ((temp >> (sizeof(int) * CHAR_BIT - 1)) << 3);

            x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask;

            temp = x->mode_info_context->bmi[yoffset + 0].mv.as_mv.col
                 + x->mode_info_context->bmi[yoffset + 1].mv.as_mv.col
                 + x->mode_info_context->bmi[yoffset + 4].mv.as_mv.col
                 + x->mode_info_context->bmi[yoffset + 5].mv.as_mv.col;

            temp += 4 + ((temp >> (sizeof(int) * CHAR_BIT - 1)) << 3);

            x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;

            if (x->mode_info_context->mbmi.need_to_clamp_mvs)
                clamp_uvmv_to_umv_border(&x->block[uoffset].bmi.mv.as_mv, x);

            x->block[voffset].bmi.mv.as_int = x->block[uoffset].bmi.mv.as_int;
        }
    }
}

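/* Top-level inter prediction for one macroblock: non-SPLITMV modes use a
 * single MV and the 16x16 path; SPLITMV first derives the per-block chroma
 * MVs and then predicts each sub-block.
 */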
void vp8_build_inter_predictors_mb(MACROBLOCKD *xd)
{
    if (xd->mode_info_context->mbmi.mode != SPLITMV)
    {
        vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
                                           xd->dst.u_buffer, xd->dst.v_buffer,
                                           xd->dst.y_stride, xd->dst.uv_stride);
    }
    else
    {
        build_4x4uvmvs(xd);
        build_inter4x4_predictors_mb(xd);
    }
}