/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */


#include "vpx_ports/config.h"
#include "recon.h"
#include "subpixel.h"
#include "blockd.h"
#include "reconinter.h"
#if CONFIG_RUNTIME_CPU_DETECT
#include "onyxc_int.h"
#endif

// Define MUST_BE_ALIGNED on systems where unaligned int reads and writes
// are not permitted, e.g. some ARM architectures.
//#define MUST_BE_ALIGNED

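/* Offsets of the upper-left 4x4 block of each 8x8 partition within the
 * macroblock's 4x4 block grid (blocks 0..15, four per row); used when
 * SPLITMV partitions the macroblock into four 8x8 quadrants.
 */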
static const int bbb[4] = {0, 2, 8, 10};

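/* Simple block copies between buffers with independent strides. When
 * MUST_BE_ALIGNED is not defined, each row is moved four bytes at a time
 * through int loads and stores, which assumes a 32-bit int and a platform
 * that tolerates the resulting (possibly unaligned) word accesses.
 */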
void vp8_copy_mem16x16_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 16; r++)
    {
#ifdef MUST_BE_ALIGNED
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        dst[4] = src[4];
        dst[5] = src[5];
        dst[6] = src[6];
        dst[7] = src[7];
        dst[8] = src[8];
        dst[9] = src[9];
        dst[10] = src[10];
        dst[11] = src[11];
        dst[12] = src[12];
        dst[13] = src[13];
        dst[14] = src[14];
        dst[15] = src[15];
#else
        ((int *)dst)[0] = ((int *)src)[0];
        ((int *)dst)[1] = ((int *)src)[1];
        ((int *)dst)[2] = ((int *)src)[2];
        ((int *)dst)[3] = ((int *)src)[3];
#endif
        src += src_stride;
        dst += dst_stride;
    }
}

void vp8_copy_mem8x8_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 8; r++)
    {
#ifdef MUST_BE_ALIGNED
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        dst[4] = src[4];
        dst[5] = src[5];
        dst[6] = src[6];
        dst[7] = src[7];
#else
        ((int *)dst)[0] = ((int *)src)[0];
        ((int *)dst)[1] = ((int *)src)[1];
#endif
        src += src_stride;
        dst += dst_stride;
    }
}

void vp8_copy_mem8x4_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 4; r++)
    {
#ifdef MUST_BE_ALIGNED
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        dst[4] = src[4];
        dst[5] = src[5];
        dst[6] = src[6];
        dst[7] = src[7];
#else
        ((int *)dst)[0] = ((int *)src)[0];
        ((int *)dst)[1] = ((int *)src)[1];
#endif
        src += src_stride;
        dst += dst_stride;
    }
}


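/* Builds the prediction for a single 4x4 block. Motion vector components
 * here are in eighth-pel units: mv >> 3 gives the full-pel offset into the
 * reference buffer, and mv & 7 the sub-pel phase. A nonzero phase in either
 * dimension selects the sub-pixel interpolation path; otherwise the
 * reference pixels are copied directly.
 */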
void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, vp8_subpix_fn_t sppf)
{
    int r;
    unsigned char *ptr_base;
    unsigned char *ptr;
    unsigned char *pred_ptr = d->predictor;

    ptr_base = *(d->base_pre);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
        ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);
        sppf(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch);
    }
    else
    {
        ptr_base += d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);
        ptr = ptr_base;

        for (r = 0; r < 4; r++)
        {
#ifdef MUST_BE_ALIGNED
            pred_ptr[0] = ptr[0];
            pred_ptr[1] = ptr[1];
            pred_ptr[2] = ptr[2];
            pred_ptr[3] = ptr[3];
#else
            *(int *)pred_ptr = *(int *)ptr;
#endif
            pred_ptr += pitch;
            ptr += d->pre_stride;
        }
    }
}

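/* Predicts an 8x8 region (four 4x4 blocks that share the same motion
 * vector), with d being the upper-left block of the group.
 */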
void vp8_build_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, int pitch)
{
    unsigned char *ptr_base;
    unsigned char *ptr;
    unsigned char *pred_ptr = d->predictor;

    ptr_base = *(d->base_pre);
    ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
        x->subpixel_predict8x8(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch);
    }
    else
    {
        RECON_INVOKE(&x->rtcd->recon, copy8x8)(ptr, d->pre_stride, pred_ptr, pitch);
    }
}

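/* Predicts an 8x4 region (two horizontally adjacent 4x4 blocks that share
 * the same motion vector), with d being the left block of the pair.
 */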
void vp8_build_inter_predictors2b(MACROBLOCKD *x, BLOCKD *d, int pitch)
{
    unsigned char *ptr_base;
    unsigned char *ptr;
    unsigned char *pred_ptr = d->predictor;

    ptr_base = *(d->base_pre);
    ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
        x->subpixel_predict8x4(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch);
    }
    else
    {
        RECON_INVOKE(&x->rtcd->recon, copy8x4)(ptr, d->pre_stride, pred_ptr, pitch);
    }
}


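/* Builds the chroma (U and V) prediction for a macroblock. Whole-MB inter
 * modes use the single chroma MV already stored in block[16]; otherwise the
 * eight 4x4 chroma blocks are predicted in pairs, sharing one 8x4 call when
 * both halves of a pair carry the same motion vector.
 */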
void vp8_build_inter_predictors_mbuv(MACROBLOCKD *x)
{
    int i;

    if (x->mode_info_context->mbmi.ref_frame != INTRA_FRAME &&
        x->mode_info_context->mbmi.mode != SPLITMV)
    {
        unsigned char *uptr, *vptr;
        unsigned char *upred_ptr = &x->predictor[256];
        unsigned char *vpred_ptr = &x->predictor[320];

        int mv_row = x->block[16].bmi.mv.as_mv.row;
        int mv_col = x->block[16].bmi.mv.as_mv.col;
        int offset;
        int pre_stride = x->block[16].pre_stride;

        offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
        uptr = x->pre.u_buffer + offset;
        vptr = x->pre.v_buffer + offset;

        if ((mv_row | mv_col) & 7)
        {
            x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr, 8);
            x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr, 8);
        }
        else
        {
            RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, upred_ptr, 8);
            RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, vpred_ptr, 8);
        }
    }
    else
    {
        for (i = 16; i < 24; i += 2)
        {
            BLOCKD *d0 = &x->block[i];
            BLOCKD *d1 = &x->block[i+1];

            if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
                vp8_build_inter_predictors2b(x, d0, 8);
            else
            {
                vp8_build_inter_predictors_b(d0, 8, x->subpixel_predict);
                vp8_build_inter_predictors_b(d1, 8, x->subpixel_predict);
            }
        }
    }
}

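/* Encoder only: builds just the luma (Y) prediction for the macroblock.
 * Whole-MB inter modes take the single 16x16 path; SPLITMV predicts the
 * luma sub-blocks according to the partitioning.
 */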
void vp8_build_inter_predictors_mby(MACROBLOCKD *x)
{
    if (x->mode_info_context->mbmi.ref_frame != INTRA_FRAME &&
        x->mode_info_context->mbmi.mode != SPLITMV)
    {
        unsigned char *ptr_base;
        unsigned char *ptr;
        unsigned char *pred_ptr = x->predictor;
        int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
        int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
        int pre_stride = x->block[0].pre_stride;

        ptr_base = x->pre.y_buffer;
        ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);

        if ((mv_row | mv_col) & 7)
        {
            x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, pred_ptr, 16);
        }
        else
        {
            RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, pred_ptr, 16);
        }
    }
    else
    {
        int i;

        if (x->mode_info_context->mbmi.partitioning < 3)
        {
            for (i = 0; i < 4; i++)
            {
                BLOCKD *d = &x->block[bbb[i]];
                vp8_build_inter_predictors4b(x, d, 16);
            }
        }
        else
        {
            for (i = 0; i < 16; i += 2)
            {
                BLOCKD *d0 = &x->block[i];
                BLOCKD *d1 = &x->block[i+1];

                if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
                    vp8_build_inter_predictors2b(x, d0, 16);
                else
                {
                    vp8_build_inter_predictors_b(d0, 16, x->subpixel_predict);
                    vp8_build_inter_predictors_b(d1, 16, x->subpixel_predict);
                }
            }
        }
    }
}

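/* Builds the complete inter prediction (Y, U, and V) for a macroblock into
 * x->predictor: Y at offset 0, U at 256, V at 320. Whole-MB modes use the
 * 16x16 luma and 8x8 chroma fast paths; SPLITMV falls back to
 * per-partition prediction.
 */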
void vp8_build_inter_predictors_mb(MACROBLOCKD *x)
{
    if (x->mode_info_context->mbmi.ref_frame != INTRA_FRAME &&
        x->mode_info_context->mbmi.mode != SPLITMV)
    {
        int offset;
        unsigned char *ptr_base;
        unsigned char *ptr;
        unsigned char *uptr, *vptr;
        unsigned char *pred_ptr = x->predictor;
        unsigned char *upred_ptr = &x->predictor[256];
        unsigned char *vpred_ptr = &x->predictor[320];

        int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
        int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
        int pre_stride = x->block[0].pre_stride;

        ptr_base = x->pre.y_buffer;
        ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);

        if ((mv_row | mv_col) & 7)
        {
            x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, pred_ptr, 16);
        }
        else
        {
            RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, pred_ptr, 16);
        }

        // Chroma planes are half resolution, so halve the stride and use
        // the chroma MV derived in vp8_build_uvmvs().
        mv_row = x->block[16].bmi.mv.as_mv.row;
        mv_col = x->block[16].bmi.mv.as_mv.col;
        pre_stride >>= 1;
        offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
        uptr = x->pre.u_buffer + offset;
        vptr = x->pre.v_buffer + offset;

        if ((mv_row | mv_col) & 7)
        {
            x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr, 8);
            x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr, 8);
        }
        else
        {
            RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, upred_ptr, 8);
            RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, vpred_ptr, 8);
        }
    }
    else
    {
        int i;

        if (x->mode_info_context->mbmi.partitioning < 3)
        {
            for (i = 0; i < 4; i++)
            {
                BLOCKD *d = &x->block[bbb[i]];
                vp8_build_inter_predictors4b(x, d, 16);
            }
        }
        else
        {
            for (i = 0; i < 16; i += 2)
            {
                BLOCKD *d0 = &x->block[i];
                BLOCKD *d1 = &x->block[i+1];

                if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
                    vp8_build_inter_predictors2b(x, d0, 16);
                else
                {
                    vp8_build_inter_predictors_b(d0, 16, x->subpixel_predict);
                    vp8_build_inter_predictors_b(d1, 16, x->subpixel_predict);
                }
            }
        }

        for (i = 16; i < 24; i += 2)
        {
            BLOCKD *d0 = &x->block[i];
            BLOCKD *d1 = &x->block[i+1];

            if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
                vp8_build_inter_predictors2b(x, d0, 8);
            else
            {
                vp8_build_inter_predictors_b(d0, 8, x->subpixel_predict);
                vp8_build_inter_predictors_b(d1, 8, x->subpixel_predict);
            }
        }
    }
}

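/* Derives the chroma motion vectors from the luma ones and stores them in
 * blocks 16-23. For SPLITMV each chroma MV is the average of the four
 * co-located luma MVs, halved for 4:2:0 subsampling and rounded away from
 * zero: (sum +/- 4) / 8. E.g. luma rows {5, 5, 7, 7} give
 * (24 + 4) / 8 = 3, i.e. half their average of 6. For whole-MB modes the
 * single luma MV is halved with the same rounding. With fullpixel set, the
 * low three (eighth-pel) bits are cleared, forcing full-pel chroma MVs.
 */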
void vp8_build_uvmvs(MACROBLOCKD *x, int fullpixel)
{
    int i, j;

    if (x->mode_info_context->mbmi.mode == SPLITMV)
    {
        for (i = 0; i < 2; i++)
        {
            for (j = 0; j < 2; j++)
            {
                int yoffset = i * 8 + j * 2;
                int uoffset = 16 + i * 2 + j;
                int voffset = 20 + i * 2 + j;
                int temp;

                temp = x->block[yoffset  ].bmi.mv.as_mv.row
                       + x->block[yoffset+1].bmi.mv.as_mv.row
                       + x->block[yoffset+4].bmi.mv.as_mv.row
                       + x->block[yoffset+5].bmi.mv.as_mv.row;

                if (temp < 0) temp -= 4;
                else temp += 4;

                x->block[uoffset].bmi.mv.as_mv.row = temp / 8;

                if (fullpixel)
                    x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & 0xfffffff8;

                temp = x->block[yoffset  ].bmi.mv.as_mv.col
                       + x->block[yoffset+1].bmi.mv.as_mv.col
                       + x->block[yoffset+4].bmi.mv.as_mv.col
                       + x->block[yoffset+5].bmi.mv.as_mv.col;

                if (temp < 0) temp -= 4;
                else temp += 4;

                x->block[uoffset].bmi.mv.as_mv.col = temp / 8;

                if (fullpixel)
                    x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & 0xfffffff8;

                x->block[voffset].bmi.mv.as_mv.row = x->block[uoffset].bmi.mv.as_mv.row;
                x->block[voffset].bmi.mv.as_mv.col = x->block[uoffset].bmi.mv.as_mv.col;
            }
        }
    }
    else
    {
        int mvrow = x->mode_info_context->mbmi.mv.as_mv.row;
        int mvcol = x->mode_info_context->mbmi.mv.as_mv.col;

        if (mvrow < 0)
            mvrow -= 1;
        else
            mvrow += 1;

        if (mvcol < 0)
            mvcol -= 1;
        else
            mvcol += 1;

        mvrow /= 2;
        mvcol /= 2;

        for (i = 0; i < 8; i++)
        {
            x->block[16 + i].bmi.mv.as_mv.row = mvrow;
            x->block[16 + i].bmi.mv.as_mv.col = mvcol;

            if (fullpixel)
            {
                x->block[16 + i].bmi.mv.as_mv.row = mvrow & 0xfffffff8;
                x->block[16 + i].bmi.mv.as_mv.col = mvcol & 0xfffffff8;
            }
        }
    }
}


// The following functions are written for skip_recon_mb() to call. Since
// there is no reconstruction in that situation, the result can be written
// directly to the dst buffer instead of to the predictor buffer and then
// copied to dst.
static void vp8_build_inter_predictors_b_s(BLOCKD *d, unsigned char *dst_ptr, vp8_subpix_fn_t sppf)
{
    int r;
    unsigned char *ptr_base;
    unsigned char *ptr;
    int dst_stride = d->dst_stride;
    int pre_stride = d->pre_stride;

    ptr_base = *(d->base_pre);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
        ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);
        sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst_ptr, dst_stride);
    }
    else
    {
        ptr_base += d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);
        ptr = ptr_base;

        for (r = 0; r < 4; r++)
        {
#ifdef MUST_BE_ALIGNED
            dst_ptr[0] = ptr[0];
            dst_ptr[1] = ptr[1];
            dst_ptr[2] = ptr[2];
            dst_ptr[3] = ptr[3];
#else
            *(int *)dst_ptr = *(int *)ptr;
#endif
            dst_ptr += dst_stride;
            ptr += pre_stride;
        }
    }
}


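/* Skip-path variant of vp8_build_inter_predictors_mb(): writes the
 * prediction straight into the destination frame buffers. Note that
 * pre_stride is taken from x->dst below, which assumes the reference and
 * destination frames share the same stride.
 */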
void vp8_build_inter_predictors_mb_s(MACROBLOCKD *x)
{
    unsigned char *dst_ptr = x->dst.y_buffer;

    if (x->mode_info_context->mbmi.mode != SPLITMV)
    {
        int offset;
        unsigned char *ptr_base;
        unsigned char *ptr;
        unsigned char *uptr, *vptr;
        unsigned char *udst_ptr = x->dst.u_buffer;
        unsigned char *vdst_ptr = x->dst.v_buffer;

        int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
        int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
        int pre_stride = x->dst.y_stride;

        ptr_base = x->pre.y_buffer;
        ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);

        if ((mv_row | mv_col) & 7)
        {
            x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, dst_ptr, x->dst.y_stride);
        }
        else
        {
            RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, dst_ptr, x->dst.y_stride);
        }

        mv_row = x->block[16].bmi.mv.as_mv.row;
        mv_col = x->block[16].bmi.mv.as_mv.col;
        pre_stride >>= 1;
        offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
        uptr = x->pre.u_buffer + offset;
        vptr = x->pre.v_buffer + offset;

        if ((mv_row | mv_col) & 7)
        {
            x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, udst_ptr, x->dst.uv_stride);
            x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, vdst_ptr, x->dst.uv_stride);
        }
        else
        {
            RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, udst_ptr, x->dst.uv_stride);
            RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, vdst_ptr, x->dst.uv_stride);
        }
    }
    else
    {
        // Note: this branch is currently never executed, so the direct-to-dst
        // rewrite below is untested. If something goes wrong, fall back to the
        // corresponding logic in vp8_build_inter_predictors_mb().
        int i;

        if (x->mode_info_context->mbmi.partitioning < 3)
        {
            for (i = 0; i < 4; i++)
            {
                BLOCKD *d = &x->block[bbb[i]];

                // Inlined from vp8_build_inter_predictors4b(), writing to dst.
                {
                    unsigned char *ptr_base;
                    unsigned char *ptr;

                    ptr_base = *(d->base_pre);
                    ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);

                    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
                    {
                        x->subpixel_predict8x8(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst_ptr, x->dst.y_stride);
                    }
                    else
                    {
                        RECON_INVOKE(&x->rtcd->recon, copy8x8)(ptr, d->pre_stride, dst_ptr, x->dst.y_stride);
                    }
                }
            }
        }
        else
        {
            for (i = 0; i < 16; i += 2)
            {
                BLOCKD *d0 = &x->block[i];
                BLOCKD *d1 = &x->block[i+1];

                if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
                {
                    // Inlined from vp8_build_inter_predictors2b(), writing to dst.
                    unsigned char *ptr_base;
                    unsigned char *ptr;

                    ptr_base = *(d0->base_pre);
                    ptr = ptr_base + d0->pre + (d0->bmi.mv.as_mv.row >> 3) * d0->pre_stride + (d0->bmi.mv.as_mv.col >> 3);

                    if (d0->bmi.mv.as_mv.row & 7 || d0->bmi.mv.as_mv.col & 7)
                    {
                        x->subpixel_predict8x4(ptr, d0->pre_stride, d0->bmi.mv.as_mv.col & 7, d0->bmi.mv.as_mv.row & 7, dst_ptr, x->dst.y_stride);
                    }
                    else
                    {
                        RECON_INVOKE(&x->rtcd->recon, copy8x4)(ptr, d0->pre_stride, dst_ptr, x->dst.y_stride);
                    }
                }
                else
                {
                    vp8_build_inter_predictors_b_s(d0, dst_ptr, x->subpixel_predict);
                    vp8_build_inter_predictors_b_s(d1, dst_ptr, x->subpixel_predict);
                }
            }
        }

        for (i = 16; i < 24; i += 2)
        {
            BLOCKD *d0 = &x->block[i];
            BLOCKD *d1 = &x->block[i+1];

            if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
            {
                // Inlined from vp8_build_inter_predictors2b(), writing to dst.
                unsigned char *ptr_base;
                unsigned char *ptr;

                ptr_base = *(d0->base_pre);
                ptr = ptr_base + d0->pre + (d0->bmi.mv.as_mv.row >> 3) * d0->pre_stride + (d0->bmi.mv.as_mv.col >> 3);

                if (d0->bmi.mv.as_mv.row & 7 || d0->bmi.mv.as_mv.col & 7)
                {
                    x->subpixel_predict8x4(ptr, d0->pre_stride, d0->bmi.mv.as_mv.col & 7, d0->bmi.mv.as_mv.row & 7, dst_ptr, x->dst.y_stride);
                }
                else
                {
                    RECON_INVOKE(&x->rtcd->recon, copy8x4)(ptr, d0->pre_stride, dst_ptr, x->dst.y_stride);
                }
            }
            else
            {
                vp8_build_inter_predictors_b_s(d0, dst_ptr, x->subpixel_predict);
                vp8_build_inter_predictors_b_s(d1, dst_ptr, x->subpixel_predict);
            }
        }
    }
}
    687