      1 /*
      2  *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
      3  *
      4  *  Use of this source code is governed by a BSD-style license
      5  *  that can be found in the LICENSE file in the root of the source
      6  *  tree. An additional intellectual property rights grant can be found
      7  *  in the file PATENTS.  All contributing project authors may
      8  *  be found in the AUTHORS file in the root of the source tree.
      9  */
     10 
     11 
     12 #include "mcomp.h"
     13 #include "vpx_mem/vpx_mem.h"
     14 
     15 #include <stdio.h>
     16 #include <limits.h>
     17 #include <math.h>
     18 
     19 #ifdef ENTROPY_STATS
     20 static int mv_ref_ct [31] [4] [2];
     21 static int mv_mode_cts [4] [2];
     22 #endif
     23 
     24 static int mv_bits_sadcost[256];
     25 
     26 void vp8cx_init_mv_bits_sadcost()
     27 {
     28     int i;
     29 
     30     for (i = 0; i < 256; i++)
     31     {
     32         mv_bits_sadcost[i] = (int)sqrt(i * 16);
     33     }
     34 }
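/* Note on the table above: mv_bits_sadcost[] maps an estimated bit count i onto a
 * SAD-domain penalty of roughly sqrt(16 * i), i.e. a deliberately sub-linear cost.
 * Within this file the table is static and is only referenced from a commented-out
 * experiment in vp8_full_search_sad(); the live SAD-based searches use
 * vp8_mv_err_cost() with the mvsadcost tables instead. */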
     35 
     36 
     37 int vp8_mv_bit_cost(MV *mv, MV *ref, int *mvcost[2], int Weight)
     38 {
     39     // MV costing is based on the distribution of vectors in the previous frame and as such will tend to
     40     // overstate the cost of vectors. In addition, coding a new vector can have a knock-on effect on the
     41     // cost of subsequent vectors and on the quality of prediction from NEAR and NEAREST for subsequent blocks.
     42     // The "Weight" parameter allows these factors to be taken into account to a limited extent.
     43     return ((mvcost[0][(mv->row - ref->row) >> 1] + mvcost[1][(mv->col - ref->col) >> 1]) * Weight) >> 7;
     44 }
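/* Reading notes for the cost above: the row/col differences are in the encoder's
 * internal 1/8-pel MV units (vectors elsewhere in this file are built with << 3),
 * and the >> 1 maps them onto the coarser 1/4-pel units used to index the mvcost
 * tables.  With Weight == 128 the trailing >> 7 cancels the multiply, so the
 * return value is simply the summed component costs; smaller weights scale the
 * rate term down proportionally. */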
     45 
     46 int vp8_mv_err_cost(MV *mv, MV *ref, int *mvcost[2], int error_per_bit)
     47 {
     48     //int i;
     49     //return ((mvcost[0][(mv->row - ref->row)>>1] + mvcost[1][(mv->col - ref->col)>>1] + 128) * error_per_bit) >> 8;
     50     //return ( (vp8_mv_bit_cost(mv,  ref, mvcost, 100) + 128) * error_per_bit) >> 8;
     51 
     52     //i = (vp8_mv_bit_cost(mv,  ref, mvcost, 100) * error_per_bit + 128) >> 8;
     53     return ((mvcost[0][(mv->row - ref->row) >> 1] + mvcost[1][(mv->col - ref->col) >> 1]) * error_per_bit + 128) >> 8;
     54     //return (vp8_mv_bit_cost(mv,  ref, mvcost, 128) * error_per_bit + 128) >> 8;
     55 }
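/* A worked example of the fixed-point scaling above (illustrative numbers): the
 * bracketed sum is the table-based rate estimate for the MV residual, and
 * (rate * error_per_bit + 128) >> 8 is rate * error_per_bit / 256 with rounding.
 * For example, a rate estimate of 300 with error_per_bit == 100 adds
 * (300 * 100 + 128) >> 8 == 117 to the SAD or variance score. */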
     56 
     57 
     58 static int mv_bits(MV *mv, MV *ref, int *mvcost[2])
     59 {
     60     // get the estimated number of bits for a motion vector, to be used for costing in SAD based
     61     // motion estimation
     62     return ((mvcost[0][(mv->row - ref->row) >> 1]  +  mvcost[1][(mv->col - ref->col)>> 1]) + 128) >> 8;
     63 }
     64 
     65 void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride)
     66 {
     67     int Len;
     68     int search_site_count = 0;
     69 
     70 
     71     // Generate offsets for 4 search sites per step.
     72     Len = MAX_FIRST_STEP;
     73     x->ss[search_site_count].mv.col = 0;
     74     x->ss[search_site_count].mv.row = 0;
     75     x->ss[search_site_count].offset = 0;
     76     search_site_count++;
     77 
     78     while (Len > 0)
     79     {
     80 
     81         // Compute offsets for search sites.
     82         x->ss[search_site_count].mv.col = 0;
     83         x->ss[search_site_count].mv.row = -Len;
     84         x->ss[search_site_count].offset = -Len * stride;
     85         search_site_count++;
     86 
     87         // Compute offsets for search sites.
     88         x->ss[search_site_count].mv.col = 0;
     89         x->ss[search_site_count].mv.row = Len;
     90         x->ss[search_site_count].offset = Len * stride;
     91         search_site_count++;
     92 
     93         // Compute offsets for search sites.
     94         x->ss[search_site_count].mv.col = -Len;
     95         x->ss[search_site_count].mv.row = 0;
     96         x->ss[search_site_count].offset = -Len;
     97         search_site_count++;
     98 
     99         // Compute offsets for search sites.
    100         x->ss[search_site_count].mv.col = Len;
    101         x->ss[search_site_count].mv.row = 0;
    102         x->ss[search_site_count].offset = Len;
    103         search_site_count++;
    104 
    105         // Contract.
    106         Len /= 2;
    107     }
    108 
    109     x->ss_count = search_site_count;
    110     x->searches_per_step = 4;
    111 }
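/* The table built above describes a 4-point diamond (cross) per step, in
 * (row, col) displacements:
 *
 *                 (-Len,  0)
 *     (0, -Len)       +       (0, +Len)
 *                 (+Len,  0)
 *
 * Each entry also stores the equivalent buffer displacement (row * stride + col)
 * so the search loop can move a single pointer rather than recompute addresses.
 * Len starts at MAX_FIRST_STEP and halves on every step until it reaches zero;
 * vp8_init3smotion_compensation() below does the same but with eight sites per
 * step (the four diagonals as well). */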
    112 
    113 void vp8_init3smotion_compensation(MACROBLOCK *x, int stride)
    114 {
    115     int Len;
    116     int search_site_count = 0;
    117 
    118     // Generate offsets for 8 search sites per step.
    119     Len = MAX_FIRST_STEP;
    120     x->ss[search_site_count].mv.col = 0;
    121     x->ss[search_site_count].mv.row = 0;
    122     x->ss[search_site_count].offset = 0;
    123     search_site_count++;
    124 
    125     while (Len > 0)
    126     {
    127 
    128         // Compute offsets for search sites.
    129         x->ss[search_site_count].mv.col = 0;
    130         x->ss[search_site_count].mv.row = -Len;
    131         x->ss[search_site_count].offset = -Len * stride;
    132         search_site_count++;
    133 
    134         // Compute offsets for search sites.
    135         x->ss[search_site_count].mv.col = 0;
    136         x->ss[search_site_count].mv.row = Len;
    137         x->ss[search_site_count].offset = Len * stride;
    138         search_site_count++;
    139 
    140         // Compute offsets for search sites.
    141         x->ss[search_site_count].mv.col = -Len;
    142         x->ss[search_site_count].mv.row = 0;
    143         x->ss[search_site_count].offset = -Len;
    144         search_site_count++;
    145 
    146         // Compute offsets for search sites.
    147         x->ss[search_site_count].mv.col = Len;
    148         x->ss[search_site_count].mv.row = 0;
    149         x->ss[search_site_count].offset = Len;
    150         search_site_count++;
    151 
    152         // Compute offsets for search sites.
    153         x->ss[search_site_count].mv.col = -Len;
    154         x->ss[search_site_count].mv.row = -Len;
    155         x->ss[search_site_count].offset = -Len * stride - Len;
    156         search_site_count++;
    157 
    158         // Compute offsets for search sites.
    159         x->ss[search_site_count].mv.col = Len;
    160         x->ss[search_site_count].mv.row = -Len;
    161         x->ss[search_site_count].offset = -Len * stride + Len;
    162         search_site_count++;
    163 
    164         // Compute offsets for search sites.
    165         x->ss[search_site_count].mv.col = -Len;
    166         x->ss[search_site_count].mv.row = Len;
    167         x->ss[search_site_count].offset = Len * stride - Len;
    168         search_site_count++;
    169 
    170         // Compute offsets for search sites.
    171         x->ss[search_site_count].mv.col = Len;
    172         x->ss[search_site_count].mv.row = Len;
    173         x->ss[search_site_count].offset = Len * stride + Len;
    174         search_site_count++;
    175 
    176 
    177         // Contract.
    178         Len /= 2;
    179     }
    180 
    181     x->ss_count = search_site_count;
    182     x->searches_per_step = 8;
    183 }
    184 
    185 
    186 #define MVC(r,c) (((mvcost[0][(r)-rr] + mvcost[1][(c) - rc]) * error_per_bit + 128 )>>8 ) // estimated cost of a motion vector (r,c)
    187 #define PRE(r,c) (*(d->base_pre) + d->pre + ((r)>>2) * d->pre_stride + ((c)>>2)) // pointer to the predictor base for a motion vector
    188 #define SP(x) (((x)&3)<<1) // convert motion vector component to offset for svf calc
    189 #define DIST(r,c) svf( PRE(r,c), d->pre_stride, SP(c),SP(r), z,b->src_stride,&sse) // returns the sub-pixel variance error
    190 #define IFMVCV(r,c,s,e) if ( c >= minc && c <= maxc && r >= minr && r <= maxr) s else e;
    191 #define ERR(r,c) (MVC(r,c)+DIST(r,c)) // returns distortion + motion vector cost
    192 #define CHECK_BETTER(v,r,c) IFMVCV(r,c,{if((v = ERR(r,c)) < besterr) { besterr = v; br=r; bc=c; }}, v=INT_MAX;)// checks if (r,c) has better score than previous best
    193 #define MIN(x,y) (((x)<(y))?(x):(y))
    194 #define MAX(x,y) (((x)>(y))?(x):(y))
    195 
    196 //#define CHECK_BETTER(v,r,c) if((v = ERR(r,c)) < besterr) { besterr = v; br=r; bc=c; }
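/* Coordinate conventions for the macros above (a reading aid): r and c are in
 * 1/4-pel units.  PRE(r,c) drops the fractional bits (>> 2) to find the full-pel
 * base pixel, SP() expands the remaining two fraction bits into the 1/8-pel offset
 * argument expected by the sub-pixel variance function svf(), and MVC() indexes
 * the mvcost tables relative to the reference position (rr, rc), which is held in
 * the same 1/4-pel units. */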
    197 
    198 int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, vp8_subpixvariance_fn_t svf, vp8_variance_fn_t vf, int *mvcost[2])
    199 {
    200     unsigned char *y = *(d->base_pre) + d->pre + (bestmv->row) * d->pre_stride + bestmv->col;
    201     unsigned char *z = (*(b->base_src) + b->src);
    202 
    203     int rr = ref_mv->row >> 1, rc = ref_mv->col >> 1;
    204     int br = bestmv->row << 2, bc = bestmv->col << 2;
    205     int tr = br, tc = bc;
    206     unsigned int besterr = INT_MAX;
    207     unsigned int left, right, up, down, diag;
    208     unsigned int sse;
    209     unsigned int whichdir;
    210     unsigned int halfiters = 4;
    211     unsigned int quarteriters = 4;
    212 
    213     int minc = MAX(x->mv_col_min << 2, (ref_mv->col >> 1) - ((1 << mvlong_width) - 1));
    214     int maxc = MIN(x->mv_col_max << 2, (ref_mv->col >> 1) + ((1 << mvlong_width) - 1));
    215     int minr = MAX(x->mv_row_min << 2, (ref_mv->row >> 1) - ((1 << mvlong_width) - 1));
    216     int maxr = MIN(x->mv_row_max << 2, (ref_mv->row >> 1) + ((1 << mvlong_width) - 1));
    217 
    218     // central mv
    219     bestmv->row <<= 3;
    220     bestmv->col <<= 3;
    221 
    222     // calculate central point error
    223     besterr = vf(y, d->pre_stride, z, b->src_stride, &sse);
    224     besterr += vp8_mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
    225 
    226     // TODO: Each subsequent iteration checks at least one point in common with the last iteration (it could be two if the diagonal was selected).
    227     while (--halfiters)
    228     {
    229         // 1/2 pel
    230         CHECK_BETTER(left, tr, tc - 2);
    231         CHECK_BETTER(right, tr, tc + 2);
    232         CHECK_BETTER(up, tr - 2, tc);
    233         CHECK_BETTER(down, tr + 2, tc);
    234 
    235         whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
    236 
    237         switch (whichdir)
    238         {
    239         case 0:
    240             CHECK_BETTER(diag, tr - 2, tc - 2);
    241             break;
    242         case 1:
    243             CHECK_BETTER(diag, tr - 2, tc + 2);
    244             break;
    245         case 2:
    246             CHECK_BETTER(diag, tr + 2, tc - 2);
    247             break;
    248         case 3:
    249             CHECK_BETTER(diag, tr + 2, tc + 2);
    250             break;
    251         }
    252 
    253         // no reason to check the same one again.
    254         if (tr == br && tc == bc)
    255             break;
    256 
    257         tr = br;
    258         tc = bc;
    259     }
    260 
    261     // TODO: Each subsequent iteration checks at least one point in common with the last iteration (it could be two if the diagonal was selected).
    262     // 1/4 pel
    263     while (--quarteriters)
    264     {
    265         CHECK_BETTER(left, tr, tc - 1);
    266         CHECK_BETTER(right, tr, tc + 1);
    267         CHECK_BETTER(up, tr - 1, tc);
    268         CHECK_BETTER(down, tr + 1, tc);
    269 
    270         whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
    271 
    272         switch (whichdir)
    273         {
    274         case 0:
    275             CHECK_BETTER(diag, tr - 1, tc - 1);
    276             break;
    277         case 1:
    278             CHECK_BETTER(diag, tr - 1, tc + 1);
    279             break;
    280         case 2:
    281             CHECK_BETTER(diag, tr + 1, tc - 1);
    282             break;
    283         case 3:
    284             CHECK_BETTER(diag, tr + 1, tc + 1);
    285             break;
    286         }
    287 
    288         // no reason to check the same one again.
    289         if (tr == br && tc == bc)
    290             break;
    291 
    292         tr = br;
    293         tc = bc;
    294     }
    295 
    296     bestmv->row = br << 1;
    297     bestmv->col = bc << 1;
    298 
    299     if ((abs(bestmv->col - ref_mv->col) > MAX_FULL_PEL_VAL) || (abs(bestmv->row - ref_mv->row) > MAX_FULL_PEL_VAL))
    300         return INT_MAX;
    301 
    302     return besterr;
    303 }
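/* Shape of the routine above: starting from the best full-pel vector it runs up to
 * four half-pel refinement iterations followed by up to four quarter-pel
 * iterations.  Each iteration probes left/right/up/down around the current best,
 * then one diagonal chosen from the better horizontal and the better vertical
 * probe, and stops early once the centre is no longer improved.  The result is
 * written back to bestmv in 1/8-pel units (br << 1, bc << 1), and INT_MAX is
 * returned if the winning vector lands outside the codable range relative to
 * ref_mv. */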
    304 #undef MVC
    305 #undef PRE
    306 #undef SP
    307 #undef DIST
    308 #undef ERR
    309 #undef CHECK_BETTER
    310 #undef MIN
    311 #undef MAX
    312 int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, vp8_subpixvariance_fn_t svf, vp8_variance_fn_t vf, int *mvcost[2])
    313 {
    314     int bestmse = INT_MAX;
    315     MV startmv;
    316     //MV this_mv;
    317     MV this_mv;
    318     unsigned char *y = *(d->base_pre) + d->pre + (bestmv->row) * d->pre_stride + bestmv->col;
    319     unsigned char *z = (*(b->base_src) + b->src);
    320     int left, right, up, down, diag;
    321     unsigned int sse;
    322     int whichdir ;
    323 
    324 
    325     // Trap uncodable vectors
    326     if ((abs((bestmv->col << 3) - ref_mv->col) > MAX_FULL_PEL_VAL) || (abs((bestmv->row << 3) - ref_mv->row) > MAX_FULL_PEL_VAL))
    327     {
    328         bestmv->row <<= 3;
    329         bestmv->col <<= 3;
    330         return INT_MAX;
    331     }
    332 
    333     // central mv
    334     bestmv->row <<= 3;
    335     bestmv->col <<= 3;
    336     startmv = *bestmv;
    337 
    338     // calculate central point error
    339     bestmse = vf(y, d->pre_stride, z, b->src_stride, &sse);
    340     bestmse += vp8_mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
    341 
    342     // go left then right and check error
    343     this_mv.row = startmv.row;
    344     this_mv.col = ((startmv.col - 8) | 4);
    345     left = svf(y - 1, d->pre_stride, 4, 0, z, b->src_stride, &sse);
    346     left += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
    347 
    348     if (left < bestmse)
    349     {
    350         *bestmv = this_mv;
    351         bestmse = left;
    352     }
    353 
    354     this_mv.col += 8;
    355     right = svf(y, d->pre_stride, 4, 0, z, b->src_stride, &sse);
    356     right += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
    357 
    358     if (right < bestmse)
    359     {
    360         *bestmv = this_mv;
    361         bestmse = right;
    362     }
    363 
    364     // go up then down and check error
    365     this_mv.col = startmv.col;
    366     this_mv.row = ((startmv.row - 8) | 4);
    367     up = svf(y - d->pre_stride, d->pre_stride, 0, 4, z, b->src_stride, &sse);
    368     up += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
    369 
    370     if (up < bestmse)
    371     {
    372         *bestmv = this_mv;
    373         bestmse = up;
    374     }
    375 
    376     this_mv.row += 8;
    377     down = svf(y, d->pre_stride, 0, 4, z, b->src_stride, &sse);
    378     down += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
    379 
    380     if (down < bestmse)
    381     {
    382         *bestmv = this_mv;
    383         bestmse = down;
    384     }
    385 
    386 
    387     // now check 1 more diagonal
    388     whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
    389     // whichdir must be 0-3, so one of the cases below must run.
    390     // However, because there is no default case and diag is not set
    391     // elsewhere, the compiler warns that diag may be used uninitialized.
    392     diag = 0;
    393     //for(whichdir =0;whichdir<4;whichdir++)
    394     //{
    395     this_mv = startmv;
    396 
    397     switch (whichdir)
    398     {
    399     case 0:
    400         this_mv.col = (this_mv.col - 8) | 4;
    401         this_mv.row = (this_mv.row - 8) | 4;
    402         diag = svf(y - 1 - d->pre_stride, d->pre_stride, 4, 4, z, b->src_stride, &sse);
    403         break;
    404     case 1:
    405         this_mv.col += 4;
    406         this_mv.row = (this_mv.row - 8) | 4;
    407         diag = svf(y - d->pre_stride, d->pre_stride, 4, 4, z, b->src_stride, &sse);
    408         break;
    409     case 2:
    410         this_mv.col = (this_mv.col - 8) | 4;
    411         this_mv.row += 4;
    412         diag = svf(y - 1, d->pre_stride, 4, 4, z, b->src_stride, &sse);
    413         break;
    414     case 3:
    415         this_mv.col += 4;
    416         this_mv.row += 4;
    417         diag = svf(y, d->pre_stride, 4, 4, z, b->src_stride, &sse);
    418         break;
    419     }
    420 
    421     diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
    422 
    423     if (diag < bestmse)
    424     {
    425         *bestmv = this_mv;
    426         bestmse = diag;
    427     }
    428 
    429 //  }
    430 
    431 
    432     // time to check quarter pels.
    433     if (bestmv->row < startmv.row)
    434         y -= d->pre_stride;
    435 
    436     if (bestmv->col < startmv.col)
    437         y--;
    438 
    439     startmv = *bestmv;
    440 
    441 
    442 
    443     // go left then right and check error
    444     this_mv.row = startmv.row;
    445 
    446     if (startmv.col & 7)
    447     {
    448         this_mv.col = startmv.col - 2;
    449         left = svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
    450     }
    451     else
    452     {
    453         this_mv.col = (startmv.col - 8) | 6;
    454         left = svf(y - 1, d->pre_stride, 6, this_mv.row & 7, z, b->src_stride, &sse);
    455     }
    456 
    457     left += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
    458 
    459     if (left < bestmse)
    460     {
    461         *bestmv = this_mv;
    462         bestmse = left;
    463     }
    464 
    465     this_mv.col += 4;
    466     right = svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
    467     right += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
    468 
    469     if (right < bestmse)
    470     {
    471         *bestmv = this_mv;
    472         bestmse = right;
    473     }
    474 
    475     // go up then down and check error
    476     this_mv.col = startmv.col;
    477 
    478     if (startmv.row & 7)
    479     {
    480         this_mv.row = startmv.row - 2;
    481         up = svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
    482     }
    483     else
    484     {
    485         this_mv.row = (startmv.row - 8) | 6;
    486         up = svf(y - d->pre_stride, d->pre_stride, this_mv.col & 7, 6, z, b->src_stride, &sse);
    487     }
    488 
    489     up += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
    490 
    491     if (up < bestmse)
    492     {
    493         *bestmv = this_mv;
    494         bestmse = up;
    495     }
    496 
    497     this_mv.row += 4;
    498     down = svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
    499     down += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
    500 
    501     if (down < bestmse)
    502     {
    503         *bestmv = this_mv;
    504         bestmse = down;
    505     }
    506 
    507 
    508     // now check 1 more diagonal
    509     whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
    510 
    511 //  for(whichdir=0;whichdir<4;whichdir++)
    512 //  {
    513     this_mv = startmv;
    514 
    515     switch (whichdir)
    516     {
    517     case 0:
    518 
    519         if (startmv.row & 7)
    520         {
    521             this_mv.row -= 2;
    522 
    523             if (startmv.col & 7)
    524             {
    525                 this_mv.col -= 2;
    526                 diag = svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
    527             }
    528             else
    529             {
    530                 this_mv.col = (startmv.col - 8) | 6;
    531                 diag = svf(y - 1, d->pre_stride, 6, this_mv.row & 7, z, b->src_stride, &sse);
    532             }
    533         }
    534         else
    535         {
    536             this_mv.row = (startmv.row - 8) | 6;
    537 
    538             if (startmv.col & 7)
    539             {
    540                 this_mv.col -= 2;
    541                 diag = svf(y - d->pre_stride, d->pre_stride, this_mv.col & 7, 6, z, b->src_stride, &sse);
    542             }
    543             else
    544             {
    545                 this_mv.col = (startmv.col - 8) | 6;
    546                 diag = svf(y - d->pre_stride - 1, d->pre_stride, 6, 6, z, b->src_stride, &sse);
    547             }
    548         }
    549 
    550         break;
    551     case 1:
    552         this_mv.col += 2;
    553 
    554         if (startmv.row & 7)
    555         {
    556             this_mv.row -= 2;
    557             diag = svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
    558         }
    559         else
    560         {
    561             this_mv.row = (startmv.row - 8) | 6;
    562             diag = svf(y - d->pre_stride, d->pre_stride, this_mv.col & 7, 6, z, b->src_stride, &sse);
    563         }
    564 
    565         break;
    566     case 2:
    567         this_mv.row += 2;
    568 
    569         if (startmv.col & 7)
    570         {
    571             this_mv.col -= 2;
    572             diag = svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
    573         }
    574         else
    575         {
    576             this_mv.col = (startmv.col - 8) | 6;
    577             diag = svf(y - 1, d->pre_stride, 6, this_mv.row & 7, z, b->src_stride, &sse);
    578         }
    579 
    580         break;
    581     case 3:
    582         this_mv.col += 2;
    583         this_mv.row += 2;
    584         diag = svf(y, d->pre_stride,  this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
    585         break;
    586     }
    587 
    588     diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
    589 
    590     if (diag < bestmse)
    591     {
    592         *bestmv = this_mv;
    593         bestmse = diag;
    594     }
    595 
    596 //  }
    597 
    598     return bestmse;
    599 }
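/* The non-iterative variant above makes one half-pel pass (left, right, up, down,
 * plus the single diagonal implied by the better horizontal and the better
 * vertical result) and then one quarter-pel pass around the winner.  Positions are
 * tracked in 1/8-pel units; expressions such as (startmv.col - 8) | 4 pick the
 * half-pel position one half pel to the left of a full-pel column, and the base
 * pointer y is stepped back one pixel or one row when the half-pel winner moved
 * left or up, keeping the fractional offsets passed to svf() in the 0..7 range. */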
    600 
    601 int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, vp8_subpixvariance_fn_t svf, vp8_variance_fn_t vf, int *mvcost[2])
    602 {
    603     int bestmse = INT_MAX;
    604     MV startmv;
    605     //MV this_mv;
    606     MV this_mv;
    607     unsigned char *y = *(d->base_pre) + d->pre + (bestmv->row) * d->pre_stride + bestmv->col;
    608     unsigned char *z = (*(b->base_src) + b->src);
    609     int left, right, up, down, diag;
    610     unsigned int sse;
    611 
    612     // Trap uncodable vectors
    613     if ((abs((bestmv->col << 3) - ref_mv->col) > MAX_FULL_PEL_VAL) || (abs((bestmv->row << 3) - ref_mv->row) > MAX_FULL_PEL_VAL))
    614     {
    615         bestmv->row <<= 3;
    616         bestmv->col <<= 3;
    617         return INT_MAX;
    618     }
    619 
    620     // central mv
    621     bestmv->row <<= 3;
    622     bestmv->col <<= 3;
    623     startmv = *bestmv;
    624 
    625     // calculate central point error
    626     bestmse = vf(y, d->pre_stride, z, b->src_stride, &sse);
    627     bestmse += vp8_mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
    628 
    629     // go left then right and check error
    630     this_mv.row = startmv.row;
    631     this_mv.col = ((startmv.col - 8) | 4);
    632     left = svf(y - 1, d->pre_stride, 4, 0, z, b->src_stride, &sse);
    633     left += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
    634 
    635     if (left < bestmse)
    636     {
    637         *bestmv = this_mv;
    638         bestmse = left;
    639     }
    640 
    641     this_mv.col += 8;
    642     right = svf(y, d->pre_stride, 4, 0, z, b->src_stride, &sse);
    643     right += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
    644 
    645     if (right < bestmse)
    646     {
    647         *bestmv = this_mv;
    648         bestmse = right;
    649     }
    650 
    651     // go up then down and check error
    652     this_mv.col = startmv.col;
    653     this_mv.row = ((startmv.row - 8) | 4);
    654     up = svf(y - d->pre_stride, d->pre_stride, 0, 4, z, b->src_stride, &sse);
    655     up += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
    656 
    657     if (up < bestmse)
    658     {
    659         *bestmv = this_mv;
    660         bestmse = up;
    661     }
    662 
    663     this_mv.row += 8;
    664     down = svf(y, d->pre_stride, 0, 4, z, b->src_stride, &sse);
    665     down += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
    666 
    667     if (down < bestmse)
    668     {
    669         *bestmv = this_mv;
    670         bestmse = down;
    671     }
    672 
    673     // Somewhat strangely, not checking all the diagonals for half pel is slower than checking them all.
    674 #if 0
    675     // now check 1 more diagonal -
    676     whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
    677     this_mv = startmv;
    678 
    679     switch (whichdir)
    680     {
    681     case 0:
    682         this_mv.col = (this_mv.col - 8) | 4;
    683         this_mv.row = (this_mv.row - 8) | 4;
    684         diag = svf(y - 1 - d->pre_stride, d->pre_stride, 4, 4, z, b->src_stride, &sse);
    685         break;
    686     case 1:
    687         this_mv.col += 4;
    688         this_mv.row = (this_mv.row - 8) | 4;
    689         diag = svf(y - d->pre_stride, d->pre_stride, 4, 4, z, b->src_stride, &sse);
    690         break;
    691     case 2:
    692         this_mv.col = (this_mv.col - 8) | 4;
    693         this_mv.row += 4;
    694         diag = svf(y - 1, d->pre_stride, 4, 4, z, b->src_stride, &sse);
    695         break;
    696     case 3:
    697         this_mv.col += 4;
    698         this_mv.row += 4;
    699         diag = svf(y, d->pre_stride, 4, 4, z, b->src_stride, &sse);
    700         break;
    701     }
    702 
    703     diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
    704 
    705     if (diag < bestmse)
    706     {
    707         *bestmv = this_mv;
    708         bestmse = diag;
    709     }
    710 
    711 #else
    712     this_mv.col = (this_mv.col - 8) | 4;
    713     this_mv.row = (this_mv.row - 8) | 4;
    714     diag = svf(y - 1 - d->pre_stride, d->pre_stride, 4, 4, z, b->src_stride, &sse);
    715     diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
    716 
    717     if (diag < bestmse)
    718     {
    719         *bestmv = this_mv;
    720         bestmse = diag;
    721     }
    722 
    723     this_mv.col += 8;
    724     diag = svf(y - d->pre_stride, d->pre_stride, 4, 4, z, b->src_stride, &sse);
    725     diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
    726 
    727     if (diag < bestmse)
    728     {
    729         *bestmv = this_mv;
    730         bestmse = diag;
    731     }
    732 
    733     this_mv.col = (this_mv.col - 8) | 4;
    734     this_mv.row = startmv.row + 4;
    735     diag = svf(y - 1, d->pre_stride, 4, 4, z, b->src_stride, &sse);
    736     diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
    737 
    738     if (diag < bestmse)
    739     {
    740         *bestmv = this_mv;
    741         bestmse = diag;
    742     }
    743 
    744     this_mv.col += 8;
    745     diag = svf(y, d->pre_stride, 4, 4, z, b->src_stride, &sse);
    746     diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
    747 
    748     if (diag < bestmse)
    749     {
    750         *bestmv = this_mv;
    751         bestmse = diag;
    752     }
    753 
    754 #endif
    755     return bestmse;
    756 }
    757 
    758 
    759 #define MVC(r,c) (((mvsadcost[0][((r)<<2)-rr] + mvsadcost[1][((c)<<2) - rc]) * error_per_bit + 128 )>>8 ) // estimated cost of a motion vector (r,c)
    760 #define PRE(r,c) (*(d->base_pre) + d->pre + (r) * d->pre_stride + (c)) // pointer to the predictor base for a motion vector
    761 #define DIST(r,c,v) sf( src,src_stride,PRE(r,c),d->pre_stride, v) // returns the SAD error score
    762 #define ERR(r,c,v) (MVC(r,c)+DIST(r,c,v)) // returns distortion + motion vector cost
    763 #define CHECK_BETTER(v,r,c) if ((v = ERR(r,c,besterr)) < besterr) { besterr = v; br=r; bc=c; } // checks if (r,c) has better score than previous best
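/* These macros mirror the earlier sub-pixel set, but at full-pel granularity with
 * SAD instead of variance: PRE(r,c) addresses whole pixels directly, DIST() is a
 * plain SAD with the current best passed in as an early-out threshold, and MVC()
 * shifts the full-pel candidate up by 2 so it can be costed against the 1/4-pel
 * reference position (rr, rc) using the mvsadcost tables. */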
    764 
    765 int vp8_hex_search
    766 (
    767     MACROBLOCK *x,
    768     BLOCK *b,
    769     BLOCKD *d,
    770     MV *ref_mv,
    771     MV *best_mv,
    772     int search_param,
    773     int error_per_bit,
    774     int *num00,
    775     vp8_variance_fn_t vf,
    776     vp8_sad_fn_t      sf,
    777     int *mvsadcost[2],
    778     int *mvcost[2]
    779 )
    780 {
    781     MV hex[6] = { { -2, 0}, { -1, -2}, { -1, 2}, {2, 0}, {1, 2}, {1, -2} } ;
    782     MV neighbors[8] = { { -1, -1}, { -1, 0}, { -1, 1}, {0, -1}, {0, 1}, {1, -1}, {1, 0}, {1, 1} } ;
    783     int i, j;
    784     unsigned char *src = (*(b->base_src) + b->src);
    785     int src_stride = b->src_stride;
    786     int rr = ref_mv->row, rc = ref_mv->col, br = rr, bc = rc, tr, tc;
    787     unsigned int besterr, thiserr = 0x7fffffff;
    788 
    789     if (rc < x->mv_col_min) bc = x->mv_col_min;
    790 
    791     if (rc > x->mv_col_max) bc = x->mv_col_max;
    792 
    793     if (rr < x->mv_row_min) br = x->mv_row_min;
    794 
    795     if (rr > x->mv_row_max) br = x->mv_row_max;
    796 
    797     rr >>= 1;
    798     rc >>= 1;
    799     br >>= 3;
    800     bc >>= 3;
    801 
    802     besterr = ERR(br, bc, thiserr);
    803 
    804     // Hex search. jbb: changed the limit to 127 to avoid the max-256 problem when stepping by 2.
    805     for (j = 0; j < 127; j++)
    806     {
    807         tr = br;
    808         tc = bc;
    809 
    810         for (i = 0; i < 6; i++)
    811         {
    812             int nr = tr + hex[i].row, nc = tc + hex[i].col;
    813 
    814             if (nc < x->mv_col_min) continue;
    815 
    816             if (nc > x->mv_col_max) continue;
    817 
    818             if (nr < x->mv_row_min) continue;
    819 
    820             if (nr > x->mv_row_max) continue;
    821 
    822             CHECK_BETTER(thiserr, nr, nc);
    823         }
    824 
    825         if (tr == br && tc == bc)
    826             break;
    827     }
    828 
    829     // Check the 8 neighbors that are 1 away.
    830     tr = br;
    831     tc = bc;
    832 
    833     for (i = 0; i < 8; i++)
    834     {
    835         int nr = tr + neighbors[i].row, nc = tc + neighbors[i].col;
    836 
    837         if (nc < x->mv_col_min) continue;
    838 
    839         if (nc > x->mv_col_max) continue;
    840 
    841         if (nr < x->mv_row_min) continue;
    842 
    843         if (nr > x->mv_row_max) continue;
    844 
    845         CHECK_BETTER(thiserr, nr, nc);
    846     }
    847 
    848     best_mv->row = br;
    849     best_mv->col = bc;
    850 
    851     return vf(src, src_stride, PRE(br, bc), d->pre_stride, &thiserr) + MVC(br, bc) ;
    852 }
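/* Geometry of the search above: hex[] is a six-point pattern two pels tall and two
 * pels wide around the current best (rows -2/+2 on the centre column, rows -1/+1
 * paired with columns -2/+2), re-centred after every pass and capped at 127
 * passes; once the hexagon stops moving, neighbors[] refines the result with the
 * eight directly adjacent full-pel positions.  The final score is re-measured with
 * the variance function vf() plus the motion-vector cost. */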
    853 #undef MVC
    854 #undef PRE
    855 #undef SP
    856 #undef DIST
    857 #undef ERR
    858 #undef CHECK_BETTER
    859 int vp8_diamond_search_sad
    860 (
    861     MACROBLOCK *x,
    862     BLOCK *b,
    863     BLOCKD *d,
    864     MV *ref_mv,
    865     MV *best_mv,
    866     int search_param,
    867     int error_per_bit,
    868     int *num00,
    869     vp8_variance_fn_ptr_t *fn_ptr,
    870     int *mvsadcost[2],
    871     int *mvcost[2]
    872 )
    873 {
    874     int i, j, step;
    875 
    876     unsigned char *what = (*(b->base_src) + b->src);
    877     int what_stride = b->src_stride;
    878     unsigned char *in_what;
    879     int in_what_stride = d->pre_stride;
    880     unsigned char *best_address;
    881 
    882     int tot_steps;
    883     MV this_mv;
    884 
    885     int bestsad = INT_MAX;
    886     int best_site = 0;
    887     int last_site = 0;
    888 
    889     int ref_row = ref_mv->row >> 3;
    890     int ref_col = ref_mv->col >> 3;
    891     int this_row_offset;
    892     int this_col_offset;
    893     search_site *ss;
    894 
    895     unsigned char *check_here;
    896     int thissad;
    897 
    898     // Work out the start point for the search
    899     in_what = (unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col);
    900     best_address = in_what;
    901 
    902     // We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
    903     if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
    904     (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
    905     {
    906         // Check the starting position
    907         bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, ref_mv, mvsadcost, error_per_bit);
    908     }
    909 
    910     // search_param determines the length of the initial step and hence the number of iterations
    911     // 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
    912     ss = &x->ss[search_param * x->searches_per_step];
    913     tot_steps = (x->ss_count / x->searches_per_step) - search_param;
    914 
    915     i = 1;
    916     best_mv->row = ref_row;
    917     best_mv->col = ref_col;
    918 
    919     *num00 = 0;
    920 
    921     for (step = 0; step < tot_steps ; step++)
    922     {
    923         for (j = 0 ; j < x->searches_per_step ; j++)
    924         {
    925             // Trap illegal vectors
    926             this_row_offset = best_mv->row + ss[i].mv.row;
    927             this_col_offset = best_mv->col + ss[i].mv.col;
    928 
    929             if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
    930             (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
    931 
    932             {
    933                 check_here = ss[i].offset + best_address;
    934                 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
    935 
    936                 if (thissad < bestsad)
    937                 {
    938                     this_mv.row = this_row_offset << 3;
    939                     this_mv.col = this_col_offset << 3;
    940                     thissad += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
    941 
    942                     if (thissad < bestsad)
    943                     {
    944                         bestsad = thissad;
    945                         best_site = i;
    946                     }
    947                 }
    948             }
    949 
    950             i++;
    951         }
    952 
    953         if (best_site != last_site)
    954         {
    955             best_mv->row += ss[best_site].mv.row;
    956             best_mv->col += ss[best_site].mv.col;
    957             best_address += ss[best_site].offset;
    958             last_site = best_site;
    959         }
    960         else if (best_address == in_what)
    961             (*num00)++;
    962     }
    963 
    964     this_mv.row = best_mv->row << 3;
    965     this_mv.col = best_mv->col << 3;
    966 
    967     if (bestsad == INT_MAX)
    968         return INT_MAX;
    969 
    970     return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
    971     + vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
    972 }
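/* Overview of the diamond search above: candidate sites come from the table built
 * by vp8_init_dsmotion_compensation() / vp8_init3smotion_compensation(), entered
 * at step 'search_param', so a larger search_param means a smaller initial pattern
 * and fewer steps.  Candidates are scored as SAD plus the mvsadcost-based rate
 * term, and only the final winner is re-scored with the variance function and the
 * real mvcost tables.  *num00 counts steps on which the best position stayed at
 * the search origin; callers appear to use it to decide how many further,
 * finer-grained searches can be skipped. */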
    973 
    974 int vp8_diamond_search_sadx4
    975 (
    976     MACROBLOCK *x,
    977     BLOCK *b,
    978     BLOCKD *d,
    979     MV *ref_mv,
    980     MV *best_mv,
    981     int search_param,
    982     int error_per_bit,
    983     int *num00,
    984     vp8_variance_fn_ptr_t *fn_ptr,
    985     int *mvsadcost[2],
    986     int *mvcost[2]
    987 )
    988 {
    989     int i, j, step;
    990 
    991     unsigned char *what = (*(b->base_src) + b->src);
    992     int what_stride = b->src_stride;
    993     unsigned char *in_what;
    994     int in_what_stride = d->pre_stride;
    995     unsigned char *best_address;
    996 
    997     int tot_steps;
    998     MV this_mv;
    999 
   1000     int bestsad = INT_MAX;
   1001     int best_site = 0;
   1002     int last_site = 0;
   1003 
   1004     int ref_row = ref_mv->row >> 3;
   1005     int ref_col = ref_mv->col >> 3;
   1006     int this_row_offset;
   1007     int this_col_offset;
   1008     search_site *ss;
   1009 
   1010     unsigned char *check_here;
   1011     unsigned int thissad;
   1012 
   1013     // Work out the start point for the search
   1014     in_what = (unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col);
   1015     best_address = in_what;
   1016 
   1017     // We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
   1018     if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
   1019     (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
   1020     {
   1021         // Check the starting position
   1022         bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, ref_mv, mvsadcost, error_per_bit);
   1023     }
   1024 
   1025     // search_param determines the length of the initial step and hence the number of iterations
   1026     // 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
   1027     ss = &x->ss[search_param * x->searches_per_step];
   1028     tot_steps = (x->ss_count / x->searches_per_step) - search_param;
   1029 
   1030     i = 1;
   1031     best_mv->row = ref_row;
   1032     best_mv->col = ref_col;
   1033 
   1034     *num00 = 0;
   1035 
   1036     for (step = 0; step < tot_steps ; step++)
   1037     {
   1038         int check_row_min, check_col_min, check_row_max, check_col_max;
   1039 
   1040         check_row_min = x->mv_row_min - best_mv->row;
   1041         check_row_max = x->mv_row_max - best_mv->row;
   1042         check_col_min = x->mv_col_min - best_mv->col;
   1043         check_col_max = x->mv_col_max - best_mv->col;
   1044 
   1045         for (j = 0 ; j < x->searches_per_step ; j += 4)
   1046         {
   1047             unsigned char *block_offset[4];
   1048             unsigned int valid_block[4];
   1049             int all_in = 1, t;
   1050 
   1051             for (t = 0; t < 4; t++)
   1052             {
   1053                 valid_block [t]  = (ss[t+i].mv.col > check_col_min);
   1054                 valid_block [t] &= (ss[t+i].mv.col < check_col_max);
   1055                 valid_block [t] &= (ss[t+i].mv.row > check_row_min);
   1056                 valid_block [t] &= (ss[t+i].mv.row < check_row_max);
   1057 
   1058                 all_in &= valid_block[t];
   1059                 block_offset[t] = ss[i+t].offset + best_address;
   1060             }
   1061 
   1062             if (all_in)
   1063             {
   1064                 unsigned int sad_array[4];
   1065 
   1066                 fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, sad_array);
   1067 
   1068                 for (t = 0; t < 4; t++, i++)
   1069                 {
   1070                     thissad = sad_array[t];
   1071 
   1072                     if (thissad < bestsad)
   1073                     {
   1074                         this_mv.row = (best_mv->row + ss[i].mv.row) << 3;
   1075                         this_mv.col = (best_mv->col + ss[i].mv.col) << 3;
   1076                         thissad += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
   1077 
   1078                         if (thissad < bestsad)
   1079                         {
   1080                             bestsad = thissad;
   1081                             best_site = i;
   1082                         }
   1083                     }
   1084                 }
   1085             }
   1086             else
   1087             {
   1088                 int t;
   1089 
   1090                 for (t = 0; t < 4; i++, t++)
   1091                 {
   1092                     // Trap illegal vectors
   1093                     if (valid_block[t])
   1094 
   1095                     {
   1096                         check_here = block_offset[t];
   1097                         thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
   1098 
   1099                         if (thissad < bestsad)
   1100                         {
   1101                             this_row_offset = best_mv->row + ss[i].mv.row;
   1102                             this_col_offset = best_mv->col + ss[i].mv.col;
   1103 
   1104                             this_mv.row = this_row_offset << 3;
   1105                             this_mv.col = this_col_offset << 3;
   1106                             thissad += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
   1107 
   1108                             if (thissad < bestsad)
   1109                             {
   1110                                 bestsad = thissad;
   1111                                 best_site = i;
   1112                             }
   1113                         }
   1114                     }
   1115                 }
   1116             }
   1117         }
   1118 
   1119         if (best_site != last_site)
   1120         {
   1121             best_mv->row += ss[best_site].mv.row;
   1122             best_mv->col += ss[best_site].mv.col;
   1123             best_address += ss[best_site].offset;
   1124             last_site = best_site;
   1125         }
   1126         else if (best_address == in_what)
   1127             (*num00)++;
   1128     }
   1129 
   1130     this_mv.row = best_mv->row << 3;
   1131     this_mv.col = best_mv->col << 3;
   1132 
   1133     if (bestsad == INT_MAX)
   1134         return INT_MAX;
   1135 
   1136     return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
   1137     + vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
   1138 }
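/* vp8_diamond_search_sadx4() is the SIMD-friendly variant of the routine above:
 * for each group of four candidate sites it first checks whether all four lie
 * strictly inside the motion-vector limits, and if so evaluates them with a single
 * sdx4df() call (four SADs at once); groups that straddle the border fall back to
 * individual sdf() calls per valid site. */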
   1139 
   1140 
   1141 int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], int *mvsadcost[2])
   1142 {
   1143     unsigned char *what = (*(b->base_src) + b->src);
   1144     int what_stride = b->src_stride;
   1145     unsigned char *in_what;
   1146     int in_what_stride = d->pre_stride;
   1147     int mv_stride = d->pre_stride;
   1148     unsigned char *bestaddress;
   1149     MV *best_mv = &d->bmi.mv.as_mv;
   1150     MV this_mv;
   1151     int bestsad = INT_MAX;
   1152     int r, c;
   1153 
   1154     unsigned char *check_here;
   1155     int thissad;
   1156 
   1157     int ref_row = ref_mv->row >> 3;
   1158     int ref_col = ref_mv->col >> 3;
   1159 
   1160     int row_min = ref_row - distance;
   1161     int row_max = ref_row + distance;
   1162     int col_min = ref_col - distance;
   1163     int col_max = ref_col + distance;
   1164 
   1165     // Work out the mid point for the search
   1166     in_what = *(d->base_pre) + d->pre;
   1167     bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
   1168 
   1169     best_mv->row = ref_row;
   1170     best_mv->col = ref_col;
   1171 
   1172     // We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
   1173     if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
   1174     (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
   1175     {
   1176         // Baseline value at the centre
   1177 
   1178         //bestsad = fn_ptr->sf( what,what_stride,bestaddress,in_what_stride) + (int)sqrt(vp8_mv_err_cost(ref_mv,ref_mv, mvcost,error_per_bit*14));
   1179         bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, ref_mv, mvsadcost, error_per_bit);
   1180     }
   1181 
   1182     // Apply further limits to prevent us from using vectors that stretch beyond the UMV border
   1183     if (col_min < x->mv_col_min)
   1184         col_min = x->mv_col_min;
   1185 
   1186     if (col_max > x->mv_col_max)
   1187         col_max = x->mv_col_max;
   1188 
   1189     if (row_min < x->mv_row_min)
   1190         row_min = x->mv_row_min;
   1191 
   1192     if (row_max > x->mv_row_max)
   1193         row_max = x->mv_row_max;
   1194 
   1195     for (r = row_min; r < row_max ; r++)
   1196     {
   1197         this_mv.row = r << 3;
   1198         check_here = r * mv_stride + in_what + col_min;
   1199 
   1200         for (c = col_min; c < col_max; c++)
   1201         {
   1202             thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
   1203 
   1204             this_mv.col = c << 3;
   1205             //thissad += (int)sqrt(vp8_mv_err_cost(&this_mv,ref_mv, mvcost,error_per_bit*14));
   1206             //thissad  += error_per_bit * mv_bits_sadcost[mv_bits(&this_mv, ref_mv, mvcost)];
   1207             thissad  += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit); //mv_bits(error_per_bit, &this_mv, ref_mv, mvsadcost);
   1208 
   1209             if (thissad < bestsad)
   1210             {
   1211                 bestsad = thissad;
   1212                 best_mv->row = r;
   1213                 best_mv->col = c;
   1214                 bestaddress = check_here;
   1215             }
   1216 
   1217             check_here++;
   1218         }
   1219     }
   1220 
   1221     this_mv.row = best_mv->row << 3;
   1222     this_mv.col = best_mv->col << 3;
   1223 
   1224     if (bestsad < INT_MAX)
   1225         return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
   1226         + vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
   1227     else
   1228         return INT_MAX;
   1229 }
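/* vp8_full_search_sad() is an exhaustive raster scan: every full-pel position
 * within +/- 'distance' of ref_mv (clamped so vectors never stretch beyond the UMV
 * border) is scored as SAD plus the mvsadcost rate term, and the single best
 * position is then re-scored with the variance function plus the mvcost rate term
 * so the return value is comparable with the other search routines. */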
   1230 
   1231 int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], int *mvsadcost[2])
   1232 {
   1233     unsigned char *what = (*(b->base_src) + b->src);
   1234     int what_stride = b->src_stride;
   1235     unsigned char *in_what;
   1236     int in_what_stride = d->pre_stride;
   1237     int mv_stride = d->pre_stride;
   1238     unsigned char *bestaddress;
   1239     MV *best_mv = &d->bmi.mv.as_mv;
   1240     MV this_mv;
   1241     int bestsad = INT_MAX;
   1242     int r, c;
   1243 
   1244     unsigned char *check_here;
   1245     unsigned int thissad;
   1246 
   1247     int ref_row = ref_mv->row >> 3;
   1248     int ref_col = ref_mv->col >> 3;
   1249 
   1250     int row_min = ref_row - distance;
   1251     int row_max = ref_row + distance;
   1252     int col_min = ref_col - distance;
   1253     int col_max = ref_col + distance;
   1254 
   1255     unsigned int sad_array[3];
   1256 
   1257     // Work out the mid point for the search
   1258     in_what = *(d->base_pre) + d->pre;
   1259     bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
   1260 
   1261     best_mv->row = ref_row;
   1262     best_mv->col = ref_col;
   1263 
   1264     // We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
   1265     if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
   1266     (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
   1267     {
   1268         // Baseline value at the centre
   1269         bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, ref_mv, mvsadcost, error_per_bit);
   1270     }
   1271 
   1272     // Apply further limits to prevent us from using vectors that stretch beyond the UMV border
   1273     if (col_min < x->mv_col_min)
   1274         col_min = x->mv_col_min;
   1275 
   1276     if (col_max > x->mv_col_max)
   1277         col_max = x->mv_col_max;
   1278 
   1279     if (row_min < x->mv_row_min)
   1280         row_min = x->mv_row_min;
   1281 
   1282     if (row_max > x->mv_row_max)
   1283         row_max = x->mv_row_max;
   1284 
   1285     for (r = row_min; r < row_max ; r++)
   1286     {
   1287         this_mv.row = r << 3;
   1288         check_here = r * mv_stride + in_what + col_min;
   1289         c = col_min;
   1290 
   1291         while ((c + 3) < col_max)
   1292         {
   1293             int i;
   1294 
   1295             fn_ptr->sdx3f(what, what_stride, check_here , in_what_stride, sad_array);
   1296 
   1297             for (i = 0; i < 3; i++)
   1298             {
   1299                 thissad = sad_array[i];
   1300 
   1301                 if (thissad < bestsad)
   1302                 {
   1303                     this_mv.col = c << 3;
   1304                     thissad  += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
   1305 
   1306                     if (thissad < bestsad)
   1307                     {
   1308                         bestsad = thissad;
   1309                         best_mv->row = r;
   1310                         best_mv->col = c;
   1311                         bestaddress = check_here;
   1312                     }
   1313                 }
   1314 
   1315                 check_here++;
   1316                 c++;
   1317             }
   1318         }
   1319 
   1320         while (c < col_max)
   1321         {
   1322             thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
   1323 
   1324             if (thissad < bestsad)
   1325             {
   1326                 this_mv.col = c << 3;
   1327                 thissad  += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
   1328 
   1329                 if (thissad < bestsad)
   1330                 {
   1331                     bestsad = thissad;
   1332                     best_mv->row = r;
   1333                     best_mv->col = c;
   1334                     bestaddress = check_here;
   1335                 }
   1336             }
   1337 
   1338             check_here ++;
   1339             c ++;
   1340         }
   1341 
   1342     }
   1343 
   1344     this_mv.row = best_mv->row << 3;
   1345     this_mv.col = best_mv->col << 3;
   1346 
   1347     if (bestsad < INT_MAX)
   1348         return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
   1349         + vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
   1350     else
   1351         return INT_MAX;
   1352 }
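/* The sadx3 variant above performs the same raster scan, but walks each row three
 * candidates at a time through sdx3f() (three SADs per call, typically backed by a
 * SIMD kernel) and finishes the remaining columns of the row with scalar sdf()
 * calls. */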
   1353 
   1354 #ifdef ENTROPY_STATS
   1355 void print_mode_context(void)
   1356 {
   1357     FILE *f = fopen("modecont.c", "w");
   1358     int i, j;
   1359 
   1360     fprintf(f, "#include \"entropy.h\"\n");
   1361     fprintf(f, "const int vp8_mode_contexts[6][4] =\n");
   1362     fprintf(f, "{\n");
   1363 
   1364     for (j = 0; j < 6; j++)
   1365     {
   1366         fprintf(f, "  { // %d \n", j);
   1367         fprintf(f, "    ");
   1368 
   1369         for (i = 0; i < 4; i++)
   1370         {
   1371             int overal_prob;
   1372             int this_prob;
   1373             int count; // = mv_ref_ct[j][i][0]+mv_ref_ct[j][i][1];
   1374 
   1375             // Overall probs
   1376             count = mv_mode_cts[i][0] + mv_mode_cts[i][1];
   1377 
   1378             if (count)
   1379                 overal_prob = 256 * mv_mode_cts[i][0] / count;
   1380             else
   1381                 overal_prob = 128;
   1382 
   1383             if (overal_prob == 0)
   1384                 overal_prob = 1;
   1385 
   1386             // context probs
   1387             count = mv_ref_ct[j][i][0] + mv_ref_ct[j][i][1];
   1388 
   1389             if (count)
   1390                 this_prob = 256 * mv_ref_ct[j][i][0] / count;
   1391             else
   1392                 this_prob = 128;
   1393 
   1394             if (this_prob == 0)
   1395                 this_prob = 1;
   1396 
   1397             fprintf(f, "%5d, ", this_prob);
   1398             //fprintf(f,"%5d, %5d, %8d,", this_prob, overal_prob, (this_prob << 10)/overal_prob);
   1399             //fprintf(f,"%8d, ", (this_prob << 10)/overal_prob);
   1400         }
   1401 
   1402         fprintf(f, "  },\n");
   1403     }
   1404 
   1405     fprintf(f, "};\n");
   1406     fclose(f);
   1407 }
   1408 
   1409 /* MV ref count ENTROPY_STATS stats code */
   1410 #ifdef ENTROPY_STATS
   1411 void init_mv_ref_counts()
   1412 {
   1413     vpx_memset(mv_ref_ct, 0, sizeof(mv_ref_ct));
   1414     vpx_memset(mv_mode_cts, 0, sizeof(mv_mode_cts));
   1415 }
   1416 
   1417 void accum_mv_refs(MB_PREDICTION_MODE m, const int ct[4])
   1418 {
   1419     if (m == ZEROMV)
   1420     {
   1421         ++mv_ref_ct [ct[0]] [0] [0];
   1422         ++mv_mode_cts[0][0];
   1423     }
   1424     else
   1425     {
   1426         ++mv_ref_ct [ct[0]] [0] [1];
   1427         ++mv_mode_cts[0][1];
   1428 
   1429         if (m == NEARESTMV)
   1430         {
   1431             ++mv_ref_ct [ct[1]] [1] [0];
   1432             ++mv_mode_cts[1][0];
   1433         }
   1434         else
   1435         {
   1436             ++mv_ref_ct [ct[1]] [1] [1];
   1437             ++mv_mode_cts[1][1];
   1438 
   1439             if (m == NEARMV)
   1440             {
   1441                 ++mv_ref_ct [ct[2]] [2] [0];
   1442                 ++mv_mode_cts[2][0];
   1443             }
   1444             else
   1445             {
   1446                 ++mv_ref_ct [ct[2]] [2] [1];
   1447                 ++mv_mode_cts[2][1];
   1448 
   1449                 if (m == NEWMV)
   1450                 {
   1451                     ++mv_ref_ct [ct[3]] [3] [0];
   1452                     ++mv_mode_cts[3][0];
   1453                 }
   1454                 else
   1455                 {
   1456                     ++mv_ref_ct [ct[3]] [3] [1];
   1457                     ++mv_mode_cts[3][1];
   1458                 }
   1459             }
   1460         }
   1461     }
   1462 }
   1463 
   1464 #endif/* END MV ref count ENTROPY_STATS stats code */
   1465 
   1466 #endif
   1467