/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <limits.h>
#include <math.h>
#include <stdio.h>

#include "./vp9_rtcd.h"
#include "./vpx_dsp_rtcd.h"

#include "vpx/vpx_codec.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"

#include "vp9/common/vp9_blockd.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_scan.h"

#include "vp9/encoder/vp9_cost.h"
#include "vp9/encoder/vp9_encoder.h"
#include "vp9/encoder/vp9_pickmode.h"
#include "vp9/encoder/vp9_ratectrl.h"
#include "vp9/encoder/vp9_rd.h"

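// A small fixed pool of prediction buffers used by the non-RD pick-mode
// path: get_pred_buffer() below hands out the first slot whose in_use flag
// is clear, and free_pred_buffer() releases a slot back to the pool.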
typedef struct {
  uint8_t *data;
  int stride;
  int in_use;
} PRED_BUFFER;

static const int pos_shift_16x16[4][4] = {
  { 9, 10, 13, 14 }, { 11, 12, 15, 16 }, { 17, 18, 21, 22 }, { 19, 20, 23, 24 }
};
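// Lightweight real-time variant of vp9_find_mv_refs(): scan the spatial
// neighbors for up to MAX_MV_REF_CANDIDATES mvs that use ref_frame, update
// the mode context, and, when use_base_mv is set for SVC, derive a scaled
// base-layer mv for LAST_FRAME. The return value is a const-motion hint
// for the caller.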
     47 
     48 static int mv_refs_rt(VP9_COMP *cpi, const VP9_COMMON *cm, const MACROBLOCK *x,
     49                       const MACROBLOCKD *xd, const TileInfo *const tile,
     50                       MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
     51                       int_mv *mv_ref_list, int_mv *base_mv, int mi_row,
     52                       int mi_col, int use_base_mv) {
     53   const int *ref_sign_bias = cm->ref_frame_sign_bias;
     54   int i, refmv_count = 0;
     55 
     56   const POSITION *const mv_ref_search = mv_ref_blocks[mi->sb_type];
     57 
     58   int different_ref_found = 0;
     59   int context_counter = 0;
     60   int const_motion = 0;
     61 
     62   // Blank the reference vector list
     63   memset(mv_ref_list, 0, sizeof(*mv_ref_list) * MAX_MV_REF_CANDIDATES);
     64 
  // The nearest 2 blocks are treated differently: if the size is < 8x8 we
  // get the mv from the bmi substructure, and we also need to keep a mode
  // count.
  for (i = 0; i < 2; ++i) {
    const POSITION *const mv_ref = &mv_ref_search[i];
    if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
      const MODE_INFO *const candidate_mi =
          xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
      // Keep counts for entropy encoding.
      context_counter += mode_2_counter[candidate_mi->mode];
      different_ref_found = 1;

      if (candidate_mi->ref_frame[0] == ref_frame)
        ADD_MV_REF_LIST(get_sub_block_mv(candidate_mi, 0, mv_ref->col, -1),
                        refmv_count, mv_ref_list, Done);
    }
  }

  const_motion = 1;

  // Check the rest of the neighbors in much the same way as before, except
  // that we don't need to keep track of sub blocks or mode counts.
  for (; i < MVREF_NEIGHBOURS && !refmv_count; ++i) {
    const POSITION *const mv_ref = &mv_ref_search[i];
    if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
      const MODE_INFO *const candidate_mi =
          xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
      different_ref_found = 1;

      if (candidate_mi->ref_frame[0] == ref_frame)
        ADD_MV_REF_LIST(candidate_mi->mv[0], refmv_count, mv_ref_list, Done);
    }
  }

  // Since we couldn't find 2 mvs from the same reference frame, go back
  // through the neighbors and find motion vectors from different reference
  // frames.
  if (different_ref_found && !refmv_count) {
    for (i = 0; i < MVREF_NEIGHBOURS; ++i) {
      const POSITION *mv_ref = &mv_ref_search[i];
      if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
        const MODE_INFO *const candidate_mi =
            xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];

        // If the candidate is INTRA we don't want to consider its mv.
        IF_DIFF_REF_FRAME_ADD_MV(candidate_mi, ref_frame, ref_sign_bias,
                                 refmv_count, mv_ref_list, Done);
      }
    }
  }
  if (use_base_mv &&
      !cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame &&
      ref_frame == LAST_FRAME) {
    // Get base layer mv.
    MV_REF *candidate =
        &cm->prev_frame
             ->mvs[(mi_col >> 1) + (mi_row >> 1) * (cm->mi_cols >> 1)];
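    // The base layer runs at half the resolution in each dimension (the
    // usual 2:1 SVC scale factor assumed here), so the (mi_row, mi_col)
    // lookup above drops one bit and the mv below is scaled up by 2.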
    if (candidate->mv[0].as_int != INVALID_MV) {
      base_mv->as_mv.row = (candidate->mv[0].as_mv.row * 2);
      base_mv->as_mv.col = (candidate->mv[0].as_mv.col * 2);
      clamp_mv_ref(&base_mv->as_mv, xd);
    } else {
      base_mv->as_int = INVALID_MV;
    }
  }

Done:

  x->mbmi_ext->mode_context[ref_frame] = counter_to_context[context_counter];

  // Clamp vectors
  for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i)
    clamp_mv_ref(&mv_ref_list[i].as_mv, xd);

  return const_motion;
}

static int combined_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
                                  BLOCK_SIZE bsize, int mi_row, int mi_col,
                                  int_mv *tmp_mv, int *rate_mv,
                                  int64_t best_rd_sofar, int use_base_mv) {
  MACROBLOCKD *xd = &x->e_mbd;
  MODE_INFO *mi = xd->mi[0];
  struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0 } };
  const int step_param = cpi->sf.mv.fullpel_search_step_param;
  const int sadpb = x->sadperbit16;
  MV mvp_full;
  const int ref = mi->ref_frame[0];
  const MV ref_mv = x->mbmi_ext->ref_mvs[ref][0].as_mv;
  MV center_mv;
  uint32_t dis;
  int rate_mode;
  const MvLimits tmp_mv_limits = x->mv_limits;
  int rv = 0;
  int cost_list[5];
  int search_subpel = 1;
  const YV12_BUFFER_CONFIG *scaled_ref_frame =
      vp9_get_scaled_ref_frame(cpi, ref);
  if (scaled_ref_frame) {
    int i;
    // Swap out the reference frame for a version that's been scaled to
    // match the resolution of the current frame, allowing the existing
    // motion search code to be used without additional modifications.
    for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0];
    vp9_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
  }
  vp9_set_mv_search_range(&x->mv_limits, &ref_mv);

  // Limit the motion vector under a large lighting change.
  if (cpi->oxcf.speed > 5 && x->lowvar_highsumdiff) {
    x->mv_limits.col_min = VPXMAX(x->mv_limits.col_min, -10);
    x->mv_limits.row_min = VPXMAX(x->mv_limits.row_min, -10);
    x->mv_limits.col_max = VPXMIN(x->mv_limits.col_max, 10);
    x->mv_limits.row_max = VPXMIN(x->mv_limits.row_max, 10);
  }

  assert(x->mv_best_ref_index[ref] <= 2);
  if (x->mv_best_ref_index[ref] < 2)
    mvp_full = x->mbmi_ext->ref_mvs[ref][x->mv_best_ref_index[ref]].as_mv;
  else
    mvp_full = x->pred_mv[ref];

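  // Motion vectors are kept in 1/8-pel units; drop 3 bits to get a full-pel
  // starting point for the integer search.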
  mvp_full.col >>= 3;
  mvp_full.row >>= 3;

  if (!use_base_mv)
    center_mv = ref_mv;
  else
    center_mv = tmp_mv->as_mv;

  if (x->sb_use_mv_part) {
    tmp_mv->as_mv.row = x->sb_mvrow_part >> 3;
    tmp_mv->as_mv.col = x->sb_mvcol_part >> 3;
  } else {
    vp9_full_pixel_search(
        cpi, x, bsize, &mvp_full, step_param, cpi->sf.mv.search_method, sadpb,
        cond_cost_list(cpi, cost_list), &center_mv, &tmp_mv->as_mv, INT_MAX, 0);
  }

  x->mv_limits = tmp_mv_limits;

  // Calculate the bit cost of the motion vector, converting the full-pel
  // result back to 1/8-pel units first.
  mvp_full.row = tmp_mv->as_mv.row * 8;
  mvp_full.col = tmp_mv->as_mv.col * 8;

  *rate_mv = vp9_mv_bit_cost(&mvp_full, &ref_mv, x->nmvjointcost, x->mvcost,
                             MV_COST_WEIGHT);

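  // Bail out before the subpel refinement if the rate cost of signaling
  // NEWMV with this vector alone already exceeds best_rd_sofar; distortion
  // can only push the RD cost higher.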
  rate_mode =
      cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref]][INTER_OFFSET(NEWMV)];
  rv =
      !(RDCOST(x->rdmult, x->rddiv, (*rate_mv + rate_mode), 0) > best_rd_sofar);

  // For SVC on a non-reference frame, avoid subpel for (0, 0) motion.
  if (cpi->use_svc && cpi->svc.non_reference_frame) {
    if (mvp_full.row == 0 && mvp_full.col == 0) search_subpel = 0;
  }

  if (rv && search_subpel) {
    int subpel_force_stop = cpi->sf.mv.subpel_force_stop;
    if (use_base_mv && cpi->sf.base_mv_aggressive) subpel_force_stop = 2;
    cpi->find_fractional_mv_step(
        x, &tmp_mv->as_mv, &ref_mv, cpi->common.allow_high_precision_mv,
        x->errorperbit, &cpi->fn_ptr[bsize], subpel_force_stop,
        cpi->sf.mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list),
        x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, 0, 0);
    *rate_mv = vp9_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
                               x->mvcost, MV_COST_WEIGHT);
  }

  if (scaled_ref_frame) {
    int i;
    for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i];
  }
  return rv;
}

static void block_variance(const uint8_t *src, int src_stride,
                           const uint8_t *ref, int ref_stride, int w, int h,
                           unsigned int *sse, int *sum, int block_size,
#if CONFIG_VP9_HIGHBITDEPTH
                           int use_highbitdepth, vpx_bit_depth_t bd,
#endif
                           uint32_t *sse8x8, int *sum8x8, uint32_t *var8x8) {
  int i, j, k = 0;

  *sse = 0;
  *sum = 0;

  for (i = 0; i < h; i += block_size) {
    for (j = 0; j < w; j += block_size) {
#if CONFIG_VP9_HIGHBITDEPTH
      if (use_highbitdepth) {
        switch (bd) {
          case VPX_BITS_8:
            vpx_highbd_8_get8x8var(src + src_stride * i + j, src_stride,
                                   ref + ref_stride * i + j, ref_stride,
                                   &sse8x8[k], &sum8x8[k]);
            break;
          case VPX_BITS_10:
            vpx_highbd_10_get8x8var(src + src_stride * i + j, src_stride,
                                    ref + ref_stride * i + j, ref_stride,
                                    &sse8x8[k], &sum8x8[k]);
            break;
          case VPX_BITS_12:
            vpx_highbd_12_get8x8var(src + src_stride * i + j, src_stride,
                                    ref + ref_stride * i + j, ref_stride,
                                    &sse8x8[k], &sum8x8[k]);
            break;
        }
      } else {
        vpx_get8x8var(src + src_stride * i + j, src_stride,
                      ref + ref_stride * i + j, ref_stride, &sse8x8[k],
                      &sum8x8[k]);
      }
#else
      vpx_get8x8var(src + src_stride * i + j, src_stride,
                    ref + ref_stride * i + j, ref_stride, &sse8x8[k],
                    &sum8x8[k]);
#endif
      *sse += sse8x8[k];
      *sum += sum8x8[k];
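      // var = E[x^2] - E[x]^2 over the 8x8 block: sse minus sum^2 / 64
      // (the >> 6); e.g. sse = 4096 with sum = 256 gives 4096 - 1024 = 3072.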
      var8x8[k] = sse8x8[k] - (uint32_t)(((int64_t)sum8x8[k] * sum8x8[k]) >> 6);
      k++;
    }
  }
}

static void calculate_variance(int bw, int bh, TX_SIZE tx_size,
                               unsigned int *sse_i, int *sum_i,
                               unsigned int *var_o, unsigned int *sse_o,
                               int *sum_o) {
  const BLOCK_SIZE unit_size = txsize_to_bsize[tx_size];
  const int nw = 1 << (bw - b_width_log2_lookup[unit_size]);
  const int nh = 1 << (bh - b_height_log2_lookup[unit_size]);
  int i, j, k = 0;

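  // Each output entry merges a 2x2 group of input units: sum the four sse
  // and sum values, then recompute the variance at the merged size. The
  // merged block holds 4 * (16 << (bwl + bhl)) pixels, hence the shift of
  // bwl + bhl + 6 below.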
  for (i = 0; i < nh; i += 2) {
    for (j = 0; j < nw; j += 2) {
      sse_o[k] = sse_i[i * nw + j] + sse_i[i * nw + j + 1] +
                 sse_i[(i + 1) * nw + j] + sse_i[(i + 1) * nw + j + 1];
      sum_o[k] = sum_i[i * nw + j] + sum_i[i * nw + j + 1] +
                 sum_i[(i + 1) * nw + j] + sum_i[(i + 1) * nw + j + 1];
      var_o[k] = sse_o[k] - (uint32_t)(((int64_t)sum_o[k] * sum_o[k]) >>
                                       (b_width_log2_lookup[unit_size] +
                                        b_height_log2_lookup[unit_size] + 6));
      k++;
    }
  }
}

// Adjust the ac_thr according to speed, width, height and normalized sum
static int ac_thr_factor(const int speed, const int width, const int height,
                         const int norm_sum) {
  if (speed >= 8 && norm_sum < 5) {
    if (width <= 640 && height <= 480)
      return 4;
    else
      return 2;
  }
  return 1;
}

static void model_rd_for_sb_y_large(VP9_COMP *cpi, BLOCK_SIZE bsize,
                                    MACROBLOCK *x, MACROBLOCKD *xd,
                                    int *out_rate_sum, int64_t *out_dist_sum,
                                    unsigned int *var_y, unsigned int *sse_y,
                                    int mi_row, int mi_col, int *early_term,
                                    int *flag_preduv_computed) {
  // Note our transform coeffs are 8 times an orthogonal transform.
  // Hence quantizer step is also 8 times. To get effective quantizer
  // we need to divide by 8 before sending to modeling function.
  unsigned int sse;
  int rate;
  int64_t dist;
  struct macroblock_plane *const p = &x->plane[0];
  struct macroblockd_plane *const pd = &xd->plane[0];
  const uint32_t dc_quant = pd->dequant[0];
  const uint32_t ac_quant = pd->dequant[1];
  const int64_t dc_thr = dc_quant * dc_quant >> 6;
  int64_t ac_thr = ac_quant * ac_quant >> 6;
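  // With coeffs scaled by 8 (see the note above), the effective step is
  // dequant / 8, so these thresholds compare block variance against
  // (dequant / 8)^2 = dequant^2 >> 6.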
  unsigned int var;
  int sum;
  int skip_dc = 0;

  const int bw = b_width_log2_lookup[bsize];
  const int bh = b_height_log2_lookup[bsize];
  const int num8x8 = 1 << (bw + bh - 2);
  unsigned int sse8x8[64] = { 0 };
  int sum8x8[64] = { 0 };
  unsigned int var8x8[64] = { 0 };
  TX_SIZE tx_size;
  int i, k;
#if CONFIG_VP9_HIGHBITDEPTH
  const vpx_bit_depth_t bd = cpi->common.bit_depth;
#endif
  // Calculate the variance for the whole partition, and also save each 8x8
  // block's variance for use in the following transform-skipping test.
  block_variance(p->src.buf, p->src.stride, pd->dst.buf, pd->dst.stride,
                 4 << bw, 4 << bh, &sse, &sum, 8,
#if CONFIG_VP9_HIGHBITDEPTH
                 cpi->common.use_highbitdepth, bd,
#endif
                 sse8x8, sum8x8, var8x8);
  var = sse - (unsigned int)(((int64_t)sum * sum) >> (bw + bh + 4));

  *var_y = var;
  *sse_y = sse;

#if CONFIG_VP9_TEMPORAL_DENOISING
  if (cpi->oxcf.noise_sensitivity > 0 && denoise_svc(cpi) &&
      cpi->oxcf.speed > 5)
    ac_thr = vp9_scale_acskip_thresh(ac_thr, cpi->denoiser.denoising_level,
                                     (abs(sum) >> (bw + bh)),
                                     cpi->svc.temporal_layer_id);
  else
    ac_thr *= ac_thr_factor(cpi->oxcf.speed, cpi->common.width,
                            cpi->common.height, abs(sum) >> (bw + bh));
#else
  ac_thr *= ac_thr_factor(cpi->oxcf.speed, cpi->common.width,
                          cpi->common.height, abs(sum) >> (bw + bh));
#endif

  if (cpi->common.tx_mode == TX_MODE_SELECT) {
    if (sse > (var << 2))
      tx_size = VPXMIN(max_txsize_lookup[bsize],
                       tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
    else
      tx_size = TX_8X8;

    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ &&
        cyclic_refresh_segment_id_boosted(xd->mi[0]->segment_id))
      tx_size = TX_8X8;
    else if (tx_size > TX_16X16)
      tx_size = TX_16X16;
  } else {
    tx_size = VPXMIN(max_txsize_lookup[bsize],
                     tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
  }

  assert(tx_size >= TX_8X8);
  xd->mi[0]->tx_size = tx_size;

  // Evaluate whether the partition block is a skippable block in the Y plane.
  {
    unsigned int sse16x16[16] = { 0 };
    int sum16x16[16] = { 0 };
    unsigned int var16x16[16] = { 0 };
    const int num16x16 = num8x8 >> 2;

    unsigned int sse32x32[4] = { 0 };
    int sum32x32[4] = { 0 };
    unsigned int var32x32[4] = { 0 };
    const int num32x32 = num8x8 >> 4;

    int ac_test = 1;
    int dc_test = 1;
    const int num = (tx_size == TX_8X8)
                        ? num8x8
                        : ((tx_size == TX_16X16) ? num16x16 : num32x32);
    const unsigned int *sse_tx =
        (tx_size == TX_8X8) ? sse8x8
                            : ((tx_size == TX_16X16) ? sse16x16 : sse32x32);
    const unsigned int *var_tx =
        (tx_size == TX_8X8) ? var8x8
                            : ((tx_size == TX_16X16) ? var16x16 : var32x32);

    // Calculate variance if tx_size > TX_8X8
    if (tx_size >= TX_16X16)
      calculate_variance(bw, bh, TX_8X8, sse8x8, sum8x8, var16x16, sse16x16,
                         sum16x16);
    if (tx_size == TX_32X32)
      calculate_variance(bw, bh, TX_16X16, sse16x16, sum16x16, var32x32,
                         sse32x32, sum32x32);

    // Skipping test
    x->skip_txfm[0] = SKIP_TXFM_NONE;
    for (k = 0; k < num; k++)
      // Check if all ac coefficients can be quantized to zero.
      if (!(var_tx[k] < ac_thr || var == 0)) {
        ac_test = 0;
        break;
      }

    for (k = 0; k < num; k++)
      // Check if dc coefficient can be quantized to zero.
      if (!(sse_tx[k] - var_tx[k] < dc_thr || sse == var)) {
        dc_test = 0;
        break;
      }

    if (ac_test) {
      x->skip_txfm[0] = SKIP_TXFM_AC_ONLY;

      if (dc_test) x->skip_txfm[0] = SKIP_TXFM_AC_DC;
    } else if (dc_test) {
      skip_dc = 1;
    }
  }

  if (x->skip_txfm[0] == SKIP_TXFM_AC_DC) {
    int skip_uv[2] = { 0 };
    unsigned int var_uv[2];
    unsigned int sse_uv[2];

    *out_rate_sum = 0;
    *out_dist_sum = sse << 4;

    // Transform skipping test in UV planes.
    for (i = 1; i <= 2; i++) {
      if (cpi->oxcf.speed < 8 || x->color_sensitivity[i - 1]) {
        struct macroblock_plane *const p = &x->plane[i];
        struct macroblockd_plane *const pd = &xd->plane[i];
        const TX_SIZE uv_tx_size = get_uv_tx_size(xd->mi[0], pd);
        const BLOCK_SIZE unit_size = txsize_to_bsize[uv_tx_size];
        const BLOCK_SIZE uv_bsize = get_plane_block_size(bsize, pd);
        const int uv_bw = b_width_log2_lookup[uv_bsize];
        const int uv_bh = b_height_log2_lookup[uv_bsize];
        const int sf = (uv_bw - b_width_log2_lookup[unit_size]) +
                       (uv_bh - b_height_log2_lookup[unit_size]);
        const uint32_t uv_dc_thr = pd->dequant[0] * pd->dequant[0] >> (6 - sf);
        const uint32_t uv_ac_thr = pd->dequant[1] * pd->dequant[1] >> (6 - sf);
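        // sf is the log2 number of transform blocks in the uv plane block;
        // var/sse below are totals over the whole block, so the per-transform
        // thresholds are scaled up by that count (the >> (6 - sf)).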
        int j = i - 1;

        vp9_build_inter_predictors_sbp(xd, mi_row, mi_col, bsize, i);
        flag_preduv_computed[i - 1] = 1;
        var_uv[j] = cpi->fn_ptr[uv_bsize].vf(
            p->src.buf, p->src.stride, pd->dst.buf, pd->dst.stride, &sse_uv[j]);

        if ((var_uv[j] < uv_ac_thr || var_uv[j] == 0) &&
            (sse_uv[j] - var_uv[j] < uv_dc_thr || sse_uv[j] == var_uv[j]))
          skip_uv[j] = 1;
        else
          break;
      } else {
        skip_uv[i - 1] = 1;
      }
    }

    // If the transforms in the YUV planes are all skippable, the mode search
    // checks fewer inter modes and doesn't check intra modes.
    if (skip_uv[0] & skip_uv[1]) {
      *early_term = 1;
    }
    return;
  }

  if (!skip_dc) {
#if CONFIG_VP9_HIGHBITDEPTH
    vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize],
                                 dc_quant >> (xd->bd - 5), &rate, &dist);
#else
    vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize],
                                 dc_quant >> 3, &rate, &dist);
#endif  // CONFIG_VP9_HIGHBITDEPTH
  }

  if (!skip_dc) {
    *out_rate_sum = rate >> 1;
    *out_dist_sum = dist << 3;
  } else {
    *out_rate_sum = 0;
    *out_dist_sum = (sse - var) << 4;
  }

#if CONFIG_VP9_HIGHBITDEPTH
  vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize],
                               ac_quant >> (xd->bd - 5), &rate, &dist);
#else
  vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize], ac_quant >> 3,
                               &rate, &dist);
#endif  // CONFIG_VP9_HIGHBITDEPTH

  *out_rate_sum += rate;
  *out_dist_sum += dist << 4;
}

static void model_rd_for_sb_y(VP9_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
                              MACROBLOCKD *xd, int *out_rate_sum,
                              int64_t *out_dist_sum, unsigned int *var_y,
                              unsigned int *sse_y) {
  // Note our transform coeffs are 8 times an orthogonal transform.
  // Hence quantizer step is also 8 times. To get effective quantizer
  // we need to divide by 8 before sending to modeling function.
  unsigned int sse;
  int rate;
  int64_t dist;
  struct macroblock_plane *const p = &x->plane[0];
  struct macroblockd_plane *const pd = &xd->plane[0];
  const int64_t dc_thr = p->quant_thred[0] >> 6;
  const int64_t ac_thr = p->quant_thred[1] >> 6;
  const uint32_t dc_quant = pd->dequant[0];
  const uint32_t ac_quant = pd->dequant[1];
  unsigned int var = cpi->fn_ptr[bsize].vf(p->src.buf, p->src.stride,
                                           pd->dst.buf, pd->dst.stride, &sse);
  int skip_dc = 0;

  *var_y = var;
  *sse_y = sse;

  if (cpi->common.tx_mode == TX_MODE_SELECT) {
    if (sse > (var << 2))
      xd->mi[0]->tx_size =
          VPXMIN(max_txsize_lookup[bsize],
                 tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
    else
      xd->mi[0]->tx_size = TX_8X8;

    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ &&
        cyclic_refresh_segment_id_boosted(xd->mi[0]->segment_id))
      xd->mi[0]->tx_size = TX_8X8;
    else if (xd->mi[0]->tx_size > TX_16X16)
      xd->mi[0]->tx_size = TX_16X16;
  } else {
    xd->mi[0]->tx_size =
        VPXMIN(max_txsize_lookup[bsize],
               tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
  }

  // Evaluate whether the partition block is a skippable block in the Y plane.
  {
    const BLOCK_SIZE unit_size = txsize_to_bsize[xd->mi[0]->tx_size];
    const unsigned int num_blk_log2 =
        (b_width_log2_lookup[bsize] - b_width_log2_lookup[unit_size]) +
        (b_height_log2_lookup[bsize] - b_height_log2_lookup[unit_size]);
    const unsigned int sse_tx = sse >> num_blk_log2;
    const unsigned int var_tx = var >> num_blk_log2;

    x->skip_txfm[0] = SKIP_TXFM_NONE;
    // Check if all ac coefficients can be quantized to zero.
    if (var_tx < ac_thr || var == 0) {
      x->skip_txfm[0] = SKIP_TXFM_AC_ONLY;
      // Check if dc coefficient can be quantized to zero.
      if (sse_tx - var_tx < dc_thr || sse == var)
        x->skip_txfm[0] = SKIP_TXFM_AC_DC;
    } else {
      if (sse_tx - var_tx < dc_thr || sse == var) skip_dc = 1;
    }
  }

  if (x->skip_txfm[0] == SKIP_TXFM_AC_DC) {
    *out_rate_sum = 0;
    *out_dist_sum = sse << 4;
    return;
  }

  if (!skip_dc) {
#if CONFIG_VP9_HIGHBITDEPTH
    vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize],
                                 dc_quant >> (xd->bd - 5), &rate, &dist);
#else
    vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize],
                                 dc_quant >> 3, &rate, &dist);
#endif  // CONFIG_VP9_HIGHBITDEPTH
  }

  if (!skip_dc) {
    *out_rate_sum = rate >> 1;
    *out_dist_sum = dist << 3;
  } else {
    *out_rate_sum = 0;
    *out_dist_sum = (sse - var) << 4;
  }

#if CONFIG_VP9_HIGHBITDEPTH
  vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize],
                               ac_quant >> (xd->bd - 5), &rate, &dist);
#else
  vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize], ac_quant >> 3,
                               &rate, &dist);
#endif  // CONFIG_VP9_HIGHBITDEPTH

  *out_rate_sum += rate;
  *out_dist_sum += dist << 4;
}

static void block_yrd(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *this_rdc,
                      int *skippable, int64_t *sse, BLOCK_SIZE bsize,
                      TX_SIZE tx_size, int rd_computed) {
  MACROBLOCKD *xd = &x->e_mbd;
  const struct macroblockd_plane *pd = &xd->plane[0];
  struct macroblock_plane *const p = &x->plane[0];
  const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
  const int step = 1 << (tx_size << 1);
  const int block_step = (1 << tx_size);
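  // step is the number of 4x4 units spanned by one transform block (1, 4,
  // or 16 for TX_4X4/TX_8X8/TX_16X16); block_step is its side length in
  // 4x4 units.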
  int block = 0, r, c;
  const int max_blocks_wide =
      num_4x4_w + (xd->mb_to_right_edge >= 0 ? 0 : xd->mb_to_right_edge >> 5);
  const int max_blocks_high =
      num_4x4_h + (xd->mb_to_bottom_edge >= 0 ? 0 : xd->mb_to_bottom_edge >> 5);
  int eob_cost = 0;
  const int bw = 4 * num_4x4_w;
  const int bh = 4 * num_4x4_h;

#if CONFIG_VP9_HIGHBITDEPTH
  // TODO(jingning): Implement the high bit-depth Hadamard transforms and
  // remove this check condition.
  // TODO(marpan): Use this path (model_rd) for 8bit under certain conditions
  // for now, as the vp9_quantize_fp below for highbitdepth build is slow.
  if (xd->bd != 8 ||
      (cpi->oxcf.speed > 5 && cpi->common.frame_type != KEY_FRAME &&
       bsize < BLOCK_32X32)) {
    unsigned int var_y, sse_y;
    (void)tx_size;
    if (!rd_computed)
      model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc->rate, &this_rdc->dist,
                        &var_y, &sse_y);
    *sse = INT_MAX;
    *skippable = 0;
    return;
  }
#endif

  if (cpi->sf.use_simple_block_yrd && cpi->common.frame_type != KEY_FRAME &&
      (bsize < BLOCK_32X32 ||
       (cpi->use_svc &&
        (bsize < BLOCK_32X32 || cpi->svc.temporal_layer_id > 0)))) {
    unsigned int var_y, sse_y;
    (void)tx_size;
    if (!rd_computed)
      model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc->rate, &this_rdc->dist,
                        &var_y, &sse_y);
    *sse = INT_MAX;
    *skippable = 0;
    return;
  }

  (void)cpi;

  // The max tx_size passed in is TX_16X16.
  assert(tx_size != TX_32X32);

  vpx_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride,
                     pd->dst.buf, pd->dst.stride);
  *skippable = 1;
  // Keep track of the row and column of the blocks we use so that we know
  // if we are in the unrestricted motion border.
  for (r = 0; r < max_blocks_high; r += block_step) {
    for (c = 0; c < num_4x4_w; c += block_step) {
      if (c < max_blocks_wide) {
        const scan_order *const scan_order = &vp9_default_scan_orders[tx_size];
        tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
        tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
        tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
        uint16_t *const eob = &p->eobs[block];
        const int diff_stride = bw;
        const int16_t *src_diff;
        src_diff = &p->src_diff[(r * diff_stride + c) << 2];

        switch (tx_size) {
          case TX_16X16:
            vpx_hadamard_16x16(src_diff, diff_stride, coeff);
            vp9_quantize_fp(coeff, 256, x->skip_block, p->round_fp, p->quant_fp,
                            qcoeff, dqcoeff, pd->dequant, eob, scan_order->scan,
                            scan_order->iscan);
            break;
          case TX_8X8:
            vpx_hadamard_8x8(src_diff, diff_stride, coeff);
            vp9_quantize_fp(coeff, 64, x->skip_block, p->round_fp, p->quant_fp,
                            qcoeff, dqcoeff, pd->dequant, eob, scan_order->scan,
                            scan_order->iscan);
            break;
          case TX_4X4:
            x->fwd_txfm4x4(src_diff, coeff, diff_stride);
            vp9_quantize_fp(coeff, 16, x->skip_block, p->round_fp, p->quant_fp,
                            qcoeff, dqcoeff, pd->dequant, eob, scan_order->scan,
                            scan_order->iscan);
            break;
          default: assert(0); break;
        }
        *skippable &= (*eob == 0);
        eob_cost += 1;
      }
      block += step;
    }
  }

  this_rdc->rate = 0;
  if (*sse < INT64_MAX) {
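    // Bring the spatial-domain sse up to the transform-domain scale (the
    // 8x-scaled transform squares to a factor of 64, i.e. << 6), matching
    // the >> 2 applied to the per-block transform error further below.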
    *sse = (*sse << 6) >> 2;
    if (*skippable) {
      this_rdc->dist = *sse;
      return;
    }
  }

  block = 0;
  this_rdc->dist = 0;
  for (r = 0; r < max_blocks_high; r += block_step) {
    for (c = 0; c < num_4x4_w; c += block_step) {
      if (c < max_blocks_wide) {
        tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
        tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
        tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
        uint16_t *const eob = &p->eobs[block];

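        // Cheap rate proxy: with an eob of 1 only the DC coefficient is
        // coded, so its magnitude stands in for the rate; otherwise use the
        // sum of absolute quantized coefficients.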
        if (*eob == 1)
          this_rdc->rate += (int)abs(qcoeff[0]);
        else if (*eob > 1)
          this_rdc->rate += vpx_satd(qcoeff, step << 4);

        this_rdc->dist += vp9_block_error_fp(coeff, dqcoeff, step << 4) >> 2;
      }
      block += step;
    }
  }

  // If skippable is set, rate gets clobbered later.
  this_rdc->rate <<= (2 + VP9_PROB_COST_SHIFT);
  this_rdc->rate += (eob_cost << VP9_PROB_COST_SHIFT);
}

static void model_rd_for_sb_uv(VP9_COMP *cpi, BLOCK_SIZE plane_bsize,
                               MACROBLOCK *x, MACROBLOCKD *xd,
                               RD_COST *this_rdc, unsigned int *var_y,
                               unsigned int *sse_y, int start_plane,
                               int stop_plane) {
  // Note our transform coeffs are 8 times an orthogonal transform.
  // Hence quantizer step is also 8 times. To get effective quantizer
  // we need to divide by 8 before sending to modeling function.
  unsigned int sse;
  int rate;
  int64_t dist;
  int i;
#if CONFIG_VP9_HIGHBITDEPTH
  uint64_t tot_var = *var_y;
  uint64_t tot_sse = *sse_y;
#else
  uint32_t tot_var = *var_y;
  uint32_t tot_sse = *sse_y;
#endif

  this_rdc->rate = 0;
  this_rdc->dist = 0;

  for (i = start_plane; i <= stop_plane; ++i) {
    struct macroblock_plane *const p = &x->plane[i];
    struct macroblockd_plane *const pd = &xd->plane[i];
    const uint32_t dc_quant = pd->dequant[0];
    const uint32_t ac_quant = pd->dequant[1];
    const BLOCK_SIZE bs = plane_bsize;
    unsigned int var;
    if (!x->color_sensitivity[i - 1]) continue;

    var = cpi->fn_ptr[bs].vf(p->src.buf, p->src.stride, pd->dst.buf,
                             pd->dst.stride, &sse);
    assert(sse >= var);
    tot_var += var;
    tot_sse += sse;

#if CONFIG_VP9_HIGHBITDEPTH
    vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bs],
                                 dc_quant >> (xd->bd - 5), &rate, &dist);
#else
    vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bs],
                                 dc_quant >> 3, &rate, &dist);
#endif  // CONFIG_VP9_HIGHBITDEPTH

    this_rdc->rate += rate >> 1;
    this_rdc->dist += dist << 3;

#if CONFIG_VP9_HIGHBITDEPTH
    vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bs],
                                 ac_quant >> (xd->bd - 5), &rate, &dist);
#else
    vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bs], ac_quant >> 3,
                                 &rate, &dist);
#endif  // CONFIG_VP9_HIGHBITDEPTH

    this_rdc->rate += rate;
    this_rdc->dist += dist << 4;
  }

#if CONFIG_VP9_HIGHBITDEPTH
  *var_y = tot_var > UINT32_MAX ? UINT32_MAX : (uint32_t)tot_var;
  *sse_y = tot_sse > UINT32_MAX ? UINT32_MAX : (uint32_t)tot_sse;
#else
  *var_y = tot_var;
  *sse_y = tot_sse;
#endif
}

static int get_pred_buffer(PRED_BUFFER *p, int len) {
  int i;

  for (i = 0; i < len; i++) {
    if (!p[i].in_use) {
      p[i].in_use = 1;
      return i;
    }
  }
  return -1;
}

static void free_pred_buffer(PRED_BUFFER *p) {
  if (p != NULL) p->in_use = 0;
}

static void encode_breakout_test(
    VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int mi_row, int mi_col,
    MV_REFERENCE_FRAME ref_frame, PREDICTION_MODE this_mode, unsigned int var_y,
    unsigned int sse_y, struct buf_2d yv12_mb[][MAX_MB_PLANE], int *rate,
    int64_t *dist, int *flag_preduv_computed) {
  MACROBLOCKD *xd = &x->e_mbd;
  MODE_INFO *const mi = xd->mi[0];
  const BLOCK_SIZE uv_size = get_plane_block_size(bsize, &xd->plane[1]);
  unsigned int var = var_y, sse = sse_y;
  // Skipping threshold for ac.
  unsigned int thresh_ac;
  // Skipping threshold for dc.
  unsigned int thresh_dc;
  int motion_low = 1;
  if (cpi->use_svc && ref_frame == GOLDEN_FRAME) return;
  if (mi->mv[0].as_mv.row > 64 || mi->mv[0].as_mv.row < -64 ||
      mi->mv[0].as_mv.col > 64 || mi->mv[0].as_mv.col < -64)
    motion_low = 0;
  if (x->encode_breakout > 0 && motion_low == 1) {
    // Set a maximum for the threshold to avoid a big PSNR loss in the low
    // bit rate case. Use an extremely low threshold for static frames to
    // limit skipping.
    const unsigned int max_thresh = 36000;
    // The encode_breakout input
    const unsigned int min_thresh =
        VPXMIN(((unsigned int)x->encode_breakout << 4), max_thresh);
#if CONFIG_VP9_HIGHBITDEPTH
    const int shift = (xd->bd << 1) - 16;
#endif

    // Calculate threshold according to dequant value.
    thresh_ac = (xd->plane[0].dequant[1] * xd->plane[0].dequant[1]) >> 3;
#if CONFIG_VP9_HIGHBITDEPTH
    if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) && shift > 0) {
      thresh_ac = ROUND_POWER_OF_TWO(thresh_ac, shift);
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH
    thresh_ac = clamp(thresh_ac, min_thresh, max_thresh);

    // Adjust ac threshold according to partition size.
    thresh_ac >>=
        8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);

    thresh_dc = (xd->plane[0].dequant[0] * xd->plane[0].dequant[0] >> 6);
#if CONFIG_VP9_HIGHBITDEPTH
    if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) && shift > 0) {
      thresh_dc = ROUND_POWER_OF_TWO(thresh_dc, shift);
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH
  } else {
    thresh_ac = 0;
    thresh_dc = 0;
  }

  // Y skipping condition checking for ac and dc.
  if (var <= thresh_ac && (sse - var) <= thresh_dc) {
    unsigned int sse_u, sse_v;
    unsigned int var_u, var_v;
    unsigned int thresh_ac_uv = thresh_ac;
    unsigned int thresh_dc_uv = thresh_dc;
    if (x->sb_is_skin) {
      thresh_ac_uv = 0;
      thresh_dc_uv = 0;
    }

    if (!flag_preduv_computed[0] || !flag_preduv_computed[1]) {
      xd->plane[1].pre[0] = yv12_mb[ref_frame][1];
      xd->plane[2].pre[0] = yv12_mb[ref_frame][2];
      vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col, bsize);
    }

    var_u = cpi->fn_ptr[uv_size].vf(x->plane[1].src.buf, x->plane[1].src.stride,
                                    xd->plane[1].dst.buf,
                                    xd->plane[1].dst.stride, &sse_u);

    // U skipping condition checking
    if (((var_u << 2) <= thresh_ac_uv) && (sse_u - var_u <= thresh_dc_uv)) {
      var_v = cpi->fn_ptr[uv_size].vf(
          x->plane[2].src.buf, x->plane[2].src.stride, xd->plane[2].dst.buf,
          xd->plane[2].dst.stride, &sse_v);

      // V skipping condition checking
      if (((var_v << 2) <= thresh_ac_uv) && (sse_v - var_v <= thresh_dc_uv)) {
        x->skip = 1;

        // The cost of skip bit needs to be added.
        *rate = cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref_frame]]
                                    [INTER_OFFSET(this_mode)];

        // More on this part of rate
        // rate += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);

        // Scaling factor for SSE from spatial domain to frequency
        // domain is 16. Adjust distortion accordingly.
        // TODO(yunqingwang): In this function, only y-plane dist is
        // calculated.
        *dist = (sse << 4);  // + ((sse_u + sse_v) << 4);

        // *disable_skip = 1;
      }
    }
  }
}

struct estimate_block_intra_args {
  VP9_COMP *cpi;
  MACROBLOCK *x;
  PREDICTION_MODE mode;
  int skippable;
  RD_COST *rdc;
};

static void estimate_block_intra(int plane, int block, int row, int col,
                                 BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
                                 void *arg) {
  struct estimate_block_intra_args *const args = arg;
  VP9_COMP *const cpi = args->cpi;
  MACROBLOCK *const x = args->x;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct macroblock_plane *const p = &x->plane[0];
  struct macroblockd_plane *const pd = &xd->plane[0];
  const BLOCK_SIZE bsize_tx = txsize_to_bsize[tx_size];
  uint8_t *const src_buf_base = p->src.buf;
  uint8_t *const dst_buf_base = pd->dst.buf;
  const int src_stride = p->src.stride;
  const int dst_stride = pd->dst.stride;
  RD_COST this_rdc;

  (void)block;

  p->src.buf = &src_buf_base[4 * (row * src_stride + col)];
  pd->dst.buf = &dst_buf_base[4 * (row * dst_stride + col)];
  // Use source buffer as an approximation for the fully reconstructed buffer.
  vp9_predict_intra_block(xd, b_width_log2_lookup[plane_bsize], tx_size,
                          args->mode, x->skip_encode ? p->src.buf : pd->dst.buf,
                          x->skip_encode ? src_stride : dst_stride, pd->dst.buf,
                          dst_stride, col, row, plane);

  if (plane == 0) {
    int64_t this_sse = INT64_MAX;
    // TODO(jingning): This needs further refactoring.
    block_yrd(cpi, x, &this_rdc, &args->skippable, &this_sse, bsize_tx,
              VPXMIN(tx_size, TX_16X16), 0);
  } else {
    unsigned int var = 0;
    unsigned int sse = 0;
    model_rd_for_sb_uv(cpi, plane_bsize, x, xd, &this_rdc, &var, &sse, plane,
                       plane);
  }

  p->src.buf = src_buf_base;
  pd->dst.buf = dst_buf_base;
  args->rdc->rate += this_rdc.rate;
  args->rdc->dist += this_rdc.dist;
}

static const THR_MODES mode_idx[MAX_REF_FRAMES][4] = {
  { THR_DC, THR_V_PRED, THR_H_PRED, THR_TM },
  { THR_NEARESTMV, THR_NEARMV, THR_ZEROMV, THR_NEWMV },
  { THR_NEARESTG, THR_NEARG, THR_ZEROG, THR_NEWG },
  { THR_NEARESTA, THR_NEARA, THR_ZEROA, THR_NEWA },
};

static const PREDICTION_MODE intra_mode_list[] = { DC_PRED, V_PRED, H_PRED,
                                                   TM_PRED };

static int mode_offset(const PREDICTION_MODE mode) {
  if (mode >= NEARESTMV) {
    return INTER_OFFSET(mode);
  } else {
    switch (mode) {
      case DC_PRED: return 0;
      case V_PRED: return 1;
      case H_PRED: return 2;
      case TM_PRED: return 3;
      default: return -1;
    }
  }
}

static INLINE int rd_less_than_thresh_row_mt(int64_t best_rd, int thresh,
                                             const int *const thresh_fact) {
  int is_rd_less_than_thresh;
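  // thresh_fact scales the threshold in 1/32 steps (the >> 5); a thresh of
  // INT_MAX marks a disabled threshold that always passes.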
  is_rd_less_than_thresh =
      best_rd < ((int64_t)thresh * (*thresh_fact) >> 5) || thresh == INT_MAX;
  return is_rd_less_than_thresh;
}

static INLINE void update_thresh_freq_fact_row_mt(
    VP9_COMP *cpi, TileDataEnc *tile_data, int source_variance,
    int thresh_freq_fact_idx, MV_REFERENCE_FRAME ref_frame,
    THR_MODES best_mode_idx, PREDICTION_MODE mode) {
  THR_MODES thr_mode_idx = mode_idx[ref_frame][mode_offset(mode)];
  int freq_fact_idx = thresh_freq_fact_idx + thr_mode_idx;
  int *freq_fact = &tile_data->row_base_thresh_freq_fact[freq_fact_idx];
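  // Adapt the factor: shrink it by 1/16 when this mode was the winner, so
  // the mode is tried more readily next time; otherwise grow it by
  // RD_THRESH_INC toward its cap so the mode gets pruned more aggressively.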
  if (thr_mode_idx == best_mode_idx)
    *freq_fact -= (*freq_fact >> 4);
  else if (cpi->sf.limit_newmv_early_exit && mode == NEWMV &&
           ref_frame == LAST_FRAME && source_variance < 5) {
    *freq_fact = VPXMIN(*freq_fact + RD_THRESH_INC, 32);
  } else {
    *freq_fact = VPXMIN(*freq_fact + RD_THRESH_INC,
                        cpi->sf.adaptive_rd_thresh * RD_THRESH_MAX_FACT);
  }
}

static INLINE void update_thresh_freq_fact(
    VP9_COMP *cpi, TileDataEnc *tile_data, int source_variance,
    BLOCK_SIZE bsize, MV_REFERENCE_FRAME ref_frame, THR_MODES best_mode_idx,
    PREDICTION_MODE mode) {
  THR_MODES thr_mode_idx = mode_idx[ref_frame][mode_offset(mode)];
  int *freq_fact = &tile_data->thresh_freq_fact[bsize][thr_mode_idx];
  if (thr_mode_idx == best_mode_idx)
    *freq_fact -= (*freq_fact >> 4);
  else if (cpi->sf.limit_newmv_early_exit && mode == NEWMV &&
           ref_frame == LAST_FRAME && source_variance < 5) {
    *freq_fact = VPXMIN(*freq_fact + RD_THRESH_INC, 32);
  } else {
    *freq_fact = VPXMIN(*freq_fact + RD_THRESH_INC,
                        cpi->sf.adaptive_rd_thresh * RD_THRESH_MAX_FACT);
  }
}

void vp9_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
                         BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mi = xd->mi[0];
  RD_COST this_rdc, best_rdc;
  PREDICTION_MODE this_mode;
  struct estimate_block_intra_args args = { cpi, x, DC_PRED, 1, 0 };
  const TX_SIZE intra_tx_size =
      VPXMIN(max_txsize_lookup[bsize],
             tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
  MODE_INFO *const mic = xd->mi[0];
  int *bmode_costs;
  const MODE_INFO *above_mi = xd->above_mi;
  const MODE_INFO *left_mi = xd->left_mi;
  const PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, 0);
  const PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, 0);
  bmode_costs = cpi->y_mode_costs[A][L];

  (void)ctx;
  vp9_rd_cost_reset(&best_rdc);
  vp9_rd_cost_reset(&this_rdc);

  mi->ref_frame[0] = INTRA_FRAME;
  // Initialize interp_filter here so we do not have to check for inter block
  // modes in get_pred_context_switchable_interp()
  mi->interp_filter = SWITCHABLE_FILTERS;

  mi->mv[0].as_int = INVALID_MV;
  mi->uv_mode = DC_PRED;
  memset(x->skip_txfm, 0, sizeof(x->skip_txfm));

  // Change the limit of this loop to add other intra prediction
  // mode tests.
  for (this_mode = DC_PRED; this_mode <= H_PRED; ++this_mode) {
    this_rdc.dist = this_rdc.rate = 0;
    args.mode = this_mode;
    args.skippable = 1;
    args.rdc = &this_rdc;
    mi->tx_size = intra_tx_size;
    vp9_foreach_transformed_block_in_plane(xd, bsize, 0, estimate_block_intra,
                                           &args);
    if (args.skippable) {
      x->skip_txfm[0] = SKIP_TXFM_AC_DC;
      this_rdc.rate = vp9_cost_bit(vp9_get_skip_prob(&cpi->common, xd), 1);
    } else {
      x->skip_txfm[0] = SKIP_TXFM_NONE;
      this_rdc.rate += vp9_cost_bit(vp9_get_skip_prob(&cpi->common, xd), 0);
    }
    this_rdc.rate += bmode_costs[this_mode];
    this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);

    if (this_rdc.rdcost < best_rdc.rdcost) {
      best_rdc = this_rdc;
      mi->mode = this_mode;
    }
  }

  *rd_cost = best_rdc;
}

static void init_ref_frame_cost(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                                int ref_frame_cost[MAX_REF_FRAMES]) {
  vpx_prob intra_inter_p = vp9_get_intra_inter_prob(cm, xd);
  vpx_prob ref_single_p1 = vp9_get_pred_prob_single_ref_p1(cm, xd);
  vpx_prob ref_single_p2 = vp9_get_pred_prob_single_ref_p2(cm, xd);

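  // Mirror the bitstream's reference-frame tree: one bit chooses
  // intra/inter, then single-ref bit 1 splits LAST from {GOLDEN, ALTREF}
  // and bit 2 splits GOLDEN from ALTREF.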
  ref_frame_cost[INTRA_FRAME] = vp9_cost_bit(intra_inter_p, 0);
  ref_frame_cost[LAST_FRAME] = ref_frame_cost[GOLDEN_FRAME] =
      ref_frame_cost[ALTREF_FRAME] = vp9_cost_bit(intra_inter_p, 1);

  ref_frame_cost[LAST_FRAME] += vp9_cost_bit(ref_single_p1, 0);
  ref_frame_cost[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p1, 1);
  ref_frame_cost[ALTREF_FRAME] += vp9_cost_bit(ref_single_p1, 1);
  ref_frame_cost[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p2, 0);
  ref_frame_cost[ALTREF_FRAME] += vp9_cost_bit(ref_single_p2, 1);
}

typedef struct {
  MV_REFERENCE_FRAME ref_frame;
  PREDICTION_MODE pred_mode;
} REF_MODE;

#define RT_INTER_MODES 12
static const REF_MODE ref_mode_set[RT_INTER_MODES] = {
  { LAST_FRAME, ZEROMV },   { LAST_FRAME, NEARESTMV },
  { GOLDEN_FRAME, ZEROMV }, { LAST_FRAME, NEARMV },
  { LAST_FRAME, NEWMV },    { GOLDEN_FRAME, NEARESTMV },
  { GOLDEN_FRAME, NEARMV }, { GOLDEN_FRAME, NEWMV },
  { ALTREF_FRAME, ZEROMV }, { ALTREF_FRAME, NEARESTMV },
  { ALTREF_FRAME, NEARMV }, { ALTREF_FRAME, NEWMV }
};

#define RT_INTER_MODES_SVC 8
static const REF_MODE ref_mode_set_svc[RT_INTER_MODES_SVC] = {
  { LAST_FRAME, ZEROMV },      { LAST_FRAME, NEARESTMV },
  { LAST_FRAME, NEARMV },      { GOLDEN_FRAME, ZEROMV },
  { GOLDEN_FRAME, NEARESTMV }, { GOLDEN_FRAME, NEARMV },
  { LAST_FRAME, NEWMV },       { GOLDEN_FRAME, NEWMV }
};

static INLINE void find_predictors(
    VP9_COMP *cpi, MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame,
    int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
    int const_motion[MAX_REF_FRAMES], int *ref_frame_skip_mask,
    const int flag_list[4], TileDataEnc *tile_data, int mi_row, int mi_col,
    struct buf_2d yv12_mb[4][MAX_MB_PLANE], BLOCK_SIZE bsize,
    int force_skip_low_temp_var, int comp_pred_allowed) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
  TileInfo *const tile_info = &tile_data->tile_info;
  // TODO(jingning) placeholder for inter-frame non-RD mode decision.
  x->pred_mv_sad[ref_frame] = INT_MAX;
  frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
  frame_mv[ZEROMV][ref_frame].as_int = 0;
  // This needs various further optimizations; to be continued...
   1204   if ((cpi->ref_frame_flags & flag_list[ref_frame]) && (yv12 != NULL)) {
   1205     int_mv *const candidates = x->mbmi_ext->ref_mvs[ref_frame];
   1206     const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf;
   1207     vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);
   1208     if (cm->use_prev_frame_mvs || comp_pred_allowed) {
   1209       vp9_find_mv_refs(cm, xd, xd->mi[0], ref_frame, candidates, mi_row, mi_col,
   1210                        x->mbmi_ext->mode_context);
   1211     } else {
   1212       const_motion[ref_frame] =
   1213           mv_refs_rt(cpi, cm, x, xd, tile_info, xd->mi[0], ref_frame,
   1214                      candidates, &frame_mv[NEWMV][ref_frame], mi_row, mi_col,
   1215                      (int)(cpi->svc.use_base_mv && cpi->svc.spatial_layer_id));
   1216     }
   1217     vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
   1218                           &frame_mv[NEARESTMV][ref_frame],
   1219                           &frame_mv[NEARMV][ref_frame]);
   1220     // Early exit for golden frame if force_skip_low_temp_var is set.
   1221     if (!vp9_is_scaled(sf) && bsize >= BLOCK_8X8 &&
   1222         !(force_skip_low_temp_var && ref_frame == GOLDEN_FRAME)) {
   1223       vp9_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, ref_frame,
   1224                   bsize);
   1225     }
   1226   } else {
   1227     *ref_frame_skip_mask |= (1 << ref_frame);
   1228   }
   1229 }
   1230 
   1231 static void vp9_NEWMV_diff_bias(const NOISE_ESTIMATE *ne, MACROBLOCKD *xd,
   1232                                 PREDICTION_MODE this_mode, RD_COST *this_rdc,
   1233                                 BLOCK_SIZE bsize, int mv_row, int mv_col,
   1234                                 int is_last_frame, int lowvar_highsumdiff,
   1235                                 int is_skin) {
   1236   // Bias against MVs associated with NEWMV mode that are very different from
   1237   // top/left neighbors.
   1238   if (this_mode == NEWMV) {
   1239     int al_mv_average_row;
   1240     int al_mv_average_col;
   1241     int left_row, left_col;
   1242     int row_diff, col_diff;
   1243     int above_mv_valid = 0;
   1244     int left_mv_valid = 0;
   1245     int above_row = 0;
   1246     int above_col = 0;
   1247 
   1248     if (xd->above_mi) {
   1249       above_mv_valid = xd->above_mi->mv[0].as_int != INVALID_MV;
   1250       above_row = xd->above_mi->mv[0].as_mv.row;
   1251       above_col = xd->above_mi->mv[0].as_mv.col;
   1252     }
   1253     if (xd->left_mi) {
   1254       left_mv_valid = xd->left_mi->mv[0].as_int != INVALID_MV;
   1255       left_row = xd->left_mi->mv[0].as_mv.row;
   1256       left_col = xd->left_mi->mv[0].as_mv.col;
   1257     }
   1258     if (above_mv_valid && left_mv_valid) {
   1259       al_mv_average_row = (above_row + left_row + 1) >> 1;
   1260       al_mv_average_col = (above_col + left_col + 1) >> 1;
   1261     } else if (above_mv_valid) {
   1262       al_mv_average_row = above_row;
   1263       al_mv_average_col = above_col;
   1264     } else if (left_mv_valid) {
   1265       al_mv_average_row = left_row;
   1266       al_mv_average_col = left_col;
   1267     } else {
   1268       al_mv_average_row = al_mv_average_col = 0;
   1269     }
   1270     row_diff = (al_mv_average_row - mv_row);
   1271     col_diff = (al_mv_average_col - mv_col);
    if (row_diff > 48 || row_diff < -48 || col_diff > 48 || col_diff < -48) {
      if (bsize > BLOCK_32X32)
        this_rdc->rdcost = this_rdc->rdcost << 1;
      else
        this_rdc->rdcost = 3 * this_rdc->rdcost >> 1;
    }
  }
  // If noise estimation is enabled, and the estimated level is above the
  // threshold, add a bias toward small-motion LAST reference for large blocks;
  // the 7 * (rdcost >> 3) below scales rdcost by roughly 7/8.
  if (ne->enabled && ne->level >= kMedium && bsize >= BLOCK_32X32 &&
      is_last_frame && mv_row < 8 && mv_row > -8 && mv_col < 8 && mv_col > -8)
    this_rdc->rdcost = 7 * (this_rdc->rdcost >> 3);
  else if (lowvar_highsumdiff && !is_skin && bsize >= BLOCK_16X16 &&
           is_last_frame && mv_row < 16 && mv_row > -16 && mv_col < 16 &&
           mv_col > -16)
    this_rdc->rdcost = 7 * (this_rdc->rdcost >> 3);
}

#if CONFIG_VP9_TEMPORAL_DENOISING
static void vp9_pickmode_ctx_den_update(
    VP9_PICKMODE_CTX_DEN *ctx_den, int64_t zero_last_cost_orig,
    int ref_frame_cost[MAX_REF_FRAMES],
    int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES], int reuse_inter_pred,
    TX_SIZE best_tx_size, PREDICTION_MODE best_mode,
    MV_REFERENCE_FRAME best_ref_frame, INTERP_FILTER best_pred_filter,
    uint8_t best_mode_skip_txfm) {
  ctx_den->zero_last_cost_orig = zero_last_cost_orig;
  ctx_den->ref_frame_cost = ref_frame_cost;
  ctx_den->frame_mv = frame_mv;
  ctx_den->reuse_inter_pred = reuse_inter_pred;
  ctx_den->best_tx_size = best_tx_size;
  ctx_den->best_mode = best_mode;
  ctx_den->best_ref_frame = best_ref_frame;
  ctx_den->best_pred_filter = best_pred_filter;
  ctx_den->best_mode_skip_txfm = best_mode_skip_txfm;
}

static void recheck_zeromv_after_denoising(
    VP9_COMP *cpi, MODE_INFO *const mi, MACROBLOCK *x, MACROBLOCKD *const xd,
    VP9_DENOISER_DECISION decision, VP9_PICKMODE_CTX_DEN *ctx_den,
    struct buf_2d yv12_mb[4][MAX_MB_PLANE], RD_COST *best_rdc, BLOCK_SIZE bsize,
    int mi_row, int mi_col) {
  // If INTRA or GOLDEN reference was selected, re-evaluate ZEROMV on the
  // denoised result. Only do this under noise conditions, and if the rdcost of
  // ZEROMV on the original source is not significantly higher than the rdcost
  // of the best mode.
  if (cpi->noise_estimate.enabled && cpi->noise_estimate.level > kLow &&
      ctx_den->zero_last_cost_orig < (best_rdc->rdcost << 3) &&
      ((ctx_den->best_ref_frame == INTRA_FRAME && decision >= FILTER_BLOCK) ||
       (ctx_den->best_ref_frame == GOLDEN_FRAME &&
        cpi->svc.number_spatial_layers == 1 &&
        decision == FILTER_ZEROMV_BLOCK))) {
    // Check if we should pick ZEROMV on denoised signal.
    int rate = 0;
    int64_t dist = 0;
    uint32_t var_y = UINT_MAX;
    uint32_t sse_y = UINT_MAX;
    RD_COST this_rdc;
    mi->mode = ZEROMV;
    mi->ref_frame[0] = LAST_FRAME;
    mi->ref_frame[1] = NONE;
    mi->mv[0].as_int = 0;
    mi->interp_filter = EIGHTTAP;
    xd->plane[0].pre[0] = yv12_mb[LAST_FRAME][0];
    vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
    model_rd_for_sb_y(cpi, bsize, x, xd, &rate, &dist, &var_y, &sse_y);
    this_rdc.rate = rate + ctx_den->ref_frame_cost[LAST_FRAME] +
                    cpi->inter_mode_cost[x->mbmi_ext->mode_context[LAST_FRAME]]
                                        [INTER_OFFSET(ZEROMV)];
    this_rdc.dist = dist;
    this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, rate, dist);
    // Don't switch to ZEROMV if the rdcost for ZEROMV on denoised source
    // is higher than best_ref mode (on original source).
    if (this_rdc.rdcost > best_rdc->rdcost) {
      this_rdc = *best_rdc;
      mi->mode = ctx_den->best_mode;
      mi->ref_frame[0] = ctx_den->best_ref_frame;
      mi->interp_filter = ctx_den->best_pred_filter;
      if (ctx_den->best_ref_frame == INTRA_FRAME) {
        mi->mv[0].as_int = INVALID_MV;
        mi->interp_filter = SWITCHABLE_FILTERS;
      } else if (ctx_den->best_ref_frame == GOLDEN_FRAME) {
        mi->mv[0].as_int =
            ctx_den->frame_mv[ctx_den->best_mode][ctx_den->best_ref_frame]
                .as_int;
        if (ctx_den->reuse_inter_pred) {
          xd->plane[0].pre[0] = yv12_mb[GOLDEN_FRAME][0];
          vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
        }
      }
      mi->tx_size = ctx_den->best_tx_size;
      x->skip_txfm[0] = ctx_den->best_mode_skip_txfm;
    } else {
      ctx_den->best_ref_frame = LAST_FRAME;
      *best_rdc = this_rdc;
    }
  }
}
#endif  // CONFIG_VP9_TEMPORAL_DENOISING

static INLINE int get_force_skip_low_temp_var(uint8_t *variance_low, int mi_row,
                                              int mi_col, BLOCK_SIZE bsize) {
  const int i = (mi_row & 0x7) >> 1;
  const int j = (mi_col & 0x7) >> 1;
  int force_skip_low_temp_var = 0;
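  // variance_low[] is indexed per 64x64 superblock: index 0 is the full
  // 64x64, 1-2 the two 64x32 halves, 3-4 the two 32x64 halves, 5-8 the four
  // 32x32 quadrants, and 9-24 the sixteen 16x16 blocks (via pos_shift_16x16).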
  // Set force_skip_low_temp_var based on the block size and block offset.
  if (bsize == BLOCK_64X64) {
    force_skip_low_temp_var = variance_low[0];
  } else if (bsize == BLOCK_64X32) {
    if (!(mi_col & 0x7) && !(mi_row & 0x7)) {
      force_skip_low_temp_var = variance_low[1];
    } else if (!(mi_col & 0x7) && (mi_row & 0x7)) {
      force_skip_low_temp_var = variance_low[2];
    }
  } else if (bsize == BLOCK_32X64) {
    if (!(mi_col & 0x7) && !(mi_row & 0x7)) {
      force_skip_low_temp_var = variance_low[3];
    } else if ((mi_col & 0x7) && !(mi_row & 0x7)) {
      force_skip_low_temp_var = variance_low[4];
    }
  } else if (bsize == BLOCK_32X32) {
    if (!(mi_col & 0x7) && !(mi_row & 0x7)) {
      force_skip_low_temp_var = variance_low[5];
    } else if ((mi_col & 0x7) && !(mi_row & 0x7)) {
      force_skip_low_temp_var = variance_low[6];
    } else if (!(mi_col & 0x7) && (mi_row & 0x7)) {
      force_skip_low_temp_var = variance_low[7];
    } else if ((mi_col & 0x7) && (mi_row & 0x7)) {
      force_skip_low_temp_var = variance_low[8];
    }
  } else if (bsize == BLOCK_16X16) {
    force_skip_low_temp_var = variance_low[pos_shift_16x16[i][j]];
  } else if (bsize == BLOCK_32X16) {
    // The col shift index for the second 16x16 block.
    const int j2 = ((mi_col + 2) & 0x7) >> 1;
    // Only if each 16x16 block inside has low temporal variance.
    force_skip_low_temp_var = variance_low[pos_shift_16x16[i][j]] &&
                              variance_low[pos_shift_16x16[i][j2]];
  } else if (bsize == BLOCK_16X32) {
    // The row shift index for the second 16x16 block.
    const int i2 = ((mi_row + 2) & 0x7) >> 1;
    force_skip_low_temp_var = variance_low[pos_shift_16x16[i][j]] &&
                              variance_low[pos_shift_16x16[i2][j]];
  }
  return force_skip_low_temp_var;
}

void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, TileDataEnc *tile_data,
                         int mi_row, int mi_col, RD_COST *rd_cost,
                         BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
  VP9_COMMON *const cm = &cpi->common;
  SPEED_FEATURES *const sf = &cpi->sf;
  const SVC *const svc = &cpi->svc;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mi = xd->mi[0];
  struct macroblockd_plane *const pd = &xd->plane[0];
  PREDICTION_MODE best_mode = ZEROMV;
  MV_REFERENCE_FRAME ref_frame, best_ref_frame = LAST_FRAME;
  MV_REFERENCE_FRAME usable_ref_frame, second_ref_frame;
  TX_SIZE best_tx_size = TX_SIZES;
  INTERP_FILTER best_pred_filter = EIGHTTAP;
  int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
  uint8_t mode_checked[MB_MODE_COUNT][MAX_REF_FRAMES];
  struct buf_2d yv12_mb[4][MAX_MB_PLANE];
  static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
                                    VP9_ALT_FLAG };
  RD_COST this_rdc, best_rdc;
  uint8_t skip_txfm = SKIP_TXFM_NONE, best_mode_skip_txfm = SKIP_TXFM_NONE;
  // var_y and sse_y are saved to be used in skip checking.
  unsigned int var_y = UINT_MAX;
  unsigned int sse_y = UINT_MAX;
  const int intra_cost_penalty =
      vp9_get_intra_cost_penalty(cpi, bsize, cm->base_qindex, cm->y_dc_delta_q);
  int64_t inter_mode_thresh =
      RDCOST(x->rdmult, x->rddiv, intra_cost_penalty, 0);
  const int *const rd_threshes = cpi->rd.threshes[mi->segment_id][bsize];
  const int sb_row = mi_row >> MI_BLOCK_SIZE_LOG2;
  int thresh_freq_fact_idx = (sb_row * BLOCK_SIZES + bsize) * MAX_MODES;
  const int *const rd_thresh_freq_fact =
      (cpi->sf.adaptive_rd_thresh_row_mt)
          ? &(tile_data->row_base_thresh_freq_fact[thresh_freq_fact_idx])
          : tile_data->thresh_freq_fact[bsize];
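  // With row-based multi-threading, each superblock row indexes its own slice
  // of the threshold-factor table (thresh_freq_fact_idx above), presumably so
  // that row threads do not contend on a shared copy.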

  INTERP_FILTER filter_ref;
  const int bsl = mi_width_log2_lookup[bsize];
  const int pred_filter_search =
      cm->interp_filter == SWITCHABLE
          ? (((mi_row + mi_col) >> bsl) +
             get_chessboard_index(cm->current_video_frame)) &
                0x1
          : 0;
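  // The expression above enables the filter search on a checkerboard pattern
  // of blocks, flipped on alternate frames, so over time every position gets
  // searched at roughly half the per-frame cost.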
  int const_motion[MAX_REF_FRAMES] = { 0 };
  const int bh = num_4x4_blocks_high_lookup[bsize] << 2;
  const int bw = num_4x4_blocks_wide_lookup[bsize] << 2;
  // For speed 6, the result of the interp filter search is reused later in
  // the actual encoding process.
  // tmp[3] points to the dst buffer; the other three point to allocated
  // buffers.
  PRED_BUFFER tmp[4];
  DECLARE_ALIGNED(16, uint8_t, pred_buf[3 * 64 * 64]);
#if CONFIG_VP9_HIGHBITDEPTH
  DECLARE_ALIGNED(16, uint16_t, pred_buf_16[3 * 64 * 64]);
#endif
  struct buf_2d orig_dst = pd->dst;
  PRED_BUFFER *best_pred = NULL;
  PRED_BUFFER *this_mode_pred = NULL;
  const int pixels_in_block = bh * bw;
  int reuse_inter_pred = cpi->sf.reuse_inter_pred_sby && ctx->pred_pixel_ready;
  int ref_frame_skip_mask = 0;
  int idx;
  int best_pred_sad = INT_MAX;
  int best_early_term = 0;
  int ref_frame_cost[MAX_REF_FRAMES];
  int svc_force_zero_mode[3] = { 0 };
  int perform_intra_pred = 1;
  int use_golden_nonzeromv = 1;
  int force_skip_low_temp_var = 0;
  int skip_ref_find_pred[4] = { 0 };
  unsigned int sse_zeromv_normalized = UINT_MAX;
  unsigned int best_sse_sofar = UINT_MAX;
  unsigned int thresh_svc_skip_golden = 500;
#if CONFIG_VP9_TEMPORAL_DENOISING
  VP9_PICKMODE_CTX_DEN ctx_den;
  int64_t zero_last_cost_orig = INT64_MAX;
  int denoise_svc_pickmode = 1;
#endif
  INTERP_FILTER filter_gf_svc = EIGHTTAP;
  MV_REFERENCE_FRAME best_second_ref_frame = NONE;
  int comp_modes = 0;
  int num_inter_modes = (cpi->use_svc) ? RT_INTER_MODES_SVC : RT_INTER_MODES;
  int flag_svc_subpel = 0;
  int svc_mv_col = 0;
  int svc_mv_row = 0;

  init_ref_frame_cost(cm, xd, ref_frame_cost);

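  // mode_checked[mode][ref] records which mode/reference pairs have already
  // been evaluated, so duplicate (zero) MV candidates can be skipped later.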
  memset(&mode_checked[0][0], 0, MB_MODE_COUNT * MAX_REF_FRAMES);

  if (reuse_inter_pred) {
    int i;
    for (i = 0; i < 3; i++) {
#if CONFIG_VP9_HIGHBITDEPTH
      if (cm->use_highbitdepth)
        tmp[i].data = CONVERT_TO_BYTEPTR(&pred_buf_16[pixels_in_block * i]);
      else
        tmp[i].data = &pred_buf[pixels_in_block * i];
#else
      tmp[i].data = &pred_buf[pixels_in_block * i];
#endif  // CONFIG_VP9_HIGHBITDEPTH
      tmp[i].stride = bw;
      tmp[i].in_use = 0;
    }
    tmp[3].data = pd->dst.buf;
    tmp[3].stride = pd->dst.stride;
    tmp[3].in_use = 0;
  }

  x->skip_encode = cpi->sf.skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
  x->skip = 0;

  // Instead of using vp9_get_pred_context_switchable_interp(xd) to assign
  // filter_ref, we use a less strict condition on assigning filter_ref.
  // This is to reduce the probability of entering the flow of not assigning
  // filter_ref and then skipping the filter search.
  if (xd->above_mi && is_inter_block(xd->above_mi))
    filter_ref = xd->above_mi->interp_filter;
  else if (xd->left_mi && is_inter_block(xd->left_mi))
    filter_ref = xd->left_mi->interp_filter;
  else
    filter_ref = cm->interp_filter;

  // initialize mode decisions
  vp9_rd_cost_reset(&best_rdc);
  vp9_rd_cost_reset(rd_cost);
  mi->sb_type = bsize;
  mi->ref_frame[0] = NONE;
  mi->ref_frame[1] = NONE;

  mi->tx_size =
      VPXMIN(max_txsize_lookup[bsize], tx_mode_to_biggest_tx_size[cm->tx_mode]);

  if (sf->short_circuit_flat_blocks || sf->limit_newmv_early_exit) {
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
      x->source_variance = vp9_high_get_sby_perpixel_variance(
          cpi, &x->plane[0].src, bsize, xd->bd);
    else
#endif  // CONFIG_VP9_HIGHBITDEPTH
      x->source_variance =
          vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
  }

#if CONFIG_VP9_TEMPORAL_DENOISING
  if (cpi->oxcf.noise_sensitivity > 0) {
    if (cpi->use_svc) {
      int layer = LAYER_IDS_TO_IDX(cpi->svc.spatial_layer_id,
                                   cpi->svc.temporal_layer_id,
                                   cpi->svc.number_temporal_layers);
      LAYER_CONTEXT *lc = &cpi->svc.layer_context[layer];
      denoise_svc_pickmode = denoise_svc(cpi) && !lc->is_key_frame;
    }
    if (cpi->denoiser.denoising_level > kDenLowLow && denoise_svc_pickmode)
      vp9_denoiser_reset_frame_stats(ctx);
  }
#endif

  if (cpi->rc.frames_since_golden == 0 && !cpi->use_svc &&
      !cpi->rc.alt_ref_gf_group && !cpi->rc.last_frame_is_src_altref) {
    usable_ref_frame = LAST_FRAME;
  } else {
    usable_ref_frame = GOLDEN_FRAME;
  }
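  // usable_ref_frame is the highest-numbered reference considered by the mode
  // loop below; right after a golden-frame update only LAST is searched,
  // presumably because GOLDEN then holds (nearly) the same content as LAST.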

  if (cpi->oxcf.lag_in_frames > 0 && cpi->oxcf.rc_mode == VPX_VBR) {
    if (cpi->rc.alt_ref_gf_group || cpi->rc.is_src_frame_alt_ref)
      usable_ref_frame = ALTREF_FRAME;

    if (cpi->rc.is_src_frame_alt_ref) {
      skip_ref_find_pred[LAST_FRAME] = 1;
      skip_ref_find_pred[GOLDEN_FRAME] = 1;
    }
    if (!cm->show_frame) {
      if (cpi->rc.frames_since_key == 1) {
        usable_ref_frame = LAST_FRAME;
        skip_ref_find_pred[GOLDEN_FRAME] = 1;
        skip_ref_find_pred[ALTREF_FRAME] = 1;
      }
    }
  }

  // For SVC mode, on spatial_layer_id > 0: if the reference has a different
  // scale, constrain the inter mode search to test zero motion only.
  if (cpi->use_svc && svc->force_zero_mode_spatial_ref &&
      cpi->svc.spatial_layer_id > 0) {
    if (cpi->ref_frame_flags & flag_list[LAST_FRAME]) {
      struct scale_factors *const sf = &cm->frame_refs[LAST_FRAME - 1].sf;
      if (vp9_is_scaled(sf)) svc_force_zero_mode[LAST_FRAME - 1] = 1;
    }
    if (cpi->ref_frame_flags & flag_list[GOLDEN_FRAME]) {
      struct scale_factors *const sf = &cm->frame_refs[GOLDEN_FRAME - 1].sf;
      if (vp9_is_scaled(sf)) svc_force_zero_mode[GOLDEN_FRAME - 1] = 1;
    }
  }

  if (cpi->sf.short_circuit_low_temp_var) {
    force_skip_low_temp_var =
        get_force_skip_low_temp_var(&x->variance_low[0], mi_row, mi_col, bsize);
    // If force_skip_low_temp_var is set, and short-circuit mode is 1 or 3,
    // skip the golden reference.
    if ((cpi->sf.short_circuit_low_temp_var == 1 ||
         cpi->sf.short_circuit_low_temp_var == 3) &&
        force_skip_low_temp_var) {
      usable_ref_frame = LAST_FRAME;
    }
  }

  if (!((cpi->ref_frame_flags & flag_list[GOLDEN_FRAME]) &&
        !svc_force_zero_mode[GOLDEN_FRAME - 1] && !force_skip_low_temp_var))
    use_golden_nonzeromv = 0;

  if (cpi->oxcf.speed >= 8 && !cpi->use_svc &&
      ((cpi->rc.frames_since_golden + 1) < x->last_sb_high_content ||
       x->last_sb_high_content > 40 || cpi->rc.frames_since_golden > 120))
    usable_ref_frame = LAST_FRAME;

  // Compound prediction modes: (0,0) on LAST/GOLDEN and ARF.
  if (cm->reference_mode == REFERENCE_MODE_SELECT &&
      cpi->sf.use_compound_nonrd_pickmode && usable_ref_frame == ALTREF_FRAME)
    comp_modes = 2;

  for (ref_frame = LAST_FRAME; ref_frame <= usable_ref_frame; ++ref_frame) {
    if (!skip_ref_find_pred[ref_frame]) {
      find_predictors(cpi, x, ref_frame, frame_mv, const_motion,
                      &ref_frame_skip_mask, flag_list, tile_data, mi_row,
                      mi_col, yv12_mb, bsize, force_skip_low_temp_var,
                      comp_modes > 0);
    }
  }

  if (cpi->use_svc || cpi->oxcf.speed <= 7 || bsize < BLOCK_32X32)
    x->sb_use_mv_part = 0;

  // Set the flag_svc_subpel to 1 for SVC if the lower spatial layer used
  // an averaging filter for downsampling (phase = 8). If so, we will test
  // a nonzero motion mode on the spatial (golden) reference.
  // The nonzero motion is half-pel shifted to the left and top (-4, -4).
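  // In 1/8-pel units, (-4, -4) is exactly a half-pel shift up and to the
  // left, matching the half-sample offset of the phase-8 averaging
  // downsampling filter.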
  if (cpi->use_svc && cpi->svc.spatial_layer_id > 0 &&
      svc_force_zero_mode[GOLDEN_FRAME - 1] &&
      cpi->svc.downsample_filter_phase[cpi->svc.spatial_layer_id - 1] == 8) {
    svc_mv_col = -4;
    svc_mv_row = -4;
    flag_svc_subpel = 1;
  }

  for (idx = 0; idx < num_inter_modes + comp_modes; ++idx) {
    int rate_mv = 0;
    int mode_rd_thresh;
    int mode_index;
    int i;
    int64_t this_sse;
    int is_skippable;
    int this_early_term = 0;
    int rd_computed = 0;
    int flag_preduv_computed[2] = { 0 };
    int inter_mv_mode = 0;
    int skip_this_mv = 0;
    int comp_pred = 0;
    int force_gf_mv = 0;
    PREDICTION_MODE this_mode;
    second_ref_frame = NONE;

    if (idx < num_inter_modes) {
      this_mode = ref_mode_set[idx].pred_mode;
      ref_frame = ref_mode_set[idx].ref_frame;

      if (cpi->use_svc) {
        this_mode = ref_mode_set_svc[idx].pred_mode;
        ref_frame = ref_mode_set_svc[idx].ref_frame;
      }
    } else {
      // Add (0,0) compound modes.
      this_mode = ZEROMV;
      ref_frame = LAST_FRAME;
      if (idx == num_inter_modes + comp_modes - 1) ref_frame = GOLDEN_FRAME;
      second_ref_frame = ALTREF_FRAME;
      comp_pred = 1;
    }
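    // With comp_modes == 2, this appends exactly two compound candidates:
    // ZEROMV on (LAST, ALTREF) followed by ZEROMV on (GOLDEN, ALTREF).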

    if (ref_frame > usable_ref_frame) continue;
    if (skip_ref_find_pred[ref_frame]) continue;

    if (flag_svc_subpel && ref_frame == GOLDEN_FRAME) {
      force_gf_mv = 1;
      // Only test mode if NEARESTMV/NEARMV is (svc_mv_col, svc_mv_row),
      // otherwise set NEWMV to (svc_mv_col, svc_mv_row).
      if (this_mode == NEWMV) {
        frame_mv[this_mode][ref_frame].as_mv.col = svc_mv_col;
        frame_mv[this_mode][ref_frame].as_mv.row = svc_mv_row;
      } else if (frame_mv[this_mode][ref_frame].as_mv.col != svc_mv_col ||
                 frame_mv[this_mode][ref_frame].as_mv.row != svc_mv_row) {
        continue;
      }
    }

    if (comp_pred) {
      const struct segmentation *const seg = &cm->seg;
      if (!cpi->allow_comp_inter_inter) continue;
      // Skip compound inter modes if ARF is not available.
      if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) continue;
      // Do not allow compound prediction if the segment level reference frame
      // feature is in use as in this case there can only be one reference.
      if (segfeature_active(seg, mi->segment_id, SEG_LVL_REF_FRAME)) continue;
    }

    // For SVC, skip the golden (spatial) reference search if sse of zeromv_last
    // is below threshold.
    if (cpi->use_svc && ref_frame == GOLDEN_FRAME &&
        sse_zeromv_normalized < thresh_svc_skip_golden)
      continue;

    if (sf->short_circuit_flat_blocks && x->source_variance == 0 &&
        this_mode != NEARESTMV) {
      continue;
    }

    if (!(cpi->sf.inter_mode_mask[bsize] & (1 << this_mode))) continue;

    if (cpi->oxcf.lag_in_frames > 0 && cpi->oxcf.rc_mode == VPX_VBR) {
      if (cpi->rc.is_src_frame_alt_ref &&
          (ref_frame != ALTREF_FRAME ||
           frame_mv[this_mode][ref_frame].as_int != 0))
        continue;

      if (!cm->show_frame && ref_frame == ALTREF_FRAME &&
          frame_mv[this_mode][ref_frame].as_int != 0)
        continue;

      if (cpi->rc.alt_ref_gf_group && cm->show_frame &&
          cpi->rc.frames_since_golden > (cpi->rc.baseline_gf_interval >> 1) &&
          ref_frame == GOLDEN_FRAME &&
          frame_mv[this_mode][ref_frame].as_int != 0)
        continue;

      if (cpi->rc.alt_ref_gf_group && cm->show_frame &&
          cpi->rc.frames_since_golden > 0 &&
          cpi->rc.frames_since_golden < (cpi->rc.baseline_gf_interval >> 1) &&
          ref_frame == ALTREF_FRAME &&
          frame_mv[this_mode][ref_frame].as_int != 0)
        continue;
    }

    if (!(cpi->ref_frame_flags & flag_list[ref_frame])) continue;

    if (const_motion[ref_frame] && this_mode == NEARMV) continue;

    // Skip non-zeromv mode search for golden frame if force_skip_low_temp_var
    // is set. If nearestmv for golden frame is 0, zeromv mode will be skipped
    // later.
    if (!force_gf_mv && force_skip_low_temp_var && ref_frame == GOLDEN_FRAME &&
        frame_mv[this_mode][ref_frame].as_int != 0) {
      continue;
    }

    if (x->content_state_sb != kVeryHighSad &&
        (cpi->sf.short_circuit_low_temp_var >= 2 ||
         (cpi->sf.short_circuit_low_temp_var == 1 && bsize == BLOCK_64X64)) &&
        force_skip_low_temp_var && ref_frame == LAST_FRAME &&
        this_mode == NEWMV) {
      continue;
    }

    if (cpi->use_svc) {
      if (!force_gf_mv && svc_force_zero_mode[ref_frame - 1] &&
          frame_mv[this_mode][ref_frame].as_int != 0)
        continue;
    }

    if (sf->reference_masking &&
        !(frame_mv[this_mode][ref_frame].as_int == 0 &&
          ref_frame == LAST_FRAME)) {
      if (usable_ref_frame < ALTREF_FRAME) {
        if (!force_skip_low_temp_var && usable_ref_frame > LAST_FRAME) {
          i = (ref_frame == LAST_FRAME) ? GOLDEN_FRAME : LAST_FRAME;
          if ((cpi->ref_frame_flags & flag_list[i]))
            if (x->pred_mv_sad[ref_frame] > (x->pred_mv_sad[i] << 1))
              ref_frame_skip_mask |= (1 << ref_frame);
        }
      } else if (!cpi->rc.is_src_frame_alt_ref &&
                 !(frame_mv[this_mode][ref_frame].as_int == 0 &&
                   ref_frame == ALTREF_FRAME)) {
        int ref1 = (ref_frame == GOLDEN_FRAME) ? LAST_FRAME : GOLDEN_FRAME;
        int ref2 = (ref_frame == ALTREF_FRAME) ? LAST_FRAME : ALTREF_FRAME;
        if (((cpi->ref_frame_flags & flag_list[ref1]) &&
             (x->pred_mv_sad[ref_frame] > (x->pred_mv_sad[ref1] << 1))) ||
            ((cpi->ref_frame_flags & flag_list[ref2]) &&
             (x->pred_mv_sad[ref_frame] > (x->pred_mv_sad[ref2] << 1))))
          ref_frame_skip_mask |= (1 << ref_frame);
      }
    }
    if (ref_frame_skip_mask & (1 << ref_frame)) continue;

    // Select prediction reference frames.
    for (i = 0; i < MAX_MB_PLANE; i++) {
      xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
      if (comp_pred) xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
    }

    mi->ref_frame[0] = ref_frame;
    mi->ref_frame[1] = second_ref_frame;
    set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);

    mode_index = mode_idx[ref_frame][INTER_OFFSET(this_mode)];
    mode_rd_thresh = best_mode_skip_txfm ? rd_threshes[mode_index] << 1
                                         : rd_threshes[mode_index];

    // Increase mode_rd_thresh value for GOLDEN_FRAME for improved encoding
    // speed with little/no subjective quality loss.
    if (cpi->sf.bias_golden && ref_frame == GOLDEN_FRAME &&
        cpi->rc.frames_since_golden > 4)
      mode_rd_thresh = mode_rd_thresh << 3;

    if ((cpi->sf.adaptive_rd_thresh_row_mt &&
         rd_less_than_thresh_row_mt(best_rdc.rdcost, mode_rd_thresh,
                                    &rd_thresh_freq_fact[mode_index])) ||
        (!cpi->sf.adaptive_rd_thresh_row_mt &&
         rd_less_than_thresh(best_rdc.rdcost, mode_rd_thresh,
                             &rd_thresh_freq_fact[mode_index])))
      continue;

    if (this_mode == NEWMV && !force_gf_mv) {
      if (ref_frame > LAST_FRAME && !cpi->use_svc &&
          cpi->oxcf.rc_mode == VPX_CBR) {
        int tmp_sad;
        uint32_t dis;
        int cost_list[5] = { INT_MAX, INT_MAX, INT_MAX, INT_MAX, INT_MAX };

        if (bsize < BLOCK_16X16) continue;

        tmp_sad = vp9_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col);

        if (tmp_sad > x->pred_mv_sad[LAST_FRAME]) continue;
        if (tmp_sad + (num_pels_log2_lookup[bsize] << 4) > best_pred_sad)
          continue;

        frame_mv[NEWMV][ref_frame].as_int = mi->mv[0].as_int;
        rate_mv = vp9_mv_bit_cost(&frame_mv[NEWMV][ref_frame].as_mv,
                                  &x->mbmi_ext->ref_mvs[ref_frame][0].as_mv,
                                  x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
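        // Convert the 1/8-pel MV to full-pel units as the starting point for
        // the fractional (sub-pel) refinement below.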
        frame_mv[NEWMV][ref_frame].as_mv.row >>= 3;
        frame_mv[NEWMV][ref_frame].as_mv.col >>= 3;

        cpi->find_fractional_mv_step(
            x, &frame_mv[NEWMV][ref_frame].as_mv,
            &x->mbmi_ext->ref_mvs[ref_frame][0].as_mv,
            cpi->common.allow_high_precision_mv, x->errorperbit,
            &cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
            cpi->sf.mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list),
            x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref_frame], NULL, 0,
            0);
      } else if (svc->use_base_mv && svc->spatial_layer_id) {
        if (frame_mv[NEWMV][ref_frame].as_int != INVALID_MV) {
          const int pre_stride = xd->plane[0].pre[0].stride;
          unsigned int base_mv_sse = UINT_MAX;
          int scale = (cpi->rc.avg_frame_low_motion > 60) ? 2 : 4;
          const uint8_t *const pre_buf =
              xd->plane[0].pre[0].buf +
              (frame_mv[NEWMV][ref_frame].as_mv.row >> 3) * pre_stride +
              (frame_mv[NEWMV][ref_frame].as_mv.col >> 3);
          cpi->fn_ptr[bsize].vf(x->plane[0].src.buf, x->plane[0].src.stride,
                                pre_buf, pre_stride, &base_mv_sse);

          // Exit NEWMV search if base_mv is (0,0) && bsize < BLOCK_16X16,
          // for SVC encoding.
          if (cpi->use_svc && cpi->svc.use_base_mv && bsize < BLOCK_16X16 &&
              frame_mv[NEWMV][ref_frame].as_mv.row == 0 &&
              frame_mv[NEWMV][ref_frame].as_mv.col == 0)
            continue;

          // Exit NEWMV search if base_mv_sse is large.
          if (sf->base_mv_aggressive && base_mv_sse > (best_sse_sofar << scale))
            continue;
          if (base_mv_sse < (best_sse_sofar << 1)) {
            // Base layer mv is good.
            // Exit NEWMV search if the base_mv is (0, 0) and sse is low, since
            // (0, 0) mode is already tested.
            unsigned int base_mv_sse_normalized =
                base_mv_sse >>
                (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);
            if (sf->base_mv_aggressive && base_mv_sse <= best_sse_sofar &&
                base_mv_sse_normalized < 400 &&
                frame_mv[NEWMV][ref_frame].as_mv.row == 0 &&
                frame_mv[NEWMV][ref_frame].as_mv.col == 0)
              continue;
            if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
                                        &frame_mv[NEWMV][ref_frame], &rate_mv,
                                        best_rdc.rdcost, 1)) {
              continue;
            }
          } else if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
                                             &frame_mv[NEWMV][ref_frame],
                                             &rate_mv, best_rdc.rdcost, 0)) {
            continue;
          }
        } else if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
                                           &frame_mv[NEWMV][ref_frame],
                                           &rate_mv, best_rdc.rdcost, 0)) {
          continue;
        }
      } else if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
                                         &frame_mv[NEWMV][ref_frame], &rate_mv,
                                         best_rdc.rdcost, 0)) {
        continue;
      }
    }

    // TODO(jianj): Skipping the testing of duplicate non-zero motion vectors
    // causes some regression; restrict the skip to duplicate zero MVs for now,
    // until the regression issue is resolved.
    for (inter_mv_mode = NEARESTMV; inter_mv_mode <= NEWMV; inter_mv_mode++) {
      if (inter_mv_mode == this_mode || comp_pred) continue;
      if (mode_checked[inter_mv_mode][ref_frame] &&
          frame_mv[this_mode][ref_frame].as_int ==
              frame_mv[inter_mv_mode][ref_frame].as_int &&
          frame_mv[inter_mv_mode][ref_frame].as_int == 0) {
        skip_this_mv = 1;
        break;
      }
    }

    if (skip_this_mv) continue;

    // If use_golden_nonzeromv is false, NEWMV mode is skipped for golden, so
    // there is no need to compute best_pred_sad, which is only used to skip
    // golden NEWMV.
    if (use_golden_nonzeromv && this_mode == NEWMV && ref_frame == LAST_FRAME &&
        frame_mv[NEWMV][LAST_FRAME].as_int != INVALID_MV) {
      const int pre_stride = xd->plane[0].pre[0].stride;
      const uint8_t *const pre_buf =
          xd->plane[0].pre[0].buf +
          (frame_mv[NEWMV][LAST_FRAME].as_mv.row >> 3) * pre_stride +
          (frame_mv[NEWMV][LAST_FRAME].as_mv.col >> 3);
      best_pred_sad = cpi->fn_ptr[bsize].sdf(
          x->plane[0].src.buf, x->plane[0].src.stride, pre_buf, pre_stride);
      x->pred_mv_sad[LAST_FRAME] = best_pred_sad;
    }

    if (this_mode != NEARESTMV && !comp_pred &&
        frame_mv[this_mode][ref_frame].as_int ==
            frame_mv[NEARESTMV][ref_frame].as_int)
      continue;

    mi->mode = this_mode;
    mi->mv[0].as_int = frame_mv[this_mode][ref_frame].as_int;
    mi->mv[1].as_int = 0;

    // Search for the best prediction filter type, when the resulting
    // motion vector is at sub-pixel accuracy for the luma component, i.e.,
    // at least one of the last three (1/8-pel) bits is nonzero.
    if (reuse_inter_pred) {
      if (!this_mode_pred) {
        this_mode_pred = &tmp[3];
      } else {
        this_mode_pred = &tmp[get_pred_buffer(tmp, 3)];
        pd->dst.buf = this_mode_pred->data;
        pd->dst.stride = bw;
      }
    }

    if ((this_mode == NEWMV || filter_ref == SWITCHABLE) &&
        pred_filter_search &&
        (ref_frame == LAST_FRAME ||
         (ref_frame == GOLDEN_FRAME && !force_gf_mv &&
          (cpi->use_svc || cpi->oxcf.rc_mode == VPX_VBR))) &&
        (((mi->mv[0].as_mv.row | mi->mv[0].as_mv.col) & 0x07) != 0)) {
      int pf_rate[3];
      int64_t pf_dist[3];
      int curr_rate[3];
      unsigned int pf_var[3];
      unsigned int pf_sse[3];
      TX_SIZE pf_tx_size[3];
      int64_t best_cost = INT64_MAX;
      INTERP_FILTER best_filter = SWITCHABLE, filter;
      PRED_BUFFER *current_pred = this_mode_pred;
      rd_computed = 1;

      for (filter = EIGHTTAP; filter <= EIGHTTAP_SMOOTH; ++filter) {
        int64_t cost;
        mi->interp_filter = filter;
        vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
        model_rd_for_sb_y(cpi, bsize, x, xd, &pf_rate[filter], &pf_dist[filter],
                          &pf_var[filter], &pf_sse[filter]);
        curr_rate[filter] = pf_rate[filter];
        pf_rate[filter] += vp9_get_switchable_rate(cpi, xd);
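        // The per-filter cost below is the usual Lagrangian rate-distortion
        // sum, distortion + lambda * rate, with lambda derived from x->rdmult
        // and the distortion scaling from x->rddiv inside RDCOST.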
        cost = RDCOST(x->rdmult, x->rddiv, pf_rate[filter], pf_dist[filter]);
        pf_tx_size[filter] = mi->tx_size;
        if (cost < best_cost) {
          best_filter = filter;
          best_cost = cost;
          skip_txfm = x->skip_txfm[0];

          if (reuse_inter_pred) {
            if (this_mode_pred != current_pred) {
              free_pred_buffer(this_mode_pred);
              this_mode_pred = current_pred;
            }
            current_pred = &tmp[get_pred_buffer(tmp, 3)];
            pd->dst.buf = current_pred->data;
            pd->dst.stride = bw;
          }
        }
      }

      if (reuse_inter_pred && this_mode_pred != current_pred)
        free_pred_buffer(current_pred);

      mi->interp_filter = best_filter;
      mi->tx_size = pf_tx_size[best_filter];
      this_rdc.rate = curr_rate[best_filter];
      this_rdc.dist = pf_dist[best_filter];
      var_y = pf_var[best_filter];
      sse_y = pf_sse[best_filter];
      x->skip_txfm[0] = skip_txfm;
      if (reuse_inter_pred) {
        pd->dst.buf = this_mode_pred->data;
        pd->dst.stride = this_mode_pred->stride;
      }
    } else {
      // For low-motion content, use x->sb_is_skin in addition to VeryHighSad
      // when setting large_block.
      const int large_block =
          (x->content_state_sb == kVeryHighSad ||
           (x->sb_is_skin && cpi->rc.avg_frame_low_motion > 70) ||
           cpi->oxcf.speed < 7)
              ? bsize > BLOCK_32X32
              : bsize >= BLOCK_32X32;
      mi->interp_filter = (filter_ref == SWITCHABLE) ? EIGHTTAP : filter_ref;

      if (cpi->use_svc && ref_frame == GOLDEN_FRAME &&
          svc_force_zero_mode[ref_frame - 1])
        mi->interp_filter = filter_gf_svc;

      vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);

      // For large partition blocks, extra testing is done.
      if (cpi->oxcf.rc_mode == VPX_CBR && large_block &&
          !cyclic_refresh_segment_id_boosted(xd->mi[0]->segment_id) &&
          cm->base_qindex) {
        model_rd_for_sb_y_large(cpi, bsize, x, xd, &this_rdc.rate,
                                &this_rdc.dist, &var_y, &sse_y, mi_row, mi_col,
                                &this_early_term, flag_preduv_computed);
      } else {
        rd_computed = 1;
        model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc.rate, &this_rdc.dist,
                          &var_y, &sse_y);
      }
      // Save normalized sse (between current and last frame) for (0, 0) motion.
      if (cpi->use_svc && ref_frame == LAST_FRAME &&
          frame_mv[this_mode][ref_frame].as_int == 0) {
        sse_zeromv_normalized =
            sse_y >> (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);
      }
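      // The shift above divides sse_y by the number of 4x4 sub-blocks in the
      // partition (the b_*_log2 lookups are in 4-pel units), giving a roughly
      // size-independent per-4x4 SSE to compare against
      // thresh_svc_skip_golden.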
      if (sse_y < best_sse_sofar) best_sse_sofar = sse_y;
    }

    if (!this_early_term) {
      this_sse = (int64_t)sse_y;
      block_yrd(cpi, x, &this_rdc, &is_skippable, &this_sse, bsize,
                VPXMIN(mi->tx_size, TX_16X16), rd_computed);

      x->skip_txfm[0] = is_skippable;
      if (is_skippable) {
        this_rdc.rate = vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
      } else {
        if (RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist) <
            RDCOST(x->rdmult, x->rddiv, 0, this_sse)) {
          this_rdc.rate += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0);
        } else {
          this_rdc.rate = vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
          this_rdc.dist = this_sse;
          x->skip_txfm[0] = SKIP_TXFM_AC_DC;
        }
      }

      if (cm->interp_filter == SWITCHABLE) {
        if ((mi->mv[0].as_mv.row | mi->mv[0].as_mv.col) & 0x07)
          this_rdc.rate += vp9_get_switchable_rate(cpi, xd);
      }
    } else {
      this_rdc.rate += cm->interp_filter == SWITCHABLE
                           ? vp9_get_switchable_rate(cpi, xd)
                           : 0;
      this_rdc.rate += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
    }

    if (!this_early_term &&
        (x->color_sensitivity[0] || x->color_sensitivity[1])) {
      RD_COST rdc_uv;
      const BLOCK_SIZE uv_bsize = get_plane_block_size(bsize, &xd->plane[1]);
      if (x->color_sensitivity[0] && !flag_preduv_computed[0]) {
        vp9_build_inter_predictors_sbp(xd, mi_row, mi_col, bsize, 1);
        flag_preduv_computed[0] = 1;
      }
      if (x->color_sensitivity[1] && !flag_preduv_computed[1]) {
        vp9_build_inter_predictors_sbp(xd, mi_row, mi_col, bsize, 2);
        flag_preduv_computed[1] = 1;
      }
      model_rd_for_sb_uv(cpi, uv_bsize, x, xd, &rdc_uv, &var_y, &sse_y, 1, 2);
      this_rdc.rate += rdc_uv.rate;
      this_rdc.dist += rdc_uv.dist;
    }

    this_rdc.rate += rate_mv;
    this_rdc.rate += cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref_frame]]
                                         [INTER_OFFSET(this_mode)];
    // TODO(marpan): Add costing for compound mode.
    this_rdc.rate += ref_frame_cost[ref_frame];
    this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);

    // Bias against NEWMV that is very different from its neighbors, and bias
    // toward small-motion LAST-ref modes for noisy input.
    if (cpi->oxcf.rc_mode == VPX_CBR && cpi->oxcf.speed >= 5 &&
        cpi->oxcf.content != VP9E_CONTENT_SCREEN) {
      vp9_NEWMV_diff_bias(&cpi->noise_estimate, xd, this_mode, &this_rdc, bsize,
                          frame_mv[this_mode][ref_frame].as_mv.row,
                          frame_mv[this_mode][ref_frame].as_mv.col,
                          ref_frame == LAST_FRAME, x->lowvar_highsumdiff,
                          x->sb_is_skin);
    }

    // Skip check: test whether this block can be reconstructed by prediction
    // alone.
    if (cpi->allow_encode_breakout) {
      encode_breakout_test(cpi, x, bsize, mi_row, mi_col, ref_frame, this_mode,
                           var_y, sse_y, yv12_mb, &this_rdc.rate,
                           &this_rdc.dist, flag_preduv_computed);
      if (x->skip) {
        this_rdc.rate += rate_mv;
        this_rdc.rdcost =
            RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
      }
    }

#if CONFIG_VP9_TEMPORAL_DENOISING
    if (cpi->oxcf.noise_sensitivity > 0 && denoise_svc_pickmode &&
        cpi->denoiser.denoising_level > kDenLowLow) {
      vp9_denoiser_update_frame_stats(mi, sse_y, this_mode, ctx);
      // Keep track of zero_last cost.
      if (ref_frame == LAST_FRAME && frame_mv[this_mode][ref_frame].as_int == 0)
        zero_last_cost_orig = this_rdc.rdcost;
    }
#else
    (void)ctx;
#endif

    mode_checked[this_mode][ref_frame] = 1;

    if (this_rdc.rdcost < best_rdc.rdcost || x->skip) {
      best_rdc = this_rdc;
      best_mode = this_mode;
      best_pred_filter = mi->interp_filter;
      best_tx_size = mi->tx_size;
      best_ref_frame = ref_frame;
      best_mode_skip_txfm = x->skip_txfm[0];
      best_early_term = this_early_term;
      best_second_ref_frame = second_ref_frame;

      if (reuse_inter_pred) {
        free_pred_buffer(best_pred);
        best_pred = this_mode_pred;
      }
    } else {
      if (reuse_inter_pred) free_pred_buffer(this_mode_pred);
    }

    if (x->skip) break;

    // If the early termination flag is set and at least 2 modes have been
    // checked, terminate the mode search.
    if (best_early_term && idx > 0) {
      x->skip = 1;
      break;
    }
  }

  mi->mode = best_mode;
  mi->interp_filter = best_pred_filter;
  mi->tx_size = best_tx_size;
  mi->ref_frame[0] = best_ref_frame;
  mi->mv[0].as_int = frame_mv[best_mode][best_ref_frame].as_int;
  xd->mi[0]->bmi[0].as_mv[0].as_int = mi->mv[0].as_int;
  x->skip_txfm[0] = best_mode_skip_txfm;
  mi->ref_frame[1] = best_second_ref_frame;

  // For spatial enhancement layers: perform intra prediction only if the base
  // layer is chosen as the reference. Always perform intra prediction if
  // LAST is the only reference or is_key_frame is set.
  if (cpi->svc.spatial_layer_id) {
    perform_intra_pred =
        cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame ||
        !(cpi->ref_frame_flags & flag_list[GOLDEN_FRAME]) ||
        (!cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame &&
         svc_force_zero_mode[best_ref_frame - 1]);
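    // Triple inter_mode_thresh below (x * 3), so the intra search further
    // down is entered only when the best inter cost is correspondingly worse.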
    inter_mode_thresh = (inter_mode_thresh << 1) + inter_mode_thresh;
  }
  if (cpi->oxcf.lag_in_frames > 0 && cpi->oxcf.rc_mode == VPX_VBR &&
      cpi->rc.is_src_frame_alt_ref)
    perform_intra_pred = 0;
  // Perform the intra prediction search, if the best RD cost so far is above
  // a certain threshold.
  if (best_rdc.rdcost == INT64_MAX ||
      ((!force_skip_low_temp_var || bsize < BLOCK_32X32 ||
        x->content_state_sb == kVeryHighSad) &&
       perform_intra_pred && !x->skip && best_rdc.rdcost > inter_mode_thresh &&
       bsize <= cpi->sf.max_intra_bsize && !x->skip_low_source_sad &&
       !x->lowvar_highsumdiff)) {
    struct estimate_block_intra_args args = { cpi, x, DC_PRED, 1, 0 };
    int i;
    TX_SIZE best_intra_tx_size = TX_SIZES;
    TX_SIZE intra_tx_size =
        VPXMIN(max_txsize_lookup[bsize],
               tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
    if (cpi->oxcf.content != VP9E_CONTENT_SCREEN && intra_tx_size > TX_16X16)
      intra_tx_size = TX_16X16;

    if (reuse_inter_pred && best_pred != NULL) {
      if (best_pred->data == orig_dst.buf) {
        this_mode_pred = &tmp[get_pred_buffer(tmp, 3)];
#if CONFIG_VP9_HIGHBITDEPTH
        if (cm->use_highbitdepth)
          vpx_highbd_convolve_copy(
              CONVERT_TO_SHORTPTR(best_pred->data), best_pred->stride,
              CONVERT_TO_SHORTPTR(this_mode_pred->data), this_mode_pred->stride,
              NULL, 0, 0, 0, 0, bw, bh, xd->bd);
        else
          vpx_convolve_copy(best_pred->data, best_pred->stride,
                            this_mode_pred->data, this_mode_pred->stride, NULL,
                            0, 0, 0, 0, bw, bh);
#else
        vpx_convolve_copy(best_pred->data, best_pred->stride,
                          this_mode_pred->data, this_mode_pred->stride, NULL, 0,
                          0, 0, 0, bw, bh);
#endif  // CONFIG_VP9_HIGHBITDEPTH
        best_pred = this_mode_pred;
      }
    }
    pd->dst = orig_dst;

    for (i = 0; i < 4; ++i) {
      const PREDICTION_MODE this_mode = intra_mode_list[i];
      THR_MODES mode_index = mode_idx[INTRA_FRAME][mode_offset(this_mode)];
      int mode_rd_thresh = rd_threshes[mode_index];
      if (sf->short_circuit_flat_blocks && x->source_variance == 0 &&
          this_mode != DC_PRED) {
        continue;
      }

      if (!((1 << this_mode) & cpi->sf.intra_y_mode_bsize_mask[bsize]))
        continue;

      if ((cpi->sf.adaptive_rd_thresh_row_mt &&
           rd_less_than_thresh_row_mt(best_rdc.rdcost, mode_rd_thresh,
                                      &rd_thresh_freq_fact[mode_index])) ||
          (!cpi->sf.adaptive_rd_thresh_row_mt &&
           rd_less_than_thresh(best_rdc.rdcost, mode_rd_thresh,
                               &rd_thresh_freq_fact[mode_index])))
        continue;

      mi->mode = this_mode;
      mi->ref_frame[0] = INTRA_FRAME;
      this_rdc.dist = this_rdc.rate = 0;
      args.mode = this_mode;
      args.skippable = 1;
      args.rdc = &this_rdc;
      mi->tx_size = intra_tx_size;
      vp9_foreach_transformed_block_in_plane(xd, bsize, 0, estimate_block_intra,
                                             &args);
      // Check the skip cost here since skippable is not set for uv; this
      // mirrors the behavior used by inter.
      if (args.skippable) {
        x->skip_txfm[0] = SKIP_TXFM_AC_DC;
        this_rdc.rate = vp9_cost_bit(vp9_get_skip_prob(&cpi->common, xd), 1);
      } else {
        x->skip_txfm[0] = SKIP_TXFM_NONE;
        this_rdc.rate += vp9_cost_bit(vp9_get_skip_prob(&cpi->common, xd), 0);
      }
      // Inter and intra RD will mismatch in scale for non-screen content.
      if (cpi->oxcf.content == VP9E_CONTENT_SCREEN) {
        if (x->color_sensitivity[0])
          vp9_foreach_transformed_block_in_plane(xd, bsize, 1,
                                                 estimate_block_intra, &args);
        if (x->color_sensitivity[1])
          vp9_foreach_transformed_block_in_plane(xd, bsize, 2,
                                                 estimate_block_intra, &args);
      }
      this_rdc.rate += cpi->mbmode_cost[this_mode];
      this_rdc.rate += ref_frame_cost[INTRA_FRAME];
      this_rdc.rate += intra_cost_penalty;
      this_rdc.rdcost =
          RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);

      if (this_rdc.rdcost < best_rdc.rdcost) {
        best_rdc = this_rdc;
        best_mode = this_mode;
        best_intra_tx_size = mi->tx_size;
        best_ref_frame = INTRA_FRAME;
        best_second_ref_frame = NONE;
        mi->uv_mode = this_mode;
        mi->mv[0].as_int = INVALID_MV;
        mi->mv[1].as_int = INVALID_MV;
        best_mode_skip_txfm = x->skip_txfm[0];
      }
    }

    // Reset mb_mode_info to the best inter mode.
    if (best_ref_frame != INTRA_FRAME) {
      mi->tx_size = best_tx_size;
    } else {
      mi->tx_size = best_intra_tx_size;
    }
  }

  pd->dst = orig_dst;
  mi->mode = best_mode;
  mi->ref_frame[0] = best_ref_frame;
  mi->ref_frame[1] = best_second_ref_frame;
  x->skip_txfm[0] = best_mode_skip_txfm;

  if (!is_inter_block(mi)) {
    mi->interp_filter = SWITCHABLE_FILTERS;
  }

  if (reuse_inter_pred && best_pred != NULL) {
    if (best_pred->data != orig_dst.buf && is_inter_mode(mi->mode)) {
#if CONFIG_VP9_HIGHBITDEPTH
      if (cm->use_highbitdepth)
        vpx_highbd_convolve_copy(
            CONVERT_TO_SHORTPTR(best_pred->data), best_pred->stride,
            CONVERT_TO_SHORTPTR(pd->dst.buf), pd->dst.stride, NULL, 0, 0, 0, 0,
            bw, bh, xd->bd);
      else
        vpx_convolve_copy(best_pred->data, best_pred->stride, pd->dst.buf,
                          pd->dst.stride, NULL, 0, 0, 0, 0, bw, bh);
#else
      vpx_convolve_copy(best_pred->data, best_pred->stride, pd->dst.buf,
                        pd->dst.stride, NULL, 0, 0, 0, 0, bw, bh);
#endif  // CONFIG_VP9_HIGHBITDEPTH
    }
  }

#if CONFIG_VP9_TEMPORAL_DENOISING
  if (cpi->oxcf.noise_sensitivity > 0 && cpi->resize_pending == 0 &&
      denoise_svc_pickmode && cpi->denoiser.denoising_level > kDenLowLow &&
      cpi->denoiser.reset == 0) {
    VP9_DENOISER_DECISION decision = COPY_BLOCK;
    ctx->sb_skip_denoising = 0;
    // TODO(marpan): There is an issue with denoising when the
    // superblock partitioning scheme is based on the pickmode.
    // Remove this condition when the issue is resolved.
    if (x->sb_pickmode_part) ctx->sb_skip_denoising = 1;
    vp9_pickmode_ctx_den_update(&ctx_den, zero_last_cost_orig, ref_frame_cost,
                                frame_mv, reuse_inter_pred, best_tx_size,
                                best_mode, best_ref_frame, best_pred_filter,
                                best_mode_skip_txfm);
    vp9_denoiser_denoise(cpi, x, mi_row, mi_col, bsize, ctx, &decision);
    recheck_zeromv_after_denoising(cpi, mi, x, xd, decision, &ctx_den, yv12_mb,
                                   &best_rdc, bsize, mi_row, mi_col);
    best_ref_frame = ctx_den.best_ref_frame;
  }
#endif

  if (best_ref_frame == ALTREF_FRAME || best_second_ref_frame == ALTREF_FRAME)
    x->arf_frame_usage++;
  else if (best_ref_frame != INTRA_FRAME)
    x->lastgolden_frame_usage++;

  if (cpi->sf.adaptive_rd_thresh) {
    THR_MODES best_mode_idx = mode_idx[best_ref_frame][mode_offset(mi->mode)];

    if (best_ref_frame == INTRA_FRAME) {
      // Only consider the modes that are included in the intra_mode_list.
      int intra_modes = sizeof(intra_mode_list) / sizeof(PREDICTION_MODE);
      int i;

      // TODO(yunqingwang): Check intra mode mask and only update freq_fact
      // for those valid modes.
      for (i = 0; i < intra_modes; i++) {
        if (cpi->sf.adaptive_rd_thresh_row_mt)
          update_thresh_freq_fact_row_mt(cpi, tile_data, x->source_variance,
                                         thresh_freq_fact_idx, INTRA_FRAME,
                                         best_mode_idx, intra_mode_list[i]);
        else
          update_thresh_freq_fact(cpi, tile_data, x->source_variance, bsize,
                                  INTRA_FRAME, best_mode_idx,
                                  intra_mode_list[i]);
      }
    } else {
      for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
        PREDICTION_MODE this_mode;
        if (best_ref_frame != ref_frame) continue;
        for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
          if (cpi->sf.adaptive_rd_thresh_row_mt)
            update_thresh_freq_fact_row_mt(cpi, tile_data, x->source_variance,
                                           thresh_freq_fact_idx, ref_frame,
                                           best_mode_idx, this_mode);
          else
            update_thresh_freq_fact(cpi, tile_data, x->source_variance, bsize,
                                    ref_frame, best_mode_idx, this_mode);
        }
      }
    }
  }

  *rd_cost = best_rdc;
}

   2425 void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, int mi_row,
   2426                                 int mi_col, RD_COST *rd_cost, BLOCK_SIZE bsize,
   2427                                 PICK_MODE_CONTEXT *ctx) {
   2428   VP9_COMMON *const cm = &cpi->common;
   2429   SPEED_FEATURES *const sf = &cpi->sf;
   2430   MACROBLOCKD *const xd = &x->e_mbd;
   2431   MODE_INFO *const mi = xd->mi[0];
   2432   MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
   2433   const struct segmentation *const seg = &cm->seg;
   2434   MV_REFERENCE_FRAME ref_frame, second_ref_frame = NONE;
   2435   MV_REFERENCE_FRAME best_ref_frame = NONE;
   2436   unsigned char segment_id = mi->segment_id;
   2437   struct buf_2d yv12_mb[4][MAX_MB_PLANE];
   2438   static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
   2439                                     VP9_ALT_FLAG };
   2440   int64_t best_rd = INT64_MAX;
   2441   b_mode_info bsi[MAX_REF_FRAMES][4];
   2442   int ref_frame_skip_mask = 0;
   2443   const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
   2444   const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
   2445   int idx, idy;
   2446 
   2447   x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
   2448   ctx->pred_pixel_ready = 0;
   2449 
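  // For each allowed reference frame, set up the prediction block pointers
  // and collect candidate reference MVs; references that are unavailable
  // are recorded in ref_frame_skip_mask.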
  for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
    const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
    int_mv dummy_mv[2];
    x->pred_mv_sad[ref_frame] = INT_MAX;

    if ((cpi->ref_frame_flags & flag_list[ref_frame]) && (yv12 != NULL)) {
      int_mv *const candidates = mbmi_ext->ref_mvs[ref_frame];
      const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf;
      vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf,
                           sf);
      vp9_find_mv_refs(cm, xd, xd->mi[0], ref_frame, candidates, mi_row, mi_col,
                       mbmi_ext->mode_context);

      vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
                            &dummy_mv[0], &dummy_mv[1]);
    } else {
      ref_frame_skip_mask |= (1 << ref_frame);
    }
  }

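  // Initialize the mode info with defaults before searching the references.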
  mi->sb_type = bsize;
  mi->tx_size = TX_4X4;
  mi->uv_mode = DC_PRED;
  mi->ref_frame[0] = LAST_FRAME;
  mi->ref_frame[1] = NONE;
  mi->interp_filter =
      cm->interp_filter == SWITCHABLE ? EIGHTTAP : cm->interp_filter;

  for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
    int64_t this_rd = 0;
    int plane;

    if (ref_frame_skip_mask & (1 << ref_frame)) continue;

#if CONFIG_BETTER_HW_COMPATIBILITY
    if ((bsize == BLOCK_8X4 || bsize == BLOCK_4X8) && ref_frame > INTRA_FRAME &&
        vp9_is_scaled(&cm->frame_refs[ref_frame - 1].sf))
      continue;
#endif

    // TODO(jingning, agrange): Scaled reference frames are not supported for
    // sub8x8 blocks, so skip them here. Revisit if scaling support is added.
    if (ref_frame > INTRA_FRAME &&
        vp9_is_scaled(&cm->frame_refs[ref_frame - 1].sf))
      continue;

    // If the segment reference frame feature is enabled, skip any reference
    // frame that the current segment does not allow.
    if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
        get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame)
      continue;

    mi->ref_frame[0] = ref_frame;
    x->skip = 0;
    set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);

    // Select prediction reference frames.
    for (plane = 0; plane < MAX_MB_PLANE; plane++)
      xd->plane[plane].pre[0] = yv12_mb[ref_frame][plane];

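    // Search each sub8x8 partition of the block; the loop steps cover 4x4,
    // 4x8 and 8x4 sub-blocks depending on bsize.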
    for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
      for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
        int_mv b_mv[MB_MODE_COUNT];
        int64_t b_best_rd = INT64_MAX;
        const int i = idy * 2 + idx;
        PREDICTION_MODE this_mode;
        RD_COST this_rdc;
        unsigned int var_y, sse_y;

        struct macroblock_plane *p = &x->plane[0];
        struct macroblockd_plane *pd = &xd->plane[0];

        const struct buf_2d orig_src = p->src;
        const struct buf_2d orig_dst = pd->dst;
        struct buf_2d orig_pre[2];
        memcpy(orig_pre, xd->plane[0].pre, sizeof(orig_pre));

        // Set buffer pointers for the sub8x8 motion search.
        p->src.buf =
            &p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
        pd->dst.buf =
            &pd->dst.buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->dst.stride)];
        pd->pre[0].buf =
            &pd->pre[0]
                 .buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->pre[0].stride)];

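        // Seed the candidate MV list: ZEROMV is the zero vector, NEWMV is
        // filled in by the motion search below, and NEAREST/NEAR come from
        // the sub8x8 MV reference scan.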
        b_mv[ZEROMV].as_int = 0;
        b_mv[NEWMV].as_int = INVALID_MV;
        vp9_append_sub8x8_mvs_for_idx(cm, xd, i, 0, mi_row, mi_col,
                                      &b_mv[NEARESTMV], &b_mv[NEARMV],
                                      mbmi_ext->mode_context);

        for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
          int b_rate = 0;
          xd->mi[0]->bmi[i].as_mv[0].as_int = b_mv[this_mode].as_int;

          if (this_mode == NEWMV) {
            const int step_param = cpi->sf.mv.fullpel_search_step_param;
            MV mvp_full;
            MV tmp_mv;
            int cost_list[5];
            const MvLimits tmp_mv_limits = x->mv_limits;
            uint32_t dummy_dist;

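            // Seed the full-pel search: the first sub-block starts from the
            // NEARESTMV candidate, later sub-blocks reuse the refined MV of
            // sub-block 0 (both rounded to full-pel units).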
            if (i == 0) {
              mvp_full.row = b_mv[NEARESTMV].as_mv.row >> 3;
              mvp_full.col = b_mv[NEARESTMV].as_mv.col >> 3;
            } else {
              mvp_full.row = xd->mi[0]->bmi[0].as_mv[0].as_mv.row >> 3;
              mvp_full.col = xd->mi[0]->bmi[0].as_mv[0].as_mv.col >> 3;
            }

            vp9_set_mv_search_range(&x->mv_limits,
                                    &mbmi_ext->ref_mvs[ref_frame][0].as_mv);

            vp9_full_pixel_search(
                cpi, x, bsize, &mvp_full, step_param, cpi->sf.mv.search_method,
                x->sadperbit4, cond_cost_list(cpi, cost_list),
                &mbmi_ext->ref_mvs[ref_frame][0].as_mv, &tmp_mv, INT_MAX, 0);

            x->mv_limits = tmp_mv_limits;

            // Convert the full-pel result back to 1/8-pel units and compute
            // the bit cost of the motion vector.
            mvp_full.row = tmp_mv.row * 8;
            mvp_full.col = tmp_mv.col * 8;

            b_rate += vp9_mv_bit_cost(
                &mvp_full, &mbmi_ext->ref_mvs[ref_frame][0].as_mv,
                x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);

            b_rate += cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref_frame]]
                                          [INTER_OFFSET(NEWMV)];
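            // Early out: if the mode and MV rate alone already exceed the
            // best rd for this sub-block, skip the subpel refinement.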
            if (RDCOST(x->rdmult, x->rddiv, b_rate, 0) > b_best_rd) continue;

            cpi->find_fractional_mv_step(
                x, &tmp_mv, &mbmi_ext->ref_mvs[ref_frame][0].as_mv,
                cpi->common.allow_high_precision_mv, x->errorperbit,
                &cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
                cpi->sf.mv.subpel_iters_per_step,
                cond_cost_list(cpi, cost_list), x->nmvjointcost, x->mvcost,
                &dummy_dist, &x->pred_sse[ref_frame], NULL, 0, 0);

            xd->mi[0]->bmi[i].as_mv[0].as_mv = tmp_mv;
          } else {
            b_rate += cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref_frame]]
                                          [INTER_OFFSET(this_mode)];
          }

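          // Build the inter predictor for this sub-block, using the high
          // bit-depth path when the source buffer requires it.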
#if CONFIG_VP9_HIGHBITDEPTH
          if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
            vp9_highbd_build_inter_predictor(
                CONVERT_TO_SHORTPTR(pd->pre[0].buf), pd->pre[0].stride,
                CONVERT_TO_SHORTPTR(pd->dst.buf), pd->dst.stride,
                &xd->mi[0]->bmi[i].as_mv[0].as_mv, &xd->block_refs[0]->sf,
                4 * num_4x4_blocks_wide, 4 * num_4x4_blocks_high, 0,
                vp9_filter_kernels[mi->interp_filter], MV_PRECISION_Q3,
                mi_col * MI_SIZE + 4 * (i & 0x01),
                mi_row * MI_SIZE + 4 * (i >> 1), xd->bd);
          } else {
#endif
            vp9_build_inter_predictor(
                pd->pre[0].buf, pd->pre[0].stride, pd->dst.buf, pd->dst.stride,
                &xd->mi[0]->bmi[i].as_mv[0].as_mv, &xd->block_refs[0]->sf,
                4 * num_4x4_blocks_wide, 4 * num_4x4_blocks_high, 0,
                vp9_filter_kernels[mi->interp_filter], MV_PRECISION_Q3,
                mi_col * MI_SIZE + 4 * (i & 0x01),
                mi_row * MI_SIZE + 4 * (i >> 1));

#if CONFIG_VP9_HIGHBITDEPTH
          }
#endif

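          // Estimate the rate/distortion of the residual from variance and
          // SSE instead of running a full transform search.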
          model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc.rate, &this_rdc.dist,
                            &var_y, &sse_y);

          this_rdc.rate += b_rate;
          this_rdc.rdcost =
              RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
          if (this_rdc.rdcost < b_best_rd) {
            b_best_rd = this_rdc.rdcost;
            bsi[ref_frame][i].as_mode = this_mode;
            bsi[ref_frame][i].as_mv[0].as_mv = xd->mi[0]->bmi[i].as_mv[0].as_mv;
          }
        }  // mode search

        // Restore the source and prediction buffer pointers.
        p->src = orig_src;
        pd->pre[0] = orig_pre[0];
        pd->dst = orig_dst;
        this_rd += b_best_rd;

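        // Copy the winning sub-block info and replicate it across the extra
        // 4x4 units covered by 8x4 or 4x8 sub-blocks.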
        xd->mi[0]->bmi[i] = bsi[ref_frame][i];
        if (num_4x4_blocks_wide > 1) xd->mi[0]->bmi[i + 1] = xd->mi[0]->bmi[i];
        if (num_4x4_blocks_high > 1) xd->mi[0]->bmi[i + 2] = xd->mi[0]->bmi[i];
      }
    }  // loop through sub8x8 blocks

    if (this_rd < best_rd) {
      best_rd = this_rd;
      best_ref_frame = ref_frame;
    }
  }  // reference frames

  mi->tx_size = TX_4X4;
  mi->ref_frame[0] = best_ref_frame;
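  // Write the per-sub-block decisions of the winning reference frame back
  // into the block's mode info.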
  for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
    for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
      const int block = idy * 2 + idx;
      xd->mi[0]->bmi[block] = bsi[best_ref_frame][block];
      if (num_4x4_blocks_wide > 1)
        xd->mi[0]->bmi[block + 1] = bsi[best_ref_frame][block];
      if (num_4x4_blocks_high > 1)
        xd->mi[0]->bmi[block + 2] = bsi[best_ref_frame][block];
    }
  }
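  // The block-level mode is taken from the last (bottom-right) sub-block,
  // which the loops above always populate.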
  mi->mode = xd->mi[0]->bmi[3].as_mode;
  ctx->mic = *(xd->mi[0]);
  ctx->mbmi_ext = *x->mbmi_ext;
  ctx->skip_txfm[0] = SKIP_TXFM_NONE;
  ctx->skip = 0;
  // Dummy assignment for speed 5; has no effect at speed 6.
  rd_cost->rdcost = best_rd;
}
   2673