      1 /*
      2  * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
      3  *
      4  *  Use of this source code is governed by a BSD-style license
      5  *  that can be found in the LICENSE file in the root of the source
      6  *  tree. An additional intellectual property rights grant can be found
      7  *  in the file PATENTS.  All contributing project authors may
      8  *  be found in the AUTHORS file in the root of the source tree.
      9  */
     10 
     11 #include <math.h>
     12 #include <stdio.h>
     13 #include <limits.h>
     14 
     15 #include "./vp9_rtcd.h"
     16 #include "./vpx_config.h"
     17 #include "./vpx_dsp_rtcd.h"
     18 #include "./vpx_scale_rtcd.h"
     19 #include "vpx_dsp/psnr.h"
     20 #include "vpx_dsp/vpx_dsp_common.h"
     21 #include "vpx_dsp/vpx_filter.h"
     22 #if CONFIG_INTERNAL_STATS
     23 #include "vpx_dsp/ssim.h"
     24 #endif
     25 #include "vpx_ports/mem.h"
     26 #include "vpx_ports/system_state.h"
     27 #include "vpx_ports/vpx_timer.h"
     28 
     29 #include "vp9/common/vp9_alloccommon.h"
     30 #include "vp9/common/vp9_filter.h"
     31 #include "vp9/common/vp9_idct.h"
     32 #if CONFIG_VP9_POSTPROC
     33 #include "vp9/common/vp9_postproc.h"
     34 #endif
     35 #include "vp9/common/vp9_reconinter.h"
     36 #include "vp9/common/vp9_reconintra.h"
     37 #include "vp9/common/vp9_tile_common.h"
     38 #include "vp9/common/vp9_scan.h"
     39 
     40 #include "vp9/encoder/vp9_alt_ref_aq.h"
     41 #include "vp9/encoder/vp9_aq_360.h"
     42 #include "vp9/encoder/vp9_aq_complexity.h"
     43 #include "vp9/encoder/vp9_aq_cyclicrefresh.h"
     44 #include "vp9/encoder/vp9_aq_variance.h"
     45 #include "vp9/encoder/vp9_bitstream.h"
     46 #if CONFIG_INTERNAL_STATS
     47 #include "vp9/encoder/vp9_blockiness.h"
     48 #endif
     49 #include "vp9/encoder/vp9_context_tree.h"
     50 #include "vp9/encoder/vp9_encodeframe.h"
     51 #include "vp9/encoder/vp9_encodemb.h"
     52 #include "vp9/encoder/vp9_encodemv.h"
     53 #include "vp9/encoder/vp9_encoder.h"
     54 #include "vp9/encoder/vp9_ethread.h"
     55 #include "vp9/encoder/vp9_extend.h"
     56 #include "vp9/encoder/vp9_firstpass.h"
     57 #include "vp9/encoder/vp9_mbgraph.h"
     58 #if CONFIG_NON_GREEDY_MV
     59 #include "vp9/encoder/vp9_mcomp.h"
     60 #endif
     61 #include "vp9/encoder/vp9_multi_thread.h"
     62 #include "vp9/encoder/vp9_noise_estimate.h"
     63 #include "vp9/encoder/vp9_picklpf.h"
     64 #include "vp9/encoder/vp9_ratectrl.h"
     65 #include "vp9/encoder/vp9_rd.h"
     66 #include "vp9/encoder/vp9_resize.h"
     67 #include "vp9/encoder/vp9_segmentation.h"
     68 #include "vp9/encoder/vp9_skin_detection.h"
     69 #include "vp9/encoder/vp9_speed_features.h"
     70 #include "vp9/encoder/vp9_svc_layercontext.h"
     71 #include "vp9/encoder/vp9_temporal_filter.h"
     72 
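         // Segment ids used by the active map: inactive blocks are placed in
         // segment 7 (SEG_LVL_SKIP enabled and the loop filter forced to zero via
         // -MAX_LOOP_FILTER, see apply_active_map); active blocks remain in segment 0.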
     73 #define AM_SEGMENT_ID_INACTIVE 7
     74 #define AM_SEGMENT_ID_ACTIVE 0
     75 
     76 // Whether to use high precision mv for altref computation.
     77 #define ALTREF_HIGH_PRECISION_MV 1
     78 
     79 // Q threshold for high precision mv. Choose a very high value for now so that
     80 // HIGH_PRECISION is always chosen.
     81 #define HIGH_PRECISION_MV_QTHRESH 200
     82 
     83 #define FRAME_SIZE_FACTOR 128  // empirical params for context model threshold
     84 #define FRAME_RATE_FACTOR 8
     85 
     86 #ifdef OUTPUT_YUV_DENOISED
     87 FILE *yuv_denoised_file = NULL;
     88 #endif
     89 #ifdef OUTPUT_YUV_SKINMAP
     90 static FILE *yuv_skinmap_file = NULL;
     91 #endif
     92 #ifdef OUTPUT_YUV_REC
     93 FILE *yuv_rec_file;
     94 #endif
     95 #ifdef OUTPUT_YUV_SVC_SRC
     96 FILE *yuv_svc_src[3] = { NULL, NULL, NULL };
     97 #endif
     98 
     99 #if 0
    100 FILE *framepsnr;
    101 FILE *kf_list;
    102 FILE *keyfile;
    103 #endif
    104 
    105 #ifdef ENABLE_KF_DENOISE
    106 // Test condition for spatial denoise of source.
    107 static int is_spatial_denoise_enabled(VP9_COMP *cpi) {
    108   VP9_COMMON *const cm = &cpi->common;
    109   const VP9EncoderConfig *const oxcf = &cpi->oxcf;
    110 
    111   return (oxcf->pass != 1) && !is_lossless_requested(&cpi->oxcf) &&
    112          frame_is_intra_only(cm);
    113 }
    114 #endif
    115 
     116 // Compute the adaptive threshold used to decide whether to skip recoding.
    117 static int compute_context_model_thresh(const VP9_COMP *const cpi) {
    118   const VP9_COMMON *const cm = &cpi->common;
    119   const VP9EncoderConfig *const oxcf = &cpi->oxcf;
    120   const int frame_size = (cm->width * cm->height) >> 10;
    121   const int bitrate = (int)(oxcf->target_bandwidth >> 10);
    122   const int qindex_factor = cm->base_qindex + (MAXQ >> 1);
    123 
     124   // This equation makes the threshold adaptive to frame size. The coding
     125   // gain obtained by recoding comes mainly from frames with large content
     126   // change, so recoding is skipped when the difference between the previous
     127   // and current frame context probability models is below this threshold.
     128   // The frame-size term is the most critical part for adaptivity; the other
     129   // parameters are estimated from typical HD settings, e.g.
     130   // frame_size = 1920x1080, bitrate = 8000, qindex_factor < 50.
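           // For illustration only (values assumed, not taken from the source):
           // with frame_size = (1920 * 1080) >> 10 = 2025, bitrate = 8000 and
           // qindex_factor = 50, thresh = ((128 * 2025 - 8 * 8000) * 50) >> 9 = 19062.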
    131   const int thresh =
    132       ((FRAME_SIZE_FACTOR * frame_size - FRAME_RATE_FACTOR * bitrate) *
    133        qindex_factor) >>
    134       9;
    135 
    136   return thresh;
    137 }
    138 
     139 // Compute the total cost difference between the current and previous
     140 // frame context probability models.
    141 static int compute_context_model_diff(const VP9_COMMON *const cm) {
    142   const FRAME_CONTEXT *const pre_fc =
    143       &cm->frame_contexts[cm->frame_context_idx];
    144   const FRAME_CONTEXT *const cur_fc = cm->fc;
    145   const FRAME_COUNTS *counts = &cm->counts;
    146   vpx_prob pre_last_prob, cur_last_prob;
    147   int diff = 0;
    148   int i, j, k, l, m, n;
    149 
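           // For each syntax element, accumulate count * (pre_prob - cur_prob),
           // including the implicit last symbol whose probability is the complement
           // MAX_PROB - p. This is only a rough, linear proxy for the bit-cost change
           // between the two context models, not an exact rate difference.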
    150   // y_mode_prob
    151   for (i = 0; i < BLOCK_SIZE_GROUPS; ++i) {
    152     for (j = 0; j < INTRA_MODES - 1; ++j) {
    153       diff += (int)counts->y_mode[i][j] *
    154               (pre_fc->y_mode_prob[i][j] - cur_fc->y_mode_prob[i][j]);
    155     }
    156     pre_last_prob = MAX_PROB - pre_fc->y_mode_prob[i][INTRA_MODES - 2];
    157     cur_last_prob = MAX_PROB - cur_fc->y_mode_prob[i][INTRA_MODES - 2];
    158 
    159     diff += (int)counts->y_mode[i][INTRA_MODES - 1] *
    160             (pre_last_prob - cur_last_prob);
    161   }
    162 
    163   // uv_mode_prob
    164   for (i = 0; i < INTRA_MODES; ++i) {
    165     for (j = 0; j < INTRA_MODES - 1; ++j) {
    166       diff += (int)counts->uv_mode[i][j] *
    167               (pre_fc->uv_mode_prob[i][j] - cur_fc->uv_mode_prob[i][j]);
    168     }
    169     pre_last_prob = MAX_PROB - pre_fc->uv_mode_prob[i][INTRA_MODES - 2];
    170     cur_last_prob = MAX_PROB - cur_fc->uv_mode_prob[i][INTRA_MODES - 2];
    171 
    172     diff += (int)counts->uv_mode[i][INTRA_MODES - 1] *
    173             (pre_last_prob - cur_last_prob);
    174   }
    175 
    176   // partition_prob
    177   for (i = 0; i < PARTITION_CONTEXTS; ++i) {
    178     for (j = 0; j < PARTITION_TYPES - 1; ++j) {
    179       diff += (int)counts->partition[i][j] *
    180               (pre_fc->partition_prob[i][j] - cur_fc->partition_prob[i][j]);
    181     }
    182     pre_last_prob = MAX_PROB - pre_fc->partition_prob[i][PARTITION_TYPES - 2];
    183     cur_last_prob = MAX_PROB - cur_fc->partition_prob[i][PARTITION_TYPES - 2];
    184 
    185     diff += (int)counts->partition[i][PARTITION_TYPES - 1] *
    186             (pre_last_prob - cur_last_prob);
    187   }
    188 
    189   // coef_probs
    190   for (i = 0; i < TX_SIZES; ++i) {
    191     for (j = 0; j < PLANE_TYPES; ++j) {
    192       for (k = 0; k < REF_TYPES; ++k) {
    193         for (l = 0; l < COEF_BANDS; ++l) {
    194           for (m = 0; m < BAND_COEFF_CONTEXTS(l); ++m) {
    195             for (n = 0; n < UNCONSTRAINED_NODES; ++n) {
    196               diff += (int)counts->coef[i][j][k][l][m][n] *
    197                       (pre_fc->coef_probs[i][j][k][l][m][n] -
    198                        cur_fc->coef_probs[i][j][k][l][m][n]);
    199             }
    200 
    201             pre_last_prob =
    202                 MAX_PROB -
    203                 pre_fc->coef_probs[i][j][k][l][m][UNCONSTRAINED_NODES - 1];
    204             cur_last_prob =
    205                 MAX_PROB -
    206                 cur_fc->coef_probs[i][j][k][l][m][UNCONSTRAINED_NODES - 1];
    207 
    208             diff += (int)counts->coef[i][j][k][l][m][UNCONSTRAINED_NODES] *
    209                     (pre_last_prob - cur_last_prob);
    210           }
    211         }
    212       }
    213     }
    214   }
    215 
    216   // switchable_interp_prob
    217   for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) {
    218     for (j = 0; j < SWITCHABLE_FILTERS - 1; ++j) {
    219       diff += (int)counts->switchable_interp[i][j] *
    220               (pre_fc->switchable_interp_prob[i][j] -
    221                cur_fc->switchable_interp_prob[i][j]);
    222     }
    223     pre_last_prob =
    224         MAX_PROB - pre_fc->switchable_interp_prob[i][SWITCHABLE_FILTERS - 2];
    225     cur_last_prob =
    226         MAX_PROB - cur_fc->switchable_interp_prob[i][SWITCHABLE_FILTERS - 2];
    227 
    228     diff += (int)counts->switchable_interp[i][SWITCHABLE_FILTERS - 1] *
    229             (pre_last_prob - cur_last_prob);
    230   }
    231 
    232   // inter_mode_probs
    233   for (i = 0; i < INTER_MODE_CONTEXTS; ++i) {
    234     for (j = 0; j < INTER_MODES - 1; ++j) {
    235       diff += (int)counts->inter_mode[i][j] *
    236               (pre_fc->inter_mode_probs[i][j] - cur_fc->inter_mode_probs[i][j]);
    237     }
    238     pre_last_prob = MAX_PROB - pre_fc->inter_mode_probs[i][INTER_MODES - 2];
    239     cur_last_prob = MAX_PROB - cur_fc->inter_mode_probs[i][INTER_MODES - 2];
    240 
    241     diff += (int)counts->inter_mode[i][INTER_MODES - 1] *
    242             (pre_last_prob - cur_last_prob);
    243   }
    244 
    245   // intra_inter_prob
    246   for (i = 0; i < INTRA_INTER_CONTEXTS; ++i) {
    247     diff += (int)counts->intra_inter[i][0] *
    248             (pre_fc->intra_inter_prob[i] - cur_fc->intra_inter_prob[i]);
    249 
    250     pre_last_prob = MAX_PROB - pre_fc->intra_inter_prob[i];
    251     cur_last_prob = MAX_PROB - cur_fc->intra_inter_prob[i];
    252 
    253     diff += (int)counts->intra_inter[i][1] * (pre_last_prob - cur_last_prob);
    254   }
    255 
    256   // comp_inter_prob
    257   for (i = 0; i < COMP_INTER_CONTEXTS; ++i) {
    258     diff += (int)counts->comp_inter[i][0] *
    259             (pre_fc->comp_inter_prob[i] - cur_fc->comp_inter_prob[i]);
    260 
    261     pre_last_prob = MAX_PROB - pre_fc->comp_inter_prob[i];
    262     cur_last_prob = MAX_PROB - cur_fc->comp_inter_prob[i];
    263 
    264     diff += (int)counts->comp_inter[i][1] * (pre_last_prob - cur_last_prob);
    265   }
    266 
    267   // single_ref_prob
    268   for (i = 0; i < REF_CONTEXTS; ++i) {
    269     for (j = 0; j < 2; ++j) {
    270       diff += (int)counts->single_ref[i][j][0] *
    271               (pre_fc->single_ref_prob[i][j] - cur_fc->single_ref_prob[i][j]);
    272 
    273       pre_last_prob = MAX_PROB - pre_fc->single_ref_prob[i][j];
    274       cur_last_prob = MAX_PROB - cur_fc->single_ref_prob[i][j];
    275 
    276       diff +=
    277           (int)counts->single_ref[i][j][1] * (pre_last_prob - cur_last_prob);
    278     }
    279   }
    280 
    281   // comp_ref_prob
    282   for (i = 0; i < REF_CONTEXTS; ++i) {
    283     diff += (int)counts->comp_ref[i][0] *
    284             (pre_fc->comp_ref_prob[i] - cur_fc->comp_ref_prob[i]);
    285 
    286     pre_last_prob = MAX_PROB - pre_fc->comp_ref_prob[i];
    287     cur_last_prob = MAX_PROB - cur_fc->comp_ref_prob[i];
    288 
    289     diff += (int)counts->comp_ref[i][1] * (pre_last_prob - cur_last_prob);
    290   }
    291 
    292   // tx_probs
    293   for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
    294     // p32x32
    295     for (j = 0; j < TX_SIZES - 1; ++j) {
    296       diff += (int)counts->tx.p32x32[i][j] *
    297               (pre_fc->tx_probs.p32x32[i][j] - cur_fc->tx_probs.p32x32[i][j]);
    298     }
    299     pre_last_prob = MAX_PROB - pre_fc->tx_probs.p32x32[i][TX_SIZES - 2];
    300     cur_last_prob = MAX_PROB - cur_fc->tx_probs.p32x32[i][TX_SIZES - 2];
    301 
    302     diff += (int)counts->tx.p32x32[i][TX_SIZES - 1] *
    303             (pre_last_prob - cur_last_prob);
    304 
    305     // p16x16
    306     for (j = 0; j < TX_SIZES - 2; ++j) {
    307       diff += (int)counts->tx.p16x16[i][j] *
    308               (pre_fc->tx_probs.p16x16[i][j] - cur_fc->tx_probs.p16x16[i][j]);
    309     }
    310     pre_last_prob = MAX_PROB - pre_fc->tx_probs.p16x16[i][TX_SIZES - 3];
    311     cur_last_prob = MAX_PROB - cur_fc->tx_probs.p16x16[i][TX_SIZES - 3];
    312 
    313     diff += (int)counts->tx.p16x16[i][TX_SIZES - 2] *
    314             (pre_last_prob - cur_last_prob);
    315 
    316     // p8x8
    317     for (j = 0; j < TX_SIZES - 3; ++j) {
    318       diff += (int)counts->tx.p8x8[i][j] *
    319               (pre_fc->tx_probs.p8x8[i][j] - cur_fc->tx_probs.p8x8[i][j]);
    320     }
    321     pre_last_prob = MAX_PROB - pre_fc->tx_probs.p8x8[i][TX_SIZES - 4];
    322     cur_last_prob = MAX_PROB - cur_fc->tx_probs.p8x8[i][TX_SIZES - 4];
    323 
    324     diff +=
    325         (int)counts->tx.p8x8[i][TX_SIZES - 3] * (pre_last_prob - cur_last_prob);
    326   }
    327 
    328   // skip_probs
    329   for (i = 0; i < SKIP_CONTEXTS; ++i) {
    330     diff += (int)counts->skip[i][0] *
    331             (pre_fc->skip_probs[i] - cur_fc->skip_probs[i]);
    332 
    333     pre_last_prob = MAX_PROB - pre_fc->skip_probs[i];
    334     cur_last_prob = MAX_PROB - cur_fc->skip_probs[i];
    335 
    336     diff += (int)counts->skip[i][1] * (pre_last_prob - cur_last_prob);
    337   }
    338 
    339   // mv
    340   for (i = 0; i < MV_JOINTS - 1; ++i) {
    341     diff += (int)counts->mv.joints[i] *
    342             (pre_fc->nmvc.joints[i] - cur_fc->nmvc.joints[i]);
    343   }
    344   pre_last_prob = MAX_PROB - pre_fc->nmvc.joints[MV_JOINTS - 2];
    345   cur_last_prob = MAX_PROB - cur_fc->nmvc.joints[MV_JOINTS - 2];
    346 
    347   diff +=
    348       (int)counts->mv.joints[MV_JOINTS - 1] * (pre_last_prob - cur_last_prob);
    349 
    350   for (i = 0; i < 2; ++i) {
    351     const nmv_component_counts *nmv_count = &counts->mv.comps[i];
    352     const nmv_component *pre_nmv_prob = &pre_fc->nmvc.comps[i];
    353     const nmv_component *cur_nmv_prob = &cur_fc->nmvc.comps[i];
    354 
    355     // sign
    356     diff += (int)nmv_count->sign[0] * (pre_nmv_prob->sign - cur_nmv_prob->sign);
    357 
    358     pre_last_prob = MAX_PROB - pre_nmv_prob->sign;
    359     cur_last_prob = MAX_PROB - cur_nmv_prob->sign;
    360 
    361     diff += (int)nmv_count->sign[1] * (pre_last_prob - cur_last_prob);
    362 
    363     // classes
    364     for (j = 0; j < MV_CLASSES - 1; ++j) {
    365       diff += (int)nmv_count->classes[j] *
    366               (pre_nmv_prob->classes[j] - cur_nmv_prob->classes[j]);
    367     }
    368     pre_last_prob = MAX_PROB - pre_nmv_prob->classes[MV_CLASSES - 2];
    369     cur_last_prob = MAX_PROB - cur_nmv_prob->classes[MV_CLASSES - 2];
    370 
    371     diff += (int)nmv_count->classes[MV_CLASSES - 1] *
    372             (pre_last_prob - cur_last_prob);
    373 
    374     // class0
    375     for (j = 0; j < CLASS0_SIZE - 1; ++j) {
    376       diff += (int)nmv_count->class0[j] *
    377               (pre_nmv_prob->class0[j] - cur_nmv_prob->class0[j]);
    378     }
    379     pre_last_prob = MAX_PROB - pre_nmv_prob->class0[CLASS0_SIZE - 2];
    380     cur_last_prob = MAX_PROB - cur_nmv_prob->class0[CLASS0_SIZE - 2];
    381 
    382     diff += (int)nmv_count->class0[CLASS0_SIZE - 1] *
    383             (pre_last_prob - cur_last_prob);
    384 
    385     // bits
    386     for (j = 0; j < MV_OFFSET_BITS; ++j) {
    387       diff += (int)nmv_count->bits[j][0] *
    388               (pre_nmv_prob->bits[j] - cur_nmv_prob->bits[j]);
    389 
    390       pre_last_prob = MAX_PROB - pre_nmv_prob->bits[j];
    391       cur_last_prob = MAX_PROB - cur_nmv_prob->bits[j];
    392 
    393       diff += (int)nmv_count->bits[j][1] * (pre_last_prob - cur_last_prob);
    394     }
    395 
    396     // class0_fp
    397     for (j = 0; j < CLASS0_SIZE; ++j) {
    398       for (k = 0; k < MV_FP_SIZE - 1; ++k) {
    399         diff += (int)nmv_count->class0_fp[j][k] *
    400                 (pre_nmv_prob->class0_fp[j][k] - cur_nmv_prob->class0_fp[j][k]);
    401       }
    402       pre_last_prob = MAX_PROB - pre_nmv_prob->class0_fp[j][MV_FP_SIZE - 2];
    403       cur_last_prob = MAX_PROB - cur_nmv_prob->class0_fp[j][MV_FP_SIZE - 2];
    404 
    405       diff += (int)nmv_count->class0_fp[j][MV_FP_SIZE - 1] *
    406               (pre_last_prob - cur_last_prob);
    407     }
    408 
    409     // fp
    410     for (j = 0; j < MV_FP_SIZE - 1; ++j) {
    411       diff +=
    412           (int)nmv_count->fp[j] * (pre_nmv_prob->fp[j] - cur_nmv_prob->fp[j]);
    413     }
    414     pre_last_prob = MAX_PROB - pre_nmv_prob->fp[MV_FP_SIZE - 2];
    415     cur_last_prob = MAX_PROB - cur_nmv_prob->fp[MV_FP_SIZE - 2];
    416 
    417     diff +=
    418         (int)nmv_count->fp[MV_FP_SIZE - 1] * (pre_last_prob - cur_last_prob);
    419 
    420     // class0_hp
    421     diff += (int)nmv_count->class0_hp[0] *
    422             (pre_nmv_prob->class0_hp - cur_nmv_prob->class0_hp);
    423 
    424     pre_last_prob = MAX_PROB - pre_nmv_prob->class0_hp;
    425     cur_last_prob = MAX_PROB - cur_nmv_prob->class0_hp;
    426 
    427     diff += (int)nmv_count->class0_hp[1] * (pre_last_prob - cur_last_prob);
    428 
    429     // hp
    430     diff += (int)nmv_count->hp[0] * (pre_nmv_prob->hp - cur_nmv_prob->hp);
    431 
    432     pre_last_prob = MAX_PROB - pre_nmv_prob->hp;
    433     cur_last_prob = MAX_PROB - cur_nmv_prob->hp;
    434 
    435     diff += (int)nmv_count->hp[1] * (pre_last_prob - cur_last_prob);
    436   }
    437 
    438   return -diff;
    439 }
    440 
    441 // Test for whether to calculate metrics for the frame.
    442 static int is_psnr_calc_enabled(VP9_COMP *cpi) {
    443   VP9_COMMON *const cm = &cpi->common;
    444   const VP9EncoderConfig *const oxcf = &cpi->oxcf;
    445 
    446   return cpi->b_calculate_psnr && (oxcf->pass != 1) && cm->show_frame;
    447 }
    448 
    449 /* clang-format off */
    450 const Vp9LevelSpec vp9_level_defs[VP9_LEVELS] = {
     451   // level, sample rate, picture size, breadth, bitrate, cpb size, compression ratio, max tile cols, min altref distance, max ref buffers
    452   { LEVEL_1,   829440,      36864,    512,   200,    400,    2, 1,  4,  8 },
    453   { LEVEL_1_1, 2764800,     73728,    768,   800,    1000,   2, 1,  4,  8 },
    454   { LEVEL_2,   4608000,     122880,   960,   1800,   1500,   2, 1,  4,  8 },
    455   { LEVEL_2_1, 9216000,     245760,   1344,  3600,   2800,   2, 2,  4,  8 },
    456   { LEVEL_3,   20736000,    552960,   2048,  7200,   6000,   2, 4,  4,  8 },
    457   { LEVEL_3_1, 36864000,    983040,   2752,  12000,  10000,  2, 4,  4,  8 },
    458   { LEVEL_4,   83558400,    2228224,  4160,  18000,  16000,  4, 4,  4,  8 },
    459   { LEVEL_4_1, 160432128,   2228224,  4160,  30000,  18000,  4, 4,  5,  6 },
    460   { LEVEL_5,   311951360,   8912896,  8384,  60000,  36000,  6, 8,  6,  4 },
    461   { LEVEL_5_1, 588251136,   8912896,  8384,  120000, 46000,  8, 8,  10, 4 },
    462   // TODO(huisu): update max_cpb_size for level 5_2 ~ 6_2 when
    463   // they are finalized (currently tentative).
    464   { LEVEL_5_2, 1176502272,  8912896,  8384,  180000, 90000,  8, 8,  10, 4 },
    465   { LEVEL_6,   1176502272,  35651584, 16832, 180000, 90000,  8, 16, 10, 4 },
    466   { LEVEL_6_1, 2353004544u, 35651584, 16832, 240000, 180000, 8, 16, 10, 4 },
    467   { LEVEL_6_2, 4706009088u, 35651584, 16832, 480000, 360000, 8, 16, 10, 4 },
    468 };
    469 /* clang-format on */
    470 
    471 static const char *level_fail_messages[TARGET_LEVEL_FAIL_IDS] = {
    472   "The average bit-rate is too high.",
    473   "The picture size is too large.",
    474   "The picture width/height is too large.",
    475   "The luma sample rate is too large.",
    476   "The CPB size is too large.",
     477   "The compression ratio is too small.",
    478   "Too many column tiles are used.",
    479   "The alt-ref distance is too small.",
    480   "Too many reference buffers are used."
    481 };
    482 
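         // Map a VPX_SCALING mode to a scaling ratio hr/hs (ratio numerator and
         // denominator), e.g. FOURFIVE scales by 4/5 and ONETWO by 1/2.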
    483 static INLINE void Scale2Ratio(VPX_SCALING mode, int *hr, int *hs) {
    484   switch (mode) {
    485     case NORMAL:
    486       *hr = 1;
    487       *hs = 1;
    488       break;
    489     case FOURFIVE:
    490       *hr = 4;
    491       *hs = 5;
    492       break;
    493     case THREEFIVE:
    494       *hr = 3;
    495       *hs = 5;
    496       break;
    497     default:
    498       assert(mode == ONETWO);
    499       *hr = 1;
    500       *hs = 2;
    501       break;
    502   }
    503 }
    504 
     505 // Mark all inactive blocks as active. Other segmentation features may be in
     506 // use, so memset cannot be used; instead, only inactive blocks are reset.
    507 static void suppress_active_map(VP9_COMP *cpi) {
    508   unsigned char *const seg_map = cpi->segmentation_map;
    509 
    510   if (cpi->active_map.enabled || cpi->active_map.update) {
    511     const int rows = cpi->common.mi_rows;
    512     const int cols = cpi->common.mi_cols;
    513     int i;
    514 
    515     for (i = 0; i < rows * cols; ++i)
    516       if (seg_map[i] == AM_SEGMENT_ID_INACTIVE)
    517         seg_map[i] = AM_SEGMENT_ID_ACTIVE;
    518   }
    519 }
    520 
    521 static void apply_active_map(VP9_COMP *cpi) {
    522   struct segmentation *const seg = &cpi->common.seg;
    523   unsigned char *const seg_map = cpi->segmentation_map;
    524   const unsigned char *const active_map = cpi->active_map.map;
    525   int i;
    526 
    527   assert(AM_SEGMENT_ID_ACTIVE == CR_SEGMENT_ID_BASE);
    528 
    529   if (frame_is_intra_only(&cpi->common)) {
    530     cpi->active_map.enabled = 0;
    531     cpi->active_map.update = 1;
    532   }
    533 
    534   if (cpi->active_map.update) {
    535     if (cpi->active_map.enabled) {
    536       for (i = 0; i < cpi->common.mi_rows * cpi->common.mi_cols; ++i)
    537         if (seg_map[i] == AM_SEGMENT_ID_ACTIVE) seg_map[i] = active_map[i];
    538       vp9_enable_segmentation(seg);
    539       vp9_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
    540       vp9_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
    541       // Setting the data to -MAX_LOOP_FILTER will result in the computed loop
    542       // filter level being zero regardless of the value of seg->abs_delta.
    543       vp9_set_segdata(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF,
    544                       -MAX_LOOP_FILTER);
    545     } else {
    546       vp9_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
    547       vp9_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
    548       if (seg->enabled) {
    549         seg->update_data = 1;
    550         seg->update_map = 1;
    551       }
    552     }
    553     cpi->active_map.update = 0;
    554   }
    555 }
    556 
    557 static void apply_roi_map(VP9_COMP *cpi) {
    558   VP9_COMMON *cm = &cpi->common;
    559   struct segmentation *const seg = &cm->seg;
    560   vpx_roi_map_t *roi = &cpi->roi;
    561   const int *delta_q = roi->delta_q;
    562   const int *delta_lf = roi->delta_lf;
    563   const int *skip = roi->skip;
    564   int ref_frame[8];
    565   int internal_delta_q[MAX_SEGMENTS];
    566   int i;
    567   static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
    568                                     VP9_ALT_FLAG };
    569 
     570   // TODO(jianj): Investigate why ROI is not working in speed < 5 or in
     571   // non-realtime mode.
    572   if (cpi->oxcf.mode != REALTIME || cpi->oxcf.speed < 5) return;
    573   if (!roi->enabled) return;
    574 
    575   memcpy(&ref_frame, roi->ref_frame, sizeof(ref_frame));
    576 
    577   vp9_enable_segmentation(seg);
    578   vp9_clearall_segfeatures(seg);
    579   // Select delta coding method;
    580   seg->abs_delta = SEGMENT_DELTADATA;
    581 
    582   memcpy(cpi->segmentation_map, roi->roi_map, (cm->mi_rows * cm->mi_cols));
    583 
    584   for (i = 0; i < MAX_SEGMENTS; ++i) {
    585     // Translate the external delta q values to internal values.
    586     internal_delta_q[i] = vp9_quantizer_to_qindex(abs(delta_q[i]));
    587     if (delta_q[i] < 0) internal_delta_q[i] = -internal_delta_q[i];
    588     vp9_disable_segfeature(seg, i, SEG_LVL_ALT_Q);
    589     vp9_disable_segfeature(seg, i, SEG_LVL_ALT_LF);
    590     if (internal_delta_q[i] != 0) {
    591       vp9_enable_segfeature(seg, i, SEG_LVL_ALT_Q);
    592       vp9_set_segdata(seg, i, SEG_LVL_ALT_Q, internal_delta_q[i]);
    593     }
    594     if (delta_lf[i] != 0) {
    595       vp9_enable_segfeature(seg, i, SEG_LVL_ALT_LF);
    596       vp9_set_segdata(seg, i, SEG_LVL_ALT_LF, delta_lf[i]);
    597     }
    598     if (skip[i] != 0) {
    599       vp9_enable_segfeature(seg, i, SEG_LVL_SKIP);
    600       vp9_set_segdata(seg, i, SEG_LVL_SKIP, skip[i]);
    601     }
    602     if (ref_frame[i] >= 0) {
    603       int valid_ref = 1;
    604       // ALTREF is not used as reference for nonrd_pickmode with 0 lag.
    605       if (ref_frame[i] == ALTREF_FRAME && cpi->sf.use_nonrd_pick_mode)
    606         valid_ref = 0;
    607       // If GOLDEN is selected, make sure it's set as reference.
    608       if (ref_frame[i] == GOLDEN_FRAME &&
    609           !(cpi->ref_frame_flags & flag_list[ref_frame[i]])) {
    610         valid_ref = 0;
    611       }
     612       // GOLDEN was updated in the previously encoded frame, so GOLDEN and
     613       // LAST are the same reference.
    614       if (ref_frame[i] == GOLDEN_FRAME && cpi->rc.frames_since_golden == 0)
    615         ref_frame[i] = LAST_FRAME;
    616       if (valid_ref) {
    617         vp9_enable_segfeature(seg, i, SEG_LVL_REF_FRAME);
    618         vp9_set_segdata(seg, i, SEG_LVL_REF_FRAME, ref_frame[i]);
    619       }
    620     }
    621   }
    622   roi->enabled = 1;
    623 }
    624 
    625 static void init_level_info(Vp9LevelInfo *level_info) {
    626   Vp9LevelStats *const level_stats = &level_info->level_stats;
    627   Vp9LevelSpec *const level_spec = &level_info->level_spec;
    628 
    629   memset(level_stats, 0, sizeof(*level_stats));
    630   memset(level_spec, 0, sizeof(*level_spec));
    631   level_spec->level = LEVEL_UNKNOWN;
    632   level_spec->min_altref_distance = INT_MAX;
    633 }
    634 
    635 static int check_seg_range(int seg_data[8], int range) {
    636   return !(abs(seg_data[0]) > range || abs(seg_data[1]) > range ||
    637            abs(seg_data[2]) > range || abs(seg_data[3]) > range ||
    638            abs(seg_data[4]) > range || abs(seg_data[5]) > range ||
    639            abs(seg_data[6]) > range || abs(seg_data[7]) > range);
    640 }
    641 
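         // Return the lowest level whose limits are all satisfied by level_spec
         // (allowing a small grace margin, SAMPLE_RATE_GRACE_P, on the luma sample
         // rate), or LEVEL_UNKNOWN if no defined level fits.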
    642 VP9_LEVEL vp9_get_level(const Vp9LevelSpec *const level_spec) {
    643   int i;
    644   const Vp9LevelSpec *this_level;
    645 
    646   vpx_clear_system_state();
    647 
    648   for (i = 0; i < VP9_LEVELS; ++i) {
    649     this_level = &vp9_level_defs[i];
    650     if ((double)level_spec->max_luma_sample_rate >
    651             (double)this_level->max_luma_sample_rate *
    652                 (1 + SAMPLE_RATE_GRACE_P) ||
    653         level_spec->max_luma_picture_size > this_level->max_luma_picture_size ||
    654         level_spec->max_luma_picture_breadth >
    655             this_level->max_luma_picture_breadth ||
    656         level_spec->average_bitrate > this_level->average_bitrate ||
    657         level_spec->max_cpb_size > this_level->max_cpb_size ||
    658         level_spec->compression_ratio < this_level->compression_ratio ||
    659         level_spec->max_col_tiles > this_level->max_col_tiles ||
    660         level_spec->min_altref_distance < this_level->min_altref_distance ||
    661         level_spec->max_ref_frame_buffers > this_level->max_ref_frame_buffers)
    662       continue;
    663     break;
    664   }
    665   return (i == VP9_LEVELS) ? LEVEL_UNKNOWN : vp9_level_defs[i].level;
    666 }
    667 
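         // Illustrative caller-side sketch (variable names and values are assumed,
         // not part of this file):
         //   int dq[8] = { -10, 0, 0, 0, 0, 0, 0, 0 };  // boost segment 0 quality
         //   int dlf[8] = { 0 }, skip[8] = { 0 };
         //   int ref[8] = { -1, -1, -1, -1, -1, -1, -1, -1 };
         //   vp9_set_roi_map(cpi, roi_map, cpi->common.mi_rows, cpi->common.mi_cols,
         //                   dq, dlf, skip, ref);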
    668 int vp9_set_roi_map(VP9_COMP *cpi, unsigned char *map, unsigned int rows,
    669                     unsigned int cols, int delta_q[8], int delta_lf[8],
    670                     int skip[8], int ref_frame[8]) {
    671   VP9_COMMON *cm = &cpi->common;
    672   vpx_roi_map_t *roi = &cpi->roi;
    673   const int range = 63;
    674   const int ref_frame_range = 3;  // Alt-ref
    675   const int skip_range = 1;
    676   const int frame_rows = cpi->common.mi_rows;
    677   const int frame_cols = cpi->common.mi_cols;
    678 
     679   // Check that the number of rows and columns matches the current frame.
    680   if (frame_rows != (int)rows || frame_cols != (int)cols) {
    681     return -1;
    682   }
    683 
    684   if (!check_seg_range(delta_q, range) || !check_seg_range(delta_lf, range) ||
    685       !check_seg_range(ref_frame, ref_frame_range) ||
    686       !check_seg_range(skip, skip_range))
    687     return -1;
    688 
     689   // Disable segmentation if no map is given, or if no deltas or ref-frame
     690   // overrides are specified.
    690   if (!map ||
    691       (!(delta_q[0] | delta_q[1] | delta_q[2] | delta_q[3] | delta_q[4] |
    692          delta_q[5] | delta_q[6] | delta_q[7] | delta_lf[0] | delta_lf[1] |
    693          delta_lf[2] | delta_lf[3] | delta_lf[4] | delta_lf[5] | delta_lf[6] |
    694          delta_lf[7] | skip[0] | skip[1] | skip[2] | skip[3] | skip[4] |
    695          skip[5] | skip[6] | skip[7]) &&
    696        (ref_frame[0] == -1 && ref_frame[1] == -1 && ref_frame[2] == -1 &&
    697         ref_frame[3] == -1 && ref_frame[4] == -1 && ref_frame[5] == -1 &&
    698         ref_frame[6] == -1 && ref_frame[7] == -1))) {
    699     vp9_disable_segmentation(&cm->seg);
    700     cpi->roi.enabled = 0;
    701     return 0;
    702   }
    703 
    704   if (roi->roi_map) {
    705     vpx_free(roi->roi_map);
    706     roi->roi_map = NULL;
    707   }
    708   CHECK_MEM_ERROR(cm, roi->roi_map, vpx_malloc(rows * cols));
    709 
     710   // Copy to the ROI structure in the compressor.
    711   memcpy(roi->roi_map, map, rows * cols);
    712   memcpy(&roi->delta_q, delta_q, MAX_SEGMENTS * sizeof(delta_q[0]));
    713   memcpy(&roi->delta_lf, delta_lf, MAX_SEGMENTS * sizeof(delta_lf[0]));
    714   memcpy(&roi->skip, skip, MAX_SEGMENTS * sizeof(skip[0]));
    715   memcpy(&roi->ref_frame, ref_frame, MAX_SEGMENTS * sizeof(ref_frame[0]));
    716   roi->enabled = 1;
    717   roi->rows = rows;
    718   roi->cols = cols;
    719 
    720   return 0;
    721 }
    722 
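         // Import an MB-resolution (16x16) active map and expand it onto the 8x8 MI
         // grid used internally; a NULL map disables the active-map feature.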
    723 int vp9_set_active_map(VP9_COMP *cpi, unsigned char *new_map_16x16, int rows,
    724                        int cols) {
    725   if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols) {
    726     unsigned char *const active_map_8x8 = cpi->active_map.map;
    727     const int mi_rows = cpi->common.mi_rows;
    728     const int mi_cols = cpi->common.mi_cols;
    729     cpi->active_map.update = 1;
    730     if (new_map_16x16) {
    731       int r, c;
    732       for (r = 0; r < mi_rows; ++r) {
    733         for (c = 0; c < mi_cols; ++c) {
    734           active_map_8x8[r * mi_cols + c] =
    735               new_map_16x16[(r >> 1) * cols + (c >> 1)]
    736                   ? AM_SEGMENT_ID_ACTIVE
    737                   : AM_SEGMENT_ID_INACTIVE;
    738         }
    739       }
    740       cpi->active_map.enabled = 1;
    741     } else {
    742       cpi->active_map.enabled = 0;
    743     }
    744     return 0;
    745   } else {
    746     return -1;
    747   }
    748 }
    749 
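         // Export the active map back at MB (16x16) resolution: a 16x16 block is
         // reported active if any of its 8x8 MI units is not AM_SEGMENT_ID_INACTIVE
         // (so cyclic-refresh segments still count as active).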
    750 int vp9_get_active_map(VP9_COMP *cpi, unsigned char *new_map_16x16, int rows,
    751                        int cols) {
    752   if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols &&
    753       new_map_16x16) {
    754     unsigned char *const seg_map_8x8 = cpi->segmentation_map;
    755     const int mi_rows = cpi->common.mi_rows;
    756     const int mi_cols = cpi->common.mi_cols;
    757     memset(new_map_16x16, !cpi->active_map.enabled, rows * cols);
    758     if (cpi->active_map.enabled) {
    759       int r, c;
    760       for (r = 0; r < mi_rows; ++r) {
    761         for (c = 0; c < mi_cols; ++c) {
    762           // Cyclic refresh segments are considered active despite not having
    763           // AM_SEGMENT_ID_ACTIVE
    764           new_map_16x16[(r >> 1) * cols + (c >> 1)] |=
    765               seg_map_8x8[r * mi_cols + c] != AM_SEGMENT_ID_INACTIVE;
    766         }
    767       }
    768     }
    769     return 0;
    770   } else {
    771     return -1;
    772   }
    773 }
    774 
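         // Select between the high-precision (1/8-pel) and standard (1/4-pel) MV
         // cost tables to match the frame's allow_high_precision_mv setting.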
    775 void vp9_set_high_precision_mv(VP9_COMP *cpi, int allow_high_precision_mv) {
    776   MACROBLOCK *const mb = &cpi->td.mb;
    777   cpi->common.allow_high_precision_mv = allow_high_precision_mv;
    778   if (cpi->common.allow_high_precision_mv) {
    779     mb->mvcost = mb->nmvcost_hp;
    780     mb->mvsadcost = mb->nmvsadcost_hp;
    781   } else {
    782     mb->mvcost = mb->nmvcost;
    783     mb->mvsadcost = mb->nmvsadcost;
    784   }
    785 }
    786 
    787 static void setup_frame(VP9_COMP *cpi) {
    788   VP9_COMMON *const cm = &cpi->common;
    789   // Set up entropy context depending on frame type. The decoder mandates
    790   // the use of the default context, index 0, for keyframes and inter
    791   // frames where the error_resilient_mode or intra_only flag is set. For
    792   // other inter-frames the encoder currently uses only two contexts;
    793   // context 1 for ALTREF frames and context 0 for the others.
    794   if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
    795     vp9_setup_past_independence(cm);
    796   } else {
    797     if (!cpi->use_svc) cm->frame_context_idx = cpi->refresh_alt_ref_frame;
    798   }
    799 
     800   // TODO(jingning): Overwrite frame_context_idx in the multi-layer ARF case.
     801   // Needs further investigation on whether this could also be applied to the
     802   // single-layer ARF case.
    803   if (cpi->multi_layer_arf && !cpi->use_svc) {
    804     GF_GROUP *const gf_group = &cpi->twopass.gf_group;
    805     cm->frame_context_idx = clamp(gf_group->layer_depth[gf_group->index] - 1, 0,
    806                                   FRAME_CONTEXTS - 1);
    807   }
    808 
    809   if (cm->frame_type == KEY_FRAME) {
    810     cpi->refresh_golden_frame = 1;
    811     cpi->refresh_alt_ref_frame = 1;
    812     vp9_zero(cpi->interp_filter_selected);
    813   } else {
    814     *cm->fc = cm->frame_contexts[cm->frame_context_idx];
    815     vp9_zero(cpi->interp_filter_selected[0]);
    816   }
    817 }
    818 
    819 static void vp9_enc_setup_mi(VP9_COMMON *cm) {
    820   int i;
    821   cm->mi = cm->mip + cm->mi_stride + 1;
    822   memset(cm->mip, 0, cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mip));
    823   cm->prev_mi = cm->prev_mip + cm->mi_stride + 1;
    824   // Clear top border row
    825   memset(cm->prev_mip, 0, sizeof(*cm->prev_mip) * cm->mi_stride);
    826   // Clear left border column
    827   for (i = 1; i < cm->mi_rows + 1; ++i)
    828     memset(&cm->prev_mip[i * cm->mi_stride], 0, sizeof(*cm->prev_mip));
    829 
    830   cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1;
    831   cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mi_stride + 1;
    832 
    833   memset(cm->mi_grid_base, 0,
    834          cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base));
    835 }
    836 
    837 static int vp9_enc_alloc_mi(VP9_COMMON *cm, int mi_size) {
    838   cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip));
    839   if (!cm->mip) return 1;
    840   cm->prev_mip = vpx_calloc(mi_size, sizeof(*cm->prev_mip));
    841   if (!cm->prev_mip) return 1;
    842   cm->mi_alloc_size = mi_size;
    843 
    844   cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
    845   if (!cm->mi_grid_base) return 1;
    846   cm->prev_mi_grid_base =
    847       (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
    848   if (!cm->prev_mi_grid_base) return 1;
    849 
    850   return 0;
    851 }
    852 
    853 static void vp9_enc_free_mi(VP9_COMMON *cm) {
    854   vpx_free(cm->mip);
    855   cm->mip = NULL;
    856   vpx_free(cm->prev_mip);
    857   cm->prev_mip = NULL;
    858   vpx_free(cm->mi_grid_base);
    859   cm->mi_grid_base = NULL;
    860   vpx_free(cm->prev_mi_grid_base);
    861   cm->prev_mi_grid_base = NULL;
    862   cm->mi_alloc_size = 0;
    863 }
    864 
    865 static void vp9_swap_mi_and_prev_mi(VP9_COMMON *cm) {
    866   // Current mip will be the prev_mip for the next frame.
    867   MODE_INFO **temp_base = cm->prev_mi_grid_base;
    868   MODE_INFO *temp = cm->prev_mip;
    869 
     870   // Skip updating prev_mi in show_existing_frame mode.
    871   if (cm->show_existing_frame) return;
    872 
    873   cm->prev_mip = cm->mip;
    874   cm->mip = temp;
    875 
    876   // Update the upper left visible macroblock ptrs.
    877   cm->mi = cm->mip + cm->mi_stride + 1;
    878   cm->prev_mi = cm->prev_mip + cm->mi_stride + 1;
    879 
    880   cm->prev_mi_grid_base = cm->mi_grid_base;
    881   cm->mi_grid_base = temp_base;
    882   cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1;
    883   cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mi_stride + 1;
    884 }
    885 
    886 void vp9_initialize_enc(void) {
    887   static volatile int init_done = 0;
    888 
    889   if (!init_done) {
    890     vp9_rtcd();
    891     vpx_dsp_rtcd();
    892     vpx_scale_rtcd();
    893     vp9_init_intra_predictors();
    894     vp9_init_me_luts();
    895     vp9_rc_init_minq_luts();
    896     vp9_entropy_mv_init();
    897 #if !CONFIG_REALTIME_ONLY
    898     vp9_temporal_filter_init();
    899 #endif
    900     init_done = 1;
    901   }
    902 }
    903 
    904 static void dealloc_compressor_data(VP9_COMP *cpi) {
    905   VP9_COMMON *const cm = &cpi->common;
    906   int i;
    907 
    908   vpx_free(cpi->mbmi_ext_base);
    909   cpi->mbmi_ext_base = NULL;
    910 
    911   vpx_free(cpi->tile_data);
    912   cpi->tile_data = NULL;
    913 
    914   vpx_free(cpi->segmentation_map);
    915   cpi->segmentation_map = NULL;
    916   vpx_free(cpi->coding_context.last_frame_seg_map_copy);
    917   cpi->coding_context.last_frame_seg_map_copy = NULL;
    918 
    919   vpx_free(cpi->nmvcosts[0]);
    920   vpx_free(cpi->nmvcosts[1]);
    921   cpi->nmvcosts[0] = NULL;
    922   cpi->nmvcosts[1] = NULL;
    923 
    924   vpx_free(cpi->nmvcosts_hp[0]);
    925   vpx_free(cpi->nmvcosts_hp[1]);
    926   cpi->nmvcosts_hp[0] = NULL;
    927   cpi->nmvcosts_hp[1] = NULL;
    928 
    929   vpx_free(cpi->nmvsadcosts[0]);
    930   vpx_free(cpi->nmvsadcosts[1]);
    931   cpi->nmvsadcosts[0] = NULL;
    932   cpi->nmvsadcosts[1] = NULL;
    933 
    934   vpx_free(cpi->nmvsadcosts_hp[0]);
    935   vpx_free(cpi->nmvsadcosts_hp[1]);
    936   cpi->nmvsadcosts_hp[0] = NULL;
    937   cpi->nmvsadcosts_hp[1] = NULL;
    938 
    939   vpx_free(cpi->skin_map);
    940   cpi->skin_map = NULL;
    941 
    942   vpx_free(cpi->prev_partition);
    943   cpi->prev_partition = NULL;
    944 
    945   vpx_free(cpi->svc.prev_partition_svc);
    946   cpi->svc.prev_partition_svc = NULL;
    947 
    948   vpx_free(cpi->prev_segment_id);
    949   cpi->prev_segment_id = NULL;
    950 
    951   vpx_free(cpi->prev_variance_low);
    952   cpi->prev_variance_low = NULL;
    953 
    954   vpx_free(cpi->copied_frame_cnt);
    955   cpi->copied_frame_cnt = NULL;
    956 
    957   vpx_free(cpi->content_state_sb_fd);
    958   cpi->content_state_sb_fd = NULL;
    959 
    960   vpx_free(cpi->count_arf_frame_usage);
    961   cpi->count_arf_frame_usage = NULL;
    962   vpx_free(cpi->count_lastgolden_frame_usage);
    963   cpi->count_lastgolden_frame_usage = NULL;
    964 
    965   vp9_cyclic_refresh_free(cpi->cyclic_refresh);
    966   cpi->cyclic_refresh = NULL;
    967 
    968   vpx_free(cpi->active_map.map);
    969   cpi->active_map.map = NULL;
    970 
    971   vpx_free(cpi->roi.roi_map);
    972   cpi->roi.roi_map = NULL;
    973 
    974   vpx_free(cpi->consec_zero_mv);
    975   cpi->consec_zero_mv = NULL;
    976 
    977   vp9_free_ref_frame_buffers(cm->buffer_pool);
    978 #if CONFIG_VP9_POSTPROC
    979   vp9_free_postproc_buffers(cm);
    980 #endif
    981   vp9_free_context_buffers(cm);
    982 
    983   vpx_free_frame_buffer(&cpi->last_frame_uf);
    984   vpx_free_frame_buffer(&cpi->scaled_source);
    985   vpx_free_frame_buffer(&cpi->scaled_last_source);
    986   vpx_free_frame_buffer(&cpi->alt_ref_buffer);
    987 #ifdef ENABLE_KF_DENOISE
    988   vpx_free_frame_buffer(&cpi->raw_unscaled_source);
    989   vpx_free_frame_buffer(&cpi->raw_scaled_source);
    990 #endif
    991 
    992   vp9_lookahead_destroy(cpi->lookahead);
    993 
    994   vpx_free(cpi->tile_tok[0][0]);
    995   cpi->tile_tok[0][0] = 0;
    996 
    997   vpx_free(cpi->tplist[0][0]);
    998   cpi->tplist[0][0] = NULL;
    999 
   1000   vp9_free_pc_tree(&cpi->td);
   1001 
   1002   for (i = 0; i < cpi->svc.number_spatial_layers; ++i) {
   1003     LAYER_CONTEXT *const lc = &cpi->svc.layer_context[i];
   1004     vpx_free(lc->rc_twopass_stats_in.buf);
   1005     lc->rc_twopass_stats_in.buf = NULL;
   1006     lc->rc_twopass_stats_in.sz = 0;
   1007   }
   1008 
   1009   if (cpi->source_diff_var != NULL) {
   1010     vpx_free(cpi->source_diff_var);
   1011     cpi->source_diff_var = NULL;
   1012   }
   1013 
   1014   for (i = 0; i < MAX_LAG_BUFFERS; ++i) {
   1015     vpx_free_frame_buffer(&cpi->svc.scaled_frames[i]);
   1016   }
   1017   memset(&cpi->svc.scaled_frames[0], 0,
   1018          MAX_LAG_BUFFERS * sizeof(cpi->svc.scaled_frames[0]));
   1019 
   1020   vpx_free_frame_buffer(&cpi->svc.scaled_temp);
   1021   memset(&cpi->svc.scaled_temp, 0, sizeof(cpi->svc.scaled_temp));
   1022 
   1023   vpx_free_frame_buffer(&cpi->svc.empty_frame.img);
   1024   memset(&cpi->svc.empty_frame, 0, sizeof(cpi->svc.empty_frame));
   1025 
   1026   vp9_free_svc_cyclic_refresh(cpi);
   1027 }
   1028 
   1029 static void save_coding_context(VP9_COMP *cpi) {
   1030   CODING_CONTEXT *const cc = &cpi->coding_context;
   1031   VP9_COMMON *cm = &cpi->common;
   1032 
    1033   // Stores a snapshot of key state variables which can subsequently be
    1034   // restored with a call to restore_coding_context. These functions are
    1035   // intended for use in a re-code loop where the quantizer value is
    1036   // adjusted between loop iterations.
   1037   vp9_copy(cc->nmvjointcost, cpi->td.mb.nmvjointcost);
   1038 
   1039   memcpy(cc->nmvcosts[0], cpi->nmvcosts[0],
   1040          MV_VALS * sizeof(*cpi->nmvcosts[0]));
   1041   memcpy(cc->nmvcosts[1], cpi->nmvcosts[1],
   1042          MV_VALS * sizeof(*cpi->nmvcosts[1]));
   1043   memcpy(cc->nmvcosts_hp[0], cpi->nmvcosts_hp[0],
   1044          MV_VALS * sizeof(*cpi->nmvcosts_hp[0]));
   1045   memcpy(cc->nmvcosts_hp[1], cpi->nmvcosts_hp[1],
   1046          MV_VALS * sizeof(*cpi->nmvcosts_hp[1]));
   1047 
   1048   vp9_copy(cc->segment_pred_probs, cm->seg.pred_probs);
   1049 
   1050   memcpy(cpi->coding_context.last_frame_seg_map_copy, cm->last_frame_seg_map,
   1051          (cm->mi_rows * cm->mi_cols));
   1052 
   1053   vp9_copy(cc->last_ref_lf_deltas, cm->lf.last_ref_deltas);
   1054   vp9_copy(cc->last_mode_lf_deltas, cm->lf.last_mode_deltas);
   1055 
   1056   cc->fc = *cm->fc;
   1057 }
   1058 
   1059 static void restore_coding_context(VP9_COMP *cpi) {
   1060   CODING_CONTEXT *const cc = &cpi->coding_context;
   1061   VP9_COMMON *cm = &cpi->common;
   1062 
    1063   // Restore key state variables to the snapshot state stored in the
    1064   // previous call to save_coding_context.
   1065   vp9_copy(cpi->td.mb.nmvjointcost, cc->nmvjointcost);
   1066 
   1067   memcpy(cpi->nmvcosts[0], cc->nmvcosts[0], MV_VALS * sizeof(*cc->nmvcosts[0]));
   1068   memcpy(cpi->nmvcosts[1], cc->nmvcosts[1], MV_VALS * sizeof(*cc->nmvcosts[1]));
   1069   memcpy(cpi->nmvcosts_hp[0], cc->nmvcosts_hp[0],
   1070          MV_VALS * sizeof(*cc->nmvcosts_hp[0]));
   1071   memcpy(cpi->nmvcosts_hp[1], cc->nmvcosts_hp[1],
   1072          MV_VALS * sizeof(*cc->nmvcosts_hp[1]));
   1073 
   1074   vp9_copy(cm->seg.pred_probs, cc->segment_pred_probs);
   1075 
   1076   memcpy(cm->last_frame_seg_map, cpi->coding_context.last_frame_seg_map_copy,
   1077          (cm->mi_rows * cm->mi_cols));
   1078 
   1079   vp9_copy(cm->lf.last_ref_deltas, cc->last_ref_lf_deltas);
   1080   vp9_copy(cm->lf.last_mode_deltas, cc->last_mode_lf_deltas);
   1081 
   1082   *cm->fc = cc->fc;
   1083 }
   1084 
   1085 #if !CONFIG_REALTIME_ONLY
   1086 static void configure_static_seg_features(VP9_COMP *cpi) {
   1087   VP9_COMMON *const cm = &cpi->common;
   1088   const RATE_CONTROL *const rc = &cpi->rc;
   1089   struct segmentation *const seg = &cm->seg;
   1090 
   1091   int high_q = (int)(rc->avg_q > 48.0);
   1092   int qi_delta;
   1093 
   1094   // Disable and clear down for KF
   1095   if (cm->frame_type == KEY_FRAME) {
   1096     // Clear down the global segmentation map
   1097     memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
   1098     seg->update_map = 0;
   1099     seg->update_data = 0;
   1100     cpi->static_mb_pct = 0;
   1101 
   1102     // Disable segmentation
   1103     vp9_disable_segmentation(seg);
   1104 
   1105     // Clear down the segment features.
   1106     vp9_clearall_segfeatures(seg);
   1107   } else if (cpi->refresh_alt_ref_frame) {
   1108     // If this is an alt ref frame
   1109     // Clear down the global segmentation map
   1110     memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
   1111     seg->update_map = 0;
   1112     seg->update_data = 0;
   1113     cpi->static_mb_pct = 0;
   1114 
   1115     // Disable segmentation and individual segment features by default
   1116     vp9_disable_segmentation(seg);
   1117     vp9_clearall_segfeatures(seg);
   1118 
   1119     // Scan frames from current to arf frame.
   1120     // This function re-enables segmentation if appropriate.
   1121     vp9_update_mbgraph_stats(cpi);
   1122 
   1123     // If segmentation was enabled set those features needed for the
   1124     // arf itself.
   1125     if (seg->enabled) {
   1126       seg->update_map = 1;
   1127       seg->update_data = 1;
   1128 
   1129       qi_delta =
   1130           vp9_compute_qdelta(rc, rc->avg_q, rc->avg_q * 0.875, cm->bit_depth);
   1131       vp9_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta - 2);
   1132       vp9_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
   1133 
   1134       vp9_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
   1135       vp9_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
   1136 
   1137       // Where relevant assume segment data is delta data
   1138       seg->abs_delta = SEGMENT_DELTADATA;
   1139     }
   1140   } else if (seg->enabled) {
   1141     // All other frames if segmentation has been enabled
   1142 
   1143     // First normal frame in a valid gf or alt ref group
   1144     if (rc->frames_since_golden == 0) {
   1145       // Set up segment features for normal frames in an arf group
   1146       if (rc->source_alt_ref_active) {
   1147         seg->update_map = 0;
   1148         seg->update_data = 1;
   1149         seg->abs_delta = SEGMENT_DELTADATA;
   1150 
   1151         qi_delta =
   1152             vp9_compute_qdelta(rc, rc->avg_q, rc->avg_q * 1.125, cm->bit_depth);
   1153         vp9_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta + 2);
   1154         vp9_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
   1155 
   1156         vp9_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
   1157         vp9_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
   1158 
   1159         // Segment coding disabled for compred testing
   1160         if (high_q || (cpi->static_mb_pct == 100)) {
   1161           vp9_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
   1162           vp9_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
   1163           vp9_enable_segfeature(seg, 1, SEG_LVL_SKIP);
   1164         }
   1165       } else {
   1166         // Disable segmentation and clear down features if alt ref
   1167         // is not active for this group
   1168 
   1169         vp9_disable_segmentation(seg);
   1170 
   1171         memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
   1172 
   1173         seg->update_map = 0;
   1174         seg->update_data = 0;
   1175 
   1176         vp9_clearall_segfeatures(seg);
   1177       }
   1178     } else if (rc->is_src_frame_alt_ref) {
   1179       // Special case where we are coding over the top of a previous
   1180       // alt ref frame.
   1181       // Segment coding disabled for compred testing
   1182 
   1183       // Enable ref frame features for segment 0 as well
   1184       vp9_enable_segfeature(seg, 0, SEG_LVL_REF_FRAME);
   1185       vp9_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
   1186 
   1187       // All mbs should use ALTREF_FRAME
   1188       vp9_clear_segdata(seg, 0, SEG_LVL_REF_FRAME);
   1189       vp9_set_segdata(seg, 0, SEG_LVL_REF_FRAME, ALTREF_FRAME);
   1190       vp9_clear_segdata(seg, 1, SEG_LVL_REF_FRAME);
   1191       vp9_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
   1192 
   1193       // Skip all MBs if high Q (0,0 mv and skip coeffs)
   1194       if (high_q) {
   1195         vp9_enable_segfeature(seg, 0, SEG_LVL_SKIP);
   1196         vp9_enable_segfeature(seg, 1, SEG_LVL_SKIP);
   1197       }
   1198       // Enable data update
   1199       seg->update_data = 1;
   1200     } else {
   1201       // All other frames.
   1202 
    1203       // No updates; leave things as they are.
   1204       seg->update_map = 0;
   1205       seg->update_data = 0;
   1206     }
   1207   }
   1208 }
   1209 #endif  // !CONFIG_REALTIME_ONLY
   1210 
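          // Copy the segment id of each 8x8 block of the just-coded frame into
          // last_frame_seg_map, which is used for temporal segment-id prediction
          // on subsequent frames.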
   1211 static void update_reference_segmentation_map(VP9_COMP *cpi) {
   1212   VP9_COMMON *const cm = &cpi->common;
   1213   MODE_INFO **mi_8x8_ptr = cm->mi_grid_visible;
   1214   uint8_t *cache_ptr = cm->last_frame_seg_map;
   1215   int row, col;
   1216 
   1217   for (row = 0; row < cm->mi_rows; row++) {
   1218     MODE_INFO **mi_8x8 = mi_8x8_ptr;
   1219     uint8_t *cache = cache_ptr;
   1220     for (col = 0; col < cm->mi_cols; col++, mi_8x8++, cache++)
   1221       cache[0] = mi_8x8[0]->segment_id;
   1222     mi_8x8_ptr += cm->mi_stride;
   1223     cache_ptr += cm->mi_cols;
   1224   }
   1225 }
   1226 
   1227 static void alloc_raw_frame_buffers(VP9_COMP *cpi) {
   1228   VP9_COMMON *cm = &cpi->common;
   1229   const VP9EncoderConfig *oxcf = &cpi->oxcf;
   1230 
   1231   if (!cpi->lookahead)
   1232     cpi->lookahead = vp9_lookahead_init(oxcf->width, oxcf->height,
   1233                                         cm->subsampling_x, cm->subsampling_y,
   1234 #if CONFIG_VP9_HIGHBITDEPTH
   1235                                         cm->use_highbitdepth,
   1236 #endif
   1237                                         oxcf->lag_in_frames);
   1238   if (!cpi->lookahead)
   1239     vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
   1240                        "Failed to allocate lag buffers");
   1241 
   1242   // TODO(agrange) Check if ARF is enabled and skip allocation if not.
   1243   if (vpx_realloc_frame_buffer(&cpi->alt_ref_buffer, oxcf->width, oxcf->height,
   1244                                cm->subsampling_x, cm->subsampling_y,
   1245 #if CONFIG_VP9_HIGHBITDEPTH
   1246                                cm->use_highbitdepth,
   1247 #endif
   1248                                VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
   1249                                NULL, NULL, NULL))
   1250     vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
   1251                        "Failed to allocate altref buffer");
   1252 }
   1253 
   1254 static void alloc_util_frame_buffers(VP9_COMP *cpi) {
   1255   VP9_COMMON *const cm = &cpi->common;
   1256   if (vpx_realloc_frame_buffer(&cpi->last_frame_uf, cm->width, cm->height,
   1257                                cm->subsampling_x, cm->subsampling_y,
   1258 #if CONFIG_VP9_HIGHBITDEPTH
   1259                                cm->use_highbitdepth,
   1260 #endif
   1261                                VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
   1262                                NULL, NULL, NULL))
   1263     vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
   1264                        "Failed to allocate last frame buffer");
   1265 
   1266   if (vpx_realloc_frame_buffer(&cpi->scaled_source, cm->width, cm->height,
   1267                                cm->subsampling_x, cm->subsampling_y,
   1268 #if CONFIG_VP9_HIGHBITDEPTH
   1269                                cm->use_highbitdepth,
   1270 #endif
   1271                                VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
   1272                                NULL, NULL, NULL))
   1273     vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
   1274                        "Failed to allocate scaled source buffer");
   1275 
    1276   // For 1-pass CBR SVC: allocate scaled_temp, which may be used as an
    1277   // intermediate buffer for two-stage down-sampling (two 1:2 stages for a
    1278   // 1/4x1/4 target). number_spatial_layers must be greater than 2.
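           // Illustrative example (resolutions assumed): a 1280x720 source would be
           // scaled to 640x360 into scaled_temp, then to a 320x180 target layer.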
   1279   if (is_one_pass_cbr_svc(cpi) && !cpi->svc.scaled_temp_is_alloc &&
   1280       cpi->svc.number_spatial_layers > 2) {
   1281     cpi->svc.scaled_temp_is_alloc = 1;
   1282     if (vpx_realloc_frame_buffer(
   1283             &cpi->svc.scaled_temp, cm->width >> 1, cm->height >> 1,
   1284             cm->subsampling_x, cm->subsampling_y,
   1285 #if CONFIG_VP9_HIGHBITDEPTH
   1286             cm->use_highbitdepth,
   1287 #endif
   1288             VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment, NULL, NULL, NULL))
   1289       vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
   1290                          "Failed to allocate scaled_frame for svc ");
   1291   }
   1292 
   1293   if (vpx_realloc_frame_buffer(&cpi->scaled_last_source, cm->width, cm->height,
   1294                                cm->subsampling_x, cm->subsampling_y,
   1295 #if CONFIG_VP9_HIGHBITDEPTH
   1296                                cm->use_highbitdepth,
   1297 #endif
   1298                                VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
   1299                                NULL, NULL, NULL))
   1300     vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
   1301                        "Failed to allocate scaled last source buffer");
   1302 #ifdef ENABLE_KF_DENOISE
   1303   if (vpx_realloc_frame_buffer(&cpi->raw_unscaled_source, cm->width, cm->height,
   1304                                cm->subsampling_x, cm->subsampling_y,
   1305 #if CONFIG_VP9_HIGHBITDEPTH
   1306                                cm->use_highbitdepth,
   1307 #endif
   1308                                VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
   1309                                NULL, NULL, NULL))
   1310     vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
   1311                        "Failed to allocate unscaled raw source frame buffer");
   1312 
   1313   if (vpx_realloc_frame_buffer(&cpi->raw_scaled_source, cm->width, cm->height,
   1314                                cm->subsampling_x, cm->subsampling_y,
   1315 #if CONFIG_VP9_HIGHBITDEPTH
   1316                                cm->use_highbitdepth,
   1317 #endif
   1318                                VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
   1319                                NULL, NULL, NULL))
   1320     vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
   1321                        "Failed to allocate scaled raw source frame buffer");
   1322 #endif
   1323 }
   1324 
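// Allocate the encoder-side array of extended mode info, one entry per
// 8x8 mi unit. Returns 0 on success, 1 on allocation failure.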
   1325 static int alloc_context_buffers_ext(VP9_COMP *cpi) {
   1326   VP9_COMMON *cm = &cpi->common;
   1327   int mi_size = cm->mi_cols * cm->mi_rows;
   1328 
   1329   cpi->mbmi_ext_base = vpx_calloc(mi_size, sizeof(*cpi->mbmi_ext_base));
   1330   if (!cpi->mbmi_ext_base) return 1;
   1331 
   1332   return 0;
   1333 }
   1334 
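// (Re)allocate the encoder buffers that track the coded frame size: the
// common context buffers, the extended mode-info array, the token buffer,
// the per-superblock-row token lists and the partition search tree.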
   1335 static void alloc_compressor_data(VP9_COMP *cpi) {
   1336   VP9_COMMON *cm = &cpi->common;
   1337   int sb_rows;
   1338 
   1339   vp9_alloc_context_buffers(cm, cm->width, cm->height);
   1340 
   1341   alloc_context_buffers_ext(cpi);
   1342 
   1343   vpx_free(cpi->tile_tok[0][0]);
   1344 
   1345   {
   1346     unsigned int tokens = get_token_alloc(cm->mb_rows, cm->mb_cols);
   1347     CHECK_MEM_ERROR(cm, cpi->tile_tok[0][0],
   1348                     vpx_calloc(tokens, sizeof(*cpi->tile_tok[0][0])));
   1349   }
   1350 
   1351   sb_rows = mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2;
   1352   vpx_free(cpi->tplist[0][0]);
   1353   CHECK_MEM_ERROR(
   1354       cm, cpi->tplist[0][0],
   1355       vpx_calloc(sb_rows * 4 * (1 << 6), sizeof(*cpi->tplist[0][0])));
   1356 
   1357   vp9_setup_pc_tree(&cpi->common, &cpi->td);
   1358 }
   1359 
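// Guard against nonsensical frame rates: anything below 0.1 fps falls back
// to 30 fps before the rate control is updated.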
   1360 void vp9_new_framerate(VP9_COMP *cpi, double framerate) {
   1361   cpi->framerate = framerate < 0.1 ? 30 : framerate;
   1362   vp9_rc_update_framerate(cpi);
   1363 }
   1364 
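// Clamp the requested tile columns to the range the frame width allows;
// under LEVEL_AUTO they are further capped by the picture-size level.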
   1365 static void set_tile_limits(VP9_COMP *cpi) {
   1366   VP9_COMMON *const cm = &cpi->common;
   1367 
   1368   int min_log2_tile_cols, max_log2_tile_cols;
   1369   vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
   1370 
   1371   cm->log2_tile_cols =
   1372       clamp(cpi->oxcf.tile_columns, min_log2_tile_cols, max_log2_tile_cols);
   1373   cm->log2_tile_rows = cpi->oxcf.tile_rows;
   1374 
   1375   if (cpi->oxcf.target_level == LEVEL_AUTO) {
   1376     const int level_tile_cols =
   1377         log_tile_cols_from_picsize_level(cpi->common.width, cpi->common.height);
   1378     if (cm->log2_tile_cols > level_tile_cols) {
   1379       cm->log2_tile_cols = VPXMAX(level_tile_cols, min_log2_tile_cols);
   1380     }
   1381   }
   1382 }
   1383 
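// Refresh the state that depends on the coded frame size: mi geometry,
// context buffers, macroblockd initialisation, the extended mode-info array
// and the tile limits.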
   1384 static void update_frame_size(VP9_COMP *cpi) {
   1385   VP9_COMMON *const cm = &cpi->common;
   1386   MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
   1387 
   1388   vp9_set_mb_mi(cm, cm->width, cm->height);
   1389   vp9_init_context_buffers(cm);
   1390   vp9_init_macroblockd(cm, xd, NULL);
   1391   cpi->td.mb.mbmi_ext_base = cpi->mbmi_ext_base;
   1392   memset(cpi->mbmi_ext_base, 0,
   1393          cm->mi_rows * cm->mi_cols * sizeof(*cpi->mbmi_ext_base));
   1394 
   1395   set_tile_limits(cpi);
   1396 }
   1397 
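// Give each reference slot its own frame buffer index; LAST, GOLDEN and
// ALTREF initially map to buffers 0, 1 and 2 respectively.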
   1398 static void init_buffer_indices(VP9_COMP *cpi) {
   1399   int ref_frame;
   1400 
   1401   for (ref_frame = 0; ref_frame < REF_FRAMES; ++ref_frame)
   1402     cpi->ref_fb_idx[ref_frame] = ref_frame;
   1403 
   1404   cpi->lst_fb_idx = cpi->ref_fb_idx[LAST_FRAME - 1];
   1405   cpi->gld_fb_idx = cpi->ref_fb_idx[GOLDEN_FRAME - 1];
   1406   cpi->alt_fb_idx = cpi->ref_fb_idx[ALTREF_FRAME - 1];
   1407 }
   1408 
   1409 static void init_level_constraint(LevelConstraint *lc) {
   1410   lc->level_index = -1;
   1411   lc->max_cpb_size = INT_MAX;
   1412   lc->max_frame_size = INT_MAX;
   1413   lc->rc_config_updated = 0;
   1414   lc->fail_flag = 0;
   1415 }
   1416 
   1417 static void set_level_constraint(LevelConstraint *ls, int8_t level_index) {
   1418   vpx_clear_system_state();
   1419   ls->level_index = level_index;
   1420   if (level_index >= 0) {
   1421     ls->max_cpb_size = vp9_level_defs[level_index].max_cpb_size * (double)1000;
   1422   }
   1423 }
   1424 
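// One-time configuration at encoder creation: copy the config, set up the
// frame geometry, SVC layer contexts and reference buffer indices, and run
// vp9_change_config() for the settings that can also change at runtime.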
   1425 static void init_config(struct VP9_COMP *cpi, VP9EncoderConfig *oxcf) {
   1426   VP9_COMMON *const cm = &cpi->common;
   1427 
   1428   cpi->oxcf = *oxcf;
   1429   cpi->framerate = oxcf->init_framerate;
   1430   cm->profile = oxcf->profile;
   1431   cm->bit_depth = oxcf->bit_depth;
   1432 #if CONFIG_VP9_HIGHBITDEPTH
   1433   cm->use_highbitdepth = oxcf->use_highbitdepth;
   1434 #endif
   1435   cm->color_space = oxcf->color_space;
   1436   cm->color_range = oxcf->color_range;
   1437 
   1438   cpi->target_level = oxcf->target_level;
   1439   cpi->keep_level_stats = oxcf->target_level != LEVEL_MAX;
   1440   set_level_constraint(&cpi->level_constraint,
   1441                        get_level_index(cpi->target_level));
   1442 
   1443   cm->width = oxcf->width;
   1444   cm->height = oxcf->height;
   1445   alloc_compressor_data(cpi);
   1446 
   1447   cpi->svc.temporal_layering_mode = oxcf->temporal_layering_mode;
   1448 
   1449   // Single thread case: use counts in common.
   1450   cpi->td.counts = &cm->counts;
   1451 
   1452   // Spatial scalability.
   1453   cpi->svc.number_spatial_layers = oxcf->ss_number_layers;
   1454   // Temporal scalability.
   1455   cpi->svc.number_temporal_layers = oxcf->ts_number_layers;
   1456 
   1457   if ((cpi->svc.number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR) ||
   1458       ((cpi->svc.number_temporal_layers > 1 ||
   1459         cpi->svc.number_spatial_layers > 1) &&
   1460        cpi->oxcf.pass != 1)) {
   1461     vp9_init_layer_context(cpi);
   1462   }
   1463 
    1464   // vp9_change_config() below handles all the joint configuration work.
   1465   vp9_change_config(cpi, oxcf);
   1466 
   1467   cpi->static_mb_pct = 0;
   1468   cpi->ref_frame_flags = 0;
   1469 
   1470   init_buffer_indices(cpi);
   1471 
   1472   vp9_noise_estimate_init(&cpi->noise_estimate, cm->width, cm->height);
   1473 }
   1474 
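// Convert the configured buffer levels from milliseconds to bits at the
// target bandwidth; unset optimal/maximum levels default to bandwidth / 8
// (one eighth of a second of data). Example with hypothetical numbers:
// target_bandwidth = 1000000 bps and starting_buffer_level_ms = 4000 give
// a starting level of 4000 * 1000000 / 1000 = 4000000 bits.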
   1475 static void set_rc_buffer_sizes(RATE_CONTROL *rc,
   1476                                 const VP9EncoderConfig *oxcf) {
   1477   const int64_t bandwidth = oxcf->target_bandwidth;
   1478   const int64_t starting = oxcf->starting_buffer_level_ms;
   1479   const int64_t optimal = oxcf->optimal_buffer_level_ms;
   1480   const int64_t maximum = oxcf->maximum_buffer_size_ms;
   1481 
   1482   rc->starting_buffer_level = starting * bandwidth / 1000;
   1483   rc->optimal_buffer_level =
   1484       (optimal == 0) ? bandwidth / 8 : optimal * bandwidth / 1000;
   1485   rc->maximum_buffer_size =
   1486       (maximum == 0) ? bandwidth / 8 : maximum * bandwidth / 1000;
   1487 }
   1488 
   1489 #if CONFIG_VP9_HIGHBITDEPTH
   1490 #define HIGHBD_BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX4DF) \
   1491   cpi->fn_ptr[BT].sdf = SDF;                             \
   1492   cpi->fn_ptr[BT].sdaf = SDAF;                           \
   1493   cpi->fn_ptr[BT].vf = VF;                               \
   1494   cpi->fn_ptr[BT].svf = SVF;                             \
   1495   cpi->fn_ptr[BT].svaf = SVAF;                           \
   1496   cpi->fn_ptr[BT].sdx4df = SDX4DF;
   1497 
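// The _bits8/_bits10/_bits12 wrappers generated below keep SAD results on a
// common 8-bit scale: 10-bit SADs are shifted right by 2 and 12-bit SADs by
// 4 before being returned.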
   1498 #define MAKE_BFP_SAD_WRAPPER(fnname)                                           \
   1499   static unsigned int fnname##_bits8(const uint8_t *src_ptr,                   \
   1500                                      int source_stride,                        \
   1501                                      const uint8_t *ref_ptr, int ref_stride) { \
   1502     return fnname(src_ptr, source_stride, ref_ptr, ref_stride);                \
   1503   }                                                                            \
   1504   static unsigned int fnname##_bits10(                                         \
   1505       const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr,       \
   1506       int ref_stride) {                                                        \
   1507     return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 2;           \
   1508   }                                                                            \
   1509   static unsigned int fnname##_bits12(                                         \
   1510       const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr,       \
   1511       int ref_stride) {                                                        \
   1512     return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 4;           \
   1513   }
   1514 
   1515 #define MAKE_BFP_SADAVG_WRAPPER(fnname)                                        \
   1516   static unsigned int fnname##_bits8(                                          \
   1517       const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr,       \
   1518       int ref_stride, const uint8_t *second_pred) {                            \
   1519     return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred);   \
   1520   }                                                                            \
   1521   static unsigned int fnname##_bits10(                                         \
   1522       const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr,       \
   1523       int ref_stride, const uint8_t *second_pred) {                            \
   1524     return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred) >> \
   1525            2;                                                                  \
   1526   }                                                                            \
   1527   static unsigned int fnname##_bits12(                                         \
   1528       const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr,       \
   1529       int ref_stride, const uint8_t *second_pred) {                            \
   1530     return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred) >> \
   1531            4;                                                                  \
   1532   }
   1533 
   1534 #define MAKE_BFP_SAD4D_WRAPPER(fnname)                                        \
   1535   static void fnname##_bits8(const uint8_t *src_ptr, int source_stride,       \
   1536                              const uint8_t *const ref_ptr[], int ref_stride,  \
   1537                              unsigned int *sad_array) {                       \
   1538     fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);           \
   1539   }                                                                           \
   1540   static void fnname##_bits10(const uint8_t *src_ptr, int source_stride,      \
   1541                               const uint8_t *const ref_ptr[], int ref_stride, \
   1542                               unsigned int *sad_array) {                      \
   1543     int i;                                                                    \
   1544     fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);           \
   1545     for (i = 0; i < 4; i++) sad_array[i] >>= 2;                               \
   1546   }                                                                           \
   1547   static void fnname##_bits12(const uint8_t *src_ptr, int source_stride,      \
   1548                               const uint8_t *const ref_ptr[], int ref_stride, \
   1549                               unsigned int *sad_array) {                      \
   1550     int i;                                                                    \
   1551     fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);           \
   1552     for (i = 0; i < 4; i++) sad_array[i] >>= 4;                               \
   1553   }
   1554 
   1555 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x16)
   1556 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad32x16_avg)
   1557 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad32x16x4d)
   1558 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad16x32)
   1559 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad16x32_avg)
   1560 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad16x32x4d)
   1561 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad64x32)
   1562 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad64x32_avg)
   1563 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad64x32x4d)
   1564 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x64)
   1565 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad32x64_avg)
   1566 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad32x64x4d)
   1567 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x32)
   1568 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad32x32_avg)
   1569 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad32x32x4d)
   1570 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad64x64)
   1571 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad64x64_avg)
   1572 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad64x64x4d)
   1573 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad16x16)
   1574 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad16x16_avg)
   1575 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad16x16x4d)
   1576 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad16x8)
   1577 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad16x8_avg)
   1578 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad16x8x4d)
   1579 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad8x16)
   1580 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad8x16_avg)
   1581 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad8x16x4d)
   1582 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad8x8)
   1583 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad8x8_avg)
   1584 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad8x8x4d)
   1585 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad8x4)
   1586 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad8x4_avg)
   1587 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad8x4x4d)
   1588 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad4x8)
   1589 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad4x8_avg)
   1590 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad4x8x4d)
   1591 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad4x4)
   1592 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad4x4_avg)
   1593 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad4x4x4d)
   1594 
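// Install the SAD / variance kernels matching the configured bit depth; the
// 10- and 12-bit entries use the wrappers above so SAD output stays on an
// 8-bit scale.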
   1595 static void highbd_set_var_fns(VP9_COMP *const cpi) {
   1596   VP9_COMMON *const cm = &cpi->common;
   1597   if (cm->use_highbitdepth) {
   1598     switch (cm->bit_depth) {
   1599       case VPX_BITS_8:
   1600         HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits8,
   1601                    vpx_highbd_sad32x16_avg_bits8, vpx_highbd_8_variance32x16,
   1602                    vpx_highbd_8_sub_pixel_variance32x16,
   1603                    vpx_highbd_8_sub_pixel_avg_variance32x16,
   1604                    vpx_highbd_sad32x16x4d_bits8)
   1605 
   1606         HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits8,
   1607                    vpx_highbd_sad16x32_avg_bits8, vpx_highbd_8_variance16x32,
   1608                    vpx_highbd_8_sub_pixel_variance16x32,
   1609                    vpx_highbd_8_sub_pixel_avg_variance16x32,
   1610                    vpx_highbd_sad16x32x4d_bits8)
   1611 
   1612         HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits8,
   1613                    vpx_highbd_sad64x32_avg_bits8, vpx_highbd_8_variance64x32,
   1614                    vpx_highbd_8_sub_pixel_variance64x32,
   1615                    vpx_highbd_8_sub_pixel_avg_variance64x32,
   1616                    vpx_highbd_sad64x32x4d_bits8)
   1617 
   1618         HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits8,
   1619                    vpx_highbd_sad32x64_avg_bits8, vpx_highbd_8_variance32x64,
   1620                    vpx_highbd_8_sub_pixel_variance32x64,
   1621                    vpx_highbd_8_sub_pixel_avg_variance32x64,
   1622                    vpx_highbd_sad32x64x4d_bits8)
   1623 
   1624         HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits8,
   1625                    vpx_highbd_sad32x32_avg_bits8, vpx_highbd_8_variance32x32,
   1626                    vpx_highbd_8_sub_pixel_variance32x32,
   1627                    vpx_highbd_8_sub_pixel_avg_variance32x32,
   1628                    vpx_highbd_sad32x32x4d_bits8)
   1629 
   1630         HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits8,
   1631                    vpx_highbd_sad64x64_avg_bits8, vpx_highbd_8_variance64x64,
   1632                    vpx_highbd_8_sub_pixel_variance64x64,
   1633                    vpx_highbd_8_sub_pixel_avg_variance64x64,
   1634                    vpx_highbd_sad64x64x4d_bits8)
   1635 
   1636         HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits8,
   1637                    vpx_highbd_sad16x16_avg_bits8, vpx_highbd_8_variance16x16,
   1638                    vpx_highbd_8_sub_pixel_variance16x16,
   1639                    vpx_highbd_8_sub_pixel_avg_variance16x16,
   1640                    vpx_highbd_sad16x16x4d_bits8)
   1641 
   1642         HIGHBD_BFP(BLOCK_16X8, vpx_highbd_sad16x8_bits8,
   1643                    vpx_highbd_sad16x8_avg_bits8, vpx_highbd_8_variance16x8,
   1644                    vpx_highbd_8_sub_pixel_variance16x8,
   1645                    vpx_highbd_8_sub_pixel_avg_variance16x8,
   1646                    vpx_highbd_sad16x8x4d_bits8)
   1647 
   1648         HIGHBD_BFP(BLOCK_8X16, vpx_highbd_sad8x16_bits8,
   1649                    vpx_highbd_sad8x16_avg_bits8, vpx_highbd_8_variance8x16,
   1650                    vpx_highbd_8_sub_pixel_variance8x16,
   1651                    vpx_highbd_8_sub_pixel_avg_variance8x16,
   1652                    vpx_highbd_sad8x16x4d_bits8)
   1653 
   1654         HIGHBD_BFP(
   1655             BLOCK_8X8, vpx_highbd_sad8x8_bits8, vpx_highbd_sad8x8_avg_bits8,
   1656             vpx_highbd_8_variance8x8, vpx_highbd_8_sub_pixel_variance8x8,
   1657             vpx_highbd_8_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x4d_bits8)
   1658 
   1659         HIGHBD_BFP(
   1660             BLOCK_8X4, vpx_highbd_sad8x4_bits8, vpx_highbd_sad8x4_avg_bits8,
   1661             vpx_highbd_8_variance8x4, vpx_highbd_8_sub_pixel_variance8x4,
   1662             vpx_highbd_8_sub_pixel_avg_variance8x4, vpx_highbd_sad8x4x4d_bits8)
   1663 
   1664         HIGHBD_BFP(
   1665             BLOCK_4X8, vpx_highbd_sad4x8_bits8, vpx_highbd_sad4x8_avg_bits8,
   1666             vpx_highbd_8_variance4x8, vpx_highbd_8_sub_pixel_variance4x8,
   1667             vpx_highbd_8_sub_pixel_avg_variance4x8, vpx_highbd_sad4x8x4d_bits8)
   1668 
   1669         HIGHBD_BFP(
   1670             BLOCK_4X4, vpx_highbd_sad4x4_bits8, vpx_highbd_sad4x4_avg_bits8,
   1671             vpx_highbd_8_variance4x4, vpx_highbd_8_sub_pixel_variance4x4,
   1672             vpx_highbd_8_sub_pixel_avg_variance4x4, vpx_highbd_sad4x4x4d_bits8)
   1673         break;
   1674 
   1675       case VPX_BITS_10:
   1676         HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits10,
   1677                    vpx_highbd_sad32x16_avg_bits10, vpx_highbd_10_variance32x16,
   1678                    vpx_highbd_10_sub_pixel_variance32x16,
   1679                    vpx_highbd_10_sub_pixel_avg_variance32x16,
   1680                    vpx_highbd_sad32x16x4d_bits10)
   1681 
   1682         HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits10,
   1683                    vpx_highbd_sad16x32_avg_bits10, vpx_highbd_10_variance16x32,
   1684                    vpx_highbd_10_sub_pixel_variance16x32,
   1685                    vpx_highbd_10_sub_pixel_avg_variance16x32,
   1686                    vpx_highbd_sad16x32x4d_bits10)
   1687 
   1688         HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits10,
   1689                    vpx_highbd_sad64x32_avg_bits10, vpx_highbd_10_variance64x32,
   1690                    vpx_highbd_10_sub_pixel_variance64x32,
   1691                    vpx_highbd_10_sub_pixel_avg_variance64x32,
   1692                    vpx_highbd_sad64x32x4d_bits10)
   1693 
   1694         HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits10,
   1695                    vpx_highbd_sad32x64_avg_bits10, vpx_highbd_10_variance32x64,
   1696                    vpx_highbd_10_sub_pixel_variance32x64,
   1697                    vpx_highbd_10_sub_pixel_avg_variance32x64,
   1698                    vpx_highbd_sad32x64x4d_bits10)
   1699 
   1700         HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits10,
   1701                    vpx_highbd_sad32x32_avg_bits10, vpx_highbd_10_variance32x32,
   1702                    vpx_highbd_10_sub_pixel_variance32x32,
   1703                    vpx_highbd_10_sub_pixel_avg_variance32x32,
   1704                    vpx_highbd_sad32x32x4d_bits10)
   1705 
   1706         HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits10,
   1707                    vpx_highbd_sad64x64_avg_bits10, vpx_highbd_10_variance64x64,
   1708                    vpx_highbd_10_sub_pixel_variance64x64,
   1709                    vpx_highbd_10_sub_pixel_avg_variance64x64,
   1710                    vpx_highbd_sad64x64x4d_bits10)
   1711 
   1712         HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits10,
   1713                    vpx_highbd_sad16x16_avg_bits10, vpx_highbd_10_variance16x16,
   1714                    vpx_highbd_10_sub_pixel_variance16x16,
   1715                    vpx_highbd_10_sub_pixel_avg_variance16x16,
   1716                    vpx_highbd_sad16x16x4d_bits10)
   1717 
   1718         HIGHBD_BFP(BLOCK_16X8, vpx_highbd_sad16x8_bits10,
   1719                    vpx_highbd_sad16x8_avg_bits10, vpx_highbd_10_variance16x8,
   1720                    vpx_highbd_10_sub_pixel_variance16x8,
   1721                    vpx_highbd_10_sub_pixel_avg_variance16x8,
   1722                    vpx_highbd_sad16x8x4d_bits10)
   1723 
   1724         HIGHBD_BFP(BLOCK_8X16, vpx_highbd_sad8x16_bits10,
   1725                    vpx_highbd_sad8x16_avg_bits10, vpx_highbd_10_variance8x16,
   1726                    vpx_highbd_10_sub_pixel_variance8x16,
   1727                    vpx_highbd_10_sub_pixel_avg_variance8x16,
   1728                    vpx_highbd_sad8x16x4d_bits10)
   1729 
   1730         HIGHBD_BFP(BLOCK_8X8, vpx_highbd_sad8x8_bits10,
   1731                    vpx_highbd_sad8x8_avg_bits10, vpx_highbd_10_variance8x8,
   1732                    vpx_highbd_10_sub_pixel_variance8x8,
   1733                    vpx_highbd_10_sub_pixel_avg_variance8x8,
   1734                    vpx_highbd_sad8x8x4d_bits10)
   1735 
   1736         HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits10,
   1737                    vpx_highbd_sad8x4_avg_bits10, vpx_highbd_10_variance8x4,
   1738                    vpx_highbd_10_sub_pixel_variance8x4,
   1739                    vpx_highbd_10_sub_pixel_avg_variance8x4,
   1740                    vpx_highbd_sad8x4x4d_bits10)
   1741 
   1742         HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits10,
   1743                    vpx_highbd_sad4x8_avg_bits10, vpx_highbd_10_variance4x8,
   1744                    vpx_highbd_10_sub_pixel_variance4x8,
   1745                    vpx_highbd_10_sub_pixel_avg_variance4x8,
   1746                    vpx_highbd_sad4x8x4d_bits10)
   1747 
   1748         HIGHBD_BFP(BLOCK_4X4, vpx_highbd_sad4x4_bits10,
   1749                    vpx_highbd_sad4x4_avg_bits10, vpx_highbd_10_variance4x4,
   1750                    vpx_highbd_10_sub_pixel_variance4x4,
   1751                    vpx_highbd_10_sub_pixel_avg_variance4x4,
   1752                    vpx_highbd_sad4x4x4d_bits10)
   1753         break;
   1754 
   1755       default:
   1756         assert(cm->bit_depth == VPX_BITS_12);
   1757         HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits12,
   1758                    vpx_highbd_sad32x16_avg_bits12, vpx_highbd_12_variance32x16,
   1759                    vpx_highbd_12_sub_pixel_variance32x16,
   1760                    vpx_highbd_12_sub_pixel_avg_variance32x16,
   1761                    vpx_highbd_sad32x16x4d_bits12)
   1762 
   1763         HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits12,
   1764                    vpx_highbd_sad16x32_avg_bits12, vpx_highbd_12_variance16x32,
   1765                    vpx_highbd_12_sub_pixel_variance16x32,
   1766                    vpx_highbd_12_sub_pixel_avg_variance16x32,
   1767                    vpx_highbd_sad16x32x4d_bits12)
   1768 
   1769         HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits12,
   1770                    vpx_highbd_sad64x32_avg_bits12, vpx_highbd_12_variance64x32,
   1771                    vpx_highbd_12_sub_pixel_variance64x32,
   1772                    vpx_highbd_12_sub_pixel_avg_variance64x32,
   1773                    vpx_highbd_sad64x32x4d_bits12)
   1774 
   1775         HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits12,
   1776                    vpx_highbd_sad32x64_avg_bits12, vpx_highbd_12_variance32x64,
   1777                    vpx_highbd_12_sub_pixel_variance32x64,
   1778                    vpx_highbd_12_sub_pixel_avg_variance32x64,
   1779                    vpx_highbd_sad32x64x4d_bits12)
   1780 
   1781         HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits12,
   1782                    vpx_highbd_sad32x32_avg_bits12, vpx_highbd_12_variance32x32,
   1783                    vpx_highbd_12_sub_pixel_variance32x32,
   1784                    vpx_highbd_12_sub_pixel_avg_variance32x32,
   1785                    vpx_highbd_sad32x32x4d_bits12)
   1786 
   1787         HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits12,
   1788                    vpx_highbd_sad64x64_avg_bits12, vpx_highbd_12_variance64x64,
   1789                    vpx_highbd_12_sub_pixel_variance64x64,
   1790                    vpx_highbd_12_sub_pixel_avg_variance64x64,
   1791                    vpx_highbd_sad64x64x4d_bits12)
   1792 
   1793         HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits12,
   1794                    vpx_highbd_sad16x16_avg_bits12, vpx_highbd_12_variance16x16,
   1795                    vpx_highbd_12_sub_pixel_variance16x16,
   1796                    vpx_highbd_12_sub_pixel_avg_variance16x16,
   1797                    vpx_highbd_sad16x16x4d_bits12)
   1798 
   1799         HIGHBD_BFP(BLOCK_16X8, vpx_highbd_sad16x8_bits12,
   1800                    vpx_highbd_sad16x8_avg_bits12, vpx_highbd_12_variance16x8,
   1801                    vpx_highbd_12_sub_pixel_variance16x8,
   1802                    vpx_highbd_12_sub_pixel_avg_variance16x8,
   1803                    vpx_highbd_sad16x8x4d_bits12)
   1804 
   1805         HIGHBD_BFP(BLOCK_8X16, vpx_highbd_sad8x16_bits12,
   1806                    vpx_highbd_sad8x16_avg_bits12, vpx_highbd_12_variance8x16,
   1807                    vpx_highbd_12_sub_pixel_variance8x16,
   1808                    vpx_highbd_12_sub_pixel_avg_variance8x16,
   1809                    vpx_highbd_sad8x16x4d_bits12)
   1810 
   1811         HIGHBD_BFP(BLOCK_8X8, vpx_highbd_sad8x8_bits12,
   1812                    vpx_highbd_sad8x8_avg_bits12, vpx_highbd_12_variance8x8,
   1813                    vpx_highbd_12_sub_pixel_variance8x8,
   1814                    vpx_highbd_12_sub_pixel_avg_variance8x8,
   1815                    vpx_highbd_sad8x8x4d_bits12)
   1816 
   1817         HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits12,
   1818                    vpx_highbd_sad8x4_avg_bits12, vpx_highbd_12_variance8x4,
   1819                    vpx_highbd_12_sub_pixel_variance8x4,
   1820                    vpx_highbd_12_sub_pixel_avg_variance8x4,
   1821                    vpx_highbd_sad8x4x4d_bits12)
   1822 
   1823         HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits12,
   1824                    vpx_highbd_sad4x8_avg_bits12, vpx_highbd_12_variance4x8,
   1825                    vpx_highbd_12_sub_pixel_variance4x8,
   1826                    vpx_highbd_12_sub_pixel_avg_variance4x8,
   1827                    vpx_highbd_sad4x8x4d_bits12)
   1828 
   1829         HIGHBD_BFP(BLOCK_4X4, vpx_highbd_sad4x4_bits12,
   1830                    vpx_highbd_sad4x4_avg_bits12, vpx_highbd_12_variance4x4,
   1831                    vpx_highbd_12_sub_pixel_variance4x4,
   1832                    vpx_highbd_12_sub_pixel_avg_variance4x4,
   1833                    vpx_highbd_sad4x4x4d_bits12)
   1834         break;
   1835     }
   1836   }
   1837 }
   1838 #endif  // CONFIG_VP9_HIGHBITDEPTH
   1839 
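// (Re)allocate the mi-resolution maps used for segmentation, cyclic
// background refresh, the active map and the saved segmentation-map copy.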
   1840 static void realloc_segmentation_maps(VP9_COMP *cpi) {
   1841   VP9_COMMON *const cm = &cpi->common;
   1842 
   1843   // Create the encoder segmentation map and set all entries to 0
   1844   vpx_free(cpi->segmentation_map);
   1845   CHECK_MEM_ERROR(cm, cpi->segmentation_map,
   1846                   vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
   1847 
   1848   // Create a map used for cyclic background refresh.
   1849   if (cpi->cyclic_refresh) vp9_cyclic_refresh_free(cpi->cyclic_refresh);
   1850   CHECK_MEM_ERROR(cm, cpi->cyclic_refresh,
   1851                   vp9_cyclic_refresh_alloc(cm->mi_rows, cm->mi_cols));
   1852 
   1853   // Create a map used to mark inactive areas.
   1854   vpx_free(cpi->active_map.map);
   1855   CHECK_MEM_ERROR(cm, cpi->active_map.map,
   1856                   vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
   1857 
    1858   // And a placeholder in the coding context, for use if we want to
    1859   // save and restore the last frame's segmentation map.
   1860   vpx_free(cpi->coding_context.last_frame_seg_map_copy);
   1861   CHECK_MEM_ERROR(cm, cpi->coding_context.last_frame_seg_map_copy,
   1862                   vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
   1863 }
   1864 
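// Lazily allocate the buffers that remember the previous frame's
// partitioning, segment ids and low-variance flags so they can be re-used.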
   1865 static void alloc_copy_partition_data(VP9_COMP *cpi) {
   1866   VP9_COMMON *const cm = &cpi->common;
   1867   if (cpi->prev_partition == NULL) {
   1868     CHECK_MEM_ERROR(cm, cpi->prev_partition,
   1869                     (BLOCK_SIZE *)vpx_calloc(cm->mi_stride * cm->mi_rows,
   1870                                              sizeof(*cpi->prev_partition)));
   1871   }
   1872   if (cpi->prev_segment_id == NULL) {
   1873     CHECK_MEM_ERROR(
   1874         cm, cpi->prev_segment_id,
   1875         (int8_t *)vpx_calloc((cm->mi_stride >> 3) * ((cm->mi_rows >> 3) + 1),
   1876                              sizeof(*cpi->prev_segment_id)));
   1877   }
   1878   if (cpi->prev_variance_low == NULL) {
   1879     CHECK_MEM_ERROR(cm, cpi->prev_variance_low,
   1880                     (uint8_t *)vpx_calloc(
   1881                         (cm->mi_stride >> 3) * ((cm->mi_rows >> 3) + 1) * 25,
   1882                         sizeof(*cpi->prev_variance_low)));
   1883   }
   1884   if (cpi->copied_frame_cnt == NULL) {
   1885     CHECK_MEM_ERROR(
   1886         cm, cpi->copied_frame_cnt,
   1887         (uint8_t *)vpx_calloc((cm->mi_stride >> 3) * ((cm->mi_rows >> 3) + 1),
   1888                               sizeof(*cpi->copied_frame_cnt)));
   1889   }
   1890 }
   1891 
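// Apply an updated encoder configuration: refresh profile, bit depth and
// colour settings, recompute rate-control limits and buffer sizes, handle
// frame-size changes (reallocating when the mi array grows) and reset the
// SVC / rate-control state where needed.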
   1892 void vp9_change_config(struct VP9_COMP *cpi, const VP9EncoderConfig *oxcf) {
   1893   VP9_COMMON *const cm = &cpi->common;
   1894   RATE_CONTROL *const rc = &cpi->rc;
   1895   int last_w = cpi->oxcf.width;
   1896   int last_h = cpi->oxcf.height;
   1897 
   1898   vp9_init_quantizer(cpi);
   1899   if (cm->profile != oxcf->profile) cm->profile = oxcf->profile;
   1900   cm->bit_depth = oxcf->bit_depth;
   1901   cm->color_space = oxcf->color_space;
   1902   cm->color_range = oxcf->color_range;
   1903 
   1904   cpi->target_level = oxcf->target_level;
   1905   cpi->keep_level_stats = oxcf->target_level != LEVEL_MAX;
   1906   set_level_constraint(&cpi->level_constraint,
   1907                        get_level_index(cpi->target_level));
   1908 
   1909   if (cm->profile <= PROFILE_1)
   1910     assert(cm->bit_depth == VPX_BITS_8);
   1911   else
   1912     assert(cm->bit_depth > VPX_BITS_8);
   1913 
   1914   cpi->oxcf = *oxcf;
   1915 #if CONFIG_VP9_HIGHBITDEPTH
   1916   cpi->td.mb.e_mbd.bd = (int)cm->bit_depth;
   1917 #endif  // CONFIG_VP9_HIGHBITDEPTH
   1918 
   1919   if ((oxcf->pass == 0) && (oxcf->rc_mode == VPX_Q)) {
   1920     rc->baseline_gf_interval = FIXED_GF_INTERVAL;
   1921   } else {
   1922     rc->baseline_gf_interval = (MIN_GF_INTERVAL + MAX_GF_INTERVAL) / 2;
   1923   }
   1924 
   1925   cpi->refresh_golden_frame = 0;
   1926   cpi->refresh_last_frame = 1;
   1927   cm->refresh_frame_context = 1;
   1928   cm->reset_frame_context = 0;
   1929 
   1930   vp9_reset_segment_features(&cm->seg);
   1931   vp9_set_high_precision_mv(cpi, 0);
   1932 
   1933   {
   1934     int i;
   1935 
   1936     for (i = 0; i < MAX_SEGMENTS; i++)
   1937       cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
   1938   }
   1939   cpi->encode_breakout = cpi->oxcf.encode_breakout;
   1940 
   1941   set_rc_buffer_sizes(rc, &cpi->oxcf);
   1942 
   1943   // Under a configuration change, where maximum_buffer_size may change,
   1944   // keep buffer level clipped to the maximum allowed buffer size.
   1945   rc->bits_off_target = VPXMIN(rc->bits_off_target, rc->maximum_buffer_size);
   1946   rc->buffer_level = VPXMIN(rc->buffer_level, rc->maximum_buffer_size);
   1947 
   1948   // Set up frame rate and related parameters rate control values.
   1949   vp9_new_framerate(cpi, cpi->framerate);
   1950 
   1951   // Set absolute upper and lower quality limits
   1952   rc->worst_quality = cpi->oxcf.worst_allowed_q;
   1953   rc->best_quality = cpi->oxcf.best_allowed_q;
   1954 
   1955   cm->interp_filter = cpi->sf.default_interp_filter;
   1956 
   1957   if (cpi->oxcf.render_width > 0 && cpi->oxcf.render_height > 0) {
   1958     cm->render_width = cpi->oxcf.render_width;
   1959     cm->render_height = cpi->oxcf.render_height;
   1960   } else {
   1961     cm->render_width = cpi->oxcf.width;
   1962     cm->render_height = cpi->oxcf.height;
   1963   }
   1964   if (last_w != cpi->oxcf.width || last_h != cpi->oxcf.height) {
   1965     cm->width = cpi->oxcf.width;
   1966     cm->height = cpi->oxcf.height;
   1967     cpi->external_resize = 1;
   1968   }
   1969 
   1970   if (cpi->initial_width) {
   1971     int new_mi_size = 0;
   1972     vp9_set_mb_mi(cm, cm->width, cm->height);
   1973     new_mi_size = cm->mi_stride * calc_mi_size(cm->mi_rows);
   1974     if (cm->mi_alloc_size < new_mi_size) {
   1975       vp9_free_context_buffers(cm);
   1976       alloc_compressor_data(cpi);
   1977       realloc_segmentation_maps(cpi);
   1978       cpi->initial_width = cpi->initial_height = 0;
   1979       cpi->external_resize = 0;
   1980     } else if (cm->mi_alloc_size == new_mi_size &&
   1981                (cpi->oxcf.width > last_w || cpi->oxcf.height > last_h)) {
   1982       vp9_alloc_loop_filter(cm);
   1983     }
   1984   }
   1985 
   1986   if (cm->current_video_frame == 0 || last_w != cpi->oxcf.width ||
   1987       last_h != cpi->oxcf.height)
   1988     update_frame_size(cpi);
   1989 
   1990   if (last_w != cpi->oxcf.width || last_h != cpi->oxcf.height) {
   1991     memset(cpi->consec_zero_mv, 0,
   1992            cm->mi_rows * cm->mi_cols * sizeof(*cpi->consec_zero_mv));
   1993     if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
   1994       vp9_cyclic_refresh_reset_resize(cpi);
   1995     rc->rc_1_frame = 0;
   1996     rc->rc_2_frame = 0;
   1997   }
   1998 
   1999   if ((cpi->svc.number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR) ||
   2000       ((cpi->svc.number_temporal_layers > 1 ||
   2001         cpi->svc.number_spatial_layers > 1) &&
   2002        cpi->oxcf.pass != 1)) {
   2003     vp9_update_layer_context_change_config(cpi,
   2004                                            (int)cpi->oxcf.target_bandwidth);
   2005   }
   2006 
   2007   // Check for resetting the rc flags (rc_1_frame, rc_2_frame) if the
    2008   // configuration change causes a large change in avg_frame_bandwidth.
   2009   // For SVC check for resetting based on spatial layer average bandwidth.
   2010   // Also reset buffer level to optimal level.
   2011   if (cm->current_video_frame > 0) {
   2012     if (cpi->use_svc) {
   2013       vp9_svc_check_reset_layer_rc_flag(cpi);
   2014     } else {
   2015       if (rc->avg_frame_bandwidth > (3 * rc->last_avg_frame_bandwidth >> 1) ||
   2016           rc->avg_frame_bandwidth < (rc->last_avg_frame_bandwidth >> 1)) {
   2017         rc->rc_1_frame = 0;
   2018         rc->rc_2_frame = 0;
   2019         rc->bits_off_target = rc->optimal_buffer_level;
   2020         rc->buffer_level = rc->optimal_buffer_level;
   2021       }
   2022     }
   2023   }
   2024 
   2025   cpi->alt_ref_source = NULL;
   2026   rc->is_src_frame_alt_ref = 0;
   2027 
   2028 #if 0
   2029   // Experimental RD Code
   2030   cpi->frame_distortion = 0;
   2031   cpi->last_frame_distortion = 0;
   2032 #endif
   2033 
   2034   set_tile_limits(cpi);
   2035 
   2036   cpi->ext_refresh_frame_flags_pending = 0;
   2037   cpi->ext_refresh_frame_context_pending = 0;
   2038 
   2039 #if CONFIG_VP9_HIGHBITDEPTH
   2040   highbd_set_var_fns(cpi);
   2041 #endif
   2042 
   2043   vp9_set_row_mt(cpi);
   2044 }
   2045 
   2046 #ifndef M_LOG2_E
   2047 #define M_LOG2_E 0.693147180559945309417
   2048 #endif
   2049 #define log2f(x) (log(x) / (float)M_LOG2_E)
   2050 
   2051 /***********************************************************************
   2052  * Read before modifying 'cal_nmvjointsadcost' or 'cal_nmvsadcosts'    *
   2053  ***********************************************************************
   2054  * The following 2 functions ('cal_nmvjointsadcost' and                *
   2055  * 'cal_nmvsadcosts') are used to calculate cost lookup tables         *
   2056  * used by 'vp9_diamond_search_sad'. The C implementation of the       *
   2057  * function is generic, but the AVX intrinsics optimised version       *
   2058  * relies on the following properties of the computed tables:          *
   2059  * For cal_nmvjointsadcost:                                            *
   2060  *   - mvjointsadcost[1] == mvjointsadcost[2] == mvjointsadcost[3]     *
   2061  * For cal_nmvsadcosts:                                                *
   2062  *   - For all i: mvsadcost[0][i] == mvsadcost[1][i]                   *
   2063  *         (Equal costs for both components)                           *
   2064  *   - For all i: mvsadcost[0][i] == mvsadcost[0][-i]                  *
   2065  *         (Cost function is even)                                     *
   2066  * If these do not hold, then the AVX optimised version of the         *
   2067  * 'vp9_diamond_search_sad' function cannot be used as it is, in which *
   2068  * case you can revert to using the C function instead.                *
   2069  ***********************************************************************/
   2070 
   2071 static void cal_nmvjointsadcost(int *mvjointsadcost) {
   2072   /*********************************************************************
   2073    * Warning: Read the comments above before modifying this function   *
   2074    *********************************************************************/
   2075   mvjointsadcost[0] = 600;
   2076   mvjointsadcost[1] = 300;
   2077   mvjointsadcost[2] = 300;
   2078   mvjointsadcost[3] = 300;
   2079 }
   2080 
   2081 static void cal_nmvsadcosts(int *mvsadcost[2]) {
   2082   /*********************************************************************
   2083    * Warning: Read the comments above before modifying this function   *
   2084    *********************************************************************/
   2085   int i = 1;
   2086 
   2087   mvsadcost[0][0] = 0;
   2088   mvsadcost[1][0] = 0;
   2089 
   2090   do {
   2091     double z = 256 * (2 * (log2f(8 * i) + .6));
   2092     mvsadcost[0][i] = (int)z;
   2093     mvsadcost[1][i] = (int)z;
   2094     mvsadcost[0][-i] = (int)z;
   2095     mvsadcost[1][-i] = (int)z;
   2096   } while (++i <= MV_MAX);
   2097 }
   2098 
   2099 static void cal_nmvsadcosts_hp(int *mvsadcost[2]) {
   2100   int i = 1;
   2101 
   2102   mvsadcost[0][0] = 0;
   2103   mvsadcost[1][0] = 0;
   2104 
   2105   do {
   2106     double z = 256 * (2 * (log2f(8 * i) + .6));
   2107     mvsadcost[0][i] = (int)z;
   2108     mvsadcost[1][i] = (int)z;
   2109     mvsadcost[0][-i] = (int)z;
   2110     mvsadcost[1][-i] = (int)z;
   2111   } while (++i <= MV_MAX);
   2112 }
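/* Example (a sketch for illustration, not part of the build): a debug check
 * of the table properties the AVX version of vp9_diamond_search_sad relies
 * on could look like
 *   assert(mvjointsadcost[1] == mvjointsadcost[2] &&
 *          mvjointsadcost[2] == mvjointsadcost[3]);
 *   for (i = 0; i <= MV_MAX; ++i) {
 *     assert(mvsadcost[0][i] == mvsadcost[1][i]);   // both components equal
 *     assert(mvsadcost[0][i] == mvsadcost[0][-i]);  // cost is even in i
 *   }
 */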
   2113 
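// Allocate and initialise a complete encoder instance. Any allocation
// failure longjmps to the setjmp handler at the top of the function, which
// tears down the partially built encoder and returns NULL.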
   2114 VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf,
   2115                                 BufferPool *const pool) {
   2116   unsigned int i;
   2117   VP9_COMP *volatile const cpi = vpx_memalign(32, sizeof(VP9_COMP));
   2118   VP9_COMMON *volatile const cm = cpi != NULL ? &cpi->common : NULL;
   2119 
   2120   if (!cm) return NULL;
   2121 
   2122   vp9_zero(*cpi);
   2123 
   2124   if (setjmp(cm->error.jmp)) {
   2125     cm->error.setjmp = 0;
   2126     vp9_remove_compressor(cpi);
   2127     return 0;
   2128   }
   2129 
   2130   cm->error.setjmp = 1;
   2131   cm->alloc_mi = vp9_enc_alloc_mi;
   2132   cm->free_mi = vp9_enc_free_mi;
   2133   cm->setup_mi = vp9_enc_setup_mi;
   2134 
   2135   CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
   2136   CHECK_MEM_ERROR(
   2137       cm, cm->frame_contexts,
   2138       (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));
   2139 
   2140   cpi->use_svc = 0;
   2141   cpi->resize_state = ORIG;
   2142   cpi->external_resize = 0;
   2143   cpi->resize_avg_qp = 0;
   2144   cpi->resize_buffer_underflow = 0;
   2145   cpi->use_skin_detection = 0;
   2146   cpi->common.buffer_pool = pool;
   2147 
   2148   cpi->force_update_segmentation = 0;
   2149 
   2150   init_config(cpi, oxcf);
   2151   vp9_rc_init(&cpi->oxcf, oxcf->pass, &cpi->rc);
   2152 
   2153   cm->current_video_frame = 0;
   2154   cpi->partition_search_skippable_frame = 0;
   2155   cpi->tile_data = NULL;
   2156 
   2157   realloc_segmentation_maps(cpi);
   2158 
   2159   CHECK_MEM_ERROR(
   2160       cm, cpi->skin_map,
   2161       vpx_calloc(cm->mi_rows * cm->mi_cols, sizeof(cpi->skin_map[0])));
   2162 
   2163   CHECK_MEM_ERROR(cm, cpi->alt_ref_aq, vp9_alt_ref_aq_create());
   2164 
   2165   CHECK_MEM_ERROR(
   2166       cm, cpi->consec_zero_mv,
   2167       vpx_calloc(cm->mi_rows * cm->mi_cols, sizeof(*cpi->consec_zero_mv)));
   2168 
   2169   CHECK_MEM_ERROR(cm, cpi->nmvcosts[0],
   2170                   vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts[0])));
   2171   CHECK_MEM_ERROR(cm, cpi->nmvcosts[1],
   2172                   vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts[1])));
   2173   CHECK_MEM_ERROR(cm, cpi->nmvcosts_hp[0],
   2174                   vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts_hp[0])));
   2175   CHECK_MEM_ERROR(cm, cpi->nmvcosts_hp[1],
   2176                   vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts_hp[1])));
   2177   CHECK_MEM_ERROR(cm, cpi->nmvsadcosts[0],
   2178                   vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts[0])));
   2179   CHECK_MEM_ERROR(cm, cpi->nmvsadcosts[1],
   2180                   vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts[1])));
   2181   CHECK_MEM_ERROR(cm, cpi->nmvsadcosts_hp[0],
   2182                   vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts_hp[0])));
   2183   CHECK_MEM_ERROR(cm, cpi->nmvsadcosts_hp[1],
   2184                   vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts_hp[1])));
   2185 
   2186   for (i = 0; i < (sizeof(cpi->mbgraph_stats) / sizeof(cpi->mbgraph_stats[0]));
   2187        i++) {
   2188     CHECK_MEM_ERROR(
   2189         cm, cpi->mbgraph_stats[i].mb_stats,
   2190         vpx_calloc(cm->MBs * sizeof(*cpi->mbgraph_stats[i].mb_stats), 1));
   2191   }
   2192 
   2193 #if CONFIG_FP_MB_STATS
   2194   cpi->use_fp_mb_stats = 0;
   2195   if (cpi->use_fp_mb_stats) {
    2196     // A placeholder buffer used to store the first-pass macroblock stats.
   2197     CHECK_MEM_ERROR(cm, cpi->twopass.frame_mb_stats_buf,
   2198                     vpx_calloc(cm->MBs * sizeof(uint8_t), 1));
   2199   } else {
   2200     cpi->twopass.frame_mb_stats_buf = NULL;
   2201   }
   2202 #endif
   2203 
   2204   cpi->refresh_alt_ref_frame = 0;
   2205   cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
   2206 
   2207   init_level_info(&cpi->level_info);
   2208   init_level_constraint(&cpi->level_constraint);
   2209 
   2210 #if CONFIG_INTERNAL_STATS
   2211   cpi->b_calculate_blockiness = 1;
   2212   cpi->b_calculate_consistency = 1;
   2213   cpi->total_inconsistency = 0;
   2214   cpi->psnr.worst = 100.0;
   2215   cpi->worst_ssim = 100.0;
   2216 
   2217   cpi->count = 0;
   2218   cpi->bytes = 0;
   2219 
   2220   if (cpi->b_calculate_psnr) {
   2221     cpi->total_sq_error = 0;
   2222     cpi->total_samples = 0;
   2223 
   2224     cpi->totalp_sq_error = 0;
   2225     cpi->totalp_samples = 0;
   2226 
   2227     cpi->tot_recode_hits = 0;
   2228     cpi->summed_quality = 0;
   2229     cpi->summed_weights = 0;
   2230     cpi->summedp_quality = 0;
   2231     cpi->summedp_weights = 0;
   2232   }
   2233 
   2234   cpi->fastssim.worst = 100.0;
   2235 
   2236   cpi->psnrhvs.worst = 100.0;
   2237 
   2238   if (cpi->b_calculate_blockiness) {
   2239     cpi->total_blockiness = 0;
   2240     cpi->worst_blockiness = 0.0;
   2241   }
   2242 
   2243   if (cpi->b_calculate_consistency) {
   2244     CHECK_MEM_ERROR(cm, cpi->ssim_vars,
   2245                     vpx_calloc(cpi->common.mi_rows * cpi->common.mi_cols,
   2246                                sizeof(*cpi->ssim_vars) * 4));
   2247     cpi->worst_consistency = 100.0;
   2248   } else {
   2249     cpi->ssim_vars = NULL;
   2250   }
   2251 
   2252 #endif
   2253 
   2254   cpi->first_time_stamp_ever = INT64_MAX;
   2255 
   2256   /*********************************************************************
   2257    * Warning: Read the comments around 'cal_nmvjointsadcost' and       *
   2258    * 'cal_nmvsadcosts' before modifying how these tables are computed. *
   2259    *********************************************************************/
   2260   cal_nmvjointsadcost(cpi->td.mb.nmvjointsadcost);
   2261   cpi->td.mb.nmvcost[0] = &cpi->nmvcosts[0][MV_MAX];
   2262   cpi->td.mb.nmvcost[1] = &cpi->nmvcosts[1][MV_MAX];
   2263   cpi->td.mb.nmvsadcost[0] = &cpi->nmvsadcosts[0][MV_MAX];
   2264   cpi->td.mb.nmvsadcost[1] = &cpi->nmvsadcosts[1][MV_MAX];
   2265   cal_nmvsadcosts(cpi->td.mb.nmvsadcost);
   2266 
   2267   cpi->td.mb.nmvcost_hp[0] = &cpi->nmvcosts_hp[0][MV_MAX];
   2268   cpi->td.mb.nmvcost_hp[1] = &cpi->nmvcosts_hp[1][MV_MAX];
   2269   cpi->td.mb.nmvsadcost_hp[0] = &cpi->nmvsadcosts_hp[0][MV_MAX];
   2270   cpi->td.mb.nmvsadcost_hp[1] = &cpi->nmvsadcosts_hp[1][MV_MAX];
   2271   cal_nmvsadcosts_hp(cpi->td.mb.nmvsadcost_hp);
   2272 
   2273 #if CONFIG_VP9_TEMPORAL_DENOISING
   2274 #ifdef OUTPUT_YUV_DENOISED
   2275   yuv_denoised_file = fopen("denoised.yuv", "ab");
   2276 #endif
   2277 #endif
   2278 #ifdef OUTPUT_YUV_SKINMAP
   2279   yuv_skinmap_file = fopen("skinmap.yuv", "wb");
   2280 #endif
   2281 #ifdef OUTPUT_YUV_REC
   2282   yuv_rec_file = fopen("rec.yuv", "wb");
   2283 #endif
   2284 #ifdef OUTPUT_YUV_SVC_SRC
   2285   yuv_svc_src[0] = fopen("svc_src_0.yuv", "wb");
   2286   yuv_svc_src[1] = fopen("svc_src_1.yuv", "wb");
   2287   yuv_svc_src[2] = fopen("svc_src_2.yuv", "wb");
   2288 #endif
   2289 
   2290 #if 0
   2291   framepsnr = fopen("framepsnr.stt", "a");
   2292   kf_list = fopen("kf_list.stt", "w");
   2293 #endif
   2294 
   2295   cpi->allow_encode_breakout = ENCODE_BREAKOUT_ENABLED;
   2296 
   2297 #if !CONFIG_REALTIME_ONLY
   2298   if (oxcf->pass == 1) {
   2299     vp9_init_first_pass(cpi);
   2300   } else if (oxcf->pass == 2) {
   2301     const size_t packet_sz = sizeof(FIRSTPASS_STATS);
   2302     const int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
   2303 
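    // For spatial/temporal SVC the combined first-pass stats are split per
    // layer: the trailing packet of each layer carries its packet count, and
    // that layer's stats are copied into its own LAYER_CONTEXT buffer.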
   2304     if (cpi->svc.number_spatial_layers > 1 ||
   2305         cpi->svc.number_temporal_layers > 1) {
   2306       FIRSTPASS_STATS *const stats = oxcf->two_pass_stats_in.buf;
   2307       FIRSTPASS_STATS *stats_copy[VPX_SS_MAX_LAYERS] = { 0 };
   2308       int i;
   2309 
   2310       for (i = 0; i < oxcf->ss_number_layers; ++i) {
   2311         FIRSTPASS_STATS *const last_packet_for_layer =
   2312             &stats[packets - oxcf->ss_number_layers + i];
   2313         const int layer_id = (int)last_packet_for_layer->spatial_layer_id;
   2314         const int packets_in_layer = (int)last_packet_for_layer->count + 1;
   2315         if (layer_id >= 0 && layer_id < oxcf->ss_number_layers) {
   2316           LAYER_CONTEXT *const lc = &cpi->svc.layer_context[layer_id];
   2317 
   2318           vpx_free(lc->rc_twopass_stats_in.buf);
   2319 
   2320           lc->rc_twopass_stats_in.sz = packets_in_layer * packet_sz;
   2321           CHECK_MEM_ERROR(cm, lc->rc_twopass_stats_in.buf,
   2322                           vpx_malloc(lc->rc_twopass_stats_in.sz));
   2323           lc->twopass.stats_in_start = lc->rc_twopass_stats_in.buf;
   2324           lc->twopass.stats_in = lc->twopass.stats_in_start;
   2325           lc->twopass.stats_in_end =
   2326               lc->twopass.stats_in_start + packets_in_layer - 1;
   2327           stats_copy[layer_id] = lc->rc_twopass_stats_in.buf;
   2328         }
   2329       }
   2330 
   2331       for (i = 0; i < packets; ++i) {
   2332         const int layer_id = (int)stats[i].spatial_layer_id;
   2333         if (layer_id >= 0 && layer_id < oxcf->ss_number_layers &&
   2334             stats_copy[layer_id] != NULL) {
   2335           *stats_copy[layer_id] = stats[i];
   2336           ++stats_copy[layer_id];
   2337         }
   2338       }
   2339 
   2340       vp9_init_second_pass_spatial_svc(cpi);
   2341     } else {
   2342 #if CONFIG_FP_MB_STATS
   2343       if (cpi->use_fp_mb_stats) {
   2344         const size_t psz = cpi->common.MBs * sizeof(uint8_t);
   2345         const int ps = (int)(oxcf->firstpass_mb_stats_in.sz / psz);
   2346 
   2347         cpi->twopass.firstpass_mb_stats.mb_stats_start =
   2348             oxcf->firstpass_mb_stats_in.buf;
   2349         cpi->twopass.firstpass_mb_stats.mb_stats_end =
   2350             cpi->twopass.firstpass_mb_stats.mb_stats_start +
   2351             (ps - 1) * cpi->common.MBs * sizeof(uint8_t);
   2352       }
   2353 #endif
   2354 
   2355       cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
   2356       cpi->twopass.stats_in = cpi->twopass.stats_in_start;
   2357       cpi->twopass.stats_in_end = &cpi->twopass.stats_in[packets - 1];
   2358 
   2359       vp9_init_second_pass(cpi);
   2360     }
   2361   }
   2362 #endif  // !CONFIG_REALTIME_ONLY
   2363 
   2364   vp9_set_speed_features_framesize_independent(cpi);
   2365   vp9_set_speed_features_framesize_dependent(cpi);
   2366 
   2367 #if CONFIG_NON_GREEDY_MV
   2368   cpi->feature_score_loc_alloc = 0;
   2369 #endif  // CONFIG_NON_GREEDY_MV
   2370   for (i = 0; i < MAX_ARF_GOP_SIZE; ++i) cpi->tpl_stats[i].tpl_stats_ptr = NULL;
   2371 
   2372   // Allocate memory to store variances for a frame.
   2373   CHECK_MEM_ERROR(cm, cpi->source_diff_var, vpx_calloc(cm->MBs, sizeof(diff)));
   2374   cpi->source_var_thresh = 0;
   2375   cpi->frames_till_next_var_check = 0;
   2376 
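// Fill the per-block-size function pointer table with the standard 8-bit
// SAD / variance kernels; when the stream uses high bit depth these are
// replaced below by highbd_set_var_fns().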
   2377 #define BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX4DF) \
   2378   cpi->fn_ptr[BT].sdf = SDF;                      \
   2379   cpi->fn_ptr[BT].sdaf = SDAF;                    \
   2380   cpi->fn_ptr[BT].vf = VF;                        \
   2381   cpi->fn_ptr[BT].svf = SVF;                      \
   2382   cpi->fn_ptr[BT].svaf = SVAF;                    \
   2383   cpi->fn_ptr[BT].sdx4df = SDX4DF;
   2384 
   2385   BFP(BLOCK_32X16, vpx_sad32x16, vpx_sad32x16_avg, vpx_variance32x16,
   2386       vpx_sub_pixel_variance32x16, vpx_sub_pixel_avg_variance32x16,
   2387       vpx_sad32x16x4d)
   2388 
   2389   BFP(BLOCK_16X32, vpx_sad16x32, vpx_sad16x32_avg, vpx_variance16x32,
   2390       vpx_sub_pixel_variance16x32, vpx_sub_pixel_avg_variance16x32,
   2391       vpx_sad16x32x4d)
   2392 
   2393   BFP(BLOCK_64X32, vpx_sad64x32, vpx_sad64x32_avg, vpx_variance64x32,
   2394       vpx_sub_pixel_variance64x32, vpx_sub_pixel_avg_variance64x32,
   2395       vpx_sad64x32x4d)
   2396 
   2397   BFP(BLOCK_32X64, vpx_sad32x64, vpx_sad32x64_avg, vpx_variance32x64,
   2398       vpx_sub_pixel_variance32x64, vpx_sub_pixel_avg_variance32x64,
   2399       vpx_sad32x64x4d)
   2400 
   2401   BFP(BLOCK_32X32, vpx_sad32x32, vpx_sad32x32_avg, vpx_variance32x32,
   2402       vpx_sub_pixel_variance32x32, vpx_sub_pixel_avg_variance32x32,
   2403       vpx_sad32x32x4d)
   2404 
   2405   BFP(BLOCK_64X64, vpx_sad64x64, vpx_sad64x64_avg, vpx_variance64x64,
   2406       vpx_sub_pixel_variance64x64, vpx_sub_pixel_avg_variance64x64,
   2407       vpx_sad64x64x4d)
   2408 
   2409   BFP(BLOCK_16X16, vpx_sad16x16, vpx_sad16x16_avg, vpx_variance16x16,
   2410       vpx_sub_pixel_variance16x16, vpx_sub_pixel_avg_variance16x16,
   2411       vpx_sad16x16x4d)
   2412 
   2413   BFP(BLOCK_16X8, vpx_sad16x8, vpx_sad16x8_avg, vpx_variance16x8,
   2414       vpx_sub_pixel_variance16x8, vpx_sub_pixel_avg_variance16x8,
   2415       vpx_sad16x8x4d)
   2416 
   2417   BFP(BLOCK_8X16, vpx_sad8x16, vpx_sad8x16_avg, vpx_variance8x16,
   2418       vpx_sub_pixel_variance8x16, vpx_sub_pixel_avg_variance8x16,
   2419       vpx_sad8x16x4d)
   2420 
   2421   BFP(BLOCK_8X8, vpx_sad8x8, vpx_sad8x8_avg, vpx_variance8x8,
   2422       vpx_sub_pixel_variance8x8, vpx_sub_pixel_avg_variance8x8, vpx_sad8x8x4d)
   2423 
   2424   BFP(BLOCK_8X4, vpx_sad8x4, vpx_sad8x4_avg, vpx_variance8x4,
   2425       vpx_sub_pixel_variance8x4, vpx_sub_pixel_avg_variance8x4, vpx_sad8x4x4d)
   2426 
   2427   BFP(BLOCK_4X8, vpx_sad4x8, vpx_sad4x8_avg, vpx_variance4x8,
   2428       vpx_sub_pixel_variance4x8, vpx_sub_pixel_avg_variance4x8, vpx_sad4x8x4d)
   2429 
   2430   BFP(BLOCK_4X4, vpx_sad4x4, vpx_sad4x4_avg, vpx_variance4x4,
   2431       vpx_sub_pixel_variance4x4, vpx_sub_pixel_avg_variance4x4, vpx_sad4x4x4d)
   2432 
   2433 #if CONFIG_VP9_HIGHBITDEPTH
   2434   highbd_set_var_fns(cpi);
   2435 #endif
   2436 
    2437   /* vp9_init_quantizer() is first called here. A check in
    2438    * vp9_frame_init_quantizer() ensures vp9_init_quantizer() is only
    2439    * called again later when needed, which avoids unnecessary calls
    2440    * for every frame.
    2441    */
   2442   vp9_init_quantizer(cpi);
   2443 
   2444   vp9_loop_filter_init(cm);
   2445 
   2446   // Set up the unit scaling factor used during motion search.
   2447 #if CONFIG_VP9_HIGHBITDEPTH
   2448   vp9_setup_scale_factors_for_frame(&cpi->me_sf, cm->width, cm->height,
   2449                                     cm->width, cm->height,
   2450                                     cm->use_highbitdepth);
   2451 #else
   2452   vp9_setup_scale_factors_for_frame(&cpi->me_sf, cm->width, cm->height,
   2453                                     cm->width, cm->height);
   2454 #endif  // CONFIG_VP9_HIGHBITDEPTH
   2455   cpi->td.mb.me_sf = &cpi->me_sf;
   2456 
   2457   cm->error.setjmp = 0;
   2458 
   2459   return cpi;
   2460 }
   2461 
   2462 #if CONFIG_INTERNAL_STATS
   2463 #define SNPRINT(H, T) snprintf((H) + strlen(H), sizeof(H) - strlen(H), (T))
   2464 
   2465 #define SNPRINT2(H, T, V) \
   2466   snprintf((H) + strlen(H), sizeof(H) - strlen(H), (T), (V))
   2467 #endif  // CONFIG_INTERNAL_STATS
   2468 
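// Tear down an encoder instance: print the accumulated internal stats when
// they are enabled, then free the denoiser, TPL stats and per-thread worker
// data.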
   2469 void vp9_remove_compressor(VP9_COMP *cpi) {
   2470   VP9_COMMON *cm;
   2471   unsigned int i, frame;
   2472   int t;
   2473 
   2474   if (!cpi) return;
   2475 
   2476 #if CONFIG_INTERNAL_STATS
   2477   vpx_free(cpi->ssim_vars);
   2478 #endif
   2479 
   2480   cm = &cpi->common;
   2481   if (cm->current_video_frame > 0) {
   2482 #if CONFIG_INTERNAL_STATS
   2483     vpx_clear_system_state();
   2484 
   2485     if (cpi->oxcf.pass != 1) {
   2486       char headings[512] = { 0 };
   2487       char results[512] = { 0 };
   2488       FILE *f = fopen("opsnr.stt", "a");
   2489       double time_encoded =
   2490           (cpi->last_end_time_stamp_seen - cpi->first_time_stamp_ever) /
   2491           10000000.000;
   2492       double total_encode_time =
   2493           (cpi->time_receive_data + cpi->time_compress_data) / 1000.000;
   2494       const double dr =
   2495           (double)cpi->bytes * (double)8 / (double)1000 / time_encoded;
   2496       const double peak = (double)((1 << cpi->oxcf.input_bit_depth) - 1);
   2497       const double target_rate = (double)cpi->oxcf.target_bandwidth / 1000;
   2498       const double rate_err = ((100.0 * (dr - target_rate)) / target_rate);
   2499 
   2500       if (cpi->b_calculate_psnr) {
   2501         const double total_psnr = vpx_sse_to_psnr(
   2502             (double)cpi->total_samples, peak, (double)cpi->total_sq_error);
   2503         const double totalp_psnr = vpx_sse_to_psnr(
   2504             (double)cpi->totalp_samples, peak, (double)cpi->totalp_sq_error);
   2505         const double total_ssim =
   2506             100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);
   2507         const double totalp_ssim =
   2508             100 * pow(cpi->summedp_quality / cpi->summedp_weights, 8.0);
   2509 
   2510         snprintf(headings, sizeof(headings),
   2511                  "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\tGLPsnrP\t"
   2512                  "VPXSSIM\tVPSSIMP\tFASTSIM\tPSNRHVS\t"
   2513                  "WstPsnr\tWstSsim\tWstFast\tWstHVS\t"
   2514                  "AVPsnrY\tAPsnrCb\tAPsnrCr");
   2515         snprintf(results, sizeof(results),
   2516                  "%7.2f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
   2517                  "%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
   2518                  "%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
   2519                  "%7.3f\t%7.3f\t%7.3f",
   2520                  dr, cpi->psnr.stat[ALL] / cpi->count, total_psnr,
   2521                  cpi->psnrp.stat[ALL] / cpi->count, totalp_psnr, total_ssim,
   2522                  totalp_ssim, cpi->fastssim.stat[ALL] / cpi->count,
   2523                  cpi->psnrhvs.stat[ALL] / cpi->count, cpi->psnr.worst,
   2524                  cpi->worst_ssim, cpi->fastssim.worst, cpi->psnrhvs.worst,
   2525                  cpi->psnr.stat[Y] / cpi->count, cpi->psnr.stat[U] / cpi->count,
   2526                  cpi->psnr.stat[V] / cpi->count);
   2527 
   2528         if (cpi->b_calculate_blockiness) {
   2529           SNPRINT(headings, "\t  Block\tWstBlck");
   2530           SNPRINT2(results, "\t%7.3f", cpi->total_blockiness / cpi->count);
   2531           SNPRINT2(results, "\t%7.3f", cpi->worst_blockiness);
   2532         }
   2533 
   2534         if (cpi->b_calculate_consistency) {
   2535           double consistency =
   2536               vpx_sse_to_psnr((double)cpi->totalp_samples, peak,
   2537                               (double)cpi->total_inconsistency);
   2538 
   2539           SNPRINT(headings, "\tConsist\tWstCons");
   2540           SNPRINT2(results, "\t%7.3f", consistency);
   2541           SNPRINT2(results, "\t%7.3f", cpi->worst_consistency);
   2542         }
   2543 
   2544         fprintf(f, "%s\t    Time\tRcErr\tAbsErr\n", headings);
   2545         fprintf(f, "%s\t%8.0f\t%7.2f\t%7.2f\n", results, total_encode_time,
   2546                 rate_err, fabs(rate_err));
   2547       }
   2548 
   2549       fclose(f);
   2550     }
   2551 #endif
   2552 
   2553 #if 0
   2554     {
   2555       printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
    2556       printf("\n_frames receive_data encode_mb_row compress_frame  Total\n");
   2557       printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame,
   2558              cpi->time_receive_data / 1000, cpi->time_encode_sb_row / 1000,
   2559              cpi->time_compress_data / 1000,
   2560              (cpi->time_receive_data + cpi->time_compress_data) / 1000);
   2561     }
   2562 #endif
   2563   }
   2564 
   2565 #if CONFIG_VP9_TEMPORAL_DENOISING
   2566   vp9_denoiser_free(&(cpi->denoiser));
   2567 #endif
   2568 
   2569 #if CONFIG_NON_GREEDY_MV
   2570   vpx_free(cpi->feature_score_loc_arr);
   2571   vpx_free(cpi->feature_score_loc_sort);
   2572   vpx_free(cpi->feature_score_loc_heap);
   2573 #endif
   2574   for (frame = 0; frame < MAX_ARF_GOP_SIZE; ++frame) {
   2575     vpx_free(cpi->tpl_stats[frame].tpl_stats_ptr);
   2576     cpi->tpl_stats[frame].is_valid = 0;
   2577   }
   2578 
   2579   for (t = 0; t < cpi->num_workers; ++t) {
   2580     VPxWorker *const worker = &cpi->workers[t];
   2581     EncWorkerData *const thread_data = &cpi->tile_thr_data[t];
   2582 
    2583     // Stop and release the worker thread.
   2584     vpx_get_worker_interface()->end(worker);
   2585 
    2586     // Free per-worker thread data; the last worker shares the main thread's td.
   2587     if (t < cpi->num_workers - 1) {
   2588       vpx_free(thread_data->td->counts);
   2589       vp9_free_pc_tree(thread_data->td);
   2590       vpx_free(thread_data->td);
   2591     }
   2592   }
   2593   vpx_free(cpi->tile_thr_data);
   2594   vpx_free(cpi->workers);
   2595   vp9_row_mt_mem_dealloc(cpi);
   2596 
   2597   if (cpi->num_workers > 1) {
   2598     vp9_loop_filter_dealloc(&cpi->lf_row_sync);
   2599     vp9_bitstream_encode_tiles_buffer_dealloc(cpi);
   2600   }
   2601 
   2602   vp9_alt_ref_aq_destroy(cpi->alt_ref_aq);
   2603 
   2604   dealloc_compressor_data(cpi);
   2605 
   2606   for (i = 0; i < sizeof(cpi->mbgraph_stats) / sizeof(cpi->mbgraph_stats[0]);
   2607        ++i) {
   2608     vpx_free(cpi->mbgraph_stats[i].mb_stats);
   2609   }
   2610 
   2611 #if CONFIG_FP_MB_STATS
   2612   if (cpi->use_fp_mb_stats) {
   2613     vpx_free(cpi->twopass.frame_mb_stats_buf);
   2614     cpi->twopass.frame_mb_stats_buf = NULL;
   2615   }
   2616 #endif
   2617 
   2618   vp9_remove_common(cm);
   2619   vp9_free_ref_frame_buffers(cm->buffer_pool);
   2620 #if CONFIG_VP9_POSTPROC
   2621   vp9_free_postproc_buffers(cm);
   2622 #endif
   2623   vpx_free(cpi);
   2624 
   2625 #if CONFIG_VP9_TEMPORAL_DENOISING
   2626 #ifdef OUTPUT_YUV_DENOISED
   2627   fclose(yuv_denoised_file);
   2628 #endif
   2629 #endif
   2630 #ifdef OUTPUT_YUV_SKINMAP
   2631   fclose(yuv_skinmap_file);
   2632 #endif
   2633 #ifdef OUTPUT_YUV_REC
   2634   fclose(yuv_rec_file);
   2635 #endif
   2636 #ifdef OUTPUT_YUV_SVC_SRC
   2637   fclose(yuv_svc_src[0]);
   2638   fclose(yuv_svc_src[1]);
   2639   fclose(yuv_svc_src[2]);
   2640 #endif
   2641 
   2642 #if 0
   2643 
   2644   if (keyfile)
   2645     fclose(keyfile);
   2646 
   2647   if (framepsnr)
   2648     fclose(framepsnr);
   2649 
   2650   if (kf_list)
   2651     fclose(kf_list);
   2652 
   2653 #endif
   2654 }
   2655 
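         // Compute PSNR for the frame just shown against the raw source. For SVC
         // the stats are stored in the current spatial layer's context so they can
         // be reported with the superframe; otherwise a VPX_CODEC_PSNR_PKT is
         // appended to the output packet list.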
   2656 static void generate_psnr_packet(VP9_COMP *cpi) {
   2657   struct vpx_codec_cx_pkt pkt;
   2658   int i;
   2659   PSNR_STATS psnr;
   2660 #if CONFIG_VP9_HIGHBITDEPTH
   2661   vpx_calc_highbd_psnr(cpi->raw_source_frame, cpi->common.frame_to_show, &psnr,
   2662                        cpi->td.mb.e_mbd.bd, cpi->oxcf.input_bit_depth);
   2663 #else
   2664   vpx_calc_psnr(cpi->raw_source_frame, cpi->common.frame_to_show, &psnr);
   2665 #endif
   2666 
   2667   for (i = 0; i < 4; ++i) {
   2668     pkt.data.psnr.samples[i] = psnr.samples[i];
   2669     pkt.data.psnr.sse[i] = psnr.sse[i];
   2670     pkt.data.psnr.psnr[i] = psnr.psnr[i];
   2671   }
   2672   pkt.kind = VPX_CODEC_PSNR_PKT;
   2673   if (cpi->use_svc)
   2674     cpi->svc
   2675         .layer_context[cpi->svc.spatial_layer_id *
   2676                        cpi->svc.number_temporal_layers]
   2677         .psnr_pkt = pkt.data.psnr;
   2678   else
   2679     vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
   2680 }
   2681 
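         // ref_frame_flags is a bitmask of VP9_LAST_FLAG | VP9_GOLD_FLAG |
         // VP9_ALT_FLAG, so any value above 7 is not a valid combination.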
   2682 int vp9_use_as_reference(VP9_COMP *cpi, int ref_frame_flags) {
   2683   if (ref_frame_flags > 7) return -1;
   2684 
   2685   cpi->ref_frame_flags = ref_frame_flags;
   2686   return 0;
   2687 }
   2688 
   2689 void vp9_update_reference(VP9_COMP *cpi, int ref_frame_flags) {
   2690   cpi->ext_refresh_golden_frame = (ref_frame_flags & VP9_GOLD_FLAG) != 0;
   2691   cpi->ext_refresh_alt_ref_frame = (ref_frame_flags & VP9_ALT_FLAG) != 0;
   2692   cpi->ext_refresh_last_frame = (ref_frame_flags & VP9_LAST_FLAG) != 0;
   2693   cpi->ext_refresh_frame_flags_pending = 1;
   2694 }
   2695 
   2696 static YV12_BUFFER_CONFIG *get_vp9_ref_frame_buffer(
   2697     VP9_COMP *cpi, VP9_REFFRAME ref_frame_flag) {
   2698   MV_REFERENCE_FRAME ref_frame = NONE;
   2699   if (ref_frame_flag == VP9_LAST_FLAG)
   2700     ref_frame = LAST_FRAME;
   2701   else if (ref_frame_flag == VP9_GOLD_FLAG)
   2702     ref_frame = GOLDEN_FRAME;
   2703   else if (ref_frame_flag == VP9_ALT_FLAG)
   2704     ref_frame = ALTREF_FRAME;
   2705 
   2706   return ref_frame == NONE ? NULL : get_ref_frame_buffer(cpi, ref_frame);
   2707 }
   2708 
   2709 int vp9_copy_reference_enc(VP9_COMP *cpi, VP9_REFFRAME ref_frame_flag,
   2710                            YV12_BUFFER_CONFIG *sd) {
   2711   YV12_BUFFER_CONFIG *cfg = get_vp9_ref_frame_buffer(cpi, ref_frame_flag);
   2712   if (cfg) {
   2713     vpx_yv12_copy_frame(cfg, sd);
   2714     return 0;
   2715   } else {
   2716     return -1;
   2717   }
   2718 }
   2719 
   2720 int vp9_set_reference_enc(VP9_COMP *cpi, VP9_REFFRAME ref_frame_flag,
   2721                           YV12_BUFFER_CONFIG *sd) {
   2722   YV12_BUFFER_CONFIG *cfg = get_vp9_ref_frame_buffer(cpi, ref_frame_flag);
   2723   if (cfg) {
   2724     vpx_yv12_copy_frame(sd, cfg);
   2725     return 0;
   2726   } else {
   2727     return -1;
   2728   }
   2729 }
   2730 
   2731 int vp9_update_entropy(VP9_COMP *cpi, int update) {
   2732   cpi->ext_refresh_frame_context = update;
   2733   cpi->ext_refresh_frame_context_pending = 1;
   2734   return 0;
   2735 }
   2736 
   2737 #ifdef OUTPUT_YUV_REC
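         // Debug helper: dump the reconstructed frame to yuv_rec_file, writing the
         // Y, U and V planes one row (y_width/uv_width samples) at a time, with
         // two bytes per sample for high bit depth buffers.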
   2738 void vp9_write_yuv_rec_frame(VP9_COMMON *cm) {
   2739   YV12_BUFFER_CONFIG *s = cm->frame_to_show;
   2740   uint8_t *src = s->y_buffer;
   2741   int h = cm->height;
   2742 
   2743 #if CONFIG_VP9_HIGHBITDEPTH
   2744   if (s->flags & YV12_FLAG_HIGHBITDEPTH) {
   2745     uint16_t *src16 = CONVERT_TO_SHORTPTR(s->y_buffer);
   2746 
   2747     do {
   2748       fwrite(src16, s->y_width, 2, yuv_rec_file);
   2749       src16 += s->y_stride;
   2750     } while (--h);
   2751 
   2752     src16 = CONVERT_TO_SHORTPTR(s->u_buffer);
   2753     h = s->uv_height;
   2754 
   2755     do {
   2756       fwrite(src16, s->uv_width, 2, yuv_rec_file);
   2757       src16 += s->uv_stride;
   2758     } while (--h);
   2759 
   2760     src16 = CONVERT_TO_SHORTPTR(s->v_buffer);
   2761     h = s->uv_height;
   2762 
   2763     do {
   2764       fwrite(src16, s->uv_width, 2, yuv_rec_file);
   2765       src16 += s->uv_stride;
   2766     } while (--h);
   2767 
   2768     fflush(yuv_rec_file);
   2769     return;
   2770   }
   2771 #endif  // CONFIG_VP9_HIGHBITDEPTH
   2772 
   2773   do {
   2774     fwrite(src, s->y_width, 1, yuv_rec_file);
   2775     src += s->y_stride;
   2776   } while (--h);
   2777 
   2778   src = s->u_buffer;
   2779   h = s->uv_height;
   2780 
   2781   do {
   2782     fwrite(src, s->uv_width, 1, yuv_rec_file);
   2783     src += s->uv_stride;
   2784   } while (--h);
   2785 
   2786   src = s->v_buffer;
   2787   h = s->uv_height;
   2788 
   2789   do {
   2790     fwrite(src, s->uv_width, 1, yuv_rec_file);
   2791     src += s->uv_stride;
   2792   } while (--h);
   2793 
   2794   fflush(yuv_rec_file);
   2795 }
   2796 #endif
   2797 
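         // Non-normative scaler: resample each plane to the destination size with
         // vp9_resize_plane() (or its high-bitdepth variant) and re-extend the
         // borders. The output does not need to match the decoder's normative
         // scaling.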
   2798 #if CONFIG_VP9_HIGHBITDEPTH
   2799 static void scale_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src,
   2800                                                 YV12_BUFFER_CONFIG *dst,
   2801                                                 int bd) {
   2802 #else
   2803 static void scale_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src,
   2804                                                 YV12_BUFFER_CONFIG *dst) {
   2805 #endif  // CONFIG_VP9_HIGHBITDEPTH
   2806   // TODO(dkovalev): replace YV12_BUFFER_CONFIG with vpx_image_t
   2807   int i;
   2808   const uint8_t *const srcs[3] = { src->y_buffer, src->u_buffer,
   2809                                    src->v_buffer };
   2810   const int src_strides[3] = { src->y_stride, src->uv_stride, src->uv_stride };
   2811   const int src_widths[3] = { src->y_crop_width, src->uv_crop_width,
   2812                               src->uv_crop_width };
   2813   const int src_heights[3] = { src->y_crop_height, src->uv_crop_height,
   2814                                src->uv_crop_height };
   2815   uint8_t *const dsts[3] = { dst->y_buffer, dst->u_buffer, dst->v_buffer };
   2816   const int dst_strides[3] = { dst->y_stride, dst->uv_stride, dst->uv_stride };
   2817   const int dst_widths[3] = { dst->y_crop_width, dst->uv_crop_width,
   2818                               dst->uv_crop_width };
   2819   const int dst_heights[3] = { dst->y_crop_height, dst->uv_crop_height,
   2820                                dst->uv_crop_height };
   2821 
   2822   for (i = 0; i < MAX_MB_PLANE; ++i) {
   2823 #if CONFIG_VP9_HIGHBITDEPTH
   2824     if (src->flags & YV12_FLAG_HIGHBITDEPTH) {
   2825       vp9_highbd_resize_plane(srcs[i], src_heights[i], src_widths[i],
   2826                               src_strides[i], dsts[i], dst_heights[i],
   2827                               dst_widths[i], dst_strides[i], bd);
   2828     } else {
   2829       vp9_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
   2830                        dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
   2831     }
   2832 #else
   2833     vp9_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
   2834                      dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
   2835 #endif  // CONFIG_VP9_HIGHBITDEPTH
   2836   }
   2837   vpx_extend_frame_borders(dst);
   2838 }
   2839 
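         // Convolution-based scaler for high bit depth builds: the destination is
         // walked in 16x16 luma tiles (8x8 for the subsampled chroma planes), the
         // source position is derived in 1/16th pel units (x_q4/y_q4, offset by
         // phase_scaler), and the 8-tap kernel selected by filter_type is applied.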
   2840 #if CONFIG_VP9_HIGHBITDEPTH
   2841 static void scale_and_extend_frame(const YV12_BUFFER_CONFIG *src,
   2842                                    YV12_BUFFER_CONFIG *dst, int bd,
   2843                                    INTERP_FILTER filter_type,
   2844                                    int phase_scaler) {
   2845   const int src_w = src->y_crop_width;
   2846   const int src_h = src->y_crop_height;
   2847   const int dst_w = dst->y_crop_width;
   2848   const int dst_h = dst->y_crop_height;
   2849   const uint8_t *const srcs[3] = { src->y_buffer, src->u_buffer,
   2850                                    src->v_buffer };
   2851   const int src_strides[3] = { src->y_stride, src->uv_stride, src->uv_stride };
   2852   uint8_t *const dsts[3] = { dst->y_buffer, dst->u_buffer, dst->v_buffer };
   2853   const int dst_strides[3] = { dst->y_stride, dst->uv_stride, dst->uv_stride };
   2854   const InterpKernel *const kernel = vp9_filter_kernels[filter_type];
   2855   int x, y, i;
   2856 
   2857   for (i = 0; i < MAX_MB_PLANE; ++i) {
   2858     const int factor = (i == 0 || i == 3 ? 1 : 2);
   2859     const int src_stride = src_strides[i];
   2860     const int dst_stride = dst_strides[i];
   2861     for (y = 0; y < dst_h; y += 16) {
   2862       const int y_q4 = y * (16 / factor) * src_h / dst_h + phase_scaler;
   2863       for (x = 0; x < dst_w; x += 16) {
   2864         const int x_q4 = x * (16 / factor) * src_w / dst_w + phase_scaler;
   2865         const uint8_t *src_ptr = srcs[i] +
   2866                                  (y / factor) * src_h / dst_h * src_stride +
   2867                                  (x / factor) * src_w / dst_w;
   2868         uint8_t *dst_ptr = dsts[i] + (y / factor) * dst_stride + (x / factor);
   2869 
   2870         if (src->flags & YV12_FLAG_HIGHBITDEPTH) {
   2871           vpx_highbd_convolve8(CONVERT_TO_SHORTPTR(src_ptr), src_stride,
   2872                                CONVERT_TO_SHORTPTR(dst_ptr), dst_stride, kernel,
   2873                                x_q4 & 0xf, 16 * src_w / dst_w, y_q4 & 0xf,
   2874                                16 * src_h / dst_h, 16 / factor, 16 / factor,
   2875                                bd);
   2876         } else {
   2877           vpx_scaled_2d(src_ptr, src_stride, dst_ptr, dst_stride, kernel,
   2878                         x_q4 & 0xf, 16 * src_w / dst_w, y_q4 & 0xf,
   2879                         16 * src_h / dst_h, 16 / factor, 16 / factor);
   2880         }
   2881       }
   2882     }
   2883   }
   2884 
   2885   vpx_extend_frame_borders(dst);
   2886 }
   2887 #endif  // CONFIG_VP9_HIGHBITDEPTH
   2888 
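         // Decide whether a kf/gf/arf frame should trigger dynamic resizing: only
         // when the frame is still unscaled, q has reached the maximum allowed for
         // its rf level, and the projected size exceeds the SCALE_STEP1 threshold.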
   2889 static int scale_down(VP9_COMP *cpi, int q) {
   2890   RATE_CONTROL *const rc = &cpi->rc;
   2891   GF_GROUP *const gf_group = &cpi->twopass.gf_group;
   2892   int scale = 0;
   2893   assert(frame_is_kf_gf_arf(cpi));
   2894 
   2895   if (rc->frame_size_selector == UNSCALED &&
   2896       q >= rc->rf_level_maxq[gf_group->rf_level[gf_group->index]]) {
   2897     const int max_size_thresh =
   2898         (int)(rate_thresh_mult[SCALE_STEP1] *
   2899               VPXMAX(rc->this_frame_target, rc->avg_frame_bandwidth));
   2900     scale = rc->projected_frame_size > max_size_thresh ? 1 : 0;
   2901   }
   2902   return scale;
   2903 }
   2904 
   2905 static int big_rate_miss_high_threshold(VP9_COMP *cpi) {
   2906   const RATE_CONTROL *const rc = &cpi->rc;
   2907   int big_miss_high;
   2908 
   2909   if (frame_is_kf_gf_arf(cpi))
   2910     big_miss_high = rc->this_frame_target * 3 / 2;
   2911   else
   2912     big_miss_high = rc->this_frame_target * 2;
   2913 
   2914   return big_miss_high;
   2915 }
   2916 
   2917 static int big_rate_miss(VP9_COMP *cpi) {
   2918   const RATE_CONTROL *const rc = &cpi->rc;
   2919   int big_miss_high;
   2920   int big_miss_low;
   2921 
   2922   // Ignore for overlay frames
   2923   if (rc->is_src_frame_alt_ref) {
   2924     return 0;
   2925   } else {
   2926     big_miss_low = (rc->this_frame_target / 2);
   2927     big_miss_high = big_rate_miss_high_threshold(cpi);
   2928 
   2929     return (rc->projected_frame_size > big_miss_high) ||
   2930            (rc->projected_frame_size < big_miss_low);
   2931   }
   2932 }
   2933 
    2934 // In two-pass mode, test whether this is the first inter frame of a GF group.
   2935 static int two_pass_first_group_inter(VP9_COMP *cpi) {
   2936   if (cpi->oxcf.pass == 2) {
   2937     TWO_PASS *const twopass = &cpi->twopass;
   2938     GF_GROUP *const gf_group = &twopass->gf_group;
   2939     const int gfg_index = gf_group->index;
   2940 
   2941     if (gfg_index == 0) return gf_group->update_type[gfg_index] == LF_UPDATE;
   2942     return gf_group->update_type[gfg_index - 1] != LF_UPDATE &&
   2943            gf_group->update_type[gfg_index] == LF_UPDATE;
   2944   } else {
   2945     return 0;
   2946   }
   2947 }
   2948 
   2949 // Function to test for conditions that indicate we should loop
   2950 // back and recode a frame.
   2951 static int recode_loop_test(VP9_COMP *cpi, int high_limit, int low_limit, int q,
   2952                             int maxq, int minq) {
   2953   const RATE_CONTROL *const rc = &cpi->rc;
   2954   const VP9EncoderConfig *const oxcf = &cpi->oxcf;
   2955   const int frame_is_kfgfarf = frame_is_kf_gf_arf(cpi);
   2956   int force_recode = 0;
   2957 
   2958   if ((rc->projected_frame_size >= rc->max_frame_bandwidth) ||
   2959       big_rate_miss(cpi) || (cpi->sf.recode_loop == ALLOW_RECODE) ||
   2960       (two_pass_first_group_inter(cpi) &&
   2961        (cpi->sf.recode_loop == ALLOW_RECODE_FIRST)) ||
   2962       (frame_is_kfgfarf && (cpi->sf.recode_loop >= ALLOW_RECODE_KFARFGF))) {
   2963     if (frame_is_kfgfarf && (oxcf->resize_mode == RESIZE_DYNAMIC) &&
   2964         scale_down(cpi, q)) {
   2965       // Code this group at a lower resolution.
   2966       cpi->resize_pending = 1;
   2967       return 1;
   2968     }
   2969 
   2970     // Force recode for extreme overshoot.
   2971     if ((rc->projected_frame_size >= rc->max_frame_bandwidth) ||
   2972         (cpi->sf.recode_loop >= ALLOW_RECODE_KFARFGF &&
   2973          rc->projected_frame_size >= big_rate_miss_high_threshold(cpi))) {
   2974       return 1;
   2975     }
   2976 
   2977     // TODO(agrange) high_limit could be greater than the scale-down threshold.
   2978     if ((rc->projected_frame_size > high_limit && q < maxq) ||
   2979         (rc->projected_frame_size < low_limit && q > minq)) {
   2980       force_recode = 1;
   2981     } else if (cpi->oxcf.rc_mode == VPX_CQ) {
   2982       // Deal with frame undershoot and whether or not we are
   2983       // below the automatically set cq level.
   2984       if (q > oxcf->cq_level &&
   2985           rc->projected_frame_size < ((rc->this_frame_target * 7) >> 3)) {
   2986         force_recode = 1;
   2987       }
   2988     }
   2989   }
   2990   return force_recode;
   2991 }
   2992 
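         // Bookkeeping for the reference frame map once the frame is encoded:
         // assign the new buffer to every LAST/GOLDEN/ALTREF slot flagged for
         // refresh, handle the golden<->altref swap when an existing golden frame
         // is preserved as the new ARF, and maintain the ARF index stack used by
         // multi-layer ARF groups.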
   2993 static void update_ref_frames(VP9_COMP *cpi) {
   2994   VP9_COMMON *const cm = &cpi->common;
   2995   BufferPool *const pool = cm->buffer_pool;
   2996   GF_GROUP *const gf_group = &cpi->twopass.gf_group;
   2997 
   2998   // Pop ARF.
   2999   if (cm->show_existing_frame) {
   3000     cpi->lst_fb_idx = cpi->alt_fb_idx;
   3001     cpi->alt_fb_idx =
   3002         stack_pop(gf_group->arf_index_stack, gf_group->stack_size);
   3003     --gf_group->stack_size;
   3004   }
   3005 
   3006   // At this point the new frame has been encoded.
   3007   // If any buffer copy / swapping is signaled it should be done here.
   3008   if (cm->frame_type == KEY_FRAME) {
   3009     ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->gld_fb_idx],
   3010                cm->new_fb_idx);
   3011     ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->alt_fb_idx],
   3012                cm->new_fb_idx);
   3013   } else if (vp9_preserve_existing_gf(cpi)) {
   3014     // We have decided to preserve the previously existing golden frame as our
   3015     // new ARF frame. However, in the short term in function
   3016     // vp9_get_refresh_mask() we left it in the GF slot and, if
   3017     // we're updating the GF with the current decoded frame, we save it to the
   3018     // ARF slot instead.
   3019     // We now have to update the ARF with the current frame and swap gld_fb_idx
   3020     // and alt_fb_idx so that, overall, we've stored the old GF in the new ARF
   3021     // slot and, if we're updating the GF, the current frame becomes the new GF.
   3022     int tmp;
   3023 
   3024     ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->alt_fb_idx],
   3025                cm->new_fb_idx);
   3026 
   3027     tmp = cpi->alt_fb_idx;
   3028     cpi->alt_fb_idx = cpi->gld_fb_idx;
   3029     cpi->gld_fb_idx = tmp;
   3030   } else { /* For non key/golden frames */
   3031     if (cpi->refresh_alt_ref_frame) {
   3032       int arf_idx = gf_group->top_arf_idx;
   3033 
   3034       // Push new ARF into stack.
   3035       stack_push(gf_group->arf_index_stack, cpi->alt_fb_idx,
   3036                  gf_group->stack_size);
   3037       ++gf_group->stack_size;
   3038 
   3039       assert(arf_idx < REF_FRAMES);
   3040 
   3041       ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[arf_idx], cm->new_fb_idx);
   3042       memcpy(cpi->interp_filter_selected[ALTREF_FRAME],
   3043              cpi->interp_filter_selected[0],
   3044              sizeof(cpi->interp_filter_selected[0]));
   3045 
   3046       cpi->alt_fb_idx = arf_idx;
   3047     }
   3048 
   3049     if (cpi->refresh_golden_frame) {
   3050       ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->gld_fb_idx],
   3051                  cm->new_fb_idx);
   3052       if (!cpi->rc.is_src_frame_alt_ref)
   3053         memcpy(cpi->interp_filter_selected[GOLDEN_FRAME],
   3054                cpi->interp_filter_selected[0],
   3055                sizeof(cpi->interp_filter_selected[0]));
   3056       else
   3057         memcpy(cpi->interp_filter_selected[GOLDEN_FRAME],
   3058                cpi->interp_filter_selected[ALTREF_FRAME],
   3059                sizeof(cpi->interp_filter_selected[ALTREF_FRAME]));
   3060     }
   3061   }
   3062 
   3063   if (cpi->refresh_last_frame) {
   3064     ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->lst_fb_idx],
   3065                cm->new_fb_idx);
   3066     if (!cpi->rc.is_src_frame_alt_ref)
   3067       memcpy(cpi->interp_filter_selected[LAST_FRAME],
   3068              cpi->interp_filter_selected[0],
   3069              sizeof(cpi->interp_filter_selected[0]));
   3070   }
   3071 
   3072   if (gf_group->update_type[gf_group->index] == MID_OVERLAY_UPDATE) {
   3073     cpi->alt_fb_idx =
   3074         stack_pop(gf_group->arf_index_stack, gf_group->stack_size);
   3075     --gf_group->stack_size;
   3076   }
   3077 }
   3078 
   3079 void vp9_update_reference_frames(VP9_COMP *cpi) {
   3080   update_ref_frames(cpi);
   3081 
   3082 #if CONFIG_VP9_TEMPORAL_DENOISING
   3083   vp9_denoiser_update_ref_frame(cpi);
   3084 #endif
   3085 
   3086   if (is_one_pass_cbr_svc(cpi)) vp9_svc_update_ref_frame(cpi);
   3087 }
   3088 
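         // Pick a loop filter level for the reconstructed frame and apply it. The
         // filter level search is skipped for show_existing_frame, lossless and
         // overlay (source alt-ref) frames, and filtering only runs when the frame
         // is actually used as a reference.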
   3089 static void loopfilter_frame(VP9_COMP *cpi, VP9_COMMON *cm) {
   3090   MACROBLOCKD *xd = &cpi->td.mb.e_mbd;
   3091   struct loopfilter *lf = &cm->lf;
   3092   int is_reference_frame =
   3093       (cm->frame_type == KEY_FRAME || cpi->refresh_last_frame ||
   3094        cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame);
   3095   if (cpi->use_svc &&
   3096       cpi->svc.temporal_layering_mode == VP9E_TEMPORAL_LAYERING_MODE_BYPASS)
   3097     is_reference_frame = !cpi->svc.non_reference_frame;
   3098 
   3099   // Skip loop filter in show_existing_frame mode.
   3100   if (cm->show_existing_frame) {
   3101     lf->filter_level = 0;
   3102     return;
   3103   }
   3104 
   3105   if (xd->lossless) {
   3106     lf->filter_level = 0;
   3107     lf->last_filt_level = 0;
   3108   } else {
   3109     struct vpx_usec_timer timer;
   3110 
   3111     vpx_clear_system_state();
   3112 
   3113     vpx_usec_timer_start(&timer);
   3114 
   3115     if (!cpi->rc.is_src_frame_alt_ref) {
   3116       if ((cpi->common.frame_type == KEY_FRAME) &&
   3117           (!cpi->rc.this_key_frame_forced)) {
   3118         lf->last_filt_level = 0;
   3119       }
   3120       vp9_pick_filter_level(cpi->Source, cpi, cpi->sf.lpf_pick);
   3121       lf->last_filt_level = lf->filter_level;
   3122     } else {
   3123       lf->filter_level = 0;
   3124     }
   3125 
   3126     vpx_usec_timer_mark(&timer);
   3127     cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
   3128   }
   3129 
   3130   if (lf->filter_level > 0 && is_reference_frame) {
   3131     vp9_build_mask_frame(cm, lf->filter_level, 0);
   3132 
   3133     if (cpi->num_workers > 1)
   3134       vp9_loop_filter_frame_mt(cm->frame_to_show, cm, xd->plane,
   3135                                lf->filter_level, 0, 0, cpi->workers,
   3136                                cpi->num_workers, &cpi->lf_row_sync);
   3137     else
   3138       vp9_loop_filter_frame(cm->frame_to_show, cm, xd, lf->filter_level, 0, 0);
   3139   }
   3140 
   3141   vpx_extend_frame_inner_borders(cm->frame_to_show);
   3142 }
   3143 
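         // Lazily (re)allocate the per-frame motion vector buffer whenever it is
         // missing or was sized for a smaller mi_rows x mi_cols grid than the
         // current frame.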
   3144 static INLINE void alloc_frame_mvs(VP9_COMMON *const cm, int buffer_idx) {
   3145   RefCntBuffer *const new_fb_ptr = &cm->buffer_pool->frame_bufs[buffer_idx];
   3146   if (new_fb_ptr->mvs == NULL || new_fb_ptr->mi_rows < cm->mi_rows ||
   3147       new_fb_ptr->mi_cols < cm->mi_cols) {
   3148     vpx_free(new_fb_ptr->mvs);
   3149     CHECK_MEM_ERROR(cm, new_fb_ptr->mvs,
   3150                     (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
   3151                                          sizeof(*new_fb_ptr->mvs)));
   3152     new_fb_ptr->mi_rows = cm->mi_rows;
   3153     new_fb_ptr->mi_cols = cm->mi_cols;
   3154   }
   3155 }
   3156 
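         // For each active reference whose dimensions differ from the current
         // coded size, produce (or reuse) a scaled copy and record its buffer
         // index in scaled_ref_idx; references that already match are recorded
         // directly and their reference count is incremented.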
   3157 void vp9_scale_references(VP9_COMP *cpi) {
   3158   VP9_COMMON *cm = &cpi->common;
   3159   MV_REFERENCE_FRAME ref_frame;
   3160   const VP9_REFFRAME ref_mask[3] = { VP9_LAST_FLAG, VP9_GOLD_FLAG,
   3161                                      VP9_ALT_FLAG };
   3162 
   3163   for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
    3164     // Convert ref_frame (MV_REFERENCE_FRAME) to an index into ref_mask (subtract 1).
   3165     if (cpi->ref_frame_flags & ref_mask[ref_frame - 1]) {
   3166       BufferPool *const pool = cm->buffer_pool;
   3167       const YV12_BUFFER_CONFIG *const ref =
   3168           get_ref_frame_buffer(cpi, ref_frame);
   3169 
   3170       if (ref == NULL) {
   3171         cpi->scaled_ref_idx[ref_frame - 1] = INVALID_IDX;
   3172         continue;
   3173       }
   3174 
   3175 #if CONFIG_VP9_HIGHBITDEPTH
   3176       if (ref->y_crop_width != cm->width || ref->y_crop_height != cm->height) {
   3177         RefCntBuffer *new_fb_ptr = NULL;
   3178         int force_scaling = 0;
   3179         int new_fb = cpi->scaled_ref_idx[ref_frame - 1];
   3180         if (new_fb == INVALID_IDX) {
   3181           new_fb = get_free_fb(cm);
   3182           force_scaling = 1;
   3183         }
   3184         if (new_fb == INVALID_IDX) return;
   3185         new_fb_ptr = &pool->frame_bufs[new_fb];
   3186         if (force_scaling || new_fb_ptr->buf.y_crop_width != cm->width ||
   3187             new_fb_ptr->buf.y_crop_height != cm->height) {
   3188           if (vpx_realloc_frame_buffer(&new_fb_ptr->buf, cm->width, cm->height,
   3189                                        cm->subsampling_x, cm->subsampling_y,
   3190                                        cm->use_highbitdepth,
   3191                                        VP9_ENC_BORDER_IN_PIXELS,
   3192                                        cm->byte_alignment, NULL, NULL, NULL))
   3193             vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
   3194                                "Failed to allocate frame buffer");
   3195           scale_and_extend_frame(ref, &new_fb_ptr->buf, (int)cm->bit_depth,
   3196                                  EIGHTTAP, 0);
   3197           cpi->scaled_ref_idx[ref_frame - 1] = new_fb;
   3198           alloc_frame_mvs(cm, new_fb);
   3199         }
   3200 #else
   3201       if (ref->y_crop_width != cm->width || ref->y_crop_height != cm->height) {
   3202         RefCntBuffer *new_fb_ptr = NULL;
   3203         int force_scaling = 0;
   3204         int new_fb = cpi->scaled_ref_idx[ref_frame - 1];
   3205         if (new_fb == INVALID_IDX) {
   3206           new_fb = get_free_fb(cm);
   3207           force_scaling = 1;
   3208         }
   3209         if (new_fb == INVALID_IDX) return;
   3210         new_fb_ptr = &pool->frame_bufs[new_fb];
   3211         if (force_scaling || new_fb_ptr->buf.y_crop_width != cm->width ||
   3212             new_fb_ptr->buf.y_crop_height != cm->height) {
   3213           if (vpx_realloc_frame_buffer(&new_fb_ptr->buf, cm->width, cm->height,
   3214                                        cm->subsampling_x, cm->subsampling_y,
   3215                                        VP9_ENC_BORDER_IN_PIXELS,
   3216                                        cm->byte_alignment, NULL, NULL, NULL))
   3217             vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
   3218                                "Failed to allocate frame buffer");
   3219           vp9_scale_and_extend_frame(ref, &new_fb_ptr->buf, EIGHTTAP, 0);
   3220           cpi->scaled_ref_idx[ref_frame - 1] = new_fb;
   3221           alloc_frame_mvs(cm, new_fb);
   3222         }
   3223 #endif  // CONFIG_VP9_HIGHBITDEPTH
   3224       } else {
   3225         int buf_idx;
   3226         RefCntBuffer *buf = NULL;
   3227         if (cpi->oxcf.pass == 0 && !cpi->use_svc) {
   3228           // Check for release of scaled reference.
   3229           buf_idx = cpi->scaled_ref_idx[ref_frame - 1];
   3230           if (buf_idx != INVALID_IDX) {
   3231             buf = &pool->frame_bufs[buf_idx];
   3232             --buf->ref_count;
   3233             cpi->scaled_ref_idx[ref_frame - 1] = INVALID_IDX;
   3234           }
   3235         }
   3236         buf_idx = get_ref_frame_buf_idx(cpi, ref_frame);
   3237         buf = &pool->frame_bufs[buf_idx];
   3238         buf->buf.y_crop_width = ref->y_crop_width;
   3239         buf->buf.y_crop_height = ref->y_crop_height;
   3240         cpi->scaled_ref_idx[ref_frame - 1] = buf_idx;
   3241         ++buf->ref_count;
   3242       }
   3243     } else {
   3244       if (cpi->oxcf.pass != 0 || cpi->use_svc)
   3245         cpi->scaled_ref_idx[ref_frame - 1] = INVALID_IDX;
   3246     }
   3247   }
   3248 }
   3249 
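         // Drop the references taken by vp9_scale_references(). In 1-pass non-SVC
         // mode a scaled copy is kept unless its slot is about to be refreshed or
         // the stored buffer already matches the reference resolution; otherwise
         // every scaled reference is released.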
   3250 static void release_scaled_references(VP9_COMP *cpi) {
   3251   VP9_COMMON *cm = &cpi->common;
   3252   int i;
   3253   if (cpi->oxcf.pass == 0 && !cpi->use_svc) {
   3254     // Only release scaled references under certain conditions:
   3255     // if reference will be updated, or if scaled reference has same resolution.
   3256     int refresh[3];
   3257     refresh[0] = (cpi->refresh_last_frame) ? 1 : 0;
   3258     refresh[1] = (cpi->refresh_golden_frame) ? 1 : 0;
   3259     refresh[2] = (cpi->refresh_alt_ref_frame) ? 1 : 0;
   3260     for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
   3261       const int idx = cpi->scaled_ref_idx[i - 1];
   3262       if (idx != INVALID_IDX) {
   3263         RefCntBuffer *const buf = &cm->buffer_pool->frame_bufs[idx];
   3264         const YV12_BUFFER_CONFIG *const ref = get_ref_frame_buffer(cpi, i);
   3265         if (refresh[i - 1] || (buf->buf.y_crop_width == ref->y_crop_width &&
   3266                                buf->buf.y_crop_height == ref->y_crop_height)) {
   3267           --buf->ref_count;
   3268           cpi->scaled_ref_idx[i - 1] = INVALID_IDX;
   3269         }
   3270       }
   3271     }
   3272   } else {
   3273     for (i = 0; i < REFS_PER_FRAME; ++i) {
   3274       const int idx = cpi->scaled_ref_idx[i];
   3275       if (idx != INVALID_IDX) {
   3276         RefCntBuffer *const buf = &cm->buffer_pool->frame_bufs[idx];
   3277         --buf->ref_count;
   3278         cpi->scaled_ref_idx[i] = INVALID_IDX;
   3279       }
   3280     }
   3281   }
   3282 }
   3283 
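         // Collapse a full coefficient token histogram into the model distribution
         // used for probability updates: ZERO, ONE and EOB counts are copied, and
         // all tokens from TWO_TOKEN up to (but not including) EOB_TOKEN are folded
         // into the TWO_TOKEN bin.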
   3284 static void full_to_model_count(unsigned int *model_count,
   3285                                 unsigned int *full_count) {
   3286   int n;
   3287   model_count[ZERO_TOKEN] = full_count[ZERO_TOKEN];
   3288   model_count[ONE_TOKEN] = full_count[ONE_TOKEN];
   3289   model_count[TWO_TOKEN] = full_count[TWO_TOKEN];
   3290   for (n = THREE_TOKEN; n < EOB_TOKEN; ++n)
   3291     model_count[TWO_TOKEN] += full_count[n];
   3292   model_count[EOB_MODEL_TOKEN] = full_count[EOB_TOKEN];
   3293 }
   3294 
   3295 static void full_to_model_counts(vp9_coeff_count_model *model_count,
   3296                                  vp9_coeff_count *full_count) {
   3297   int i, j, k, l;
   3298 
   3299   for (i = 0; i < PLANE_TYPES; ++i)
   3300     for (j = 0; j < REF_TYPES; ++j)
   3301       for (k = 0; k < COEF_BANDS; ++k)
   3302         for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l)
   3303           full_to_model_count(model_count[i][j][k][l], full_count[i][j][k][l]);
   3304 }
   3305 
   3306 #if 0 && CONFIG_INTERNAL_STATS
   3307 static void output_frame_level_debug_stats(VP9_COMP *cpi) {
   3308   VP9_COMMON *const cm = &cpi->common;
   3309   FILE *const f = fopen("tmp.stt", cm->current_video_frame ? "a" : "w");
   3310   int64_t recon_err;
   3311 
   3312   vpx_clear_system_state();
   3313 
   3314 #if CONFIG_VP9_HIGHBITDEPTH
   3315   if (cm->use_highbitdepth) {
   3316     recon_err = vpx_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
   3317   } else {
   3318     recon_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
   3319   }
   3320 #else
   3321   recon_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
   3322 #endif  // CONFIG_VP9_HIGHBITDEPTH
   3323 
   3324 
   3325   if (cpi->twopass.total_left_stats.coded_error != 0.0) {
    3326     double dc_quant_divisor;
    3327 #if CONFIG_VP9_HIGHBITDEPTH
    3328     switch (cm->bit_depth) {
    3329       case VPX_BITS_8:
    3330         dc_quant_divisor = 4.0;
    3331         break;
    3332       case VPX_BITS_10:
    3333         dc_quant_divisor = 16.0;
    3334         break;
    3335       default:
    3336         assert(cm->bit_depth == VPX_BITS_12);
    3337         dc_quant_divisor = 64.0;
    3338         break;
    3339     }
    3340 #else
    3341     dc_quant_divisor = 4.0;
    3342 #endif
   3343 
   3344     if (!cm->current_video_frame) {
   3345       fprintf(f, "frame, width, height, last ts, last end ts, "
   3346           "source_alt_ref_pending, source_alt_ref_active, "
   3347           "this_frame_target, projected_frame_size, "
   3348           "projected_frame_size / MBs, "
   3349           "projected_frame_size - this_frame_target, "
   3350           "vbr_bits_off_target, vbr_bits_off_target_fast, "
   3351           "twopass.extend_minq, twopass.extend_minq_fast, "
   3352           "total_target_vs_actual, "
   3353           "starting_buffer_level - bits_off_target, "
   3354           "total_actual_bits, base_qindex, q for base_qindex, "
   3355           "dc quant, q for active_worst_quality, avg_q, q for oxcf.cq_level, "
   3356           "refresh_last_frame, refresh_golden_frame, refresh_alt_ref_frame, "
   3357           "frame_type, gfu_boost, "
   3358           "twopass.bits_left, "
   3359           "twopass.total_left_stats.coded_error, "
   3360           "twopass.bits_left / (1 + twopass.total_left_stats.coded_error), "
   3361           "tot_recode_hits, recon_err, kf_boost, "
   3362           "twopass.kf_zeromotion_pct, twopass.fr_content_type, "
   3363           "filter_level, seg.aq_av_offset\n");
   3364     }
   3365 
   3366     fprintf(f, "%10u, %d, %d, %10"PRId64", %10"PRId64", %d, %d, %10d, %10d, "
   3367         "%10d, %10d, %10"PRId64", %10"PRId64", %5d, %5d, %10"PRId64", "
   3368         "%10"PRId64", %10"PRId64", %10d, %7.2lf, %7.2lf, %7.2lf, %7.2lf, "
   3369         "%7.2lf, %6d, %6d, %5d, %5d, %5d, %10"PRId64", %10.3lf, %10lf, %8u, "
   3370         "%10"PRId64", %10d, %10d, %10d, %10d, %10d\n",
   3371         cpi->common.current_video_frame,
   3372         cm->width, cm->height,
   3373         cpi->last_time_stamp_seen,
   3374         cpi->last_end_time_stamp_seen,
   3375         cpi->rc.source_alt_ref_pending,
   3376         cpi->rc.source_alt_ref_active,
   3377         cpi->rc.this_frame_target,
   3378         cpi->rc.projected_frame_size,
   3379         cpi->rc.projected_frame_size / cpi->common.MBs,
   3380         (cpi->rc.projected_frame_size - cpi->rc.this_frame_target),
   3381         cpi->rc.vbr_bits_off_target,
   3382         cpi->rc.vbr_bits_off_target_fast,
   3383         cpi->twopass.extend_minq,
   3384         cpi->twopass.extend_minq_fast,
   3385         cpi->rc.total_target_vs_actual,
   3386         (cpi->rc.starting_buffer_level - cpi->rc.bits_off_target),
   3387         cpi->rc.total_actual_bits, cm->base_qindex,
   3388         vp9_convert_qindex_to_q(cm->base_qindex, cm->bit_depth),
   3389         (double)vp9_dc_quant(cm->base_qindex, 0, cm->bit_depth) /
    3390             dc_quant_divisor,
   3391         vp9_convert_qindex_to_q(cpi->twopass.active_worst_quality,
   3392                                 cm->bit_depth),
   3393         cpi->rc.avg_q,
   3394         vp9_convert_qindex_to_q(cpi->oxcf.cq_level, cm->bit_depth),
   3395         cpi->refresh_last_frame, cpi->refresh_golden_frame,
   3396         cpi->refresh_alt_ref_frame, cm->frame_type, cpi->rc.gfu_boost,
   3397         cpi->twopass.bits_left,
   3398         cpi->twopass.total_left_stats.coded_error,
   3399         cpi->twopass.bits_left /
   3400             (1 + cpi->twopass.total_left_stats.coded_error),
   3401         cpi->tot_recode_hits, recon_err, cpi->rc.kf_boost,
   3402         cpi->twopass.kf_zeromotion_pct,
   3403         cpi->twopass.fr_content_type,
   3404         cm->lf.filter_level,
   3405         cm->seg.aq_av_offset);
   3406   }
   3407   fclose(f);
   3408 
   3409   if (0) {
   3410     FILE *const fmodes = fopen("Modes.stt", "a");
   3411     int i;
   3412 
   3413     fprintf(fmodes, "%6d:%1d:%1d:%1d ", cpi->common.current_video_frame,
   3414             cm->frame_type, cpi->refresh_golden_frame,
   3415             cpi->refresh_alt_ref_frame);
   3416 
   3417     for (i = 0; i < MAX_MODES; ++i)
   3418       fprintf(fmodes, "%5d ", cpi->mode_chosen_counts[i]);
   3419 
   3420     fprintf(fmodes, "\n");
   3421 
   3422     fclose(fmodes);
   3423   }
   3424 }
   3425 #endif
   3426 
   3427 static void set_mv_search_params(VP9_COMP *cpi) {
   3428   const VP9_COMMON *const cm = &cpi->common;
   3429   const unsigned int max_mv_def = VPXMIN(cm->width, cm->height);
   3430 
   3431   // Default based on max resolution.
   3432   cpi->mv_step_param = vp9_init_search_range(max_mv_def);
   3433 
   3434   if (cpi->sf.mv.auto_mv_step_size) {
   3435     if (frame_is_intra_only(cm)) {
   3436       // Initialize max_mv_magnitude for use in the first INTER frame
   3437       // after a key/intra-only frame.
   3438       cpi->max_mv_magnitude = max_mv_def;
   3439     } else {
   3440       if (cm->show_frame) {
   3441         // Allow mv_steps to correspond to twice the max mv magnitude found
   3442         // in the previous frame, capped by the default max_mv_magnitude based
   3443         // on resolution.
   3444         cpi->mv_step_param = vp9_init_search_range(
   3445             VPXMIN(max_mv_def, 2 * cpi->max_mv_magnitude));
   3446       }
   3447       cpi->max_mv_magnitude = 0;
   3448     }
   3449   }
   3450 }
   3451 
   3452 static void set_size_independent_vars(VP9_COMP *cpi) {
   3453   vp9_set_speed_features_framesize_independent(cpi);
   3454   vp9_set_rd_speed_thresholds(cpi);
   3455   vp9_set_rd_speed_thresholds_sub8x8(cpi);
   3456   cpi->common.interp_filter = cpi->sf.default_interp_filter;
   3457 }
   3458 
   3459 static void set_size_dependent_vars(VP9_COMP *cpi, int *q, int *bottom_index,
   3460                                     int *top_index) {
   3461   VP9_COMMON *const cm = &cpi->common;
   3462 
   3463   // Setup variables that depend on the dimensions of the frame.
   3464   vp9_set_speed_features_framesize_dependent(cpi);
   3465 
   3466   // Decide q and q bounds.
   3467   *q = vp9_rc_pick_q_and_bounds(cpi, bottom_index, top_index);
   3468 
   3469   if (cpi->oxcf.rc_mode == VPX_CBR && cpi->rc.force_max_q) {
   3470     *q = cpi->rc.worst_quality;
   3471     cpi->rc.force_max_q = 0;
   3472   }
   3473 
   3474   if (!frame_is_intra_only(cm)) {
   3475     vp9_set_high_precision_mv(cpi, (*q) < HIGH_PRECISION_MV_QTHRESH);
   3476   }
   3477 
   3478 #if !CONFIG_REALTIME_ONLY
   3479   // Configure experimental use of segmentation for enhanced coding of
   3480   // static regions if indicated.
   3481   // Only allowed in the second pass of a two pass encode, as it requires
   3482   // lagged coding, and if the relevant speed feature flag is set.
   3483   if (cpi->oxcf.pass == 2 && cpi->sf.static_segmentation)
   3484     configure_static_seg_features(cpi);
   3485 #endif  // !CONFIG_REALTIME_ONLY
   3486 
   3487 #if CONFIG_VP9_POSTPROC && !(CONFIG_VP9_TEMPORAL_DENOISING)
   3488   if (cpi->oxcf.noise_sensitivity > 0) {
   3489     int l = 0;
   3490     switch (cpi->oxcf.noise_sensitivity) {
   3491       case 1: l = 20; break;
   3492       case 2: l = 40; break;
   3493       case 3: l = 60; break;
   3494       case 4:
   3495       case 5: l = 100; break;
   3496       case 6: l = 150; break;
   3497     }
   3498     if (!cpi->common.postproc_state.limits) {
   3499       cpi->common.postproc_state.limits =
   3500           vpx_calloc(cpi->un_scaled_source->y_width,
   3501                      sizeof(*cpi->common.postproc_state.limits));
   3502     }
   3503     vp9_denoise(cpi->Source, cpi->Source, l, cpi->common.postproc_state.limits);
   3504   }
   3505 #endif  // CONFIG_VP9_POSTPROC
   3506 }
   3507 
   3508 #if CONFIG_VP9_TEMPORAL_DENOISING
   3509 static void setup_denoiser_buffer(VP9_COMP *cpi) {
   3510   VP9_COMMON *const cm = &cpi->common;
   3511   if (cpi->oxcf.noise_sensitivity > 0 &&
   3512       !cpi->denoiser.frame_buffer_initialized) {
   3513     if (vp9_denoiser_alloc(cm, &cpi->svc, &cpi->denoiser, cpi->use_svc,
   3514                            cpi->oxcf.noise_sensitivity, cm->width, cm->height,
   3515                            cm->subsampling_x, cm->subsampling_y,
   3516 #if CONFIG_VP9_HIGHBITDEPTH
   3517                            cm->use_highbitdepth,
   3518 #endif
   3519                            VP9_ENC_BORDER_IN_PIXELS))
   3520       vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
   3521                          "Failed to allocate denoiser");
   3522   }
   3523 }
   3524 #endif
   3525 
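         // Build the step-search site configuration for the selected motion search
         // method (NSTEP or DIAMOND), based on the stride of the scaled source.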
   3526 static void init_motion_estimation(VP9_COMP *cpi) {
   3527   int y_stride = cpi->scaled_source.y_stride;
   3528 
   3529   if (cpi->sf.mv.search_method == NSTEP) {
   3530     vp9_init3smotion_compensation(&cpi->ss_cfg, y_stride);
   3531   } else if (cpi->sf.mv.search_method == DIAMOND) {
   3532     vp9_init_dsmotion_compensation(&cpi->ss_cfg, y_stride);
   3533   }
   3534 }
   3535 
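         // Establish the coded frame size for this frame: apply any pending fixed
         // or dynamic resize, reallocate the new frame buffer and utility buffers
         // to match, and recompute the scale factors for each active reference.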
   3536 static void set_frame_size(VP9_COMP *cpi) {
   3537   int ref_frame;
   3538   VP9_COMMON *const cm = &cpi->common;
   3539   VP9EncoderConfig *const oxcf = &cpi->oxcf;
   3540   MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
   3541 
   3542 #if !CONFIG_REALTIME_ONLY
   3543   if (oxcf->pass == 2 && oxcf->rc_mode == VPX_VBR &&
   3544       ((oxcf->resize_mode == RESIZE_FIXED && cm->current_video_frame == 0) ||
   3545        (oxcf->resize_mode == RESIZE_DYNAMIC && cpi->resize_pending))) {
   3546     calculate_coded_size(cpi, &oxcf->scaled_frame_width,
   3547                          &oxcf->scaled_frame_height);
   3548 
   3549     // There has been a change in frame size.
   3550     vp9_set_size_literal(cpi, oxcf->scaled_frame_width,
   3551                          oxcf->scaled_frame_height);
   3552   }
   3553 #endif  // !CONFIG_REALTIME_ONLY
   3554 
   3555   if (oxcf->pass == 0 && oxcf->rc_mode == VPX_CBR && !cpi->use_svc &&
   3556       oxcf->resize_mode == RESIZE_DYNAMIC && cpi->resize_pending != 0) {
   3557     oxcf->scaled_frame_width =
   3558         (oxcf->width * cpi->resize_scale_num) / cpi->resize_scale_den;
   3559     oxcf->scaled_frame_height =
   3560         (oxcf->height * cpi->resize_scale_num) / cpi->resize_scale_den;
   3561     // There has been a change in frame size.
   3562     vp9_set_size_literal(cpi, oxcf->scaled_frame_width,
   3563                          oxcf->scaled_frame_height);
   3564 
   3565     // TODO(agrange) Scale cpi->max_mv_magnitude if frame-size has changed.
   3566     set_mv_search_params(cpi);
   3567 
   3568     vp9_noise_estimate_init(&cpi->noise_estimate, cm->width, cm->height);
   3569 #if CONFIG_VP9_TEMPORAL_DENOISING
   3570     // Reset the denoiser on the resized frame.
   3571     if (cpi->oxcf.noise_sensitivity > 0) {
   3572       vp9_denoiser_free(&(cpi->denoiser));
   3573       setup_denoiser_buffer(cpi);
    3574       // Dynamic resize is only triggered for non-SVC, so we can force a
    3575       // golden frame update here as a temporary fix for the denoiser.
   3576       cpi->refresh_golden_frame = 1;
   3577     }
   3578 #endif
   3579   }
   3580 
   3581   if ((oxcf->pass == 2) && !cpi->use_svc) {
   3582     vp9_set_target_rate(cpi);
   3583   }
   3584 
   3585   alloc_frame_mvs(cm, cm->new_fb_idx);
   3586 
   3587   // Reset the frame pointers to the current frame size.
   3588   if (vpx_realloc_frame_buffer(get_frame_new_buffer(cm), cm->width, cm->height,
   3589                                cm->subsampling_x, cm->subsampling_y,
   3590 #if CONFIG_VP9_HIGHBITDEPTH
   3591                                cm->use_highbitdepth,
   3592 #endif
   3593                                VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
   3594                                NULL, NULL, NULL))
   3595     vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
   3596                        "Failed to allocate frame buffer");
   3597 
   3598   alloc_util_frame_buffers(cpi);
   3599   init_motion_estimation(cpi);
   3600 
   3601   for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
   3602     RefBuffer *const ref_buf = &cm->frame_refs[ref_frame - 1];
   3603     const int buf_idx = get_ref_frame_buf_idx(cpi, ref_frame);
   3604 
   3605     ref_buf->idx = buf_idx;
   3606 
   3607     if (buf_idx != INVALID_IDX) {
   3608       YV12_BUFFER_CONFIG *const buf = &cm->buffer_pool->frame_bufs[buf_idx].buf;
   3609       ref_buf->buf = buf;
   3610 #if CONFIG_VP9_HIGHBITDEPTH
   3611       vp9_setup_scale_factors_for_frame(
   3612           &ref_buf->sf, buf->y_crop_width, buf->y_crop_height, cm->width,
   3613           cm->height, (buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 1 : 0);
   3614 #else
   3615       vp9_setup_scale_factors_for_frame(&ref_buf->sf, buf->y_crop_width,
   3616                                         buf->y_crop_height, cm->width,
   3617                                         cm->height);
   3618 #endif  // CONFIG_VP9_HIGHBITDEPTH
   3619       if (vp9_is_scaled(&ref_buf->sf)) vpx_extend_frame_borders(buf);
   3620     } else {
   3621       ref_buf->buf = NULL;
   3622     }
   3623   }
   3624 
   3625   set_ref_ptrs(cm, xd, LAST_FRAME, LAST_FRAME);
   3626 }
   3627 
   3628 #if CONFIG_CONSISTENT_RECODE
   3629 static void save_encode_params(VP9_COMP *cpi) {
   3630   VP9_COMMON *const cm = &cpi->common;
   3631   const int tile_cols = 1 << cm->log2_tile_cols;
   3632   const int tile_rows = 1 << cm->log2_tile_rows;
   3633   int tile_col, tile_row;
   3634   int i, j;
   3635   RD_OPT *rd_opt = &cpi->rd;
   3636   for (i = 0; i < MAX_REF_FRAMES; i++) {
   3637     for (j = 0; j < REFERENCE_MODES; j++)
   3638       rd_opt->prediction_type_threshes_prev[i][j] =
   3639           rd_opt->prediction_type_threshes[i][j];
   3640 
   3641     for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; j++)
   3642       rd_opt->filter_threshes_prev[i][j] = rd_opt->filter_threshes[i][j];
   3643   }
   3644 
   3645   if (cpi->tile_data != NULL) {
   3646     for (tile_row = 0; tile_row < tile_rows; ++tile_row)
   3647       for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
   3648         TileDataEnc *tile_data =
   3649             &cpi->tile_data[tile_row * tile_cols + tile_col];
   3650         for (i = 0; i < BLOCK_SIZES; ++i) {
   3651           for (j = 0; j < MAX_MODES; ++j) {
   3652             tile_data->thresh_freq_fact_prev[i][j] =
   3653                 tile_data->thresh_freq_fact[i][j];
   3654           }
   3655         }
   3656       }
   3657   }
   3658 }
   3659 #endif
   3660 
   3661 static INLINE void set_raw_source_frame(VP9_COMP *cpi) {
   3662 #ifdef ENABLE_KF_DENOISE
   3663   if (is_spatial_denoise_enabled(cpi)) {
   3664     cpi->raw_source_frame = vp9_scale_if_required(
   3665         cm, &cpi->raw_unscaled_source, &cpi->raw_scaled_source,
   3666         (oxcf->pass == 0), EIGHTTAP, 0);
   3667   } else {
   3668     cpi->raw_source_frame = cpi->Source;
   3669   }
   3670 #else
   3671   cpi->raw_source_frame = cpi->Source;
   3672 #endif
   3673 }
   3674 
   3675 static int encode_without_recode_loop(VP9_COMP *cpi, size_t *size,
   3676                                       uint8_t *dest) {
   3677   VP9_COMMON *const cm = &cpi->common;
   3678   SVC *const svc = &cpi->svc;
   3679   int q = 0, bottom_index = 0, top_index = 0;
   3680   int no_drop_scene_change = 0;
   3681   const INTERP_FILTER filter_scaler =
   3682       (is_one_pass_cbr_svc(cpi))
   3683           ? svc->downsample_filter_type[svc->spatial_layer_id]
   3684           : EIGHTTAP;
   3685   const int phase_scaler =
   3686       (is_one_pass_cbr_svc(cpi))
   3687           ? svc->downsample_filter_phase[svc->spatial_layer_id]
   3688           : 0;
   3689 
   3690   if (cm->show_existing_frame) {
   3691     if (is_psnr_calc_enabled(cpi)) set_raw_source_frame(cpi);
   3692     return 1;
   3693   }
   3694 
   3695   svc->time_stamp_prev[svc->spatial_layer_id] = svc->time_stamp_superframe;
   3696 
    3697   // Flag to check if it's valid to compute the source sad (used for
   3698   // scene detection and for superblock content state in CBR mode).
   3699   // The flag may get reset below based on SVC or resizing state.
   3700   cpi->compute_source_sad_onepass = cpi->oxcf.mode == REALTIME;
   3701 
   3702   vpx_clear_system_state();
   3703 
   3704   set_frame_size(cpi);
   3705 
   3706   if (is_one_pass_cbr_svc(cpi) &&
   3707       cpi->un_scaled_source->y_width == cm->width << 2 &&
   3708       cpi->un_scaled_source->y_height == cm->height << 2 &&
   3709       svc->scaled_temp.y_width == cm->width << 1 &&
   3710       svc->scaled_temp.y_height == cm->height << 1) {
   3711     // For svc, if it is a 1/4x1/4 downscaling, do a two-stage scaling to take
   3712     // advantage of the 1:2 optimized scaler. In the process, the 1/2x1/2
   3713     // result will be saved in scaled_temp and might be used later.
   3714     const INTERP_FILTER filter_scaler2 = svc->downsample_filter_type[1];
   3715     const int phase_scaler2 = svc->downsample_filter_phase[1];
   3716     cpi->Source = vp9_svc_twostage_scale(
   3717         cm, cpi->un_scaled_source, &cpi->scaled_source, &svc->scaled_temp,
   3718         filter_scaler, phase_scaler, filter_scaler2, phase_scaler2);
   3719     svc->scaled_one_half = 1;
   3720   } else if (is_one_pass_cbr_svc(cpi) &&
   3721              cpi->un_scaled_source->y_width == cm->width << 1 &&
   3722              cpi->un_scaled_source->y_height == cm->height << 1 &&
   3723              svc->scaled_one_half) {
   3724     // If the spatial layer is 1/2x1/2 and the scaling is already done in the
   3725     // two-stage scaling, use the result directly.
   3726     cpi->Source = &svc->scaled_temp;
   3727     svc->scaled_one_half = 0;
   3728   } else {
   3729     cpi->Source = vp9_scale_if_required(
   3730         cm, cpi->un_scaled_source, &cpi->scaled_source, (cpi->oxcf.pass == 0),
   3731         filter_scaler, phase_scaler);
   3732   }
   3733 #ifdef OUTPUT_YUV_SVC_SRC
   3734   // Write out at most 3 spatial layers.
   3735   if (is_one_pass_cbr_svc(cpi) && svc->spatial_layer_id < 3) {
   3736     vpx_write_yuv_frame(yuv_svc_src[svc->spatial_layer_id], cpi->Source);
   3737   }
   3738 #endif
   3739   // Unfiltered raw source used in metrics calculation if the source
   3740   // has been filtered.
   3741   if (is_psnr_calc_enabled(cpi)) {
   3742 #ifdef ENABLE_KF_DENOISE
   3743     if (is_spatial_denoise_enabled(cpi)) {
   3744       cpi->raw_source_frame = vp9_scale_if_required(
   3745           cm, &cpi->raw_unscaled_source, &cpi->raw_scaled_source,
   3746           (cpi->oxcf.pass == 0), EIGHTTAP, phase_scaler);
   3747     } else {
   3748       cpi->raw_source_frame = cpi->Source;
   3749     }
   3750 #else
   3751     cpi->raw_source_frame = cpi->Source;
   3752 #endif
   3753   }
   3754 
   3755   if ((cpi->use_svc &&
   3756        (svc->spatial_layer_id < svc->number_spatial_layers - 1 ||
   3757         svc->temporal_layer_id < svc->number_temporal_layers - 1 ||
   3758         svc->current_superframe < 1)) ||
   3759       cpi->resize_pending || cpi->resize_state || cpi->external_resize ||
   3760       cpi->resize_state != ORIG) {
   3761     cpi->compute_source_sad_onepass = 0;
   3762     if (cpi->content_state_sb_fd != NULL)
   3763       memset(cpi->content_state_sb_fd, 0,
   3764              (cm->mi_stride >> 3) * ((cm->mi_rows >> 3) + 1) *
   3765                  sizeof(*cpi->content_state_sb_fd));
   3766   }
   3767 
    3768   // Avoid scaling last_source unless it's needed.
   3769   // Last source is needed if avg_source_sad() is used, or if
   3770   // partition_search_type == SOURCE_VAR_BASED_PARTITION, or if noise
   3771   // estimation is enabled.
   3772   if (cpi->unscaled_last_source != NULL &&
   3773       (cpi->oxcf.content == VP9E_CONTENT_SCREEN ||
   3774        (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_VBR &&
   3775         cpi->oxcf.mode == REALTIME && cpi->oxcf.speed >= 5) ||
   3776        cpi->sf.partition_search_type == SOURCE_VAR_BASED_PARTITION ||
   3777        (cpi->noise_estimate.enabled && !cpi->oxcf.noise_sensitivity) ||
   3778        cpi->compute_source_sad_onepass))
   3779     cpi->Last_Source = vp9_scale_if_required(
   3780         cm, cpi->unscaled_last_source, &cpi->scaled_last_source,
   3781         (cpi->oxcf.pass == 0), EIGHTTAP, 0);
   3782 
   3783   if (cpi->Last_Source == NULL ||
   3784       cpi->Last_Source->y_width != cpi->Source->y_width ||
   3785       cpi->Last_Source->y_height != cpi->Source->y_height)
   3786     cpi->compute_source_sad_onepass = 0;
   3787 
   3788   if (frame_is_intra_only(cm) || cpi->resize_pending != 0) {
   3789     memset(cpi->consec_zero_mv, 0,
   3790            cm->mi_rows * cm->mi_cols * sizeof(*cpi->consec_zero_mv));
   3791   }
   3792 
   3793 #if CONFIG_VP9_TEMPORAL_DENOISING
   3794   if (cpi->oxcf.noise_sensitivity > 0 && cpi->use_svc)
   3795     vp9_denoiser_reset_on_first_frame(cpi);
   3796 #endif
   3797   vp9_update_noise_estimate(cpi);
   3798 
   3799   // Scene detection is always used for VBR mode or screen-content case.
   3800   // For other cases (e.g., CBR mode) use it for 5 <= speed < 8 for now
   3801   // (need to check encoding time cost for doing this for speed 8).
   3802   cpi->rc.high_source_sad = 0;
   3803   cpi->rc.hybrid_intra_scene_change = 0;
   3804   cpi->rc.re_encode_maxq_scene_change = 0;
   3805   if (cm->show_frame && cpi->oxcf.mode == REALTIME &&
   3806       (cpi->oxcf.rc_mode == VPX_VBR ||
   3807        cpi->oxcf.content == VP9E_CONTENT_SCREEN ||
   3808        (cpi->oxcf.speed >= 5 && cpi->oxcf.speed < 8)))
   3809     vp9_scene_detection_onepass(cpi);
   3810 
   3811   if (svc->spatial_layer_id == svc->first_spatial_layer_to_encode) {
   3812     svc->high_source_sad_superframe = cpi->rc.high_source_sad;
   3813     svc->high_num_blocks_with_motion = cpi->rc.high_num_blocks_with_motion;
   3814     // On scene change reset temporal layer pattern to TL0.
    3815     // Note that if the base/lower spatial layers are skipped, then instead
    3816     // of inserting a base layer here we force max-q for the next superframe
    3817     // with lower spatial layers; this is done in vp9_encodedframe_overshoot()
    3818     // when max-q is decided for the current layer.
   3819     // Only do this reset for bypass/flexible mode.
   3820     if (svc->high_source_sad_superframe && svc->temporal_layer_id > 0 &&
   3821         svc->temporal_layering_mode == VP9E_TEMPORAL_LAYERING_MODE_BYPASS) {
   3822       // rc->high_source_sad will get reset so copy it to restore it.
   3823       int tmp_high_source_sad = cpi->rc.high_source_sad;
   3824       vp9_svc_reset_temporal_layers(cpi, cm->frame_type == KEY_FRAME);
   3825       cpi->rc.high_source_sad = tmp_high_source_sad;
   3826     }
   3827   }
   3828 
    3829   // For 1 pass CBR, check if we are dropping this frame.
    3830   // Never drop on a key frame, when the base layer is key for SVC,
    3831   // on a scene change, or when the superframe has layer sync.
   3832   if ((cpi->rc.high_source_sad || svc->high_source_sad_superframe) &&
   3833       !(cpi->rc.use_post_encode_drop && svc->last_layer_dropped[0]))
   3834     no_drop_scene_change = 1;
   3835   if (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR &&
   3836       !frame_is_intra_only(cm) && !no_drop_scene_change &&
   3837       !svc->superframe_has_layer_sync &&
   3838       (!cpi->use_svc ||
   3839        !svc->layer_context[svc->temporal_layer_id].is_key_frame)) {
   3840     if (vp9_rc_drop_frame(cpi)) return 0;
   3841   }
   3842 
   3843   // For 1 pass CBR SVC, only ZEROMV is allowed for spatial reference frame
   3844   // when svc->force_zero_mode_spatial_ref = 1. Under those conditions we can
   3845   // avoid this frame-level upsampling (for non intra_only frames).
   3846   if (frame_is_intra_only(cm) == 0 &&
   3847       !(is_one_pass_cbr_svc(cpi) && svc->force_zero_mode_spatial_ref)) {
   3848     vp9_scale_references(cpi);
   3849   }
   3850 
   3851   set_size_independent_vars(cpi);
   3852   set_size_dependent_vars(cpi, &q, &bottom_index, &top_index);
   3853 
    3854   // The search method and step parameter might be changed by the speed settings.
   3855   init_motion_estimation(cpi);
   3856 
   3857   if (cpi->sf.copy_partition_flag) alloc_copy_partition_data(cpi);
   3858 
   3859   if (cpi->sf.svc_use_lowres_part &&
   3860       svc->spatial_layer_id == svc->number_spatial_layers - 2) {
   3861     if (svc->prev_partition_svc == NULL) {
   3862       CHECK_MEM_ERROR(
   3863           cm, svc->prev_partition_svc,
   3864           (BLOCK_SIZE *)vpx_calloc(cm->mi_stride * cm->mi_rows,
   3865                                    sizeof(*svc->prev_partition_svc)));
   3866     }
   3867   }
   3868 
   3869   // TODO(jianj): Look into issue of skin detection with high bitdepth.
   3870   if (cm->bit_depth == 8 && cpi->oxcf.speed >= 5 && cpi->oxcf.pass == 0 &&
   3871       cpi->oxcf.rc_mode == VPX_CBR &&
   3872       cpi->oxcf.content != VP9E_CONTENT_SCREEN &&
   3873       cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
   3874     cpi->use_skin_detection = 1;
   3875   }
   3876 
    3877   // Enable post-encode frame dropping for CBR on non-key frames, when
    3878   // ext_use_post_encode_drop is specified by the user.
   3879   cpi->rc.use_post_encode_drop = cpi->rc.ext_use_post_encode_drop &&
   3880                                  cpi->oxcf.rc_mode == VPX_CBR &&
   3881                                  cm->frame_type != KEY_FRAME;
   3882 
   3883   vp9_set_quantizer(cm, q);
   3884   vp9_set_variance_partition_thresholds(cpi, q, 0);
   3885 
   3886   setup_frame(cpi);
   3887 
   3888   suppress_active_map(cpi);
   3889 
   3890   if (cpi->use_svc) {
   3891     // On non-zero spatial layer, check for disabling inter-layer
   3892     // prediction.
   3893     if (svc->spatial_layer_id > 0) vp9_svc_constrain_inter_layer_pred(cpi);
   3894     vp9_svc_assert_constraints_pattern(cpi);
   3895   }
   3896 
   3897   if (cpi->rc.last_post_encode_dropped_scene_change) {
   3898     cpi->rc.high_source_sad = 1;
   3899     svc->high_source_sad_superframe = 1;
    3900     // For now disable use_source_sad since Last_Source will not be the
    3901     // previously encoded frame but the dropped one.
   3902     cpi->sf.use_source_sad = 0;
   3903     cpi->rc.last_post_encode_dropped_scene_change = 0;
   3904   }
   3905   // Check if this high_source_sad (scene/slide change) frame should be
   3906   // encoded at high/max QP, and if so, set the q and adjust some rate
   3907   // control parameters.
   3908   if (cpi->sf.overshoot_detection_cbr_rt == FAST_DETECTION_MAXQ &&
   3909       (cpi->rc.high_source_sad ||
   3910        (cpi->use_svc && svc->high_source_sad_superframe))) {
   3911     if (vp9_encodedframe_overshoot(cpi, -1, &q)) {
   3912       vp9_set_quantizer(cm, q);
   3913       vp9_set_variance_partition_thresholds(cpi, q, 0);
   3914     }
   3915   }
   3916 
   3917   // Variance adaptive and in frame q adjustment experiments are mutually
   3918   // exclusive.
   3919   if (cpi->oxcf.aq_mode == VARIANCE_AQ) {
   3920     vp9_vaq_frame_setup(cpi);
   3921   } else if (cpi->oxcf.aq_mode == EQUATOR360_AQ) {
   3922     vp9_360aq_frame_setup(cpi);
   3923   } else if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
   3924     vp9_setup_in_frame_q_adj(cpi);
   3925   } else if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
   3926     vp9_cyclic_refresh_setup(cpi);
   3927   } else if (cpi->oxcf.aq_mode == LOOKAHEAD_AQ) {
    3928     // Lookahead AQ may interact poorly with rate control;
    3929     // this still needs to be handled properly.
   3930     vp9_alt_ref_aq_setup_map(cpi->alt_ref_aq, cpi);
   3931   } else if (cpi->roi.enabled && !frame_is_intra_only(cm)) {
   3932     apply_roi_map(cpi);
   3933   }
   3934 
   3935   apply_active_map(cpi);
   3936 
   3937   vp9_encode_frame(cpi);
   3938 
   3939   // Check if we should re-encode this frame at high Q because of high
   3940   // overshoot based on the encoded frame size. Only for frames where
   3941   // high temporal-source SAD is detected.
   3942   // For SVC: all spatial layers are checked for re-encoding.
   3943   if (cpi->sf.overshoot_detection_cbr_rt == RE_ENCODE_MAXQ &&
   3944       (cpi->rc.high_source_sad ||
   3945        (cpi->use_svc && svc->high_source_sad_superframe))) {
   3946     int frame_size = 0;
   3947     // Get an estimate of the encoded frame size.
   3948     save_coding_context(cpi);
   3949     vp9_pack_bitstream(cpi, dest, size);
   3950     restore_coding_context(cpi);
   3951     frame_size = (int)(*size) << 3;
   3952     // Check if encoded frame will overshoot too much, and if so, set the q and
   3953     // adjust some rate control parameters, and return to re-encode the frame.
   3954     if (vp9_encodedframe_overshoot(cpi, frame_size, &q)) {
   3955       vpx_clear_system_state();
   3956       vp9_set_quantizer(cm, q);
   3957       vp9_set_variance_partition_thresholds(cpi, q, 0);
   3958       suppress_active_map(cpi);
    3959       // Turn off cyclic refresh for the re-encoded frame.
   3960       if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
   3961         CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   3962         unsigned char *const seg_map = cpi->segmentation_map;
   3963         memset(seg_map, 0, cm->mi_rows * cm->mi_cols);
   3964         memset(cr->last_coded_q_map, MAXQ,
   3965                cm->mi_rows * cm->mi_cols * sizeof(*cr->last_coded_q_map));
   3966         cr->sb_index = 0;
   3967         vp9_disable_segmentation(&cm->seg);
   3968       }
   3969       apply_active_map(cpi);
   3970       vp9_encode_frame(cpi);
   3971     }
   3972   }
   3973 
   3974   // Update some stats from cyclic refresh, and check for golden frame update.
   3975   if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled &&
   3976       !frame_is_intra_only(cm))
   3977     vp9_cyclic_refresh_postencode(cpi);
   3978 
   3979   // Update the skip mb flag probabilities based on the distribution
   3980   // seen in the last encoder iteration.
   3981   // update_base_skip_probs(cpi);
   3982   vpx_clear_system_state();
   3983   return 1;
   3984 }
   3985 
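         // Returns a q-index step roughly proportional to the relative rate miss
         // (rate_excess / rate_limit, rounded to nearest), capped at MAX_QSTEP_ADJ.
         // For example, rate_excess = 3000 with rate_limit = 1000 gives a step of 3.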
   3986 #define MAX_QSTEP_ADJ 4
   3987 static int get_qstep_adj(int rate_excess, int rate_limit) {
   3988   int qstep =
   3989       rate_limit ? ((rate_excess + rate_limit / 2) / rate_limit) : INT_MAX;
   3990   return VPXMIN(qstep, MAX_QSTEP_ADJ);
   3991 }
   3992 
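         // Encode the frame and, when the recode speed feature allows it, estimate
         // the coded size with a dummy bitstream pack; loop and re-encode at an
         // adjusted Q while the projected size is outside the permitted
         // overshoot/undershoot limits or a frame-size change is pending.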
   3993 static void encode_with_recode_loop(VP9_COMP *cpi, size_t *size,
   3994                                     uint8_t *dest) {
   3995   const VP9EncoderConfig *const oxcf = &cpi->oxcf;
   3996   VP9_COMMON *const cm = &cpi->common;
   3997   RATE_CONTROL *const rc = &cpi->rc;
   3998   int bottom_index, top_index;
   3999   int loop_count = 0;
   4000   int loop_at_this_size = 0;
   4001   int loop = 0;
   4002   int overshoot_seen = 0;
   4003   int undershoot_seen = 0;
   4004   int frame_over_shoot_limit;
   4005   int frame_under_shoot_limit;
   4006   int q = 0, q_low = 0, q_high = 0;
   4007   int enable_acl;
   4008 #ifdef AGGRESSIVE_VBR
   4009   int qrange_adj = 1;
   4010 #endif
   4011 
   4012   if (cm->show_existing_frame) {
   4013     if (is_psnr_calc_enabled(cpi)) set_raw_source_frame(cpi);
   4014     return;
   4015   }
   4016 
   4017   set_size_independent_vars(cpi);
   4018 
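           // An extra encode pass with the adapted coding context (sf.allow_acl,
           // applied at the end of this function) is only considered for key frames
           // and when the GF group index is 1.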
   4019   enable_acl = cpi->sf.allow_acl ? (cm->frame_type == KEY_FRAME) ||
   4020                                        (cpi->twopass.gf_group.index == 1)
   4021                                  : 0;
   4022 
   4023   do {
   4024     vpx_clear_system_state();
   4025 
   4026     set_frame_size(cpi);
   4027 
   4028     if (loop_count == 0 || cpi->resize_pending != 0) {
   4029       set_size_dependent_vars(cpi, &q, &bottom_index, &top_index);
   4030 
   4031 #ifdef AGGRESSIVE_VBR
   4032       if (two_pass_first_group_inter(cpi)) {
   4033         // Adjustment limits for min and max q
   4034         qrange_adj = VPXMAX(1, (top_index - bottom_index) / 2);
   4035 
   4036         bottom_index =
   4037             VPXMAX(bottom_index - qrange_adj / 2, oxcf->best_allowed_q);
   4038         top_index = VPXMIN(oxcf->worst_allowed_q, top_index + qrange_adj / 2);
   4039       }
   4040 #endif
   4041       // TODO(agrange) Scale cpi->max_mv_magnitude if frame-size has changed.
   4042       set_mv_search_params(cpi);
   4043 
   4044       // Reset the loop state for new frame size.
   4045       overshoot_seen = 0;
   4046       undershoot_seen = 0;
   4047 
   4048       // Reconfiguration for change in frame size has concluded.
   4049       cpi->resize_pending = 0;
   4050 
   4051       q_low = bottom_index;
   4052       q_high = top_index;
   4053 
   4054       loop_at_this_size = 0;
   4055     }
   4056 
   4057     // Decide frame size bounds first time through.
   4058     if (loop_count == 0) {
   4059       vp9_rc_compute_frame_size_bounds(cpi, rc->this_frame_target,
   4060                                        &frame_under_shoot_limit,
   4061                                        &frame_over_shoot_limit);
   4062     }
   4063 
   4064     cpi->Source =
   4065         vp9_scale_if_required(cm, cpi->un_scaled_source, &cpi->scaled_source,
   4066                               (oxcf->pass == 0), EIGHTTAP, 0);
   4067 
   4068     // Unfiltered raw source used in metrics calculation if the source
   4069     // has been filtered.
   4070     if (is_psnr_calc_enabled(cpi)) {
   4071 #ifdef ENABLE_KF_DENOISE
   4072       if (is_spatial_denoise_enabled(cpi)) {
   4073         cpi->raw_source_frame = vp9_scale_if_required(
   4074             cm, &cpi->raw_unscaled_source, &cpi->raw_scaled_source,
   4075             (oxcf->pass == 0), EIGHTTAP, 0);
   4076       } else {
   4077         cpi->raw_source_frame = cpi->Source;
   4078       }
   4079 #else
   4080       cpi->raw_source_frame = cpi->Source;
   4081 #endif
   4082     }
   4083 
   4084     if (cpi->unscaled_last_source != NULL)
   4085       cpi->Last_Source = vp9_scale_if_required(cm, cpi->unscaled_last_source,
   4086                                                &cpi->scaled_last_source,
   4087                                                (oxcf->pass == 0), EIGHTTAP, 0);
   4088 
   4089     if (frame_is_intra_only(cm) == 0) {
   4090       if (loop_count > 0) {
   4091         release_scaled_references(cpi);
   4092       }
   4093       vp9_scale_references(cpi);
   4094     }
   4095 
   4096     vp9_set_quantizer(cm, q);
   4097 
   4098     if (loop_count == 0) setup_frame(cpi);
   4099 
   4100     // Variance adaptive and in frame q adjustment experiments are mutually
   4101     // exclusive.
   4102     if (oxcf->aq_mode == VARIANCE_AQ) {
   4103       vp9_vaq_frame_setup(cpi);
   4104     } else if (oxcf->aq_mode == EQUATOR360_AQ) {
   4105       vp9_360aq_frame_setup(cpi);
   4106     } else if (oxcf->aq_mode == COMPLEXITY_AQ) {
   4107       vp9_setup_in_frame_q_adj(cpi);
   4108     } else if (oxcf->aq_mode == LOOKAHEAD_AQ) {
   4109       vp9_alt_ref_aq_setup_map(cpi->alt_ref_aq, cpi);
   4110     } else if (oxcf->aq_mode == PSNR_AQ) {
   4111       vp9_psnr_aq_mode_setup(&cm->seg);
   4112     }
   4113 
   4114     vp9_encode_frame(cpi);
   4115 
   4116     // Update the skip mb flag probabilities based on the distribution
   4117     // seen in the last encoder iteration.
   4118     // update_base_skip_probs(cpi);
   4119 
   4120     vpx_clear_system_state();
   4121 
   4122     // Dummy pack of the bitstream using up to date stats to get an
   4123     // accurate estimate of output frame size to determine if we need
   4124     // to recode.
   4125     if (cpi->sf.recode_loop >= ALLOW_RECODE_KFARFGF) {
   4126       save_coding_context(cpi);
   4127       if (!cpi->sf.use_nonrd_pick_mode) vp9_pack_bitstream(cpi, dest, size);
   4128 
   4129       rc->projected_frame_size = (int)(*size) << 3;
   4130 
   4131       if (frame_over_shoot_limit == 0) frame_over_shoot_limit = 1;
   4132     }
   4133 
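             // In constant quality (VPX_Q) mode the chosen Q is final, so never
             // recode based on projected size.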
   4134     if (oxcf->rc_mode == VPX_Q) {
   4135       loop = 0;
   4136     } else {
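               // Forced key frame: search for a Q that keeps the key frame's
               // reconstruction error close to the ambient error of surrounding
               // frames, reducing the risk of visible popping.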
   4137       if ((cm->frame_type == KEY_FRAME) && rc->this_key_frame_forced &&
   4138           (rc->projected_frame_size < rc->max_frame_bandwidth)) {
   4139         int last_q = q;
   4140         int64_t kf_err;
   4141 
   4142         int64_t high_err_target = cpi->ambient_err;
   4143         int64_t low_err_target = cpi->ambient_err >> 1;
   4144 
   4145 #if CONFIG_VP9_HIGHBITDEPTH
   4146         if (cm->use_highbitdepth) {
   4147           kf_err = vpx_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
   4148         } else {
   4149           kf_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
   4150         }
   4151 #else
   4152         kf_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
   4153 #endif  // CONFIG_VP9_HIGHBITDEPTH
   4154 
   4155         // Prevent possible divide by zero error below for perfect KF
   4156         kf_err += !kf_err;
   4157 
   4158         // The key frame is not good enough or we can afford
   4159         // to make it better without undue risk of popping.
   4160         if ((kf_err > high_err_target &&
   4161              rc->projected_frame_size <= frame_over_shoot_limit) ||
   4162             (kf_err > low_err_target &&
   4163              rc->projected_frame_size <= frame_under_shoot_limit)) {
   4164           // Lower q_high
   4165           q_high = q > q_low ? q - 1 : q_low;
   4166 
   4167           // Adjust Q
   4168           q = (int)((q * high_err_target) / kf_err);
   4169           q = VPXMIN(q, (q_high + q_low) >> 1);
   4170         } else if (kf_err < low_err_target &&
   4171                    rc->projected_frame_size >= frame_under_shoot_limit) {
   4172           // The key frame is much better than the previous frame
   4173           // Raise q_low
   4174           q_low = q < q_high ? q + 1 : q_high;
   4175 
   4176           // Adjust Q
   4177           q = (int)((q * low_err_target) / kf_err);
   4178           q = VPXMIN(q, (q_high + q_low + 1) >> 1);
   4179         }
   4180 
   4181         // Clamp Q to upper and lower limits:
   4182         q = clamp(q, q_low, q_high);
   4183 
   4184         loop = q != last_q;
   4185       } else if (recode_loop_test(cpi, frame_over_shoot_limit,
   4186                                   frame_under_shoot_limit, q,
   4187                                   VPXMAX(q_high, top_index), bottom_index)) {
    4188         // Is the projected frame size out of range, and are we allowed
    4189         // to attempt a recode?
   4190         int last_q = q;
   4191         int retries = 0;
   4192         int qstep;
   4193 
   4194         if (cpi->resize_pending == 1) {
   4195           // Change in frame size so go back around the recode loop.
   4196           cpi->rc.frame_size_selector =
   4197               SCALE_STEP1 - cpi->rc.frame_size_selector;
   4198           cpi->rc.next_frame_size_selector = cpi->rc.frame_size_selector;
   4199 
   4200 #if CONFIG_INTERNAL_STATS
   4201           ++cpi->tot_recode_hits;
   4202 #endif
   4203           ++loop_count;
   4204           loop = 1;
   4205           continue;
   4206         }
   4207 
   4208         // Frame size out of permitted range:
   4209         // Update correction factor & compute new Q to try...
   4210 
   4211         // Frame is too large
   4212         if (rc->projected_frame_size > rc->this_frame_target) {
   4213           // Special case if the projected size is > the max allowed.
   4214           if ((q == q_high) &&
   4215               ((rc->projected_frame_size >= rc->max_frame_bandwidth) ||
   4216                (!rc->is_src_frame_alt_ref &&
   4217                 (rc->projected_frame_size >=
   4218                  big_rate_miss_high_threshold(cpi))))) {
   4219             int max_rate = VPXMAX(1, VPXMIN(rc->max_frame_bandwidth,
   4220                                             big_rate_miss_high_threshold(cpi)));
   4221             double q_val_high;
   4222             q_val_high = vp9_convert_qindex_to_q(q_high, cm->bit_depth);
   4223             q_val_high =
   4224                 q_val_high * ((double)rc->projected_frame_size / max_rate);
   4225             q_high = vp9_convert_q_to_qindex(q_val_high, cm->bit_depth);
   4226             q_high = clamp(q_high, rc->best_quality, rc->worst_quality);
   4227           }
   4228 
    4229           // Raise q_low to at least the current q value.
   4230           qstep =
   4231               get_qstep_adj(rc->projected_frame_size, rc->this_frame_target);
   4232           q_low = VPXMIN(q + qstep, q_high);
   4233 
   4234           if (undershoot_seen || loop_at_this_size > 1) {
    4235             // Update the rate correction factor and bisect the Q range.
   4236             vp9_rc_update_rate_correction_factors(cpi);
   4237 
   4238             q = (q_high + q_low + 1) / 2;
   4239           } else {
    4240             // Update the rate correction factor and regulate Q to the target.
   4241             vp9_rc_update_rate_correction_factors(cpi);
   4242 
   4243             q = vp9_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
   4244                                   VPXMAX(q_high, top_index));
   4245 
   4246             while (q < q_low && retries < 10) {
   4247               vp9_rc_update_rate_correction_factors(cpi);
   4248               q = vp9_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
   4249                                     VPXMAX(q_high, top_index));
   4250               retries++;
   4251             }
   4252           }
   4253 
   4254           overshoot_seen = 1;
   4255         } else {
   4256           // Frame is too small
   4257           qstep =
   4258               get_qstep_adj(rc->this_frame_target, rc->projected_frame_size);
   4259           q_high = VPXMAX(q - qstep, q_low);
   4260 
   4261           if (overshoot_seen || loop_at_this_size > 1) {
   4262             vp9_rc_update_rate_correction_factors(cpi);
   4263             q = (q_high + q_low) / 2;
   4264           } else {
   4265             vp9_rc_update_rate_correction_factors(cpi);
   4266             q = vp9_rc_regulate_q(cpi, rc->this_frame_target,
   4267                                   VPXMIN(q_low, bottom_index), top_index);
   4268             // Special case reset for qlow for constrained quality.
   4269             // This should only trigger where there is very substantial
   4270             // undershoot on a frame and the auto cq level is above
    4271             // the user-specified value.
   4272             if (oxcf->rc_mode == VPX_CQ && q < q_low) {
   4273               q_low = q;
   4274             }
   4275 
   4276             while (q > q_high && retries < 10) {
   4277               vp9_rc_update_rate_correction_factors(cpi);
   4278               q = vp9_rc_regulate_q(cpi, rc->this_frame_target,
   4279                                     VPXMIN(q_low, bottom_index), top_index);
   4280               retries++;
   4281             }
   4282           }
   4283           undershoot_seen = 1;
   4284         }
   4285 
   4286         // Clamp Q to upper and lower limits:
   4287         q = clamp(q, q_low, q_high);
   4288 
   4289         loop = (q != last_q);
   4290       } else {
   4291         loop = 0;
   4292       }
   4293     }
   4294 
   4295     // Special case for overlay frame.
   4296     if (rc->is_src_frame_alt_ref &&
   4297         rc->projected_frame_size < rc->max_frame_bandwidth)
   4298       loop = 0;
   4299 
   4300     if (loop) {
   4301       ++loop_count;
   4302       ++loop_at_this_size;
   4303 
   4304 #if CONFIG_INTERNAL_STATS
   4305       ++cpi->tot_recode_hits;
   4306 #endif
   4307     }
   4308 
   4309     if (cpi->sf.recode_loop >= ALLOW_RECODE_KFARFGF)
   4310       if (loop || !enable_acl) restore_coding_context(cpi);
   4311   } while (loop);
   4312 
   4313 #ifdef AGGRESSIVE_VBR
   4314   if (two_pass_first_group_inter(cpi)) {
   4315     cpi->twopass.active_worst_quality =
   4316         VPXMIN(q + qrange_adj, oxcf->worst_allowed_q);
   4317   } else if (!frame_is_kf_gf_arf(cpi)) {
   4318 #else
   4319   if (!frame_is_kf_gf_arf(cpi)) {
   4320 #endif
    4321     // Have we been forced to adapt Q outside the expected range by an extreme
    4322     // rate miss? If so, adjust the active max Q for the subsequent frames.
   4323     if (!rc->is_src_frame_alt_ref && (q > cpi->twopass.active_worst_quality)) {
   4324       cpi->twopass.active_worst_quality = q;
   4325     } else if (oxcf->vbr_corpus_complexity && q == q_low &&
   4326                rc->projected_frame_size < rc->this_frame_target) {
   4327       cpi->twopass.active_worst_quality =
   4328           VPXMAX(q, cpi->twopass.active_worst_quality - 1);
   4329     }
   4330   }
   4331 
   4332   if (enable_acl) {
    4333     // Skip recoding if the context model diff is below the threshold.
   4334     const int thresh = compute_context_model_thresh(cpi);
   4335     const int diff = compute_context_model_diff(cm);
   4336     if (diff < thresh) {
   4337       vpx_clear_system_state();
   4338       restore_coding_context(cpi);
   4339       return;
   4340     }
   4341 
   4342     vp9_encode_frame(cpi);
   4343     vpx_clear_system_state();
   4344     restore_coding_context(cpi);
   4345   }
   4346 }
   4347 
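         // Build the reference-frame usage flags for the current frame, dropping
         // the GOLDEN or ALTREF flag when that reference maps to the same buffer as
         // another reference.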
   4348 static int get_ref_frame_flags(const VP9_COMP *cpi) {
   4349   const int *const map = cpi->common.ref_frame_map;
   4350   const int gold_is_last = map[cpi->gld_fb_idx] == map[cpi->lst_fb_idx];
   4351   const int alt_is_last = map[cpi->alt_fb_idx] == map[cpi->lst_fb_idx];
   4352   const int gold_is_alt = map[cpi->gld_fb_idx] == map[cpi->alt_fb_idx];
   4353   int flags = VP9_ALT_FLAG | VP9_GOLD_FLAG | VP9_LAST_FLAG;
   4354 
   4355   if (gold_is_last) flags &= ~VP9_GOLD_FLAG;
   4356 
   4357   if (cpi->rc.frames_till_gf_update_due == INT_MAX &&
   4358       (cpi->svc.number_temporal_layers == 1 &&
   4359        cpi->svc.number_spatial_layers == 1))
   4360     flags &= ~VP9_GOLD_FLAG;
   4361 
   4362   if (alt_is_last) flags &= ~VP9_ALT_FLAG;
   4363 
   4364   if (gold_is_alt) flags &= ~VP9_ALT_FLAG;
   4365 
   4366   return flags;
   4367 }
   4368 
   4369 static void set_ext_overrides(VP9_COMP *cpi) {
    4370   // Override the defaults with any values supplied externally via the
    4371   // vp9_update_reference() and vp9_update_entropy() calls.
    4372   // Note: The overrides are valid only for the next frame passed
    4373   // to the encode_frame_to_data_rate() function.
   4374   if (cpi->ext_refresh_frame_context_pending) {
   4375     cpi->common.refresh_frame_context = cpi->ext_refresh_frame_context;
   4376     cpi->ext_refresh_frame_context_pending = 0;
   4377   }
   4378   if (cpi->ext_refresh_frame_flags_pending) {
   4379     cpi->refresh_last_frame = cpi->ext_refresh_last_frame;
   4380     cpi->refresh_golden_frame = cpi->ext_refresh_golden_frame;
   4381     cpi->refresh_alt_ref_frame = cpi->ext_refresh_alt_ref_frame;
   4382   }
   4383 }
   4384 
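         // Scale the source in two stages (unscaled -> scaled_temp -> scaled) so a
         // different filter type and phase can be applied at each stage. Returns
         // |unscaled| unchanged when the frame already matches the coded size.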
   4385 YV12_BUFFER_CONFIG *vp9_svc_twostage_scale(
   4386     VP9_COMMON *cm, YV12_BUFFER_CONFIG *unscaled, YV12_BUFFER_CONFIG *scaled,
   4387     YV12_BUFFER_CONFIG *scaled_temp, INTERP_FILTER filter_type,
   4388     int phase_scaler, INTERP_FILTER filter_type2, int phase_scaler2) {
   4389   if (cm->mi_cols * MI_SIZE != unscaled->y_width ||
   4390       cm->mi_rows * MI_SIZE != unscaled->y_height) {
   4391 #if CONFIG_VP9_HIGHBITDEPTH
   4392     if (cm->bit_depth == VPX_BITS_8) {
   4393       vp9_scale_and_extend_frame(unscaled, scaled_temp, filter_type2,
   4394                                  phase_scaler2);
   4395       vp9_scale_and_extend_frame(scaled_temp, scaled, filter_type,
   4396                                  phase_scaler);
   4397     } else {
   4398       scale_and_extend_frame(unscaled, scaled_temp, (int)cm->bit_depth,
   4399                              filter_type2, phase_scaler2);
   4400       scale_and_extend_frame(scaled_temp, scaled, (int)cm->bit_depth,
   4401                              filter_type, phase_scaler);
   4402     }
   4403 #else
   4404     vp9_scale_and_extend_frame(unscaled, scaled_temp, filter_type2,
   4405                                phase_scaler2);
   4406     vp9_scale_and_extend_frame(scaled_temp, scaled, filter_type, phase_scaler);
   4407 #endif  // CONFIG_VP9_HIGHBITDEPTH
   4408     return scaled;
   4409   } else {
   4410     return unscaled;
   4411   }
   4412 }
   4413 
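         // Return a scaled copy of |unscaled| when its dimensions do not match the
         // coded frame size, otherwise return |unscaled| itself. The normative
         // scaler is only used when the source is at most twice the target size in
         // each dimension; otherwise a non-normative scaler is used.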
   4414 YV12_BUFFER_CONFIG *vp9_scale_if_required(
   4415     VP9_COMMON *cm, YV12_BUFFER_CONFIG *unscaled, YV12_BUFFER_CONFIG *scaled,
   4416     int use_normative_scaler, INTERP_FILTER filter_type, int phase_scaler) {
   4417   if (cm->mi_cols * MI_SIZE != unscaled->y_width ||
   4418       cm->mi_rows * MI_SIZE != unscaled->y_height) {
   4419 #if CONFIG_VP9_HIGHBITDEPTH
   4420     if (use_normative_scaler && unscaled->y_width <= (scaled->y_width << 1) &&
   4421         unscaled->y_height <= (scaled->y_height << 1))
   4422       if (cm->bit_depth == VPX_BITS_8)
   4423         vp9_scale_and_extend_frame(unscaled, scaled, filter_type, phase_scaler);
   4424       else
   4425         scale_and_extend_frame(unscaled, scaled, (int)cm->bit_depth,
   4426                                filter_type, phase_scaler);
   4427     else
   4428       scale_and_extend_frame_nonnormative(unscaled, scaled, (int)cm->bit_depth);
   4429 #else
   4430     if (use_normative_scaler && unscaled->y_width <= (scaled->y_width << 1) &&
   4431         unscaled->y_height <= (scaled->y_height << 1))
   4432       vp9_scale_and_extend_frame(unscaled, scaled, filter_type, phase_scaler);
   4433     else
   4434       scale_and_extend_frame_nonnormative(unscaled, scaled);
   4435 #endif  // CONFIG_VP9_HIGHBITDEPTH
   4436     return scaled;
   4437   } else {
   4438     return unscaled;
   4439   }
   4440 }
   4441 
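         // Set the sign bias for each active reference: a reference gets an
         // inverted sign bias when its frame index is ahead of the frame currently
         // being coded.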
   4442 static void set_ref_sign_bias(VP9_COMP *cpi) {
   4443   VP9_COMMON *const cm = &cpi->common;
   4444   RefCntBuffer *const ref_buffer = get_ref_cnt_buffer(cm, cm->new_fb_idx);
   4445   const int cur_frame_index = ref_buffer->frame_index;
   4446   MV_REFERENCE_FRAME ref_frame;
   4447 
   4448   for (ref_frame = LAST_FRAME; ref_frame < MAX_REF_FRAMES; ++ref_frame) {
   4449     const int buf_idx = get_ref_frame_buf_idx(cpi, ref_frame);
   4450     const RefCntBuffer *const ref_cnt_buf =
   4451         get_ref_cnt_buffer(&cpi->common, buf_idx);
   4452     if (ref_cnt_buf) {
   4453       cm->ref_frame_sign_bias[ref_frame] =
   4454           cur_frame_index < ref_cnt_buf->frame_index;
   4455     }
   4456   }
   4457 }
   4458 
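         // Build a bitmask of interpolation filters that can be skipped in the
         // filter search: a filter is masked out when it was never selected for
         // LAST and only rarely (under 2% of selections) for GOLDEN and ALTREF.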
   4459 static int setup_interp_filter_search_mask(VP9_COMP *cpi) {
   4460   INTERP_FILTER ifilter;
   4461   int ref_total[MAX_REF_FRAMES] = { 0 };
   4462   MV_REFERENCE_FRAME ref;
   4463   int mask = 0;
   4464   if (cpi->common.last_frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame)
   4465     return mask;
   4466   for (ref = LAST_FRAME; ref <= ALTREF_FRAME; ++ref)
   4467     for (ifilter = EIGHTTAP; ifilter <= EIGHTTAP_SHARP; ++ifilter)
   4468       ref_total[ref] += cpi->interp_filter_selected[ref][ifilter];
   4469 
   4470   for (ifilter = EIGHTTAP; ifilter <= EIGHTTAP_SHARP; ++ifilter) {
   4471     if ((ref_total[LAST_FRAME] &&
   4472          cpi->interp_filter_selected[LAST_FRAME][ifilter] == 0) &&
   4473         (ref_total[GOLDEN_FRAME] == 0 ||
   4474          cpi->interp_filter_selected[GOLDEN_FRAME][ifilter] * 50 <
   4475              ref_total[GOLDEN_FRAME]) &&
   4476         (ref_total[ALTREF_FRAME] == 0 ||
   4477          cpi->interp_filter_selected[ALTREF_FRAME][ifilter] * 50 <
   4478              ref_total[ALTREF_FRAME]))
   4479       mask |= 1 << ifilter;
   4480   }
   4481   return mask;
   4482 }
   4483 
   4484 #ifdef ENABLE_KF_DENOISE
    4485 // Baseline kernel weights for denoise.
   4486 static uint8_t dn_kernal_3[9] = { 1, 2, 1, 2, 4, 2, 1, 2, 1 };
   4487 static uint8_t dn_kernal_5[25] = { 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 4,
   4488                                    2, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1 };
   4489 
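         // Accumulate a neighbouring sample into the weighted sum only when it is
         // within |thresh| of the centre sample.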
   4490 static INLINE void add_denoise_point(int centre_val, int data_val, int thresh,
   4491                                      uint8_t point_weight, int *sum_val,
   4492                                      int *sum_weight) {
   4493   if (abs(centre_val - data_val) <= thresh) {
   4494     *sum_weight += point_weight;
   4495     *sum_val += (int)data_val * (int)point_weight;
   4496   }
   4497 }
   4498 
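         // Denoise a single sample using a thresholded, weighted average of its
         // 5x5 neighbourhood (3x3 with a tighter threshold in high-contrast areas).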
   4499 static void spatial_denoise_point(uint8_t *src_ptr, const int stride,
   4500                                   const int strength) {
   4501   int sum_weight = 0;
   4502   int sum_val = 0;
   4503   int thresh = strength;
   4504   int kernal_size = 5;
   4505   int half_k_size = 2;
   4506   int i, j;
   4507   int max_diff = 0;
   4508   uint8_t *tmp_ptr;
   4509   uint8_t *kernal_ptr;
   4510 
    4511   // Find the maximum deviation from the source point in its neighbourhood.
   4512   tmp_ptr = src_ptr - (stride * (half_k_size + 1)) - (half_k_size + 1);
   4513   for (i = 0; i < kernal_size + 2; ++i) {
   4514     for (j = 0; j < kernal_size + 2; ++j) {
   4515       max_diff = VPXMAX(max_diff, abs((int)*src_ptr - (int)tmp_ptr[j]));
   4516     }
   4517     tmp_ptr += stride;
   4518   }
   4519 
    4520   // Select the kernel size.
   4521   if (max_diff > (strength + (strength >> 1))) {
   4522     kernal_size = 3;
   4523     half_k_size = 1;
   4524     thresh = thresh >> 1;
   4525   }
   4526   kernal_ptr = (kernal_size == 3) ? dn_kernal_3 : dn_kernal_5;
   4527 
    4528   // Apply the kernel.
   4529   tmp_ptr = src_ptr - (stride * half_k_size) - half_k_size;
   4530   for (i = 0; i < kernal_size; ++i) {
   4531     for (j = 0; j < kernal_size; ++j) {
   4532       add_denoise_point((int)*src_ptr, (int)tmp_ptr[j], thresh, *kernal_ptr,
   4533                         &sum_val, &sum_weight);
   4534       ++kernal_ptr;
   4535     }
   4536     tmp_ptr += stride;
   4537   }
   4538 
   4539   // Update the source value with the new filtered value
   4540   *src_ptr = (uint8_t)((sum_val + (sum_weight >> 1)) / sum_weight);
   4541 }
   4542 
   4543 #if CONFIG_VP9_HIGHBITDEPTH
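         // High bitdepth variant of spatial_denoise_point().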
   4544 static void highbd_spatial_denoise_point(uint16_t *src_ptr, const int stride,
   4545                                          const int strength) {
   4546   int sum_weight = 0;
   4547   int sum_val = 0;
   4548   int thresh = strength;
   4549   int kernal_size = 5;
   4550   int half_k_size = 2;
   4551   int i, j;
   4552   int max_diff = 0;
   4553   uint16_t *tmp_ptr;
   4554   uint8_t *kernal_ptr;
   4555 
    4556   // Find the maximum deviation from the source point in its neighbourhood.
   4557   tmp_ptr = src_ptr - (stride * (half_k_size + 1)) - (half_k_size + 1);
   4558   for (i = 0; i < kernal_size + 2; ++i) {
   4559     for (j = 0; j < kernal_size + 2; ++j) {
    4560       max_diff = VPXMAX(max_diff, abs((int)*src_ptr - (int)tmp_ptr[j]));
   4561     }
   4562     tmp_ptr += stride;
   4563   }
   4564 
    4565   // Select the kernel size.
   4566   if (max_diff > (strength + (strength >> 1))) {
   4567     kernal_size = 3;
   4568     half_k_size = 1;
   4569     thresh = thresh >> 1;
   4570   }
   4571   kernal_ptr = (kernal_size == 3) ? dn_kernal_3 : dn_kernal_5;
   4572 
    4573   // Apply the kernel.
   4574   tmp_ptr = src_ptr - (stride * half_k_size) - half_k_size;
   4575   for (i = 0; i < kernal_size; ++i) {
   4576     for (j = 0; j < kernal_size; ++j) {
   4577       add_denoise_point((int)*src_ptr, (int)tmp_ptr[j], thresh, *kernal_ptr,
   4578                         &sum_val, &sum_weight);
   4579       ++kernal_ptr;
   4580     }
   4581     tmp_ptr += stride;
   4582   }
   4583 
   4584   // Update the source value with the new filtered value
   4585   *src_ptr = (uint16_t)((sum_val + (sum_weight >> 1)) / sum_weight);
   4586 }
   4587 #endif  // CONFIG_VP9_HIGHBITDEPTH
   4588 
    4589 // Apply thresholded spatial noise suppression to a given buffer.
   4590 static void spatial_denoise_buffer(VP9_COMP *cpi, uint8_t *buffer,
   4591                                    const int stride, const int width,
   4592                                    const int height, const int strength) {
   4593   VP9_COMMON *const cm = &cpi->common;
   4594   uint8_t *src_ptr = buffer;
   4595   int row;
   4596   int col;
   4597 
   4598   for (row = 0; row < height; ++row) {
   4599     for (col = 0; col < width; ++col) {
   4600 #if CONFIG_VP9_HIGHBITDEPTH
   4601       if (cm->use_highbitdepth)
   4602         highbd_spatial_denoise_point(CONVERT_TO_SHORTPTR(&src_ptr[col]), stride,
   4603                                      strength);
   4604       else
   4605         spatial_denoise_point(&src_ptr[col], stride, strength);
   4606 #else
   4607       spatial_denoise_point(&src_ptr[col], stride, strength);
   4608 #endif  // CONFIG_VP9_HIGHBITDEPTH
   4609     }
   4610     src_ptr += stride;
   4611   }
   4612 }
   4613 
    4614 // Apply thresholded spatial noise suppression to the source.
   4615 static void spatial_denoise_frame(VP9_COMP *cpi) {
   4616   YV12_BUFFER_CONFIG *src = cpi->Source;
   4617   const VP9EncoderConfig *const oxcf = &cpi->oxcf;
   4618   TWO_PASS *const twopass = &cpi->twopass;
   4619   VP9_COMMON *const cm = &cpi->common;
   4620 
   4621   // Base the filter strength on the current active max Q.
   4622   const int q = (int)(vp9_convert_qindex_to_q(twopass->active_worst_quality,
   4623                                               cm->bit_depth));
   4624   int strength =
   4625       VPXMAX(oxcf->arnr_strength >> 2, VPXMIN(oxcf->arnr_strength, (q >> 4)));
   4626 
   4627   // Denoise each of Y,U and V buffers.
   4628   spatial_denoise_buffer(cpi, src->y_buffer, src->y_stride, src->y_width,
   4629                          src->y_height, strength);
   4630 
   4631   strength += (strength >> 1);
   4632   spatial_denoise_buffer(cpi, src->u_buffer, src->uv_stride, src->uv_width,
   4633                          src->uv_height, strength << 1);
   4634 
   4635   spatial_denoise_buffer(cpi, src->v_buffer, src->uv_stride, src->uv_width,
   4636                          src->uv_height, strength << 1);
   4637 }
   4638 #endif  // ENABLE_KF_DENOISE
   4639 
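         // For lookahead (alt-ref) AQ: measure the bitstream overhead of the
         // segmentation signalling with a dummy repack and, if the overhead is not
         // worth the expected gain, disable segmentation and re-encode the frame.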
   4640 static void vp9_try_disable_lookahead_aq(VP9_COMP *cpi, size_t *size,
   4641                                          uint8_t *dest) {
   4642   if (cpi->common.seg.enabled)
   4643     if (ALT_REF_AQ_PROTECT_GAIN) {
   4644       size_t nsize = *size;
   4645       int overhead;
   4646 
   4647       // TODO(yuryg): optimize this, as
   4648       // we don't really need to repack
   4649 
   4650       save_coding_context(cpi);
   4651       vp9_disable_segmentation(&cpi->common.seg);
   4652       vp9_pack_bitstream(cpi, dest, &nsize);
   4653       restore_coding_context(cpi);
   4654 
   4655       overhead = (int)*size - (int)nsize;
   4656 
   4657       if (vp9_alt_ref_aq_disable_if(cpi->alt_ref_aq, overhead, (int)*size))
   4658         vp9_encode_frame(cpi);
   4659       else
   4660         vp9_enable_segmentation(&cpi->common.seg);
   4661     }
   4662 }
   4663 
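         // Record the display-order index of the frame being coded, including the
         // alt-ref source offset within the current GF group.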
   4664 static void set_frame_index(VP9_COMP *cpi, VP9_COMMON *cm) {
   4665   RefCntBuffer *const ref_buffer = get_ref_cnt_buffer(cm, cm->new_fb_idx);
   4666 
   4667   if (ref_buffer) {
   4668     const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
   4669     ref_buffer->frame_index =
   4670         cm->current_video_frame + gf_group->arf_src_offset[gf_group->index];
   4671   }
   4672 }
   4673 
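         // Top-level per-frame encode at the current data-rate target. Handles SVC
         // enhancement-layer skipping, external overrides, and (when enabled) key
         // frame spatial denoising before the main encode.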
   4674 static void encode_frame_to_data_rate(VP9_COMP *cpi, size_t *size,
   4675                                       uint8_t *dest,
   4676                                       unsigned int *frame_flags) {
   4677   VP9_COMMON *const cm = &cpi->common;
   4678   const VP9EncoderConfig *const oxcf = &cpi->oxcf;
   4679   struct segmentation *const seg = &cm->seg;
   4680   TX_SIZE t;
   4681 
    4682   // SVC: skip encoding of enhancement layer if the layer target bandwidth = 0.
    4683   // If in constrained layer drop mode (svc.framedrop_mode != LAYER_DROP) and
    4684   // the base spatial layer was dropped, there is no need to set
    4685   // svc.skip_enhancement_layer, as the whole superframe will be dropped.
   4686   if (cpi->use_svc && cpi->svc.spatial_layer_id > 0 &&
   4687       cpi->oxcf.target_bandwidth == 0 &&
   4688       !(cpi->svc.framedrop_mode != LAYER_DROP &&
   4689         cpi->svc.drop_spatial_layer[0])) {
   4690     cpi->svc.skip_enhancement_layer = 1;
   4691     vp9_rc_postencode_update_drop_frame(cpi);
   4692     cpi->ext_refresh_frame_flags_pending = 0;
   4693     cpi->last_frame_dropped = 1;
   4694     cpi->svc.last_layer_dropped[cpi->svc.spatial_layer_id] = 1;
   4695     cpi->svc.drop_spatial_layer[cpi->svc.spatial_layer_id] = 1;
   4696     if (cpi->svc.framedrop_mode == LAYER_DROP ||
   4697         cpi->svc.drop_spatial_layer[0] == 0) {
    4698       // For the case of constrained drop mode where the base is dropped
    4699       // (drop_spatial_layer[0] == 1), which means the full superframe is
    4700       // dropped, we don't increment the svc frame counters. In particular the
    4701       // temporal layer counter (which is incremented in
    4702       // vp9_inc_frame_in_layer()) won't be incremented, so on a dropped frame
    4703       // we try the same temporal_layer_id on the next incoming frame. This
    4704       // avoids a temporal alignment issue with full superframe dropping.
   4705       vp9_inc_frame_in_layer(cpi);
   4706     }
   4707     return;
   4708   }
   4709 
   4710   set_ext_overrides(cpi);
   4711   vpx_clear_system_state();
   4712 
   4713 #ifdef ENABLE_KF_DENOISE
   4714   // Spatial denoise of key frame.
   4715   if (