/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <limits.h>
#include <math.h>
#include <stdio.h>

#include "./vp9_rtcd.h"
#include "./vpx_dsp_rtcd.h"

#include "vpx/vpx_codec.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"

#include "vp9/common/vp9_blockd.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_scan.h"

#include "vp9/encoder/vp9_cost.h"
#include "vp9/encoder/vp9_encoder.h"
#include "vp9/encoder/vp9_pickmode.h"
#include "vp9/encoder/vp9_ratectrl.h"
#include "vp9/encoder/vp9_rd.h"

typedef struct {
  uint8_t *data;
  int stride;
  int in_use;
} PRED_BUFFER;

typedef struct {
  PRED_BUFFER *best_pred;
  PREDICTION_MODE best_mode;
  TX_SIZE best_tx_size;
  TX_SIZE best_intra_tx_size;
  MV_REFERENCE_FRAME best_ref_frame;
  MV_REFERENCE_FRAME best_second_ref_frame;
  uint8_t best_mode_skip_txfm;
  INTERP_FILTER best_pred_filter;
} BEST_PICKMODE;

static const int pos_shift_16x16[4][4] = {
  { 9, 10, 13, 14 }, { 11, 12, 15, 16 }, { 17, 18, 21, 22 }, { 19, 20, 23, 24 }
};

static int mv_refs_rt(VP9_COMP *cpi, const VP9_COMMON *cm, const MACROBLOCK *x,
                      const MACROBLOCKD *xd, const TileInfo *const tile,
                      MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
                      int_mv *mv_ref_list, int_mv *base_mv, int mi_row,
                      int mi_col, int use_base_mv) {
  const int *ref_sign_bias = cm->ref_frame_sign_bias;
  int i, refmv_count = 0;

  const POSITION *const mv_ref_search = mv_ref_blocks[mi->sb_type];

  int different_ref_found = 0;
  int context_counter = 0;
  int const_motion = 0;

  // Blank the reference vector list.
  memset(mv_ref_list, 0, sizeof(*mv_ref_list) * MAX_MV_REF_CANDIDATES);

  // The nearest 2 blocks are treated differently:
  // if the size < 8x8 we get the mv from the bmi substructure,
  // and we also need to keep a mode count.
  for (i = 0; i < 2; ++i) {
    const POSITION *const mv_ref = &mv_ref_search[i];
    if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
      const MODE_INFO *const candidate_mi =
          xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
      // Keep counts for entropy encoding.
      context_counter += mode_2_counter[candidate_mi->mode];
      different_ref_found = 1;

      if (candidate_mi->ref_frame[0] == ref_frame)
        ADD_MV_REF_LIST(get_sub_block_mv(candidate_mi, 0, mv_ref->col, -1),
                        refmv_count, mv_ref_list, Done);
    }
  }

  const_motion = 1;

  // Check the rest of the neighbors in much the same way
  // as before, except we don't need to keep track of sub blocks or
  // mode counts.
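  // Note: the scan below exits as soon as one candidate from the same
  // reference frame has been added (refmv_count > 0), rather than collecting
  // the full MAX_MV_REF_CANDIDATES list.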
  for (; i < MVREF_NEIGHBOURS && !refmv_count; ++i) {
    const POSITION *const mv_ref = &mv_ref_search[i];
    if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
      const MODE_INFO *const candidate_mi =
          xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
      different_ref_found = 1;

      if (candidate_mi->ref_frame[0] == ref_frame)
        ADD_MV_REF_LIST(candidate_mi->mv[0], refmv_count, mv_ref_list, Done);
    }
  }

  // Since we couldn't find 2 mvs from the same reference frame,
  // go back through the neighbors and find motion vectors from
  // different reference frames.
  if (different_ref_found && !refmv_count) {
    for (i = 0; i < MVREF_NEIGHBOURS; ++i) {
      const POSITION *mv_ref = &mv_ref_search[i];
      if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
        const MODE_INFO *const candidate_mi =
            xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];

        // If the candidate is INTRA we don't want to consider its mv.
        IF_DIFF_REF_FRAME_ADD_MV(candidate_mi, ref_frame, ref_sign_bias,
                                 refmv_count, mv_ref_list, Done);
      }
    }
  }
  if (use_base_mv &&
      !cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame &&
      ref_frame == LAST_FRAME) {
    // Get the base layer mv. The base layer is at half resolution, so the
    // mv is scaled by 2 for use at this layer.
    MV_REF *candidate =
        &cm->prev_frame
             ->mvs[(mi_col >> 1) + (mi_row >> 1) * (cm->mi_cols >> 1)];
    if (candidate->mv[0].as_int != INVALID_MV) {
      base_mv->as_mv.row = (candidate->mv[0].as_mv.row * 2);
      base_mv->as_mv.col = (candidate->mv[0].as_mv.col * 2);
      clamp_mv_ref(&base_mv->as_mv, xd);
    } else {
      base_mv->as_int = INVALID_MV;
    }
  }

Done:

  x->mbmi_ext->mode_context[ref_frame] = counter_to_context[context_counter];

  // Clamp vectors.
  for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i)
    clamp_mv_ref(&mv_ref_list[i].as_mv, xd);

  return const_motion;
}

static int combined_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
                                  BLOCK_SIZE bsize, int mi_row, int mi_col,
                                  int_mv *tmp_mv, int *rate_mv,
                                  int64_t best_rd_sofar, int use_base_mv) {
  MACROBLOCKD *xd = &x->e_mbd;
  MODE_INFO *mi = xd->mi[0];
  struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0 } };
  const int step_param = cpi->sf.mv.fullpel_search_step_param;
  const int sadpb = x->sadperbit16;
  MV mvp_full;
  const int ref = mi->ref_frame[0];
  const MV ref_mv = x->mbmi_ext->ref_mvs[ref][0].as_mv;
  MV center_mv;
  uint32_t dis;
  int rate_mode;
  const MvLimits tmp_mv_limits = x->mv_limits;
  int rv = 0;
  int cost_list[5];
  int search_subpel = 1;
  const YV12_BUFFER_CONFIG *scaled_ref_frame =
      vp9_get_scaled_ref_frame(cpi, ref);
  if (scaled_ref_frame) {
    int i;
    // Swap out the reference frame for a version that's been scaled to
    // match the resolution of the current frame, allowing the existing
    // motion search code to be used without additional modifications.
    for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0];
    vp9_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
  }
  vp9_set_mv_search_range(&x->mv_limits, &ref_mv);

  // Limit motion vector for large lighting change.
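  // At speed > 5, a low-variance/high-sum-diff block restricts the full-pel
  // search window to roughly +/-10 pixels, keeping the search anchored near
  // the collocated position.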
  if (cpi->oxcf.speed > 5 && x->lowvar_highsumdiff) {
    x->mv_limits.col_min = VPXMAX(x->mv_limits.col_min, -10);
    x->mv_limits.row_min = VPXMAX(x->mv_limits.row_min, -10);
    x->mv_limits.col_max = VPXMIN(x->mv_limits.col_max, 10);
    x->mv_limits.row_max = VPXMIN(x->mv_limits.row_max, 10);
  }

  assert(x->mv_best_ref_index[ref] <= 2);
  if (x->mv_best_ref_index[ref] < 2)
    mvp_full = x->mbmi_ext->ref_mvs[ref][x->mv_best_ref_index[ref]].as_mv;
  else
    mvp_full = x->pred_mv[ref];

  mvp_full.col >>= 3;
  mvp_full.row >>= 3;

  if (!use_base_mv)
    center_mv = ref_mv;
  else
    center_mv = tmp_mv->as_mv;

  if (x->sb_use_mv_part) {
    tmp_mv->as_mv.row = x->sb_mvrow_part >> 3;
    tmp_mv->as_mv.col = x->sb_mvcol_part >> 3;
  } else {
    vp9_full_pixel_search(
        cpi, x, bsize, &mvp_full, step_param, cpi->sf.mv.search_method, sadpb,
        cond_cost_list(cpi, cost_list), &center_mv, &tmp_mv->as_mv, INT_MAX, 0);
  }

  x->mv_limits = tmp_mv_limits;

  // Calculate the bit cost of the motion vector.
  mvp_full.row = tmp_mv->as_mv.row * 8;
  mvp_full.col = tmp_mv->as_mv.col * 8;

  *rate_mv = vp9_mv_bit_cost(&mvp_full, &ref_mv, x->nmvjointcost, x->mvcost,
                             MV_COST_WEIGHT);

  rate_mode =
      cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref]][INTER_OFFSET(NEWMV)];
  rv =
      !(RDCOST(x->rdmult, x->rddiv, (*rate_mv + rate_mode), 0) > best_rd_sofar);

  // For SVC on a non-reference frame, avoid subpel for (0, 0) motion.
  if (cpi->use_svc && cpi->svc.non_reference_frame) {
    if (mvp_full.row == 0 && mvp_full.col == 0) search_subpel = 0;
  }

  if (rv && search_subpel) {
    SUBPEL_FORCE_STOP subpel_force_stop = cpi->sf.mv.subpel_force_stop;
    if (use_base_mv && cpi->sf.base_mv_aggressive) subpel_force_stop = HALF_PEL;
    if (cpi->sf.mv.enable_adaptive_subpel_force_stop) {
      const int mv_thresh = cpi->sf.mv.adapt_subpel_force_stop.mv_thresh;
      if (abs(tmp_mv->as_mv.row) >= mv_thresh ||
          abs(tmp_mv->as_mv.col) >= mv_thresh)
        subpel_force_stop = cpi->sf.mv.adapt_subpel_force_stop.force_stop_above;
      else
        subpel_force_stop = cpi->sf.mv.adapt_subpel_force_stop.force_stop_below;
    }
    cpi->find_fractional_mv_step(
        x, &tmp_mv->as_mv, &ref_mv, cpi->common.allow_high_precision_mv,
        x->errorperbit, &cpi->fn_ptr[bsize], subpel_force_stop,
        cpi->sf.mv.subpel_search_level, cond_cost_list(cpi, cost_list),
        x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, 0, 0,
        cpi->sf.use_accurate_subpel_search);
    *rate_mv = vp9_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
                               x->mvcost, MV_COST_WEIGHT);
  }

  if (scaled_ref_frame) {
    int i;
    for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i];
  }
  return rv;
}

static void block_variance(const uint8_t *src, int src_stride,
                           const uint8_t *ref, int ref_stride, int w, int h,
                           unsigned int *sse, int *sum, int block_size,
#if CONFIG_VP9_HIGHBITDEPTH
                           int use_highbitdepth, vpx_bit_depth_t bd,
#endif
                           uint32_t *sse8x8, int *sum8x8, uint32_t *var8x8) {
  int i, j, k = 0;

  *sse = 0;
  *sum = 0;

  for (i = 0; i < h; i += block_size) {
    for (j = 0; j < w; j += block_size) {
#if CONFIG_VP9_HIGHBITDEPTH
      if (use_highbitdepth) {
        switch (bd) {
          case VPX_BITS_8:
            vpx_highbd_8_get8x8var(src + src_stride * i + j, src_stride,
                                   ref + ref_stride * i + j, ref_stride,
                                   &sse8x8[k], &sum8x8[k]);
            break;
          case VPX_BITS_10:
            vpx_highbd_10_get8x8var(src + src_stride * i + j, src_stride,
                                    ref + ref_stride * i + j, ref_stride,
                                    &sse8x8[k], &sum8x8[k]);
            break;
          case VPX_BITS_12:
            vpx_highbd_12_get8x8var(src + src_stride * i + j, src_stride,
                                    ref + ref_stride * i + j, ref_stride,
                                    &sse8x8[k], &sum8x8[k]);
            break;
        }
      } else {
        vpx_get8x8var(src + src_stride * i + j, src_stride,
                      ref + ref_stride * i + j, ref_stride, &sse8x8[k],
                      &sum8x8[k]);
      }
#else
      vpx_get8x8var(src + src_stride * i + j, src_stride,
                    ref + ref_stride * i + j, ref_stride, &sse8x8[k],
                    &sum8x8[k]);
#endif
      *sse += sse8x8[k];
      *sum += sum8x8[k];
      var8x8[k] = sse8x8[k] - (uint32_t)(((int64_t)sum8x8[k] * sum8x8[k]) >> 6);
      k++;
    }
  }
}

static void calculate_variance(int bw, int bh, TX_SIZE tx_size,
                               unsigned int *sse_i, int *sum_i,
                               unsigned int *var_o, unsigned int *sse_o,
                               int *sum_o) {
  const BLOCK_SIZE unit_size = txsize_to_bsize[tx_size];
  const int nw = 1 << (bw - b_width_log2_lookup[unit_size]);
  const int nh = 1 << (bh - b_height_log2_lookup[unit_size]);
  int i, j, k = 0;

  for (i = 0; i < nh; i += 2) {
    for (j = 0; j < nw; j += 2) {
      sse_o[k] = sse_i[i * nw + j] + sse_i[i * nw + j + 1] +
                 sse_i[(i + 1) * nw + j] + sse_i[(i + 1) * nw + j + 1];
      sum_o[k] = sum_i[i * nw + j] + sum_i[i * nw + j + 1] +
                 sum_i[(i + 1) * nw + j] + sum_i[(i + 1) * nw + j + 1];
      var_o[k] = sse_o[k] - (uint32_t)(((int64_t)sum_o[k] * sum_o[k]) >>
                                       (b_width_log2_lookup[unit_size] +
                                        b_height_log2_lookup[unit_size] + 6));
      k++;
    }
  }
}

// Adjust the ac_thr according to speed, width, height and normalized sum.
static int ac_thr_factor(const int speed, const int width, const int height,
                         const int norm_sum) {
  if (speed >= 8 && norm_sum < 5) {
    if (width <= 640 && height <= 480)
      return 4;
    else
      return 2;
  }
  return 1;
}

static TX_SIZE calculate_tx_size(VP9_COMP *const cpi, BLOCK_SIZE bsize,
                                 MACROBLOCKD *const xd, unsigned int var,
                                 unsigned int sse, int64_t ac_thr) {
  TX_SIZE tx_size;
  if (cpi->common.tx_mode == TX_MODE_SELECT) {
    if (sse > (var << 2))
      tx_size = VPXMIN(max_txsize_lookup[bsize],
                       tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
    else
      tx_size = TX_8X8;

    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ &&
        cyclic_refresh_segment_id_boosted(xd->mi[0]->segment_id))
      tx_size = TX_8X8;
    else if (tx_size > TX_16X16)
      tx_size = TX_16X16;

    // For screen-content, force 4X4 tx_size over 8X8 for large variance.
    if (cpi->oxcf.content == VP9E_CONTENT_SCREEN && tx_size == TX_8X8 &&
        bsize <= BLOCK_16X16 && var > (ac_thr << 6))
      tx_size = TX_4X4;
  } else {
    tx_size = VPXMIN(max_txsize_lookup[bsize],
                     tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
  }

  return tx_size;
}

static void model_rd_for_sb_y_large(VP9_COMP *cpi, BLOCK_SIZE bsize,
                                    MACROBLOCK *x, MACROBLOCKD *xd,
                                    int *out_rate_sum, int64_t *out_dist_sum,
                                    unsigned int *var_y, unsigned int *sse_y,
                                    int mi_row, int mi_col, int *early_term,
                                    int *flag_preduv_computed) {
  // Note our transform coeffs are 8 times an orthogonal transform.
  // Hence quantizer step is also 8 times. To get effective quantizer
  // we need to divide by 8 before sending to modeling function.
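  // (This is why the dc/ac skip thresholds below use the squared dequant
  // values scaled down by 64, i.e. >> 6.)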
  unsigned int sse;
  int rate;
  int64_t dist;
  struct macroblock_plane *const p = &x->plane[0];
  struct macroblockd_plane *const pd = &xd->plane[0];
  const uint32_t dc_quant = pd->dequant[0];
  const uint32_t ac_quant = pd->dequant[1];
  int64_t dc_thr = dc_quant * dc_quant >> 6;
  int64_t ac_thr = ac_quant * ac_quant >> 6;
  unsigned int var;
  int sum;
  int skip_dc = 0;

  const int bw = b_width_log2_lookup[bsize];
  const int bh = b_height_log2_lookup[bsize];
  const int num8x8 = 1 << (bw + bh - 2);
  unsigned int sse8x8[64] = { 0 };
  int sum8x8[64] = { 0 };
  unsigned int var8x8[64] = { 0 };
  TX_SIZE tx_size;
  int i, k;
#if CONFIG_VP9_HIGHBITDEPTH
  const vpx_bit_depth_t bd = cpi->common.bit_depth;
#endif
  // Calculate variance for the whole partition, and also save the 8x8 blocks'
  // variance to be used in the following transform skipping test.
  block_variance(p->src.buf, p->src.stride, pd->dst.buf, pd->dst.stride,
                 4 << bw, 4 << bh, &sse, &sum, 8,
#if CONFIG_VP9_HIGHBITDEPTH
                 cpi->common.use_highbitdepth, bd,
#endif
                 sse8x8, sum8x8, var8x8);
  var = sse - (unsigned int)(((int64_t)sum * sum) >> (bw + bh + 4));

  *var_y = var;
  *sse_y = sse;

#if CONFIG_VP9_TEMPORAL_DENOISING
  if (cpi->oxcf.noise_sensitivity > 0 && denoise_svc(cpi) &&
      cpi->oxcf.speed > 5)
    ac_thr = vp9_scale_acskip_thresh(ac_thr, cpi->denoiser.denoising_level,
                                     (abs(sum) >> (bw + bh)),
                                     cpi->svc.temporal_layer_id);
  else
    ac_thr *= ac_thr_factor(cpi->oxcf.speed, cpi->common.width,
                            cpi->common.height, abs(sum) >> (bw + bh));
#else
  ac_thr *= ac_thr_factor(cpi->oxcf.speed, cpi->common.width,
                          cpi->common.height, abs(sum) >> (bw + bh));
#endif

  tx_size = calculate_tx_size(cpi, bsize, xd, var, sse, ac_thr);
  // The code below for setting the skip flag assumes a transform size of at
  // least 8x8, so force this lower limit on the transform.
  if (tx_size < TX_8X8) tx_size = TX_8X8;
  xd->mi[0]->tx_size = tx_size;

  if (cpi->oxcf.content == VP9E_CONTENT_SCREEN && x->zero_temp_sad_source &&
      x->source_variance == 0)
    dc_thr = dc_thr << 1;

  // Evaluate if the partition block is a skippable block in the Y plane.
  {
    unsigned int sse16x16[16] = { 0 };
    int sum16x16[16] = { 0 };
    unsigned int var16x16[16] = { 0 };
    const int num16x16 = num8x8 >> 2;

    unsigned int sse32x32[4] = { 0 };
    int sum32x32[4] = { 0 };
    unsigned int var32x32[4] = { 0 };
    const int num32x32 = num8x8 >> 4;

    int ac_test = 1;
    int dc_test = 1;
    const int num = (tx_size == TX_8X8)
                        ? num8x8
                        : ((tx_size == TX_16X16) ? num16x16 : num32x32);
    const unsigned int *sse_tx =
        (tx_size == TX_8X8) ? sse8x8
                            : ((tx_size == TX_16X16) ? sse16x16 : sse32x32);
    const unsigned int *var_tx =
        (tx_size == TX_8X8) ? var8x8
                            : ((tx_size == TX_16X16) ? var16x16 : var32x32);

    // Calculate variance if tx_size > TX_8X8.
    if (tx_size >= TX_16X16)
      calculate_variance(bw, bh, TX_8X8, sse8x8, sum8x8, var16x16, sse16x16,
                         sum16x16);
    if (tx_size == TX_32X32)
      calculate_variance(bw, bh, TX_16X16, sse16x16, sum16x16, var32x32,
                         sse32x32, sum32x32);

    // Skipping test.
    x->skip_txfm[0] = SKIP_TXFM_NONE;
    for (k = 0; k < num; k++)
      // Check if all ac coefficients can be quantized to zero.
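      // (Per-transform-block variance below the scaled ac threshold is
      // treated as AC energy inside the quantizer dead zone, so the AC
      // coefficients are assumed to quantize to zero.)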
      if (!(var_tx[k] < ac_thr || var == 0)) {
        ac_test = 0;
        break;
      }

    for (k = 0; k < num; k++)
      // Check if the dc coefficient can be quantized to zero.
      if (!(sse_tx[k] - var_tx[k] < dc_thr || sse == var)) {
        dc_test = 0;
        break;
      }

    if (ac_test) {
      x->skip_txfm[0] = SKIP_TXFM_AC_ONLY;

      if (dc_test) x->skip_txfm[0] = SKIP_TXFM_AC_DC;
    } else if (dc_test) {
      skip_dc = 1;
    }
  }

  if (x->skip_txfm[0] == SKIP_TXFM_AC_DC) {
    int skip_uv[2] = { 0 };
    unsigned int var_uv[2];
    unsigned int sse_uv[2];

    *out_rate_sum = 0;
    *out_dist_sum = sse << 4;

    // Transform skipping test in the UV planes.
    for (i = 1; i <= 2; i++) {
      if (cpi->oxcf.speed < 8 || x->color_sensitivity[i - 1]) {
        struct macroblock_plane *const p = &x->plane[i];
        struct macroblockd_plane *const pd = &xd->plane[i];
        const TX_SIZE uv_tx_size = get_uv_tx_size(xd->mi[0], pd);
        const BLOCK_SIZE unit_size = txsize_to_bsize[uv_tx_size];
        const BLOCK_SIZE uv_bsize = get_plane_block_size(bsize, pd);
        const int uv_bw = b_width_log2_lookup[uv_bsize];
        const int uv_bh = b_height_log2_lookup[uv_bsize];
        const int sf = (uv_bw - b_width_log2_lookup[unit_size]) +
                       (uv_bh - b_height_log2_lookup[unit_size]);
        const uint32_t uv_dc_thr = pd->dequant[0] * pd->dequant[0] >> (6 - sf);
        const uint32_t uv_ac_thr = pd->dequant[1] * pd->dequant[1] >> (6 - sf);
        int j = i - 1;

        vp9_build_inter_predictors_sbp(xd, mi_row, mi_col, bsize, i);
        flag_preduv_computed[i - 1] = 1;
        var_uv[j] = cpi->fn_ptr[uv_bsize].vf(
            p->src.buf, p->src.stride, pd->dst.buf, pd->dst.stride, &sse_uv[j]);

        if ((var_uv[j] < uv_ac_thr || var_uv[j] == 0) &&
            (sse_uv[j] - var_uv[j] < uv_dc_thr || sse_uv[j] == var_uv[j]))
          skip_uv[j] = 1;
        else
          break;
      } else {
        skip_uv[i - 1] = 1;
      }
    }

    // If the transforms in the YUV planes are all skippable, the mode search
    // checks fewer inter modes and doesn't check intra modes.
    if (skip_uv[0] & skip_uv[1]) {
      *early_term = 1;
    }
    return;
  }

  if (!skip_dc) {
#if CONFIG_VP9_HIGHBITDEPTH
    vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize],
                                 dc_quant >> (xd->bd - 5), &rate, &dist);
#else
    vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize],
                                 dc_quant >> 3, &rate, &dist);
#endif  // CONFIG_VP9_HIGHBITDEPTH
  }

  if (!skip_dc) {
    *out_rate_sum = rate >> 1;
    *out_dist_sum = dist << 3;
  } else {
    *out_rate_sum = 0;
    *out_dist_sum = (sse - var) << 4;
  }

#if CONFIG_VP9_HIGHBITDEPTH
  vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize],
                               ac_quant >> (xd->bd - 5), &rate, &dist);
#else
  vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize], ac_quant >> 3,
                               &rate, &dist);
#endif  // CONFIG_VP9_HIGHBITDEPTH

  *out_rate_sum += rate;
  *out_dist_sum += dist << 4;
}

static void model_rd_for_sb_y(VP9_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
                              MACROBLOCKD *xd, int *out_rate_sum,
                              int64_t *out_dist_sum, unsigned int *var_y,
                              unsigned int *sse_y) {
  // Note our transform coeffs are 8 times an orthogonal transform.
  // Hence quantizer step is also 8 times. To get effective quantizer
  // we need to divide by 8 before sending to modeling function.
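  // Compared with model_rd_for_sb_y_large(), this variant models the whole
  // block with a single variance/SSE pair rather than per-8x8 statistics,
  // and it performs no UV-based early termination.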
  unsigned int sse;
  int rate;
  int64_t dist;
  struct macroblock_plane *const p = &x->plane[0];
  struct macroblockd_plane *const pd = &xd->plane[0];
  const int64_t dc_thr = p->quant_thred[0] >> 6;
  const int64_t ac_thr = p->quant_thred[1] >> 6;
  const uint32_t dc_quant = pd->dequant[0];
  const uint32_t ac_quant = pd->dequant[1];
  unsigned int var = cpi->fn_ptr[bsize].vf(p->src.buf, p->src.stride,
                                           pd->dst.buf, pd->dst.stride, &sse);
  int skip_dc = 0;

  *var_y = var;
  *sse_y = sse;

  xd->mi[0]->tx_size = calculate_tx_size(cpi, bsize, xd, var, sse, ac_thr);

  // Evaluate if the partition block is a skippable block in the Y plane.
  {
    const BLOCK_SIZE unit_size = txsize_to_bsize[xd->mi[0]->tx_size];
    const unsigned int num_blk_log2 =
        (b_width_log2_lookup[bsize] - b_width_log2_lookup[unit_size]) +
        (b_height_log2_lookup[bsize] - b_height_log2_lookup[unit_size]);
    const unsigned int sse_tx = sse >> num_blk_log2;
    const unsigned int var_tx = var >> num_blk_log2;

    x->skip_txfm[0] = SKIP_TXFM_NONE;
    // Check if all ac coefficients can be quantized to zero.
    if (var_tx < ac_thr || var == 0) {
      x->skip_txfm[0] = SKIP_TXFM_AC_ONLY;
      // Check if the dc coefficient can be quantized to zero.
      if (sse_tx - var_tx < dc_thr || sse == var)
        x->skip_txfm[0] = SKIP_TXFM_AC_DC;
    } else {
      if (sse_tx - var_tx < dc_thr || sse == var) skip_dc = 1;
    }
  }

  if (x->skip_txfm[0] == SKIP_TXFM_AC_DC) {
    *out_rate_sum = 0;
    *out_dist_sum = sse << 4;
    return;
  }

  if (!skip_dc) {
#if CONFIG_VP9_HIGHBITDEPTH
    vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize],
                                 dc_quant >> (xd->bd - 5), &rate, &dist);
#else
    vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize],
                                 dc_quant >> 3, &rate, &dist);
#endif  // CONFIG_VP9_HIGHBITDEPTH
  }

  if (!skip_dc) {
    *out_rate_sum = rate >> 1;
    *out_dist_sum = dist << 3;
  } else {
    *out_rate_sum = 0;
    *out_dist_sum = (sse - var) << 4;
  }

#if CONFIG_VP9_HIGHBITDEPTH
  vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize],
                               ac_quant >> (xd->bd - 5), &rate, &dist);
#else
  vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize], ac_quant >> 3,
                               &rate, &dist);
#endif  // CONFIG_VP9_HIGHBITDEPTH

  *out_rate_sum += rate;
  *out_dist_sum += dist << 4;
}

static void block_yrd(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *this_rdc,
                      int *skippable, int64_t *sse, BLOCK_SIZE bsize,
                      TX_SIZE tx_size, int rd_computed) {
  MACROBLOCKD *xd = &x->e_mbd;
  const struct macroblockd_plane *pd = &xd->plane[0];
  struct macroblock_plane *const p = &x->plane[0];
  const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
  const int step = 1 << (tx_size << 1);
  const int block_step = (1 << tx_size);
  int block = 0, r, c;
  const int max_blocks_wide =
      num_4x4_w + (xd->mb_to_right_edge >= 0 ? 0 : xd->mb_to_right_edge >> 5);
  const int max_blocks_high =
      num_4x4_h + (xd->mb_to_bottom_edge >= 0 ? 0 : xd->mb_to_bottom_edge >> 5);
  int eob_cost = 0;
  const int bw = 4 * num_4x4_w;
  const int bh = 4 * num_4x4_h;

#if CONFIG_VP9_HIGHBITDEPTH
  // TODO(jingning): Implement the high bit-depth Hadamard transforms and
  // remove this check condition.
  // TODO(marpan): Use this path (model_rd) for 8bit under certain conditions
  // for now, as the vp9_quantize_fp below for the highbitdepth build is slow.
  if (xd->bd != 8 ||
      (cpi->oxcf.speed > 5 && cpi->common.frame_type != KEY_FRAME &&
       bsize < BLOCK_32X32)) {
    unsigned int var_y, sse_y;
    (void)tx_size;
    if (!rd_computed)
      model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc->rate, &this_rdc->dist,
                        &var_y, &sse_y);
    *sse = INT_MAX;
    *skippable = 0;
    return;
  }
#endif

  if (cpi->sf.use_simple_block_yrd && cpi->common.frame_type != KEY_FRAME &&
      (bsize < BLOCK_32X32 ||
       (cpi->use_svc &&
        (bsize < BLOCK_32X32 || cpi->svc.temporal_layer_id > 0)))) {
    unsigned int var_y, sse_y;
    (void)tx_size;
    if (!rd_computed)
      model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc->rate, &this_rdc->dist,
                        &var_y, &sse_y);
    *sse = INT_MAX;
    *skippable = 0;
    return;
  }

  (void)cpi;

  // The max tx_size passed in is TX_16X16.
  assert(tx_size != TX_32X32);

  vpx_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride,
                     pd->dst.buf, pd->dst.stride);
  *skippable = 1;
  // Keep track of the row and column of the blocks we use so that we know
  // if we are in the unrestricted motion border.
  for (r = 0; r < max_blocks_high; r += block_step) {
    for (c = 0; c < num_4x4_w; c += block_step) {
      if (c < max_blocks_wide) {
        const scan_order *const scan_order = &vp9_default_scan_orders[tx_size];
        tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
        tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
        tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
        uint16_t *const eob = &p->eobs[block];
        const int diff_stride = bw;
        const int16_t *src_diff;
        src_diff = &p->src_diff[(r * diff_stride + c) << 2];

        switch (tx_size) {
          case TX_16X16:
            vpx_hadamard_16x16(src_diff, diff_stride, coeff);
            vp9_quantize_fp(coeff, 256, x->skip_block, p->round_fp, p->quant_fp,
                            qcoeff, dqcoeff, pd->dequant, eob, scan_order->scan,
                            scan_order->iscan);
            break;
          case TX_8X8:
            vpx_hadamard_8x8(src_diff, diff_stride, coeff);
            vp9_quantize_fp(coeff, 64, x->skip_block, p->round_fp, p->quant_fp,
                            qcoeff, dqcoeff, pd->dequant, eob, scan_order->scan,
                            scan_order->iscan);
            break;
          default:
            assert(tx_size == TX_4X4);
            x->fwd_txfm4x4(src_diff, coeff, diff_stride);
            vp9_quantize_fp(coeff, 16, x->skip_block, p->round_fp, p->quant_fp,
                            qcoeff, dqcoeff, pd->dequant, eob, scan_order->scan,
                            scan_order->iscan);
            break;
        }
        *skippable &= (*eob == 0);
        eob_cost += 1;
      }
      block += step;
    }
  }

  this_rdc->rate = 0;
  if (*sse < INT64_MAX) {
    *sse = (*sse << 6) >> 2;
    if (*skippable) {
      this_rdc->dist = *sse;
      return;
    }
  }

  block = 0;
  this_rdc->dist = 0;
  for (r = 0; r < max_blocks_high; r += block_step) {
    for (c = 0; c < num_4x4_w; c += block_step) {
      if (c < max_blocks_wide) {
        tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
        tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
        tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
        uint16_t *const eob = &p->eobs[block];

        if (*eob == 1)
          this_rdc->rate += (int)abs(qcoeff[0]);
        else if (*eob > 1)
          this_rdc->rate += vpx_satd(qcoeff, step << 4);

        this_rdc->dist += vp9_block_error_fp(coeff, dqcoeff, step << 4) >> 2;
      }
      block += step;
    }
  }
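  // The eob count and coefficient SATD accumulated above act as a cheap rate
  // proxy; the shifts below scale them into the bit-cost domain used by
  // RDCOST.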
  // If skippable is set, rate gets clobbered later.
  this_rdc->rate <<= (2 + VP9_PROB_COST_SHIFT);
  this_rdc->rate += (eob_cost << VP9_PROB_COST_SHIFT);
}

static void model_rd_for_sb_uv(VP9_COMP *cpi, BLOCK_SIZE plane_bsize,
                               MACROBLOCK *x, MACROBLOCKD *xd,
                               RD_COST *this_rdc, unsigned int *var_y,
                               unsigned int *sse_y, int start_plane,
                               int stop_plane) {
  // Note our transform coeffs are 8 times an orthogonal transform.
  // Hence quantizer step is also 8 times. To get effective quantizer
  // we need to divide by 8 before sending to modeling function.
  unsigned int sse;
  int rate;
  int64_t dist;
  int i;
#if CONFIG_VP9_HIGHBITDEPTH
  uint64_t tot_var = *var_y;
  uint64_t tot_sse = *sse_y;
#else
  uint32_t tot_var = *var_y;
  uint32_t tot_sse = *sse_y;
#endif

  this_rdc->rate = 0;
  this_rdc->dist = 0;

  for (i = start_plane; i <= stop_plane; ++i) {
    struct macroblock_plane *const p = &x->plane[i];
    struct macroblockd_plane *const pd = &xd->plane[i];
    const uint32_t dc_quant = pd->dequant[0];
    const uint32_t ac_quant = pd->dequant[1];
    const BLOCK_SIZE bs = plane_bsize;
    unsigned int var;
    if (!x->color_sensitivity[i - 1]) continue;

    var = cpi->fn_ptr[bs].vf(p->src.buf, p->src.stride, pd->dst.buf,
                             pd->dst.stride, &sse);
    assert(sse >= var);
    tot_var += var;
    tot_sse += sse;

#if CONFIG_VP9_HIGHBITDEPTH
    vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bs],
                                 dc_quant >> (xd->bd - 5), &rate, &dist);
#else
    vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bs],
                                 dc_quant >> 3, &rate, &dist);
#endif  // CONFIG_VP9_HIGHBITDEPTH

    this_rdc->rate += rate >> 1;
    this_rdc->dist += dist << 3;

#if CONFIG_VP9_HIGHBITDEPTH
    vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bs],
                                 ac_quant >> (xd->bd - 5), &rate, &dist);
#else
    vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bs], ac_quant >> 3,
                                 &rate, &dist);
#endif  // CONFIG_VP9_HIGHBITDEPTH

    this_rdc->rate += rate;
    this_rdc->dist += dist << 4;
  }

#if CONFIG_VP9_HIGHBITDEPTH
  *var_y = tot_var > UINT32_MAX ? UINT32_MAX : (uint32_t)tot_var;
  *sse_y = tot_sse > UINT32_MAX ? UINT32_MAX : (uint32_t)tot_sse;
#else
  *var_y = tot_var;
  *sse_y = tot_sse;
#endif
}

static int get_pred_buffer(PRED_BUFFER *p, int len) {
  int i;

  for (i = 0; i < len; i++) {
    if (!p[i].in_use) {
      p[i].in_use = 1;
      return i;
    }
  }
  return -1;
}

static void free_pred_buffer(PRED_BUFFER *p) {
  if (p != NULL) p->in_use = 0;
}

static void encode_breakout_test(
    VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int mi_row, int mi_col,
    MV_REFERENCE_FRAME ref_frame, PREDICTION_MODE this_mode, unsigned int var_y,
    unsigned int sse_y, struct buf_2d yv12_mb[][MAX_MB_PLANE], int *rate,
    int64_t *dist, int *flag_preduv_computed) {
  MACROBLOCKD *xd = &x->e_mbd;
  MODE_INFO *const mi = xd->mi[0];
  const BLOCK_SIZE uv_size = get_plane_block_size(bsize, &xd->plane[1]);
  unsigned int var = var_y, sse = sse_y;
  // Skipping threshold for ac.
  unsigned int thresh_ac;
  // Skipping threshold for dc.
  unsigned int thresh_dc;
  int motion_low = 1;

  if (cpi->use_svc && ref_frame == GOLDEN_FRAME) return;
  if (mi->mv[0].as_mv.row > 64 || mi->mv[0].as_mv.row < -64 ||
      mi->mv[0].as_mv.col > 64 || mi->mv[0].as_mv.col < -64)
    motion_low = 0;
  if (x->encode_breakout > 0 && motion_low == 1) {
    // Set a maximum for the threshold to avoid a big PSNR loss in the low
    // bit rate case. Use an extreme low threshold for static frames to
    // limit skipping.
    const unsigned int max_thresh = 36000;
    // The encode_breakout input.
    const unsigned int min_thresh =
        VPXMIN(((unsigned int)x->encode_breakout << 4), max_thresh);
#if CONFIG_VP9_HIGHBITDEPTH
    const int shift = (xd->bd << 1) - 16;
#endif

    // Calculate the threshold according to the dequant value.
    thresh_ac = (xd->plane[0].dequant[1] * xd->plane[0].dequant[1]) >> 3;
#if CONFIG_VP9_HIGHBITDEPTH
    if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) && shift > 0) {
      thresh_ac = ROUND_POWER_OF_TWO(thresh_ac, shift);
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH
    thresh_ac = clamp(thresh_ac, min_thresh, max_thresh);

    // Adjust the ac threshold according to the partition size.
    thresh_ac >>=
        8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);

    thresh_dc = (xd->plane[0].dequant[0] * xd->plane[0].dequant[0] >> 6);
#if CONFIG_VP9_HIGHBITDEPTH
    if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) && shift > 0) {
      thresh_dc = ROUND_POWER_OF_TWO(thresh_dc, shift);
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH
  } else {
    thresh_ac = 0;
    thresh_dc = 0;
  }

  // Y skipping condition checking for ac and dc.
  if (var <= thresh_ac && (sse - var) <= thresh_dc) {
    unsigned int sse_u, sse_v;
    unsigned int var_u, var_v;
    unsigned int thresh_ac_uv = thresh_ac;
    unsigned int thresh_dc_uv = thresh_dc;
    if (x->sb_is_skin) {
      thresh_ac_uv = 0;
      thresh_dc_uv = 0;
    }

    if (!flag_preduv_computed[0] || !flag_preduv_computed[1]) {
      xd->plane[1].pre[0] = yv12_mb[ref_frame][1];
      xd->plane[2].pre[0] = yv12_mb[ref_frame][2];
      vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col, bsize);
    }

    var_u = cpi->fn_ptr[uv_size].vf(x->plane[1].src.buf, x->plane[1].src.stride,
                                    xd->plane[1].dst.buf,
                                    xd->plane[1].dst.stride, &sse_u);

    // U skipping condition checking.
    if (((var_u << 2) <= thresh_ac_uv) && (sse_u - var_u <= thresh_dc_uv)) {
      var_v = cpi->fn_ptr[uv_size].vf(
          x->plane[2].src.buf, x->plane[2].src.stride, xd->plane[2].dst.buf,
          xd->plane[2].dst.stride, &sse_v);

      // V skipping condition checking.
      if (((var_v << 2) <= thresh_ac_uv) && (sse_v - var_v <= thresh_dc_uv)) {
        x->skip = 1;

        // The cost of the skip bit needs to be added.
        *rate = cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref_frame]]
                                    [INTER_OFFSET(this_mode)];

        // More on this part of rate:
        // rate += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);

        // Scaling factor for SSE from spatial domain to frequency
        // domain is 16. Adjust distortion accordingly.
        // TODO(yunqingwang): In this function, only y-plane dist is
        // calculated.
        *dist = (sse << 4);  // + ((sse_u + sse_v) << 4);

        // *disable_skip = 1;
      }
    }
  }
}

struct estimate_block_intra_args {
  VP9_COMP *cpi;
  MACROBLOCK *x;
  PREDICTION_MODE mode;
  int skippable;
  RD_COST *rdc;
};

static void estimate_block_intra(int plane, int block, int row, int col,
                                 BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
                                 void *arg) {
  struct estimate_block_intra_args *const args = arg;
  VP9_COMP *const cpi = args->cpi;
  MACROBLOCK *const x = args->x;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct macroblock_plane *const p = &x->plane[0];
  struct macroblockd_plane *const pd = &xd->plane[0];
  const BLOCK_SIZE bsize_tx = txsize_to_bsize[tx_size];
  uint8_t *const src_buf_base = p->src.buf;
  uint8_t *const dst_buf_base = pd->dst.buf;
  const int src_stride = p->src.stride;
  const int dst_stride = pd->dst.stride;
  RD_COST this_rdc;

  (void)block;

  p->src.buf = &src_buf_base[4 * (row * src_stride + col)];
  pd->dst.buf = &dst_buf_base[4 * (row * dst_stride + col)];
  // Use the source buffer as an approximation of the fully reconstructed
  // buffer.
  vp9_predict_intra_block(xd, b_width_log2_lookup[plane_bsize], tx_size,
                          args->mode, x->skip_encode ? p->src.buf : pd->dst.buf,
                          x->skip_encode ? src_stride : dst_stride, pd->dst.buf,
                          dst_stride, col, row, plane);

  if (plane == 0) {
    int64_t this_sse = INT64_MAX;
    // TODO(jingning): This needs further refactoring.
    block_yrd(cpi, x, &this_rdc, &args->skippable, &this_sse, bsize_tx,
              VPXMIN(tx_size, TX_16X16), 0);
  } else {
    unsigned int var = 0;
    unsigned int sse = 0;
    model_rd_for_sb_uv(cpi, plane_bsize, x, xd, &this_rdc, &var, &sse, plane,
                       plane);
  }

  p->src.buf = src_buf_base;
  pd->dst.buf = dst_buf_base;
  args->rdc->rate += this_rdc.rate;
  args->rdc->dist += this_rdc.dist;
}

static const THR_MODES mode_idx[MAX_REF_FRAMES][4] = {
  { THR_DC, THR_V_PRED, THR_H_PRED, THR_TM },
  { THR_NEARESTMV, THR_NEARMV, THR_ZEROMV, THR_NEWMV },
  { THR_NEARESTG, THR_NEARG, THR_ZEROG, THR_NEWG },
  { THR_NEARESTA, THR_NEARA, THR_ZEROA, THR_NEWA },
};

static const PREDICTION_MODE intra_mode_list[] = { DC_PRED, V_PRED, H_PRED,
                                                   TM_PRED };

static int mode_offset(const PREDICTION_MODE mode) {
  if (mode >= NEARESTMV) {
    return INTER_OFFSET(mode);
  } else {
    switch (mode) {
      case DC_PRED: return 0;
      case V_PRED: return 1;
      case H_PRED: return 2;
      case TM_PRED: return 3;
      default: return -1;
    }
  }
}

static INLINE int rd_less_than_thresh_row_mt(int64_t best_rd, int thresh,
                                             const int *const thresh_fact) {
  int is_rd_less_than_thresh;
  is_rd_less_than_thresh =
      best_rd < ((int64_t)thresh * (*thresh_fact) >> 5) || thresh == INT_MAX;
  return is_rd_less_than_thresh;
}

static INLINE void update_thresh_freq_fact_row_mt(
    VP9_COMP *cpi, TileDataEnc *tile_data, int source_variance,
    int thresh_freq_fact_idx, MV_REFERENCE_FRAME ref_frame,
    THR_MODES best_mode_idx, PREDICTION_MODE mode) {
  THR_MODES thr_mode_idx = mode_idx[ref_frame][mode_offset(mode)];
  int freq_fact_idx = thresh_freq_fact_idx + thr_mode_idx;
  int *freq_fact = &tile_data->row_base_thresh_freq_fact[freq_fact_idx];
  if (thr_mode_idx == best_mode_idx)
    *freq_fact -= (*freq_fact >> 4);
  else if (cpi->sf.limit_newmv_early_exit && mode == NEWMV &&
           ref_frame == LAST_FRAME && source_variance < 5) {
    *freq_fact = VPXMIN(*freq_fact + RD_THRESH_INC, 32);
  } else {
    *freq_fact = VPXMIN(*freq_fact + RD_THRESH_INC,
                        cpi->sf.adaptive_rd_thresh * RD_THRESH_MAX_FACT);
  }
}

static INLINE void update_thresh_freq_fact(
    VP9_COMP *cpi, TileDataEnc *tile_data, int source_variance,
    BLOCK_SIZE bsize, MV_REFERENCE_FRAME ref_frame, THR_MODES best_mode_idx,
    PREDICTION_MODE mode) {
  THR_MODES thr_mode_idx = mode_idx[ref_frame][mode_offset(mode)];
  int *freq_fact = &tile_data->thresh_freq_fact[bsize][thr_mode_idx];
  if (thr_mode_idx == best_mode_idx)
    *freq_fact -= (*freq_fact >> 4);
  else if (cpi->sf.limit_newmv_early_exit && mode == NEWMV &&
           ref_frame == LAST_FRAME && source_variance < 5) {
    *freq_fact = VPXMIN(*freq_fact + RD_THRESH_INC, 32);
  } else {
    *freq_fact = VPXMIN(*freq_fact + RD_THRESH_INC,
                        cpi->sf.adaptive_rd_thresh * RD_THRESH_MAX_FACT);
  }
}

void vp9_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
                         BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mi = xd->mi[0];
  RD_COST this_rdc, best_rdc;
  PREDICTION_MODE this_mode;
  struct estimate_block_intra_args args = { cpi, x, DC_PRED, 1, 0 };
  const TX_SIZE intra_tx_size =
      VPXMIN(max_txsize_lookup[bsize],
             tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
  MODE_INFO *const mic = xd->mi[0];
  int *bmode_costs;
  const MODE_INFO *above_mi = xd->above_mi;
  const MODE_INFO *left_mi = xd->left_mi;
  const PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, 0);
  const PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, 0);
  bmode_costs = cpi->y_mode_costs[A][L];

  (void)ctx;
  vp9_rd_cost_reset(&best_rdc);
  vp9_rd_cost_reset(&this_rdc);

  mi->ref_frame[0] = INTRA_FRAME;
  // Initialize interp_filter here so we do not have to check for inter block
  // modes in get_pred_context_switchable_interp().
  mi->interp_filter = SWITCHABLE_FILTERS;

  mi->mv[0].as_int = INVALID_MV;
  mi->uv_mode = DC_PRED;
  memset(x->skip_txfm, 0, sizeof(x->skip_txfm));

  // Change the limit of this loop to add other intra prediction
  // mode tests.
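  // (As written, only DC_PRED, V_PRED and H_PRED are tested; TM_PRED from
  // intra_mode_list is excluded.)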
  for (this_mode = DC_PRED; this_mode <= H_PRED; ++this_mode) {
    this_rdc.dist = this_rdc.rate = 0;
    args.mode = this_mode;
    args.skippable = 1;
    args.rdc = &this_rdc;
    mi->tx_size = intra_tx_size;
    vp9_foreach_transformed_block_in_plane(xd, bsize, 0, estimate_block_intra,
                                           &args);
    if (args.skippable) {
      x->skip_txfm[0] = SKIP_TXFM_AC_DC;
      this_rdc.rate = vp9_cost_bit(vp9_get_skip_prob(&cpi->common, xd), 1);
    } else {
      x->skip_txfm[0] = SKIP_TXFM_NONE;
      this_rdc.rate += vp9_cost_bit(vp9_get_skip_prob(&cpi->common, xd), 0);
    }
    this_rdc.rate += bmode_costs[this_mode];
    this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);

    if (this_rdc.rdcost < best_rdc.rdcost) {
      best_rdc = this_rdc;
      mi->mode = this_mode;
    }
  }

  *rd_cost = best_rdc;
}

static void init_ref_frame_cost(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                                int ref_frame_cost[MAX_REF_FRAMES]) {
  vpx_prob intra_inter_p = vp9_get_intra_inter_prob(cm, xd);
  vpx_prob ref_single_p1 = vp9_get_pred_prob_single_ref_p1(cm, xd);
  vpx_prob ref_single_p2 = vp9_get_pred_prob_single_ref_p2(cm, xd);

  ref_frame_cost[INTRA_FRAME] = vp9_cost_bit(intra_inter_p, 0);
  ref_frame_cost[LAST_FRAME] = ref_frame_cost[GOLDEN_FRAME] =
      ref_frame_cost[ALTREF_FRAME] = vp9_cost_bit(intra_inter_p, 1);

  ref_frame_cost[LAST_FRAME] += vp9_cost_bit(ref_single_p1, 0);
  ref_frame_cost[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p1, 1);
  ref_frame_cost[ALTREF_FRAME] += vp9_cost_bit(ref_single_p1, 1);
  ref_frame_cost[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p2, 0);
  ref_frame_cost[ALTREF_FRAME] += vp9_cost_bit(ref_single_p2, 1);
}

typedef struct {
  MV_REFERENCE_FRAME ref_frame;
  PREDICTION_MODE pred_mode;
} REF_MODE;

#define RT_INTER_MODES 12
static const REF_MODE ref_mode_set[RT_INTER_MODES] = {
  { LAST_FRAME, ZEROMV },   { LAST_FRAME, NEARESTMV },
  { GOLDEN_FRAME, ZEROMV }, { LAST_FRAME, NEARMV },
  { LAST_FRAME, NEWMV },    { GOLDEN_FRAME, NEARESTMV },
  { GOLDEN_FRAME, NEARMV }, { GOLDEN_FRAME, NEWMV },
  { ALTREF_FRAME, ZEROMV }, { ALTREF_FRAME, NEARESTMV },
  { ALTREF_FRAME, NEARMV }, { ALTREF_FRAME, NEWMV }
};

#define RT_INTER_MODES_SVC 8
static const REF_MODE ref_mode_set_svc[RT_INTER_MODES_SVC] = {
  { LAST_FRAME, ZEROMV },      { LAST_FRAME, NEARESTMV },
  { LAST_FRAME, NEARMV },      { GOLDEN_FRAME, ZEROMV },
  { GOLDEN_FRAME, NEARESTMV }, { GOLDEN_FRAME, NEARMV },
  { LAST_FRAME, NEWMV },       { GOLDEN_FRAME, NEWMV }
};

static INLINE void find_predictors(
    VP9_COMP *cpi, MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame,
    int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
    int const_motion[MAX_REF_FRAMES], int *ref_frame_skip_mask,
    const int flag_list[4], TileDataEnc *tile_data, int mi_row, int mi_col,
    struct buf_2d yv12_mb[4][MAX_MB_PLANE], BLOCK_SIZE bsize,
    int force_skip_low_temp_var, int comp_pred_allowed) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
  TileInfo *const tile_info = &tile_data->tile_info;
  // TODO(jingning) placeholder for inter-frame non-RD mode decision.
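  // The prediction-MV SAD is reset below and, for usable references, filled
  // in by vp9_mv_pred(); INT_MAX marks a reference that was never searched.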
  x->pred_mv_sad[ref_frame] = INT_MAX;
  frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
  frame_mv[ZEROMV][ref_frame].as_int = 0;
  // This needs various further optimizations. To be continued...
  if ((cpi->ref_frame_flags & flag_list[ref_frame]) && (yv12 != NULL)) {
    int_mv *const candidates = x->mbmi_ext->ref_mvs[ref_frame];
    const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf;
    vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);
    if (cm->use_prev_frame_mvs || comp_pred_allowed) {
      vp9_find_mv_refs(cm, xd, xd->mi[0], ref_frame, candidates, mi_row, mi_col,
                       x->mbmi_ext->mode_context);
    } else {
      const_motion[ref_frame] =
          mv_refs_rt(cpi, cm, x, xd, tile_info, xd->mi[0], ref_frame,
                     candidates, &frame_mv[NEWMV][ref_frame], mi_row, mi_col,
                     (int)(cpi->svc.use_base_mv && cpi->svc.spatial_layer_id));
    }
    vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
                          &frame_mv[NEARESTMV][ref_frame],
                          &frame_mv[NEARMV][ref_frame]);
    // Early exit for the golden frame if force_skip_low_temp_var is set.
    if (!vp9_is_scaled(sf) && bsize >= BLOCK_8X8 &&
        !(force_skip_low_temp_var && ref_frame == GOLDEN_FRAME)) {
      vp9_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, ref_frame,
                  bsize);
    }
  } else {
    *ref_frame_skip_mask |= (1 << ref_frame);
  }
}

static void vp9_NEWMV_diff_bias(const NOISE_ESTIMATE *ne, MACROBLOCKD *xd,
                                PREDICTION_MODE this_mode, RD_COST *this_rdc,
                                BLOCK_SIZE bsize, int mv_row, int mv_col,
                                int is_last_frame, int lowvar_highsumdiff,
                                int is_skin) {
  // Bias against MVs associated with NEWMV mode that are very different from
  // the top/left neighbors.
  if (this_mode == NEWMV) {
    int al_mv_average_row;
    int al_mv_average_col;
    int left_row, left_col;
    int row_diff, col_diff;
    int above_mv_valid = 0;
    int left_mv_valid = 0;
    int above_row = 0;
    int above_col = 0;

    if (xd->above_mi) {
      above_mv_valid = xd->above_mi->mv[0].as_int != INVALID_MV;
      above_row = xd->above_mi->mv[0].as_mv.row;
      above_col = xd->above_mi->mv[0].as_mv.col;
    }
    if (xd->left_mi) {
      left_mv_valid = xd->left_mi->mv[0].as_int != INVALID_MV;
      left_row = xd->left_mi->mv[0].as_mv.row;
      left_col = xd->left_mi->mv[0].as_mv.col;
    }
    if (above_mv_valid && left_mv_valid) {
      al_mv_average_row = (above_row + left_row + 1) >> 1;
      al_mv_average_col = (above_col + left_col + 1) >> 1;
    } else if (above_mv_valid) {
      al_mv_average_row = above_row;
      al_mv_average_col = above_col;
    } else if (left_mv_valid) {
      al_mv_average_row = left_row;
      al_mv_average_col = left_col;
    } else {
      al_mv_average_row = al_mv_average_col = 0;
    }
    row_diff = (al_mv_average_row - mv_row);
    col_diff = (al_mv_average_col - mv_col);
    if (row_diff > 48 || row_diff < -48 || col_diff > 48 || col_diff < -48) {
      if (bsize > BLOCK_32X32)
        this_rdc->rdcost = this_rdc->rdcost << 1;
      else
        this_rdc->rdcost = 3 * this_rdc->rdcost >> 1;
    }
  }
  // If noise estimation is enabled, and the estimated level is above the
  // threshold, add a bias to the LAST reference with small motion, for
  // large blocks.
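  // (The 7/8 scaling below amounts to roughly a 12% rdcost discount.)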
  if (ne->enabled && ne->level >= kMedium && bsize >= BLOCK_32X32 &&
      is_last_frame && mv_row < 8 && mv_row > -8 && mv_col < 8 && mv_col > -8)
    this_rdc->rdcost = 7 * (this_rdc->rdcost >> 3);
  else if (lowvar_highsumdiff && !is_skin && bsize >= BLOCK_16X16 &&
           is_last_frame && mv_row < 16 && mv_row > -16 && mv_col < 16 &&
           mv_col > -16)
    this_rdc->rdcost = 7 * (this_rdc->rdcost >> 3);
}

#if CONFIG_VP9_TEMPORAL_DENOISING
static void vp9_pickmode_ctx_den_update(
    VP9_PICKMODE_CTX_DEN *ctx_den, int64_t zero_last_cost_orig,
    int ref_frame_cost[MAX_REF_FRAMES],
    int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES], int reuse_inter_pred,
    BEST_PICKMODE *bp) {
  ctx_den->zero_last_cost_orig = zero_last_cost_orig;
  ctx_den->ref_frame_cost = ref_frame_cost;
  ctx_den->frame_mv = frame_mv;
  ctx_den->reuse_inter_pred = reuse_inter_pred;
  ctx_den->best_tx_size = bp->best_tx_size;
  ctx_den->best_mode = bp->best_mode;
  ctx_den->best_ref_frame = bp->best_ref_frame;
  ctx_den->best_pred_filter = bp->best_pred_filter;
  ctx_den->best_mode_skip_txfm = bp->best_mode_skip_txfm;
}

static void recheck_zeromv_after_denoising(
    VP9_COMP *cpi, MODE_INFO *const mi, MACROBLOCK *x, MACROBLOCKD *const xd,
    VP9_DENOISER_DECISION decision, VP9_PICKMODE_CTX_DEN *ctx_den,
    struct buf_2d yv12_mb[4][MAX_MB_PLANE], RD_COST *best_rdc, BLOCK_SIZE bsize,
    int mi_row, int mi_col) {
  // If the INTRA or GOLDEN reference was selected, re-evaluate ZEROMV on the
  // denoised result. Only do this under noise conditions, and if the rdcost
  // of ZEROMV on the original source is not significantly higher than the
  // rdcost of the best mode.
  if (cpi->noise_estimate.enabled && cpi->noise_estimate.level > kLow &&
      ctx_den->zero_last_cost_orig < (best_rdc->rdcost << 3) &&
      ((ctx_den->best_ref_frame == INTRA_FRAME && decision >= FILTER_BLOCK) ||
       (ctx_den->best_ref_frame == GOLDEN_FRAME &&
        cpi->svc.number_spatial_layers == 1 &&
        decision == FILTER_ZEROMV_BLOCK))) {
    // Check if we should pick ZEROMV on the denoised signal.
    int rate = 0;
    int64_t dist = 0;
    uint32_t var_y = UINT_MAX;
    uint32_t sse_y = UINT_MAX;
    RD_COST this_rdc;
    mi->mode = ZEROMV;
    mi->ref_frame[0] = LAST_FRAME;
    mi->ref_frame[1] = NONE;
    mi->mv[0].as_int = 0;
    mi->interp_filter = EIGHTTAP;
    if (cpi->sf.default_interp_filter == BILINEAR) mi->interp_filter = BILINEAR;
    xd->plane[0].pre[0] = yv12_mb[LAST_FRAME][0];
    vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
    model_rd_for_sb_y(cpi, bsize, x, xd, &rate, &dist, &var_y, &sse_y);
    this_rdc.rate = rate + ctx_den->ref_frame_cost[LAST_FRAME] +
                    cpi->inter_mode_cost[x->mbmi_ext->mode_context[LAST_FRAME]]
                                        [INTER_OFFSET(ZEROMV)];
    this_rdc.dist = dist;
    this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, rate, dist);
    // Don't switch to ZEROMV if the rdcost for ZEROMV on the denoised source
    // is higher than the best_ref mode (on the original source).
    if (this_rdc.rdcost > best_rdc->rdcost) {
      this_rdc = *best_rdc;
      mi->mode = ctx_den->best_mode;
      mi->ref_frame[0] = ctx_den->best_ref_frame;
      mi->interp_filter = ctx_den->best_pred_filter;
      if (ctx_den->best_ref_frame == INTRA_FRAME) {
        mi->mv[0].as_int = INVALID_MV;
        mi->interp_filter = SWITCHABLE_FILTERS;
      } else if (ctx_den->best_ref_frame == GOLDEN_FRAME) {
        mi->mv[0].as_int =
            ctx_den->frame_mv[ctx_den->best_mode][ctx_den->best_ref_frame]
                .as_int;
        if (ctx_den->reuse_inter_pred) {
          xd->plane[0].pre[0] = yv12_mb[GOLDEN_FRAME][0];
          vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
        }
      }
      mi->tx_size = ctx_den->best_tx_size;
      x->skip_txfm[0] = ctx_den->best_mode_skip_txfm;
    } else {
      ctx_den->best_ref_frame = LAST_FRAME;
      *best_rdc = this_rdc;
    }
  }
}
#endif  // CONFIG_VP9_TEMPORAL_DENOISING

static INLINE int get_force_skip_low_temp_var(uint8_t *variance_low, int mi_row,
                                              int mi_col, BLOCK_SIZE bsize) {
  const int i = (mi_row & 0x7) >> 1;
  const int j = (mi_col & 0x7) >> 1;
  int force_skip_low_temp_var = 0;
  // Set force_skip_low_temp_var based on the block size and block offset.
  if (bsize == BLOCK_64X64) {
    force_skip_low_temp_var = variance_low[0];
  } else if (bsize == BLOCK_64X32) {
    if (!(mi_col & 0x7) && !(mi_row & 0x7)) {
      force_skip_low_temp_var = variance_low[1];
    } else if (!(mi_col & 0x7) && (mi_row & 0x7)) {
      force_skip_low_temp_var = variance_low[2];
    }
  } else if (bsize == BLOCK_32X64) {
    if (!(mi_col & 0x7) && !(mi_row & 0x7)) {
      force_skip_low_temp_var = variance_low[3];
    } else if ((mi_col & 0x7) && !(mi_row & 0x7)) {
      force_skip_low_temp_var = variance_low[4];
    }
  } else if (bsize == BLOCK_32X32) {
    if (!(mi_col & 0x7) && !(mi_row & 0x7)) {
      force_skip_low_temp_var = variance_low[5];
    } else if ((mi_col & 0x7) && !(mi_row & 0x7)) {
      force_skip_low_temp_var = variance_low[6];
    } else if (!(mi_col & 0x7) && (mi_row & 0x7)) {
      force_skip_low_temp_var = variance_low[7];
    } else if ((mi_col & 0x7) && (mi_row & 0x7)) {
      force_skip_low_temp_var = variance_low[8];
    }
  } else if (bsize == BLOCK_16X16) {
    force_skip_low_temp_var = variance_low[pos_shift_16x16[i][j]];
  } else if (bsize == BLOCK_32X16) {
    // The col shift index for the second 16x16 block.
    const int j2 = ((mi_col + 2) & 0x7) >> 1;
    // Only force the skip if each 16x16 block inside has low temporal
    // variance.
    force_skip_low_temp_var = variance_low[pos_shift_16x16[i][j]] &&
                              variance_low[pos_shift_16x16[i][j2]];
  } else if (bsize == BLOCK_16X32) {
    // The row shift index for the second 16x16 block.
    const int i2 = ((mi_row + 2) & 0x7) >> 1;
    force_skip_low_temp_var = variance_low[pos_shift_16x16[i][j]] &&
                              variance_low[pos_shift_16x16[i2][j]];
  }
  return force_skip_low_temp_var;
}

static void search_filter_ref(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *this_rdc,
                              int mi_row, int mi_col, PRED_BUFFER *tmp,
                              BLOCK_SIZE bsize, int reuse_inter_pred,
                              PRED_BUFFER **this_mode_pred, unsigned int *var_y,
                              unsigned int *sse_y) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mi = xd->mi[0];
  struct macroblockd_plane *const pd = &xd->plane[0];
  const int bw = num_4x4_blocks_wide_lookup[bsize] << 2;

  int pf_rate[3] = { 0 };
  int64_t pf_dist[3] = { 0 };
  int curr_rate[3] = { 0 };
  unsigned int pf_var[3] = { 0 };
  unsigned int pf_sse[3] = { 0 };
  TX_SIZE pf_tx_size[3] = { 0 };
  int64_t best_cost = INT64_MAX;
  INTERP_FILTER best_filter = SWITCHABLE, filter;
  PRED_BUFFER *current_pred = *this_mode_pred;
  uint8_t skip_txfm = SKIP_TXFM_NONE;

  for (filter = EIGHTTAP; filter <= EIGHTTAP_SMOOTH; ++filter) {
    int64_t cost;
    mi->interp_filter = filter;
    vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
    model_rd_for_sb_y(cpi, bsize, x, xd, &pf_rate[filter], &pf_dist[filter],
                      &pf_var[filter], &pf_sse[filter]);
    curr_rate[filter] = pf_rate[filter];
    pf_rate[filter] += vp9_get_switchable_rate(cpi, xd);
    cost = RDCOST(x->rdmult, x->rddiv, pf_rate[filter], pf_dist[filter]);
    pf_tx_size[filter] = mi->tx_size;
    if (cost < best_cost) {
      best_filter = filter;
      best_cost = cost;
      skip_txfm = x->skip_txfm[0];

      if (reuse_inter_pred) {
        if (*this_mode_pred != current_pred) {
          free_pred_buffer(*this_mode_pred);
          *this_mode_pred = current_pred;
        }
        current_pred = &tmp[get_pred_buffer(tmp, 3)];
        pd->dst.buf = current_pred->data;
        pd->dst.stride = bw;
      }
    }
  }

  if (reuse_inter_pred && *this_mode_pred != current_pred)
    free_pred_buffer(current_pred);

  mi->interp_filter = best_filter;
  mi->tx_size = pf_tx_size[best_filter];
  this_rdc->rate = curr_rate[best_filter];
  this_rdc->dist = pf_dist[best_filter];
  *var_y = pf_var[best_filter];
  *sse_y = pf_sse[best_filter];
  x->skip_txfm[0] = skip_txfm;
  if (reuse_inter_pred) {
    pd->dst.buf = (*this_mode_pred)->data;
    pd->dst.stride = (*this_mode_pred)->stride;
  }
}

static int search_new_mv(VP9_COMP *cpi, MACROBLOCK *x,
                         int_mv frame_mv[][MAX_REF_FRAMES],
                         MV_REFERENCE_FRAME ref_frame, int gf_temporal_ref,
                         BLOCK_SIZE bsize, int mi_row, int mi_col,
                         int best_pred_sad, int *rate_mv,
                         unsigned int best_sse_sofar, RD_COST *best_rdc) {
  SVC *const svc = &cpi->svc;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mi = xd->mi[0];
  SPEED_FEATURES *const sf = &cpi->sf;

  if (ref_frame > LAST_FRAME && gf_temporal_ref &&
      cpi->oxcf.rc_mode == VPX_CBR) {
    int tmp_sad;
    uint32_t dis;
    int cost_list[5] = { INT_MAX, INT_MAX, INT_MAX, INT_MAX, INT_MAX };

    if (bsize < BLOCK_16X16) return -1;

    tmp_sad = vp9_int_pro_motion_estimation(
        cpi, x, bsize, mi_row, mi_col,
        &x->mbmi_ext->ref_mvs[ref_frame][0].as_mv);

    if (tmp_sad > x->pred_mv_sad[LAST_FRAME]) return -1;
    if (tmp_sad + (num_pels_log2_lookup[bsize] << 4) > best_pred_sad) return -1;

    frame_mv[NEWMV][ref_frame].as_int = mi->mv[0].as_int;
    *rate_mv = vp9_mv_bit_cost(&frame_mv[NEWMV][ref_frame].as_mv,
                               &x->mbmi_ext->ref_mvs[ref_frame][0].as_mv,
                               x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
    frame_mv[NEWMV][ref_frame].as_mv.row >>= 3;
    frame_mv[NEWMV][ref_frame].as_mv.col >>= 3;

    cpi->find_fractional_mv_step(
        x, &frame_mv[NEWMV][ref_frame].as_mv,
        &x->mbmi_ext->ref_mvs[ref_frame][0].as_mv,
        cpi->common.allow_high_precision_mv, x->errorperbit,
        &cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
        cpi->sf.mv.subpel_search_level, cond_cost_list(cpi, cost_list),
        x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref_frame], NULL, 0, 0,
        cpi->sf.use_accurate_subpel_search);
  } else if (svc->use_base_mv && svc->spatial_layer_id) {
    if (frame_mv[NEWMV][ref_frame].as_int != INVALID_MV) {
      const int pre_stride = xd->plane[0].pre[0].stride;
      unsigned int base_mv_sse = UINT_MAX;
      int scale = (cpi->rc.avg_frame_low_motion > 60) ? 2 : 4;
      const uint8_t *const pre_buf =
          xd->plane[0].pre[0].buf +
          (frame_mv[NEWMV][ref_frame].as_mv.row >> 3) * pre_stride +
          (frame_mv[NEWMV][ref_frame].as_mv.col >> 3);
      cpi->fn_ptr[bsize].vf(x->plane[0].src.buf, x->plane[0].src.stride,
                            pre_buf, pre_stride, &base_mv_sse);

      // Exit NEWMV search if base_mv is (0,0) && bsize < BLOCK_16x16,
      // for SVC encoding.
      if (cpi->use_svc && svc->use_base_mv && bsize < BLOCK_16X16 &&
          frame_mv[NEWMV][ref_frame].as_mv.row == 0 &&
          frame_mv[NEWMV][ref_frame].as_mv.col == 0)
        return -1;

      // Exit NEWMV search if base_mv_sse is large.
      if (sf->base_mv_aggressive && base_mv_sse > (best_sse_sofar << scale))
        return -1;
      if (base_mv_sse < (best_sse_sofar << 1)) {
        // Base layer mv is good.
        // Exit NEWMV search if the base_mv is (0, 0) and sse is low, since
        // (0, 0) mode is already tested.
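        // (base_mv_sse is normalized below by the number of 4x4 blocks in
        // the partition before being compared against the fixed threshold.)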
        unsigned int base_mv_sse_normalized =
            base_mv_sse >>
            (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);
        if (sf->base_mv_aggressive && base_mv_sse <= best_sse_sofar &&
            base_mv_sse_normalized < 400 &&
            frame_mv[NEWMV][ref_frame].as_mv.row == 0 &&
            frame_mv[NEWMV][ref_frame].as_mv.col == 0)
          return -1;
        if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
                                    &frame_mv[NEWMV][ref_frame], rate_mv,
                                    best_rdc->rdcost, 1)) {
          return -1;
        }
      } else if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
                                         &frame_mv[NEWMV][ref_frame], rate_mv,
                                         best_rdc->rdcost, 0)) {
        return -1;
      }
    } else if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
                                       &frame_mv[NEWMV][ref_frame], rate_mv,
                                       best_rdc->rdcost, 0)) {
      return -1;
    }
  } else if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
                                     &frame_mv[NEWMV][ref_frame], rate_mv,
                                     best_rdc->rdcost, 0)) {
    return -1;
  }

  return 0;
}

static INLINE void init_best_pickmode(BEST_PICKMODE *bp) {
  bp->best_mode = ZEROMV;
  bp->best_ref_frame = LAST_FRAME;
  bp->best_tx_size = TX_SIZES;
  bp->best_intra_tx_size = TX_SIZES;
  bp->best_pred_filter = EIGHTTAP;
  bp->best_mode_skip_txfm = SKIP_TXFM_NONE;
  bp->best_second_ref_frame = NONE;
  bp->best_pred = NULL;
}
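/* Overview (editorial summary, not normative): vp9_pick_inter_mode() is the
 * real-time ("non-RD") mode decision for a block.  Instead of a full
 * rate-distortion search with transform and entropy coding, it
 *   1. gathers MV predictors for each usable reference frame,
 *   2. walks a small, fixed list of inter modes (plus optional (0,0)
 *      compound modes), pruning aggressively with speed-feature thresholds,
 *   3. scores each surviving candidate with a modelled RDCOST(),
 *   4. optionally tries a handful of intra modes when the best inter cost
 *      is still above inter_mode_thresh,
 * and writes the winner back into the MODE_INFO / RD_COST outputs.
 */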
void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, TileDataEnc *tile_data,
                         int mi_row, int mi_col, RD_COST *rd_cost,
                         BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
  VP9_COMMON *const cm = &cpi->common;
  SPEED_FEATURES *const sf = &cpi->sf;
  SVC *const svc = &cpi->svc;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mi = xd->mi[0];
  struct macroblockd_plane *const pd = &xd->plane[0];

  BEST_PICKMODE best_pickmode;

  MV_REFERENCE_FRAME ref_frame;
  MV_REFERENCE_FRAME usable_ref_frame, second_ref_frame;
  int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
  uint8_t mode_checked[MB_MODE_COUNT][MAX_REF_FRAMES];
  struct buf_2d yv12_mb[4][MAX_MB_PLANE];
  static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
                                    VP9_ALT_FLAG };
  RD_COST this_rdc, best_rdc;
  // var_y and sse_y are saved to be used in the skip check.
  unsigned int var_y = UINT_MAX;
  unsigned int sse_y = UINT_MAX;
  const int intra_cost_penalty =
      vp9_get_intra_cost_penalty(cpi, bsize, cm->base_qindex, cm->y_dc_delta_q);
  int64_t inter_mode_thresh =
      RDCOST(x->rdmult, x->rddiv, intra_cost_penalty, 0);
  const int *const rd_threshes = cpi->rd.threshes[mi->segment_id][bsize];
  const int sb_row = mi_row >> MI_BLOCK_SIZE_LOG2;
  int thresh_freq_fact_idx = (sb_row * BLOCK_SIZES + bsize) * MAX_MODES;
  const int *const rd_thresh_freq_fact =
      (cpi->sf.adaptive_rd_thresh_row_mt)
          ? &(tile_data->row_base_thresh_freq_fact[thresh_freq_fact_idx])
          : tile_data->thresh_freq_fact[bsize];

  INTERP_FILTER filter_ref;
  const int bsl = mi_width_log2_lookup[bsize];
  const int pred_filter_search =
      cm->interp_filter == SWITCHABLE
          ? (((mi_row + mi_col) >> bsl) +
             get_chessboard_index(cm->current_video_frame)) &
                0x1
          : 0;
  int const_motion[MAX_REF_FRAMES] = { 0 };
  const int bh = num_4x4_blocks_high_lookup[bsize] << 2;
  const int bw = num_4x4_blocks_wide_lookup[bsize] << 2;
  // For speed 6, the result of the interp filter search is reused later in
  // the actual encoding process.
  // tmp[3] points to the dst buffer; the other 3 point to allocated buffers.
  PRED_BUFFER tmp[4];
  DECLARE_ALIGNED(16, uint8_t, pred_buf[3 * 64 * 64]);
#if CONFIG_VP9_HIGHBITDEPTH
  DECLARE_ALIGNED(16, uint16_t, pred_buf_16[3 * 64 * 64]);
#endif
  struct buf_2d orig_dst = pd->dst;
  PRED_BUFFER *this_mode_pred = NULL;
  const int pixels_in_block = bh * bw;
  int reuse_inter_pred = cpi->sf.reuse_inter_pred_sby && ctx->pred_pixel_ready;
  int ref_frame_skip_mask = 0;
  int idx;
  int best_pred_sad = INT_MAX;
  int best_early_term = 0;
  int ref_frame_cost[MAX_REF_FRAMES];
  int svc_force_zero_mode[3] = { 0 };
  int perform_intra_pred = 1;
  int use_golden_nonzeromv = 1;
  int force_skip_low_temp_var = 0;
  int skip_ref_find_pred[4] = { 0 };
  unsigned int sse_zeromv_normalized = UINT_MAX;
  unsigned int best_sse_sofar = UINT_MAX;
  int gf_temporal_ref = 0;
#if CONFIG_VP9_TEMPORAL_DENOISING
  VP9_PICKMODE_CTX_DEN ctx_den;
  int64_t zero_last_cost_orig = INT64_MAX;
  int denoise_svc_pickmode = 1;
#endif
  INTERP_FILTER filter_gf_svc = EIGHTTAP;
  MV_REFERENCE_FRAME inter_layer_ref = GOLDEN_FRAME;
  const struct segmentation *const seg = &cm->seg;
  int comp_modes = 0;
  int num_inter_modes = (cpi->use_svc) ? RT_INTER_MODES_SVC : RT_INTER_MODES;
  int flag_svc_subpel = 0;
  int svc_mv_col = 0;
  int svc_mv_row = 0;
  int no_scaling = 0;
  unsigned int thresh_svc_skip_golden = 500;
  int scene_change_detected =
      cpi->rc.high_source_sad ||
      (cpi->use_svc && cpi->svc.high_source_sad_superframe);

  init_best_pickmode(&best_pickmode);

  x->encode_breakout = seg->enabled
                           ? cpi->segment_encode_breakout[mi->segment_id]
                           : cpi->encode_breakout;

  x->source_variance = UINT_MAX;
  if (cpi->sf.default_interp_filter == BILINEAR) {
    best_pickmode.best_pred_filter = BILINEAR;
    filter_gf_svc = BILINEAR;
  }
  if (cpi->use_svc && svc->spatial_layer_id > 0) {
    int layer =
        LAYER_IDS_TO_IDX(svc->spatial_layer_id - 1, svc->temporal_layer_id,
                         svc->number_temporal_layers);
    LAYER_CONTEXT *const lc = &svc->layer_context[layer];
    if (lc->scaling_factor_num == lc->scaling_factor_den) no_scaling = 1;
  }
  if (svc->spatial_layer_id > 0 &&
      (svc->high_source_sad_superframe || no_scaling))
    thresh_svc_skip_golden = 0;
  // Lower the skip threshold if the lower spatial layer is better quality
  // relative to the current layer.
  else if (svc->spatial_layer_id > 0 && cm->base_qindex > 150 &&
           cm->base_qindex > svc->lower_layer_qindex + 15)
    thresh_svc_skip_golden = 100;
  // Increase the skip threshold if the lower spatial layer is lower quality
  // relative to the current layer.
  else if (svc->spatial_layer_id > 0 && cm->base_qindex < 140 &&
           cm->base_qindex < svc->lower_layer_qindex - 20)
    thresh_svc_skip_golden = 1000;
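  /* Worked example for the threshold tuning above (illustrative only): with
   * spatial_layer_id > 0, no scaling and no high source SAD, suppose
   * base_qindex = 180 and lower_layer_qindex = 150.  Then 180 > 150 and
   * 180 > 150 + 15, so thresh_svc_skip_golden drops from 500 to 100 and the
   * golden (inter-layer) reference is skipped only when the zero-motion SSE
   * is very small.  Conversely, base_qindex = 120 with
   * lower_layer_qindex = 145 raises the threshold to 1000, making the
   * inter-layer skip more likely.
   */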
  if (!cpi->use_svc ||
      (svc->use_gf_temporal_ref_current_layer &&
       !svc->layer_context[svc->temporal_layer_id].is_key_frame)) {
    struct scale_factors *const sf_last = &cm->frame_refs[LAST_FRAME - 1].sf;
    struct scale_factors *const sf_golden =
        &cm->frame_refs[GOLDEN_FRAME - 1].sf;
    gf_temporal_ref = 1;
    // For temporal long-term prediction, check that the golden reference
    // is the same scale as the last reference; otherwise disable it.
    if ((sf_last->x_scale_fp != sf_golden->x_scale_fp) ||
        (sf_last->y_scale_fp != sf_golden->y_scale_fp)) {
      gf_temporal_ref = 0;
    } else {
      if (cpi->rc.avg_frame_low_motion > 70)
        thresh_svc_skip_golden = 500;
      else
        thresh_svc_skip_golden = 0;
    }
  }

  init_ref_frame_cost(cm, xd, ref_frame_cost);
  memset(&mode_checked[0][0], 0, MB_MODE_COUNT * MAX_REF_FRAMES);

  if (reuse_inter_pred) {
    int i;
    for (i = 0; i < 3; i++) {
#if CONFIG_VP9_HIGHBITDEPTH
      if (cm->use_highbitdepth)
        tmp[i].data = CONVERT_TO_BYTEPTR(&pred_buf_16[pixels_in_block * i]);
      else
        tmp[i].data = &pred_buf[pixels_in_block * i];
#else
      tmp[i].data = &pred_buf[pixels_in_block * i];
#endif  // CONFIG_VP9_HIGHBITDEPTH
      tmp[i].stride = bw;
      tmp[i].in_use = 0;
    }
    tmp[3].data = pd->dst.buf;
    tmp[3].stride = pd->dst.stride;
    tmp[3].in_use = 0;
  }

  x->skip_encode = cpi->sf.skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
  x->skip = 0;

  // Instead of using vp9_get_pred_context_switchable_interp(xd) to assign
  // filter_ref, we use a less strict condition on assigning filter_ref.
  // This is to reduce the probability of entering the flow of not assigning
  // filter_ref and then skipping the filter search.
  filter_ref = cm->interp_filter;
  if (cpi->sf.default_interp_filter != BILINEAR) {
    if (xd->above_mi && is_inter_block(xd->above_mi))
      filter_ref = xd->above_mi->interp_filter;
    else if (xd->left_mi && is_inter_block(xd->left_mi))
      filter_ref = xd->left_mi->interp_filter;
  }

  // Initialize mode decisions.
  vp9_rd_cost_reset(&best_rdc);
  vp9_rd_cost_reset(rd_cost);
  mi->sb_type = bsize;
  mi->ref_frame[0] = NONE;
  mi->ref_frame[1] = NONE;

  mi->tx_size =
      VPXMIN(max_txsize_lookup[bsize], tx_mode_to_biggest_tx_size[cm->tx_mode]);

  if (sf->short_circuit_flat_blocks || sf->limit_newmv_early_exit) {
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
      x->source_variance = vp9_high_get_sby_perpixel_variance(
          cpi, &x->plane[0].src, bsize, xd->bd);
    else
#endif  // CONFIG_VP9_HIGHBITDEPTH
      x->source_variance =
          vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);

    if (cpi->oxcf.content == VP9E_CONTENT_SCREEN && mi->segment_id > 0 &&
        x->zero_temp_sad_source && x->source_variance == 0) {
      mi->segment_id = 0;
      vp9_init_plane_quantizers(cpi, x);
    }
  }

#if CONFIG_VP9_TEMPORAL_DENOISING
  if (cpi->oxcf.noise_sensitivity > 0) {
    if (cpi->use_svc) denoise_svc_pickmode = vp9_denoise_svc_non_key(cpi);
    if (cpi->denoiser.denoising_level > kDenLowLow && denoise_svc_pickmode)
      vp9_denoiser_reset_frame_stats(ctx);
  }
#endif

  if (cpi->rc.frames_since_golden == 0 && gf_temporal_ref &&
      !cpi->rc.alt_ref_gf_group && !cpi->rc.last_frame_is_src_altref) {
    usable_ref_frame = LAST_FRAME;
  } else {
    usable_ref_frame = GOLDEN_FRAME;
  }

  if (cpi->oxcf.lag_in_frames > 0 && cpi->oxcf.rc_mode == VPX_VBR) {
    if (cpi->rc.alt_ref_gf_group || cpi->rc.is_src_frame_alt_ref)
      usable_ref_frame = ALTREF_FRAME;

    if (cpi->rc.is_src_frame_alt_ref) {
      skip_ref_find_pred[LAST_FRAME] = 1;
      skip_ref_find_pred[GOLDEN_FRAME] = 1;
    }
    if (!cm->show_frame) {
      if (cpi->rc.frames_since_key == 1) {
        usable_ref_frame = LAST_FRAME;
        skip_ref_find_pred[GOLDEN_FRAME] = 1;
        skip_ref_find_pred[ALTREF_FRAME] = 1;
      }
    }
  }

  // For svc mode, on spatial_layer_id > 0: if the reference has a different
  // scale, constrain the inter modes to only test zero motion.
  if (cpi->use_svc && svc->force_zero_mode_spatial_ref &&
      svc->spatial_layer_id > 0 && !gf_temporal_ref) {
    if (cpi->ref_frame_flags & flag_list[LAST_FRAME]) {
      struct scale_factors *const sf = &cm->frame_refs[LAST_FRAME - 1].sf;
      if (vp9_is_scaled(sf)) {
        svc_force_zero_mode[LAST_FRAME - 1] = 1;
        inter_layer_ref = LAST_FRAME;
      }
    }
    if (cpi->ref_frame_flags & flag_list[GOLDEN_FRAME]) {
      struct scale_factors *const sf = &cm->frame_refs[GOLDEN_FRAME - 1].sf;
      if (vp9_is_scaled(sf)) {
        svc_force_zero_mode[GOLDEN_FRAME - 1] = 1;
        inter_layer_ref = GOLDEN_FRAME;
      }
    }
  }

  if (cpi->sf.short_circuit_low_temp_var) {
    force_skip_low_temp_var =
        get_force_skip_low_temp_var(&x->variance_low[0], mi_row, mi_col, bsize);
    // If force_skip_low_temp_var is set, skip the golden reference for
    // short circuit modes 1 and 3.
    if ((cpi->sf.short_circuit_low_temp_var == 1 ||
         cpi->sf.short_circuit_low_temp_var == 3) &&
        force_skip_low_temp_var) {
      usable_ref_frame = LAST_FRAME;
    }
  }

  if (sf->disable_golden_ref && (x->content_state_sb != kVeryHighSad ||
                                 cpi->rc.avg_frame_low_motion < 60))
    usable_ref_frame = LAST_FRAME;

  if (!((cpi->ref_frame_flags & flag_list[GOLDEN_FRAME]) &&
        !svc_force_zero_mode[GOLDEN_FRAME - 1] && !force_skip_low_temp_var))
    use_golden_nonzeromv = 0;

  if (cpi->oxcf.speed >= 8 && !cpi->use_svc &&
      ((cpi->rc.frames_since_golden + 1) < x->last_sb_high_content ||
       x->last_sb_high_content > 40 || cpi->rc.frames_since_golden > 120))
    usable_ref_frame = LAST_FRAME;

  // Compound prediction modes: (0,0) on LAST/GOLDEN and ARF.
  if (cm->reference_mode == REFERENCE_MODE_SELECT &&
      cpi->sf.use_compound_nonrd_pickmode && usable_ref_frame == ALTREF_FRAME)
    comp_modes = 2;

  // If the segment reference frame feature is enabled and it's set to GOLDEN
  // reference, then make sure we don't skip checking GOLDEN; this is to
  // prevent the possibility of not picking any mode.
  if (segfeature_active(seg, mi->segment_id, SEG_LVL_REF_FRAME) &&
      get_segdata(seg, mi->segment_id, SEG_LVL_REF_FRAME) == GOLDEN_FRAME) {
    usable_ref_frame = GOLDEN_FRAME;
    skip_ref_find_pred[GOLDEN_FRAME] = 0;
    thresh_svc_skip_golden = 0;
  }

  for (ref_frame = LAST_FRAME; ref_frame <= usable_ref_frame; ++ref_frame) {
    // Skip find_predictors() if the reference frame is not in the
    // ref_frame_flags (i.e., not used as a reference for this frame).
    skip_ref_find_pred[ref_frame] =
        !(cpi->ref_frame_flags & flag_list[ref_frame]);
    if (!skip_ref_find_pred[ref_frame]) {
      find_predictors(cpi, x, ref_frame, frame_mv, const_motion,
                      &ref_frame_skip_mask, flag_list, tile_data, mi_row,
                      mi_col, yv12_mb, bsize, force_skip_low_temp_var,
                      comp_modes > 0);
    }
  }

  if (cpi->use_svc || cpi->oxcf.speed <= 7 || bsize < BLOCK_32X32)
    x->sb_use_mv_part = 0;
  // Set the flag_svc_subpel to 1 for SVC if the lower spatial layer used
  // an averaging filter for downsampling (phase = 8). If so, we will test
  // a nonzero motion mode on the spatial reference.
  // The nonzero motion is half-pixel shifted to the left and top (-4, -4).
  if (cpi->use_svc && svc->spatial_layer_id > 0 &&
      svc_force_zero_mode[inter_layer_ref - 1] &&
      svc->downsample_filter_phase[svc->spatial_layer_id - 1] == 8 &&
      !gf_temporal_ref) {
    svc_mv_col = -4;
    svc_mv_row = -4;
    flag_svc_subpel = 1;
  }
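  /* Structure of the main loop below (editorial note): the first
   * num_inter_modes iterations walk the fixed table ref_mode_set[] (or
   * ref_mode_set_svc[] for SVC), which pairs a prediction mode
   * (ZEROMV/NEARESTMV/NEARMV/NEWMV) with a reference frame.  When comp_modes
   * is nonzero, two extra iterations append (0,0) compound candidates,
   * LAST+ALTREF and GOLDEN+ALTREF.  Each iteration either continues past a
   * pruned candidate or scores it and updates best_pickmode/best_rdc.
   */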
  for (idx = 0; idx < num_inter_modes + comp_modes; ++idx) {
    int rate_mv = 0;
    int mode_rd_thresh;
    int mode_index;
    int i;
    int64_t this_sse;
    int is_skippable;
    int this_early_term = 0;
    int rd_computed = 0;
    int flag_preduv_computed[2] = { 0 };
    int inter_mv_mode = 0;
    int skip_this_mv = 0;
    int comp_pred = 0;
    int force_mv_inter_layer = 0;
    PREDICTION_MODE this_mode;
    second_ref_frame = NONE;

    if (idx < num_inter_modes) {
      this_mode = ref_mode_set[idx].pred_mode;
      ref_frame = ref_mode_set[idx].ref_frame;

      if (cpi->use_svc) {
        this_mode = ref_mode_set_svc[idx].pred_mode;
        ref_frame = ref_mode_set_svc[idx].ref_frame;
      }
    } else {
      // Add the (0,0) compound modes.
      this_mode = ZEROMV;
      ref_frame = LAST_FRAME;
      if (idx == num_inter_modes + comp_modes - 1) ref_frame = GOLDEN_FRAME;
      second_ref_frame = ALTREF_FRAME;
      comp_pred = 1;
    }

    if (ref_frame > usable_ref_frame) continue;
    if (skip_ref_find_pred[ref_frame]) continue;

    if (svc->previous_frame_is_intra_only) {
      if (ref_frame != LAST_FRAME || frame_mv[this_mode][ref_frame].as_int != 0)
        continue;
    }

    // If the segment reference frame feature is enabled, then do nothing if
    // the current ref frame is not allowed.
    if (segfeature_active(seg, mi->segment_id, SEG_LVL_REF_FRAME) &&
        get_segdata(seg, mi->segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame)
      continue;

    if (flag_svc_subpel && ref_frame == inter_layer_ref) {
      force_mv_inter_layer = 1;
      // Only test the mode if NEARESTMV/NEARMV is (svc_mv_col, svc_mv_row);
      // otherwise set NEWMV to (svc_mv_col, svc_mv_row).
      if (this_mode == NEWMV) {
        frame_mv[this_mode][ref_frame].as_mv.col = svc_mv_col;
        frame_mv[this_mode][ref_frame].as_mv.row = svc_mv_row;
      } else if (frame_mv[this_mode][ref_frame].as_mv.col != svc_mv_col ||
                 frame_mv[this_mode][ref_frame].as_mv.row != svc_mv_row) {
        continue;
      }
    }

    if (comp_pred) {
      if (!cpi->allow_comp_inter_inter) continue;
      // Skip compound inter modes if ARF is not available.
      if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) continue;
      // Do not allow compound prediction if the segment level reference frame
      // feature is in use, as in this case there can only be one reference.
      if (segfeature_active(seg, mi->segment_id, SEG_LVL_REF_FRAME)) continue;
    }

    // For SVC, skip the golden (spatial) reference search if the sse of
    // zeromv_last is below threshold.
    if (cpi->use_svc && ref_frame == GOLDEN_FRAME &&
        sse_zeromv_normalized < thresh_svc_skip_golden)
      continue;

    if (!(cpi->ref_frame_flags & flag_list[ref_frame])) continue;

    if (sf->short_circuit_flat_blocks && x->source_variance == 0 &&
        (frame_mv[this_mode][ref_frame].as_int != 0 ||
         (cpi->oxcf.content == VP9E_CONTENT_SCREEN && !svc->spatial_layer_id &&
          !x->zero_temp_sad_source))) {
      continue;
    }

    if (!(cpi->sf.inter_mode_mask[bsize] & (1 << this_mode))) continue;

    if (cpi->oxcf.lag_in_frames > 0 && cpi->oxcf.rc_mode == VPX_VBR) {
      if (cpi->rc.is_src_frame_alt_ref &&
          (ref_frame != ALTREF_FRAME ||
           frame_mv[this_mode][ref_frame].as_int != 0))
        continue;

      if (!cm->show_frame && ref_frame == ALTREF_FRAME &&
          frame_mv[this_mode][ref_frame].as_int != 0)
        continue;

      if (cpi->rc.alt_ref_gf_group && cm->show_frame &&
          cpi->rc.frames_since_golden > (cpi->rc.baseline_gf_interval >> 1) &&
          ref_frame == GOLDEN_FRAME &&
          frame_mv[this_mode][ref_frame].as_int != 0)
        continue;

      if (cpi->rc.alt_ref_gf_group && cm->show_frame &&
          cpi->rc.frames_since_golden > 0 &&
          cpi->rc.frames_since_golden < (cpi->rc.baseline_gf_interval >> 1) &&
          ref_frame == ALTREF_FRAME &&
          frame_mv[this_mode][ref_frame].as_int != 0)
        continue;
    }

    if (const_motion[ref_frame] && this_mode == NEARMV) continue;

    // Skip the non-zeromv mode search for the golden frame if
    // force_skip_low_temp_var is set. If nearestmv for the golden frame is 0,
    // zeromv mode will be skipped later.
    if (!force_mv_inter_layer && force_skip_low_temp_var &&
        ref_frame == GOLDEN_FRAME &&
        frame_mv[this_mode][ref_frame].as_int != 0) {
      continue;
    }

    if (x->content_state_sb != kVeryHighSad &&
        (cpi->sf.short_circuit_low_temp_var >= 2 ||
         (cpi->sf.short_circuit_low_temp_var == 1 && bsize == BLOCK_64X64)) &&
        force_skip_low_temp_var && ref_frame == LAST_FRAME &&
        this_mode == NEWMV) {
      continue;
    }

    if (cpi->use_svc) {
      if (!force_mv_inter_layer && svc_force_zero_mode[ref_frame - 1] &&
          frame_mv[this_mode][ref_frame].as_int != 0)
        continue;
    }

    // Disable this drop-out case if the ref frame segment level feature is
    // enabled for this segment. This is to prevent the possibility that we
    // end up unable to pick any mode.
    if (!segfeature_active(seg, mi->segment_id, SEG_LVL_REF_FRAME)) {
      if (sf->reference_masking &&
          !(frame_mv[this_mode][ref_frame].as_int == 0 &&
            ref_frame == LAST_FRAME)) {
        if (usable_ref_frame < ALTREF_FRAME) {
          if (!force_skip_low_temp_var && usable_ref_frame > LAST_FRAME) {
            i = (ref_frame == LAST_FRAME) ? GOLDEN_FRAME : LAST_FRAME;
            if ((cpi->ref_frame_flags & flag_list[i]))
              if (x->pred_mv_sad[ref_frame] > (x->pred_mv_sad[i] << 1))
                ref_frame_skip_mask |= (1 << ref_frame);
          }
        } else if (!cpi->rc.is_src_frame_alt_ref &&
                   !(frame_mv[this_mode][ref_frame].as_int == 0 &&
                     ref_frame == ALTREF_FRAME)) {
          int ref1 = (ref_frame == GOLDEN_FRAME) ? LAST_FRAME : GOLDEN_FRAME;
          int ref2 = (ref_frame == ALTREF_FRAME) ? LAST_FRAME : ALTREF_FRAME;
          if (((cpi->ref_frame_flags & flag_list[ref1]) &&
               (x->pred_mv_sad[ref_frame] > (x->pred_mv_sad[ref1] << 1))) ||
              ((cpi->ref_frame_flags & flag_list[ref2]) &&
               (x->pred_mv_sad[ref_frame] > (x->pred_mv_sad[ref2] << 1))))
            ref_frame_skip_mask |= (1 << ref_frame);
        }
      }
      if (ref_frame_skip_mask & (1 << ref_frame)) continue;
    }

    // Select the prediction reference frames.
    for (i = 0; i < MAX_MB_PLANE; i++) {
      xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
      if (comp_pred) xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
    }

    mi->ref_frame[0] = ref_frame;
    mi->ref_frame[1] = second_ref_frame;
    set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);

    mode_index = mode_idx[ref_frame][INTER_OFFSET(this_mode)];
    mode_rd_thresh = best_pickmode.best_mode_skip_txfm
                         ? rd_threshes[mode_index] << 1
                         : rd_threshes[mode_index];

    // Increase the mode_rd_thresh value for GOLDEN_FRAME for improved
    // encoding speed with little or no subjective quality loss.
    if (cpi->sf.bias_golden && ref_frame == GOLDEN_FRAME &&
        cpi->rc.frames_since_golden > 4)
      mode_rd_thresh = mode_rd_thresh << 3;

    if ((cpi->sf.adaptive_rd_thresh_row_mt &&
         rd_less_than_thresh_row_mt(best_rdc.rdcost, mode_rd_thresh,
                                    &rd_thresh_freq_fact[mode_index])) ||
        (!cpi->sf.adaptive_rd_thresh_row_mt &&
         rd_less_than_thresh(best_rdc.rdcost, mode_rd_thresh,
                             &rd_thresh_freq_fact[mode_index])))
      if (frame_mv[this_mode][ref_frame].as_int != 0) continue;

    if (this_mode == NEWMV && !force_mv_inter_layer) {
      if (search_new_mv(cpi, x, frame_mv, ref_frame, gf_temporal_ref, bsize,
                        mi_row, mi_col, best_pred_sad, &rate_mv,
                        best_sse_sofar, &best_rdc))
        continue;
    }

    // TODO(jianj): Skipping the testing of (duplicate) non-zero motion
    // vectors causes some regression; leave it as duplicate zero-mv only for
    // now, until the regression issue is resolved.
    for (inter_mv_mode = NEARESTMV; inter_mv_mode <= NEWMV; inter_mv_mode++) {
      if (inter_mv_mode == this_mode || comp_pred) continue;
      if (mode_checked[inter_mv_mode][ref_frame] &&
          frame_mv[this_mode][ref_frame].as_int ==
              frame_mv[inter_mv_mode][ref_frame].as_int &&
          frame_mv[inter_mv_mode][ref_frame].as_int == 0) {
        skip_this_mv = 1;
        break;
      }
    }

    if (skip_this_mv) continue;
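    /* Illustrative example for the duplicate check above: if NEARESTMV/LAST
     * was already evaluated with a (0,0) vector, a later ZEROMV/LAST
     * candidate would predict from exactly the same pixels, so it is skipped
     * via skip_this_mv.  Duplicate *non-zero* vectors are intentionally still
     * re-evaluated (see the TODO above).
     */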
    // If use_golden_nonzeromv is false, NEWMV mode is skipped for golden, so
    // there is no need to compute best_pred_sad, which is only used to skip
    // golden NEWMV.
    if (use_golden_nonzeromv && this_mode == NEWMV && ref_frame == LAST_FRAME &&
        frame_mv[NEWMV][LAST_FRAME].as_int != INVALID_MV) {
      const int pre_stride = xd->plane[0].pre[0].stride;
      const uint8_t *const pre_buf =
          xd->plane[0].pre[0].buf +
          (frame_mv[NEWMV][LAST_FRAME].as_mv.row >> 3) * pre_stride +
          (frame_mv[NEWMV][LAST_FRAME].as_mv.col >> 3);
      best_pred_sad = cpi->fn_ptr[bsize].sdf(
          x->plane[0].src.buf, x->plane[0].src.stride, pre_buf, pre_stride);
      x->pred_mv_sad[LAST_FRAME] = best_pred_sad;
    }

    if (this_mode != NEARESTMV && !comp_pred &&
        frame_mv[this_mode][ref_frame].as_int ==
            frame_mv[NEARESTMV][ref_frame].as_int)
      continue;

    mi->mode = this_mode;
    mi->mv[0].as_int = frame_mv[this_mode][ref_frame].as_int;
    mi->mv[1].as_int = 0;

    // Search for the best prediction filter type, when the resulting motion
    // vector is at sub-pixel accuracy for the luma component, i.e., at least
    // one of the last three bits is nonzero.
    if (reuse_inter_pred) {
      if (!this_mode_pred) {
        this_mode_pred = &tmp[3];
      } else {
        this_mode_pred = &tmp[get_pred_buffer(tmp, 3)];
        pd->dst.buf = this_mode_pred->data;
        pd->dst.stride = bw;
      }
    }

    if ((this_mode == NEWMV || filter_ref == SWITCHABLE) &&
        pred_filter_search &&
        (ref_frame == LAST_FRAME ||
         (ref_frame == GOLDEN_FRAME && !force_mv_inter_layer &&
          (cpi->use_svc || cpi->oxcf.rc_mode == VPX_VBR))) &&
        (((mi->mv[0].as_mv.row | mi->mv[0].as_mv.col) & 0x07) != 0)) {
      rd_computed = 1;
      search_filter_ref(cpi, x, &this_rdc, mi_row, mi_col, tmp, bsize,
                        reuse_inter_pred, &this_mode_pred, &var_y, &sse_y);
    } else {
      // For low-motion content use x->sb_is_skin in addition to kVeryHighSad
      // for setting large_block.
      const int large_block =
          (x->content_state_sb == kVeryHighSad ||
           (x->sb_is_skin && cpi->rc.avg_frame_low_motion > 70) ||
           cpi->oxcf.speed < 7)
              ? bsize > BLOCK_32X32
              : bsize >= BLOCK_32X32;
      mi->interp_filter = (filter_ref == SWITCHABLE) ? EIGHTTAP : filter_ref;

      if (cpi->use_svc && ref_frame == GOLDEN_FRAME &&
          svc_force_zero_mode[ref_frame - 1])
        mi->interp_filter = filter_gf_svc;

      vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);

      // For large partition blocks, extra testing is done.
      if (cpi->oxcf.rc_mode == VPX_CBR && large_block &&
          !cyclic_refresh_segment_id_boosted(xd->mi[0]->segment_id) &&
          cm->base_qindex) {
        model_rd_for_sb_y_large(cpi, bsize, x, xd, &this_rdc.rate,
                                &this_rdc.dist, &var_y, &sse_y, mi_row, mi_col,
                                &this_early_term, flag_preduv_computed);
      } else {
        rd_computed = 1;
        model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc.rate, &this_rdc.dist,
                          &var_y, &sse_y);
      }
      // Save the normalized sse (between the current and last frame) for
      // (0, 0) motion.
      if (cpi->use_svc && ref_frame == LAST_FRAME &&
          frame_mv[this_mode][ref_frame].as_int == 0) {
        sse_zeromv_normalized =
            sse_y >> (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);
      }
      if (sse_y < best_sse_sofar) best_sse_sofar = sse_y;
    }
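    /* Worked example for the normalization above (illustrative): the shift
     * b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize] divides sse_y
     * by the number of 4x4 sub-blocks, i.e. it yields roughly 16x the
     * per-pixel SSE.  For BLOCK_32X32 the shift is 3 + 3 = 6 (64 sub-blocks),
     * so sse_y = 32000 gives sse_zeromv_normalized = 500, which sits exactly
     * at the default thresh_svc_skip_golden and would just fail the "<" skip
     * test.
     */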
    if (!this_early_term) {
      this_sse = (int64_t)sse_y;
      block_yrd(cpi, x, &this_rdc, &is_skippable, &this_sse, bsize,
                VPXMIN(mi->tx_size, TX_16X16), rd_computed);

      x->skip_txfm[0] = is_skippable;
      if (is_skippable) {
        this_rdc.rate = vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
      } else {
        if (RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist) <
            RDCOST(x->rdmult, x->rddiv, 0, this_sse)) {
          this_rdc.rate += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0);
        } else {
          this_rdc.rate = vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
          this_rdc.dist = this_sse;
          x->skip_txfm[0] = SKIP_TXFM_AC_DC;
        }
      }

      if (cm->interp_filter == SWITCHABLE) {
        if ((mi->mv[0].as_mv.row | mi->mv[0].as_mv.col) & 0x07)
          this_rdc.rate += vp9_get_switchable_rate(cpi, xd);
      }
    } else {
      this_rdc.rate += cm->interp_filter == SWITCHABLE
                           ? vp9_get_switchable_rate(cpi, xd)
                           : 0;
      this_rdc.rate += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
    }

    if (!this_early_term &&
        (x->color_sensitivity[0] || x->color_sensitivity[1])) {
      RD_COST rdc_uv;
      const BLOCK_SIZE uv_bsize = get_plane_block_size(bsize, &xd->plane[1]);
      if (x->color_sensitivity[0] && !flag_preduv_computed[0]) {
        vp9_build_inter_predictors_sbp(xd, mi_row, mi_col, bsize, 1);
        flag_preduv_computed[0] = 1;
      }
      if (x->color_sensitivity[1] && !flag_preduv_computed[1]) {
        vp9_build_inter_predictors_sbp(xd, mi_row, mi_col, bsize, 2);
        flag_preduv_computed[1] = 1;
      }
      model_rd_for_sb_uv(cpi, uv_bsize, x, xd, &rdc_uv, &var_y, &sse_y, 1, 2);
      this_rdc.rate += rdc_uv.rate;
      this_rdc.dist += rdc_uv.dist;
    }

    this_rdc.rate += rate_mv;
    this_rdc.rate += cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref_frame]]
                                         [INTER_OFFSET(this_mode)];
    // TODO(marpan): Add costing for compound mode.
    this_rdc.rate += ref_frame_cost[ref_frame];
    this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);

    // Bias against NEWMV that is very different from its neighbors, and bias
    // toward small motion on the last reference for noisy input.
    if (cpi->oxcf.rc_mode == VPX_CBR && cpi->oxcf.speed >= 5 &&
        cpi->oxcf.content != VP9E_CONTENT_SCREEN) {
      vp9_NEWMV_diff_bias(&cpi->noise_estimate, xd, this_mode, &this_rdc, bsize,
                          frame_mv[this_mode][ref_frame].as_mv.row,
                          frame_mv[this_mode][ref_frame].as_mv.col,
                          ref_frame == LAST_FRAME, x->lowvar_highsumdiff,
                          x->sb_is_skin);
    }

    // Skip check: test whether this block can be reconstructed by prediction
    // only.
    if (cpi->allow_encode_breakout && !xd->lossless && !scene_change_detected) {
      encode_breakout_test(cpi, x, bsize, mi_row, mi_col, ref_frame, this_mode,
                           var_y, sse_y, yv12_mb, &this_rdc.rate,
                           &this_rdc.dist, flag_preduv_computed);
      if (x->skip) {
        this_rdc.rate += rate_mv;
        this_rdc.rdcost =
            RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
      }
    }

#if CONFIG_VP9_TEMPORAL_DENOISING
    if (cpi->oxcf.noise_sensitivity > 0 && denoise_svc_pickmode &&
        cpi->denoiser.denoising_level > kDenLowLow) {
      vp9_denoiser_update_frame_stats(mi, sse_y, this_mode, ctx);
      // Keep track of the zero_last cost.
      if (ref_frame == LAST_FRAME && frame_mv[this_mode][ref_frame].as_int == 0)
        zero_last_cost_orig = this_rdc.rdcost;
    }
#else
    (void)ctx;
#endif

    mode_checked[this_mode][ref_frame] = 1;

    if (this_rdc.rdcost < best_rdc.rdcost || x->skip) {
      best_rdc = this_rdc;
      best_early_term = this_early_term;
      best_pickmode.best_mode = this_mode;
      best_pickmode.best_pred_filter = mi->interp_filter;
      best_pickmode.best_tx_size = mi->tx_size;
      best_pickmode.best_ref_frame = ref_frame;
      best_pickmode.best_mode_skip_txfm = x->skip_txfm[0];
      best_pickmode.best_second_ref_frame = second_ref_frame;

      if (reuse_inter_pred) {
        free_pred_buffer(best_pickmode.best_pred);
        best_pickmode.best_pred = this_mode_pred;
      }
    } else {
      if (reuse_inter_pred) free_pred_buffer(this_mode_pred);
    }

    if (x->skip) break;

    // If the early termination flag is set and at least 2 modes have been
    // checked, terminate the mode search.
    if (best_early_term && idx > 0 && !scene_change_detected) {
      x->skip = 1;
      break;
    }
  }

  mi->mode = best_pickmode.best_mode;
  mi->interp_filter = best_pickmode.best_pred_filter;
  mi->tx_size = best_pickmode.best_tx_size;
  mi->ref_frame[0] = best_pickmode.best_ref_frame;
  mi->mv[0].as_int =
      frame_mv[best_pickmode.best_mode][best_pickmode.best_ref_frame].as_int;
  xd->mi[0]->bmi[0].as_mv[0].as_int = mi->mv[0].as_int;
  x->skip_txfm[0] = best_pickmode.best_mode_skip_txfm;
  mi->ref_frame[1] = best_pickmode.best_second_ref_frame;

  // For a spatial enhancement layer: perform intra prediction only if the
  // base layer is chosen as the reference. Always perform intra prediction
  // if LAST is the only reference, or is_key_frame is set, or on the base
  // temporal layer.
  if (svc->spatial_layer_id && !gf_temporal_ref) {
    perform_intra_pred =
        svc->temporal_layer_id == 0 ||
        svc->layer_context[svc->temporal_layer_id].is_key_frame ||
        !(cpi->ref_frame_flags & flag_list[GOLDEN_FRAME]) ||
        (!svc->layer_context[svc->temporal_layer_id].is_key_frame &&
         svc_force_zero_mode[best_pickmode.best_ref_frame - 1]);
    inter_mode_thresh = (inter_mode_thresh << 1) + inter_mode_thresh;
  }
  if ((cpi->oxcf.lag_in_frames > 0 && cpi->oxcf.rc_mode == VPX_VBR &&
       cpi->rc.is_src_frame_alt_ref) ||
      svc->previous_frame_is_intra_only)
    perform_intra_pred = 0;

  // If the segment reference frame feature is enabled and set then
  // skip the intra prediction.
  if (segfeature_active(seg, mi->segment_id, SEG_LVL_REF_FRAME) &&
      get_segdata(seg, mi->segment_id, SEG_LVL_REF_FRAME) > 0)
    perform_intra_pred = 0;
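  /* Note on the gate below (editorial): inter_mode_thresh was initialized as
   * RDCOST(x->rdmult, x->rddiv, intra_cost_penalty, 0), i.e. the RD cost of
   * spending just the intra penalty bits with zero distortion, and it is
   * tripled above for SVC enhancement layers.  Intra modes are only tried
   * when no inter mode was found (best_rdc.rdcost == INT64_MAX), on a
   * detected scene change, or when the best inter cost is still above this
   * threshold for a block size within sf->max_intra_bsize.
   */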
  // Perform the intra prediction search, if the best SAD is above a certain
  // threshold.
  if (best_rdc.rdcost == INT64_MAX ||
      (scene_change_detected && perform_intra_pred) ||
      ((!force_skip_low_temp_var || bsize < BLOCK_32X32 ||
        x->content_state_sb == kVeryHighSad) &&
       perform_intra_pred && !x->skip && best_rdc.rdcost > inter_mode_thresh &&
       bsize <= cpi->sf.max_intra_bsize && !x->skip_low_source_sad &&
       !x->lowvar_highsumdiff)) {
    struct estimate_block_intra_args args = { cpi, x, DC_PRED, 1, 0 };
    int i;
    PRED_BUFFER *const best_pred = best_pickmode.best_pred;
    TX_SIZE intra_tx_size =
        VPXMIN(max_txsize_lookup[bsize],
               tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
    if (cpi->oxcf.content != VP9E_CONTENT_SCREEN && intra_tx_size > TX_16X16)
      intra_tx_size = TX_16X16;

    if (reuse_inter_pred && best_pred != NULL) {
      if (best_pred->data == orig_dst.buf) {
        this_mode_pred = &tmp[get_pred_buffer(tmp, 3)];
#if CONFIG_VP9_HIGHBITDEPTH
        if (cm->use_highbitdepth)
          vpx_highbd_convolve_copy(
              CONVERT_TO_SHORTPTR(best_pred->data), best_pred->stride,
              CONVERT_TO_SHORTPTR(this_mode_pred->data), this_mode_pred->stride,
              NULL, 0, 0, 0, 0, bw, bh, xd->bd);
        else
          vpx_convolve_copy(best_pred->data, best_pred->stride,
                            this_mode_pred->data, this_mode_pred->stride, NULL,
                            0, 0, 0, 0, bw, bh);
#else
        vpx_convolve_copy(best_pred->data, best_pred->stride,
                          this_mode_pred->data, this_mode_pred->stride, NULL, 0,
                          0, 0, 0, bw, bh);
#endif  // CONFIG_VP9_HIGHBITDEPTH
        best_pickmode.best_pred = this_mode_pred;
      }
    }
    pd->dst = orig_dst;

    for (i = 0; i < 4; ++i) {
      const PREDICTION_MODE this_mode = intra_mode_list[i];
      THR_MODES mode_index = mode_idx[INTRA_FRAME][mode_offset(this_mode)];
      int mode_rd_thresh = rd_threshes[mode_index];
      if (sf->short_circuit_flat_blocks && x->source_variance == 0 &&
          this_mode != DC_PRED) {
        continue;
      }

      if (!((1 << this_mode) & cpi->sf.intra_y_mode_bsize_mask[bsize]))
        continue;

      if ((cpi->sf.adaptive_rd_thresh_row_mt &&
           rd_less_than_thresh_row_mt(best_rdc.rdcost, mode_rd_thresh,
                                      &rd_thresh_freq_fact[mode_index])) ||
          (!cpi->sf.adaptive_rd_thresh_row_mt &&
           rd_less_than_thresh(best_rdc.rdcost, mode_rd_thresh,
                               &rd_thresh_freq_fact[mode_index])))
        continue;

      mi->mode = this_mode;
      mi->ref_frame[0] = INTRA_FRAME;
      this_rdc.dist = this_rdc.rate = 0;
      args.mode = this_mode;
      args.skippable = 1;
      args.rdc = &this_rdc;
      mi->tx_size = intra_tx_size;
      vp9_foreach_transformed_block_in_plane(xd, bsize, 0, estimate_block_intra,
                                             &args);
      // Check the skip cost here since skippable is not set for uv; this
      // mirrors the behavior used for inter modes.
      if (args.skippable) {
        x->skip_txfm[0] = SKIP_TXFM_AC_DC;
        this_rdc.rate = vp9_cost_bit(vp9_get_skip_prob(&cpi->common, xd), 1);
      } else {
        x->skip_txfm[0] = SKIP_TXFM_NONE;
        this_rdc.rate += vp9_cost_bit(vp9_get_skip_prob(&cpi->common, xd), 0);
      }
      // Inter and intra RD will mismatch in scale for non-screen content.
      if (cpi->oxcf.content == VP9E_CONTENT_SCREEN) {
        if (x->color_sensitivity[0])
          vp9_foreach_transformed_block_in_plane(xd, bsize, 1,
                                                 estimate_block_intra, &args);
        if (x->color_sensitivity[1])
          vp9_foreach_transformed_block_in_plane(xd, bsize, 2,
                                                 estimate_block_intra, &args);
      }
      this_rdc.rate += cpi->mbmode_cost[this_mode];
      this_rdc.rate += ref_frame_cost[INTRA_FRAME];
      this_rdc.rate += intra_cost_penalty;
      this_rdc.rdcost =
          RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);

      if (this_rdc.rdcost < best_rdc.rdcost) {
        best_rdc = this_rdc;
        best_pickmode.best_mode = this_mode;
        best_pickmode.best_intra_tx_size = mi->tx_size;
        best_pickmode.best_ref_frame = INTRA_FRAME;
        best_pickmode.best_second_ref_frame = NONE;
        mi->uv_mode = this_mode;
        mi->mv[0].as_int = INVALID_MV;
        mi->mv[1].as_int = INVALID_MV;
        best_pickmode.best_mode_skip_txfm = x->skip_txfm[0];
      }
    }

    // Reset mb_mode_info to the best inter mode.
    if (best_pickmode.best_ref_frame != INTRA_FRAME) {
      mi->tx_size = best_pickmode.best_tx_size;
    } else {
      mi->tx_size = best_pickmode.best_intra_tx_size;
    }
  }

  pd->dst = orig_dst;
  mi->mode = best_pickmode.best_mode;
  mi->ref_frame[0] = best_pickmode.best_ref_frame;
  mi->ref_frame[1] = best_pickmode.best_second_ref_frame;
  x->skip_txfm[0] = best_pickmode.best_mode_skip_txfm;

  if (!is_inter_block(mi)) {
    mi->interp_filter = SWITCHABLE_FILTERS;
  }

  if (reuse_inter_pred && best_pickmode.best_pred != NULL) {
    PRED_BUFFER *const best_pred = best_pickmode.best_pred;
    if (best_pred->data != orig_dst.buf && is_inter_mode(mi->mode)) {
#if CONFIG_VP9_HIGHBITDEPTH
      if (cm->use_highbitdepth)
        vpx_highbd_convolve_copy(
            CONVERT_TO_SHORTPTR(best_pred->data), best_pred->stride,
            CONVERT_TO_SHORTPTR(pd->dst.buf), pd->dst.stride, NULL, 0, 0, 0, 0,
            bw, bh, xd->bd);
      else
        vpx_convolve_copy(best_pred->data, best_pred->stride, pd->dst.buf,
                          pd->dst.stride, NULL, 0, 0, 0, 0, bw, bh);
#else
      vpx_convolve_copy(best_pred->data, best_pred->stride, pd->dst.buf,
                        pd->dst.stride, NULL, 0, 0, 0, 0, bw, bh);
#endif  // CONFIG_VP9_HIGHBITDEPTH
    }
  }

#if CONFIG_VP9_TEMPORAL_DENOISING
  if (cpi->oxcf.noise_sensitivity > 0 && cpi->resize_pending == 0 &&
      denoise_svc_pickmode && cpi->denoiser.denoising_level > kDenLowLow &&
      cpi->denoiser.reset == 0) {
    VP9_DENOISER_DECISION decision = COPY_BLOCK;
    ctx->sb_skip_denoising = 0;
    // TODO(marpan): There is an issue with denoising when the
    // superblock partitioning scheme is based on the pickmode.
    // Remove this condition when the issue is resolved.
    if (x->sb_pickmode_part) ctx->sb_skip_denoising = 1;
    vp9_pickmode_ctx_den_update(&ctx_den, zero_last_cost_orig, ref_frame_cost,
                                frame_mv, reuse_inter_pred, &best_pickmode);
    vp9_denoiser_denoise(cpi, x, mi_row, mi_col, bsize, ctx, &decision,
                         gf_temporal_ref);
    recheck_zeromv_after_denoising(cpi, mi, x, xd, decision, &ctx_den, yv12_mb,
                                   &best_rdc, bsize, mi_row, mi_col);
    best_pickmode.best_ref_frame = ctx_den.best_ref_frame;
  }
#endif

  if (best_pickmode.best_ref_frame == ALTREF_FRAME ||
      best_pickmode.best_second_ref_frame == ALTREF_FRAME)
    x->arf_frame_usage++;
  else if (best_pickmode.best_ref_frame != INTRA_FRAME)
    x->lastgolden_frame_usage++;

  if (cpi->sf.adaptive_rd_thresh) {
    THR_MODES best_mode_idx =
        mode_idx[best_pickmode.best_ref_frame][mode_offset(mi->mode)];

    if (best_pickmode.best_ref_frame == INTRA_FRAME) {
      // Only consider the modes that are included in the intra_mode_list.
      int intra_modes = sizeof(intra_mode_list) / sizeof(PREDICTION_MODE);
      int i;

      // TODO(yunqingwang): Check the intra mode mask and only update
      // freq_fact for those valid modes.
      for (i = 0; i < intra_modes; i++) {
        if (cpi->sf.adaptive_rd_thresh_row_mt)
          update_thresh_freq_fact_row_mt(cpi, tile_data, x->source_variance,
                                         thresh_freq_fact_idx, INTRA_FRAME,
                                         best_mode_idx, intra_mode_list[i]);
        else
          update_thresh_freq_fact(cpi, tile_data, x->source_variance, bsize,
                                  INTRA_FRAME, best_mode_idx,
                                  intra_mode_list[i]);
      }
    } else {
      for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
        PREDICTION_MODE this_mode;
        if (best_pickmode.best_ref_frame != ref_frame) continue;
        for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
          if (cpi->sf.adaptive_rd_thresh_row_mt)
            update_thresh_freq_fact_row_mt(cpi, tile_data, x->source_variance,
                                           thresh_freq_fact_idx, ref_frame,
                                           best_mode_idx, this_mode);
          else
            update_thresh_freq_fact(cpi, tile_data, x->source_variance, bsize,
                                    ref_frame, best_mode_idx, this_mode);
        }
      }
    }
  }

  *rd_cost = best_rdc;
}
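/* Overview (editorial summary, not normative): vp9_pick_inter_mode_sub8x8()
 * is the real-time mode decision for sub-8x8 partitions.  For each allowed
 * reference frame it loops over the 4x8/8x4/4x4 sub-blocks, evaluates
 * NEARESTMV/NEARMV/ZEROMV plus a full-pixel and sub-pixel NEWMV search per
 * sub-block, and keeps the per-sub-block winner by modelled RDCOST(); the
 * reference frame with the lowest summed cost is then written back.
 */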
void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, int mi_row,
                                int mi_col, RD_COST *rd_cost, BLOCK_SIZE bsize,
                                PICK_MODE_CONTEXT *ctx) {
  VP9_COMMON *const cm = &cpi->common;
  SPEED_FEATURES *const sf = &cpi->sf;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mi = xd->mi[0];
  MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
  const struct segmentation *const seg = &cm->seg;
  MV_REFERENCE_FRAME ref_frame, second_ref_frame = NONE;
  MV_REFERENCE_FRAME best_ref_frame = NONE;
  unsigned char segment_id = mi->segment_id;
  struct buf_2d yv12_mb[4][MAX_MB_PLANE];
  static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
                                    VP9_ALT_FLAG };
  int64_t best_rd = INT64_MAX;
  b_mode_info bsi[MAX_REF_FRAMES][4];
  int ref_frame_skip_mask = 0;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  int idx, idy;

  x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
  ctx->pred_pixel_ready = 0;

  for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
    const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
    int_mv dummy_mv[2];
    x->pred_mv_sad[ref_frame] = INT_MAX;

    if ((cpi->ref_frame_flags & flag_list[ref_frame]) && (yv12 != NULL)) {
      int_mv *const candidates = mbmi_ext->ref_mvs[ref_frame];
      const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf;
      vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf,
                           sf);
      vp9_find_mv_refs(cm, xd, xd->mi[0], ref_frame, candidates, mi_row, mi_col,
                       mbmi_ext->mode_context);

      vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
                            &dummy_mv[0], &dummy_mv[1]);
    } else {
      ref_frame_skip_mask |= (1 << ref_frame);
    }
  }

  mi->sb_type = bsize;
  mi->tx_size = TX_4X4;
  mi->uv_mode = DC_PRED;
  mi->ref_frame[0] = LAST_FRAME;
  mi->ref_frame[1] = NONE;
  mi->interp_filter =
      cm->interp_filter == SWITCHABLE ? EIGHTTAP : cm->interp_filter;

  for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
    int64_t this_rd = 0;
    int plane;

    if (ref_frame_skip_mask & (1 << ref_frame)) continue;

#if CONFIG_BETTER_HW_COMPATIBILITY
    if ((bsize == BLOCK_8X4 || bsize == BLOCK_4X8) && ref_frame > INTRA_FRAME &&
        vp9_is_scaled(&cm->frame_refs[ref_frame - 1].sf))
      continue;
#endif

    // TODO(jingning, agrange): Scaling reference frame is not supported for
    // sub8x8 blocks. Is this supported now?
    if (ref_frame > INTRA_FRAME &&
        vp9_is_scaled(&cm->frame_refs[ref_frame - 1].sf))
      continue;

    // If the segment reference frame feature is enabled, then do nothing if
    // the current ref frame is not allowed.
    if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
        get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame)
      continue;

    mi->ref_frame[0] = ref_frame;
    x->skip = 0;
    set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);

    // Select the prediction reference frames.
    for (plane = 0; plane < MAX_MB_PLANE; plane++)
      xd->plane[plane].pre[0] = yv12_mb[ref_frame][plane];

    for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
      for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
        int_mv b_mv[MB_MODE_COUNT];
        int64_t b_best_rd = INT64_MAX;
        const int i = idy * 2 + idx;
        PREDICTION_MODE this_mode;
        RD_COST this_rdc;
        unsigned int var_y, sse_y;

        struct macroblock_plane *p = &x->plane[0];
        struct macroblockd_plane *pd = &xd->plane[0];

        const struct buf_2d orig_src = p->src;
        const struct buf_2d orig_dst = pd->dst;
        struct buf_2d orig_pre[2];
        memcpy(orig_pre, xd->plane[0].pre, sizeof(orig_pre));
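        /* Note on the buffer offsets below (editorial): for a BLOCK_8X8
         * parent, the sub-block index i = idy * 2 + idx enumerates the four
         * 4x4 positions, and vp9_raster_block_offset() resolves to
         * 4 * (i & 1) pixels horizontally plus 4 * (i >> 1) rows vertically.
         * E.g. i = 3 addresses the pixel at (x, y) = (4, 4) within the 8x8
         * block: offset = 4 * stride + 4.
         */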
        // Set the buffer pointers for the sub8x8 motion search.
        p->src.buf =
            &p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
        pd->dst.buf =
            &pd->dst.buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->dst.stride)];
        pd->pre[0].buf =
            &pd->pre[0]
                 .buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->pre[0].stride)];

        b_mv[ZEROMV].as_int = 0;
        b_mv[NEWMV].as_int = INVALID_MV;
        vp9_append_sub8x8_mvs_for_idx(cm, xd, i, 0, mi_row, mi_col,
                                      &b_mv[NEARESTMV], &b_mv[NEARMV],
                                      mbmi_ext->mode_context);

        for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
          int b_rate = 0;
          xd->mi[0]->bmi[i].as_mv[0].as_int = b_mv[this_mode].as_int;

          if (this_mode == NEWMV) {
            const int step_param = cpi->sf.mv.fullpel_search_step_param;
            MV mvp_full;
            MV tmp_mv;
            int cost_list[5];
            const MvLimits tmp_mv_limits = x->mv_limits;
            uint32_t dummy_dist;

            if (i == 0) {
              mvp_full.row = b_mv[NEARESTMV].as_mv.row >> 3;
              mvp_full.col = b_mv[NEARESTMV].as_mv.col >> 3;
            } else {
              mvp_full.row = xd->mi[0]->bmi[0].as_mv[0].as_mv.row >> 3;
              mvp_full.col = xd->mi[0]->bmi[0].as_mv[0].as_mv.col >> 3;
            }

            vp9_set_mv_search_range(&x->mv_limits,
                                    &mbmi_ext->ref_mvs[ref_frame][0].as_mv);

            vp9_full_pixel_search(
                cpi, x, bsize, &mvp_full, step_param, cpi->sf.mv.search_method,
                x->sadperbit4, cond_cost_list(cpi, cost_list),
                &mbmi_ext->ref_mvs[ref_frame][0].as_mv, &tmp_mv, INT_MAX, 0);

            x->mv_limits = tmp_mv_limits;

            // Calculate the bit cost of the motion vector.
            mvp_full.row = tmp_mv.row * 8;
            mvp_full.col = tmp_mv.col * 8;

            b_rate += vp9_mv_bit_cost(
                &mvp_full, &mbmi_ext->ref_mvs[ref_frame][0].as_mv,
                x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);

            b_rate += cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref_frame]]
                                          [INTER_OFFSET(NEWMV)];
            if (RDCOST(x->rdmult, x->rddiv, b_rate, 0) > b_best_rd) continue;

            cpi->find_fractional_mv_step(
                x, &tmp_mv, &mbmi_ext->ref_mvs[ref_frame][0].as_mv,
                cpi->common.allow_high_precision_mv, x->errorperbit,
                &cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
                cpi->sf.mv.subpel_search_level, cond_cost_list(cpi, cost_list),
                x->nmvjointcost, x->mvcost, &dummy_dist,
                &x->pred_sse[ref_frame], NULL, 0, 0,
                cpi->sf.use_accurate_subpel_search);

            xd->mi[0]->bmi[i].as_mv[0].as_mv = tmp_mv;
          } else {
            b_rate += cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref_frame]]
                                          [INTER_OFFSET(this_mode)];
          }

#if CONFIG_VP9_HIGHBITDEPTH
          if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
            vp9_highbd_build_inter_predictor(
                CONVERT_TO_SHORTPTR(pd->pre[0].buf), pd->pre[0].stride,
                CONVERT_TO_SHORTPTR(pd->dst.buf), pd->dst.stride,
                &xd->mi[0]->bmi[i].as_mv[0].as_mv, &xd->block_refs[0]->sf,
                4 * num_4x4_blocks_wide, 4 * num_4x4_blocks_high, 0,
                vp9_filter_kernels[mi->interp_filter], MV_PRECISION_Q3,
                mi_col * MI_SIZE + 4 * (i & 0x01),
                mi_row * MI_SIZE + 4 * (i >> 1), xd->bd);
          } else {
#endif
            vp9_build_inter_predictor(
                pd->pre[0].buf, pd->pre[0].stride, pd->dst.buf, pd->dst.stride,
                &xd->mi[0]->bmi[i].as_mv[0].as_mv, &xd->block_refs[0]->sf,
                4 * num_4x4_blocks_wide, 4 * num_4x4_blocks_high, 0,
                vp9_filter_kernels[mi->interp_filter], MV_PRECISION_Q3,
                mi_col * MI_SIZE + 4 * (i & 0x01),
                mi_row * MI_SIZE + 4 * (i >> 1));

#if CONFIG_VP9_HIGHBITDEPTH
          }
#endif
          model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc.rate, &this_rdc.dist,
                            &var_y, &sse_y);

          this_rdc.rate += b_rate;
          this_rdc.rdcost =
              RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
          if (this_rdc.rdcost < b_best_rd) {
            b_best_rd = this_rdc.rdcost;
            bsi[ref_frame][i].as_mode = this_mode;
            bsi[ref_frame][i].as_mv[0].as_mv = xd->mi[0]->bmi[i].as_mv[0].as_mv;
          }
        }  // mode search

        // Restore the source and prediction buffer pointers.
        p->src = orig_src;
        pd->pre[0] = orig_pre[0];
        pd->dst = orig_dst;
        this_rd += b_best_rd;

        xd->mi[0]->bmi[i] = bsi[ref_frame][i];
        if (num_4x4_blocks_wide > 1) xd->mi[0]->bmi[i + 1] = xd->mi[0]->bmi[i];
        if (num_4x4_blocks_high > 1) xd->mi[0]->bmi[i + 2] = xd->mi[0]->bmi[i];
      }
    }  // loop through sub8x8 blocks

    if (this_rd < best_rd) {
      best_rd = this_rd;
      best_ref_frame = ref_frame;
    }
  }  // reference frames

  mi->tx_size = TX_4X4;
  mi->ref_frame[0] = best_ref_frame;
  for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
    for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
      const int block = idy * 2 + idx;
      xd->mi[0]->bmi[block] = bsi[best_ref_frame][block];
      if (num_4x4_blocks_wide > 1)
        xd->mi[0]->bmi[block + 1] = bsi[best_ref_frame][block];
      if (num_4x4_blocks_high > 1)
        xd->mi[0]->bmi[block + 2] = bsi[best_ref_frame][block];
    }
  }
  mi->mode = xd->mi[0]->bmi[3].as_mode;
  ctx->mic = *(xd->mi[0]);
  ctx->mbmi_ext = *x->mbmi_ext;
  ctx->skip_txfm[0] = SKIP_TXFM_NONE;
  ctx->skip = 0;
  // Dummy assignment for speed -5. No effect in speed -6.
  rd_cost->rdcost = best_rd;
}