/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "vpx_config.h"
#include "vp8_rtcd.h"
#include "./vpx_dsp_rtcd.h"
#include "bitstream.h"
#include "encodemb.h"
#include "encodemv.h"
#if CONFIG_MULTITHREAD
#include "ethreading.h"
#endif
#include "vp8/common/common.h"
#include "onyx_int.h"
#include "vp8/common/extend.h"
#include "vp8/common/entropymode.h"
#include "vp8/common/quant_common.h"
#include "segmentation.h"
#include "vp8/common/setupintrarecon.h"
#include "encodeintra.h"
#include "vp8/common/reconinter.h"
#include "rdopt.h"
#include "pickinter.h"
#include "vp8/common/findnearmv.h"
#include <stdio.h>
#include <limits.h>
#include "vp8/common/invtrans.h"
#include "vpx_ports/vpx_timer.h"
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
#include "bitstream.h"
#endif
#include "encodeframe.h"

extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t);
static void adjust_act_zbin(VP8_COMP *cpi, MACROBLOCK *x);

#ifdef MODE_STATS
unsigned int inter_y_modes[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
unsigned int inter_uv_modes[4] = { 0, 0, 0, 0 };
unsigned int inter_b_modes[15] = {
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
unsigned int y_modes[5] = { 0, 0, 0, 0, 0 };
unsigned int uv_modes[4] = { 0, 0, 0, 0 };
unsigned int b_modes[14] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
#endif

/* activity_avg must be positive, or flat regions could get a zero weight
 * (infinite lambda), which confounds analysis.
 * This also avoids the need for divide by zero checks in
 * vp8_activity_masking().
 */
#define VP8_ACTIVITY_AVG_MIN (64)

/* This is used as a reference when computing the source variance for the
 * purposes of activity masking.
 * Eventually this should be replaced by custom no-reference routines,
 * which will be faster.
 */
static const unsigned char VP8_VAR_OFFS[16] = { 128, 128, 128, 128, 128, 128,
                                                128, 128, 128, 128, 128, 128,
                                                128, 128, 128, 128 };

/* Original activity measure from Tim T's code. */
static unsigned int tt_activity_measure(VP8_COMP *cpi, MACROBLOCK *x) {
  unsigned int act;
  unsigned int sse;
  (void)cpi;
  /* TODO: This could also be done over smaller areas (8x8), but that would
   * require extensive changes elsewhere, as lambda is assumed to be fixed
   * over an entire MB in most of the code.
   * Another option is to compute four 8x8 variances, and pick a single
   * lambda using a non-linear combination (e.g., the smallest, or second
   * smallest, etc.).
   */
  act = vpx_variance16x16(x->src.y_buffer, x->src.y_stride, VP8_VAR_OFFS, 0,
                          &sse);
  act = act << 4;

  /* If the region is flat, lower the activity some more. */
  if (act < 8 << 12) act = act < 5 << 12 ? act : 5 << 12;

  return act;
}
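
#if 0
/* Disabled sketch of the smaller-area variant suggested by the TODO in
 * tt_activity_measure(): take the minimum of four 8x8 variances against
 * the same flat reference, so a single flat quadrant can pull the whole
 * MB's activity down. The << 6 (vs << 4 above) compensates for an 8x8
 * variance summing over a quarter as many pixels. Experimental and
 * unused, like the other #if 0 blocks in this file.
 */
static unsigned int tt_activity_measure_8x8(MACROBLOCK *x) {
  unsigned int act = UINT_MAX;
  unsigned int sse;
  int i, j;

  for (i = 0; i < 16; i += 8) {
    for (j = 0; j < 16; j += 8) {
      const unsigned int v =
          vpx_variance8x8(x->src.y_buffer + i * x->src.y_stride + j,
                          x->src.y_stride, VP8_VAR_OFFS, 0, &sse);
      if (v < act) act = v;
    }
  }

  return act << 6;
}
#endif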

/* Stub for alternative experimental activity measures. */
static unsigned int alt_activity_measure(VP8_COMP *cpi, MACROBLOCK *x,
                                         int use_dc_pred) {
  return vp8_encode_intra(cpi, x, use_dc_pred);
}

/* Measure the activity of the current macroblock.
 * What we measure here is TBD, so it is abstracted into this function.
 */
#define ALT_ACT_MEASURE 1
static unsigned int mb_activity_measure(VP8_COMP *cpi, MACROBLOCK *x,
                                        int mb_row, int mb_col) {
  unsigned int mb_activity;

  if (ALT_ACT_MEASURE) {
    /* True on the first MB row and first MB column, except at (0,0). */
    int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);

    /* Or use an alternative. */
    mb_activity = alt_activity_measure(cpi, x, use_dc_pred);
  } else {
    /* Original activity measure from Tim T's code. */
    mb_activity = tt_activity_measure(cpi, x);
  }

  if (mb_activity < VP8_ACTIVITY_AVG_MIN) mb_activity = VP8_ACTIVITY_AVG_MIN;

  return mb_activity;
}

/* Calculate an "average" MB activity value for the frame. */
#define ACT_MEDIAN 0
static void calc_av_activity(VP8_COMP *cpi, int64_t activity_sum) {
#if ACT_MEDIAN
  /* Find the median: simple n^2 algorithm for experimentation. */
  {
    unsigned int median;
    unsigned int i, j;
    unsigned int *sortlist;
    unsigned int tmp;

    /* Create a list to sort into. */
    CHECK_MEM_ERROR(sortlist,
                    vpx_calloc(sizeof(unsigned int), cpi->common.MBs));

    /* Copy the map to the sort list. */
    memcpy(sortlist, cpi->mb_activity_map,
           sizeof(unsigned int) * cpi->common.MBs);

    /* Ripple each value down to its correct position. */
    for (i = 1; i < cpi->common.MBs; ++i) {
      for (j = i; j > 0; j--) {
        if (sortlist[j] < sortlist[j - 1]) {
          /* Swap values */
          tmp = sortlist[j - 1];
          sortlist[j - 1] = sortlist[j];
          sortlist[j] = tmp;
        } else {
          break;
        }
      }
    }

    /* Even number of MBs, so estimate the median as the mean of the two
     * either side. */
    median = (1 + sortlist[cpi->common.MBs >> 1] +
              sortlist[(cpi->common.MBs >> 1) + 1]) >>
             1;

    cpi->activity_avg = median;

    vpx_free(sortlist);
  }
#else
  /* Simple mean for now. */
  cpi->activity_avg = (unsigned int)(activity_sum / cpi->common.MBs);
#endif

  if (cpi->activity_avg < VP8_ACTIVITY_AVG_MIN) {
    cpi->activity_avg = VP8_ACTIVITY_AVG_MIN;
  }

  /* Experimental code: return a fixed value normalized for several clips. */
  if (ALT_ACT_MEASURE) cpi->activity_avg = 100000;
}

#define USE_ACT_INDEX 0
#define OUTPUT_NORM_ACT_STATS 0
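
/* The normalization used below (and by adjust_act_zbin() at the bottom of
 * this file) maps an activity value to a small signed index via the ratio
 * (4 * act + avg) / (act + 4 * avg), rounded to nearest. For example,
 * act == activity_avg gives 0, act == 4 * activity_avg gives +1, and
 * act == activity_avg / 4 gives -1.
 */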

#if USE_ACT_INDEX
/* Calculate an activity index for each MB. */
static void calc_activity_index(VP8_COMP *cpi, MACROBLOCK *x) {
  VP8_COMMON *const cm = &cpi->common;
  int mb_row, mb_col;

  int64_t act;
  int64_t a;
  int64_t b;

#if OUTPUT_NORM_ACT_STATS
  FILE *f = fopen("norm_act.stt", "a");
  fprintf(f, "\n%12d\n", cpi->activity_avg);
#endif

  /* Reset pointers to the start of the activity map. */
  x->mb_activity_ptr = cpi->mb_activity_map;

  /* Calculate the normalized MB activity number. */
  for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
    /* for each macroblock col in image */
    for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
      /* Read activity from the map */
      act = *(x->mb_activity_ptr);

      /* Calculate a normalized activity number */
      a = act + 4 * cpi->activity_avg;
      b = 4 * act + cpi->activity_avg;

      if (b >= a)
        *(x->activity_ptr) = (int)((b + (a >> 1)) / a) - 1;
      else
        *(x->activity_ptr) = 1 - (int)((a + (b >> 1)) / b);

#if OUTPUT_NORM_ACT_STATS
      fprintf(f, " %6d", *(x->mb_activity_ptr));
#endif
      /* Increment activity map pointers */
      x->mb_activity_ptr++;
    }

#if OUTPUT_NORM_ACT_STATS
    fprintf(f, "\n");
#endif
  }

#if OUTPUT_NORM_ACT_STATS
  fclose(f);
#endif
}
#endif

/* Loop through all MBs. Note the activity of each, the average activity,
 * and calculate a normalized activity for each.
 */
static void build_activity_map(VP8_COMP *cpi) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *xd = &x->e_mbd;
  VP8_COMMON *const cm = &cpi->common;

#if ALT_ACT_MEASURE
  YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
  int recon_yoffset;
  int recon_y_stride = new_yv12->y_stride;
#endif

  int mb_row, mb_col;
  unsigned int mb_activity;
  int64_t activity_sum = 0;

  /* for each macroblock row in image */
  for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
#if ALT_ACT_MEASURE
    /* reset above block coeffs */
    xd->up_available = (mb_row != 0);
    recon_yoffset = (mb_row * recon_y_stride * 16);
#endif
    /* for each macroblock col in image */
    for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
#if ALT_ACT_MEASURE
      xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset;
      xd->left_available = (mb_col != 0);
      recon_yoffset += 16;
#endif
      /* Copy current MB to a buffer */
      vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);

      /* measure activity */
      mb_activity = mb_activity_measure(cpi, x, mb_row, mb_col);

      /* Keep frame sum */
      activity_sum += mb_activity;

      /* Store MB level activity details. */
      *x->mb_activity_ptr = mb_activity;

      /* Increment activity map pointer */
      x->mb_activity_ptr++;

      /* adjust to the next column of source macroblocks */
      x->src.y_buffer += 16;
    }

    /* adjust to the next row of MBs */
    x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;

#if ALT_ACT_MEASURE
    /* extend the recon for intra prediction */
    vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16, xd->dst.u_buffer + 8,
                      xd->dst.v_buffer + 8);
#endif
  }

  /* Calculate an "average" MB activity */
  calc_av_activity(cpi, activity_sum);

#if USE_ACT_INDEX
  /* Calculate an activity index number for each MB */
  calc_activity_index(cpi, x);
#endif
}
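
/* The masking below scales rdmult by (2 * act + avg) / (act + 2 * avg),
 * which lies in (1/2, 2): a MB at exactly average activity is unchanged,
 * a completely flat MB halves rdmult (spending more bits), and a very
 * busy MB approaches double rdmult (spending fewer). errorperbit is then
 * re-derived from the adjusted rdmult.
 */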

/* Macroblock activity masking */
void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x) {
#if USE_ACT_INDEX
  x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2);
  x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
  x->errorperbit += (x->errorperbit == 0);
#else
  int64_t a;
  int64_t b;
  int64_t act = *(x->mb_activity_ptr);

  /* Apply the masking to the RD multiplier. */
  a = act + (2 * cpi->activity_avg);
  b = (2 * act) + cpi->activity_avg;

  x->rdmult = (unsigned int)(((int64_t)x->rdmult * b + (a >> 1)) / a);
  x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
  x->errorperbit += (x->errorperbit == 0);
#endif

  /* Activity based Zbin adjustment */
  adjust_act_zbin(cpi, x);
}
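
/* encode_mb_row() encodes one row of macroblocks. The per-MB clamps below
 * keep every candidate motion vector inside the extended (UMV) border:
 * mb_to_*_edge are distances to the frame edges in 1/8-pel units, while
 * mv_*_min/max bound the full-pel motion search. With the 32-pixel border
 * (VP8BORDERINPIXELS), a MB in row 5 may search up to
 * 5 * 16 + (32 - 16) = 96 pixels above its own position.
 */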

static void encode_mb_row(VP8_COMP *cpi, VP8_COMMON *cm, int mb_row,
                          MACROBLOCK *x, MACROBLOCKD *xd, TOKENEXTRA **tp,
                          int *segment_counts, int *totalrate) {
  int recon_yoffset, recon_uvoffset;
  int mb_col;
  int ref_fb_idx = cm->lst_fb_idx;
  int dst_fb_idx = cm->new_fb_idx;
  int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
  int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
  int map_index = (mb_row * cpi->common.mb_cols);

#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
  const int num_part = (1 << cm->multi_token_partition);
  TOKENEXTRA *tp_start = cpi->tok;
  vp8_writer *w;
#endif

#if CONFIG_MULTITHREAD
  const int nsync = cpi->mt_sync_range;
  vpx_atomic_int rightmost_col = VPX_ATOMIC_INIT(cm->mb_cols + nsync);
  const vpx_atomic_int *last_row_current_mb_col;
  vpx_atomic_int *current_mb_col = &cpi->mt_current_mb_col[mb_row];

  if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) != 0 && mb_row != 0) {
    last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1];
  } else {
    last_row_current_mb_col = &rightmost_col;
  }
#endif

#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
  if (num_part > 1)
    w = &cpi->bc[1 + (mb_row % num_part)];
  else
    w = &cpi->bc[1];
#endif

  /* reset above block coeffs */
  xd->above_context = cm->above_context;

  xd->up_available = (mb_row != 0);
  recon_yoffset = (mb_row * recon_y_stride * 16);
  recon_uvoffset = (mb_row * recon_uv_stride * 8);

  cpi->tplist[mb_row].start = *tp;
  /* printf("Main mb_row = %d\n", mb_row); */

  /* Distance of MB to the top & bottom edges, specified in 1/8th pel
   * units as they are always compared to values that are in 1/8th pel
   * units.
   */
  xd->mb_to_top_edge = -((mb_row * 16) << 3);
  xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;

  /* Set up limit values for vertical motion vector components
   * to prevent them extending beyond the UMV borders.
   */
  x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
  x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16);

  /* Set the MB activity pointer to the start of the row. */
  x->mb_activity_ptr = &cpi->mb_activity_map[map_index];

  /* for each macroblock col in image */
  for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
    *tp = cpi->tok;
#endif
    /* Distance of MB to the left & right edges, specified in
     * 1/8th pel units as they are always compared to values
     * that are in 1/8th pel units.
     */
    xd->mb_to_left_edge = -((mb_col * 16) << 3);
    xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;

    /* Set up limit values for horizontal motion vector components
     * to prevent them extending beyond the UMV borders.
     */
    x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
    x->mv_col_max =
        ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16);

    xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
    xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
    xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
    xd->left_available = (mb_col != 0);

    x->rddiv = cpi->RDDIV;
    x->rdmult = cpi->RDMULT;

    /* Copy current MB to a buffer */
    vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
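
    /* Row-based multithreading: every nsync columns this row publishes
     * its progress through current_mb_col, and before continuing it
     * spins until the row above has advanced far enough that the above
     * context and reconstructed pixels this MB depends on are ready
     * (row 0 has nothing to wait for).
     */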
#if CONFIG_MULTITHREAD
    if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) != 0) {
      if (((mb_col - 1) % nsync) == 0) {
        vpx_atomic_store_release(current_mb_col, mb_col - 1);
      }

      if (mb_row && !(mb_col & (nsync - 1))) {
        vp8_atomic_spin_wait(mb_col, last_row_current_mb_col, nsync);
      }
    }
#endif

    if (cpi->oxcf.tuning == VP8_TUNE_SSIM) vp8_activity_masking(cpi, x);

    /* Is segmentation enabled? */
    /* MB level adjustment to quantizer. */
    if (xd->segmentation_enabled) {
      /* Code to set the segment id in xd->mbmi.segment_id for the
       * current MB (with range checking).
       */
      if (cpi->segmentation_map[map_index + mb_col] <= 3) {
        xd->mode_info_context->mbmi.segment_id =
            cpi->segmentation_map[map_index + mb_col];
      } else {
        xd->mode_info_context->mbmi.segment_id = 0;
      }

      vp8cx_mb_init_quantizer(cpi, x, 1);
    } else {
      /* Set to Segment 0 by default */
      xd->mode_info_context->mbmi.segment_id = 0;
    }

    x->active_ptr = cpi->active_map + map_index + mb_col;

    if (cm->frame_type == KEY_FRAME) {
      *totalrate += vp8cx_encode_intra_macroblock(cpi, x, tp);
#ifdef MODE_STATS
      y_modes[xd->mbmi.mode]++;
#endif
    } else {
      *totalrate += vp8cx_encode_inter_macroblock(
          cpi, x, tp, recon_yoffset, recon_uvoffset, mb_row, mb_col);

#ifdef MODE_STATS
      inter_y_modes[xd->mbmi.mode]++;

      if (xd->mbmi.mode == SPLITMV) {
        int b;

        for (b = 0; b < xd->mbmi.partition_count; ++b) {
          inter_b_modes[x->partition->bmi[b].mode]++;
        }
      }

#endif

      // Keep track of how many (consecutive) times a block is coded
      // as ZEROMV_LASTREF, for base layer frames.
      // Reset to 0 if it's coded as anything else.
      if (cpi->current_layer == 0) {
        if (xd->mode_info_context->mbmi.mode == ZEROMV &&
            xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) {
          // Increment, check for wrap-around.
          if (cpi->consec_zero_last[map_index + mb_col] < 255) {
            cpi->consec_zero_last[map_index + mb_col] += 1;
          }
          if (cpi->consec_zero_last_mvbias[map_index + mb_col] < 255) {
            cpi->consec_zero_last_mvbias[map_index + mb_col] += 1;
          }
        } else {
          cpi->consec_zero_last[map_index + mb_col] = 0;
          cpi->consec_zero_last_mvbias[map_index + mb_col] = 0;
        }
        if (x->zero_last_dot_suppress) {
          cpi->consec_zero_last_mvbias[map_index + mb_col] = 0;
        }
      }

      /* Special case code for cyclic refresh:
       * If cyclic update is enabled then copy xd->mbmi.segment_id (which
       * may have been updated based on mode during
       * vp8cx_encode_inter_macroblock()) back into the global
       * segmentation map.
       */
      if ((cpi->current_layer == 0) &&
          (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)) {
        cpi->segmentation_map[map_index + mb_col] =
            xd->mode_info_context->mbmi.segment_id;

        /* If the block has been refreshed, mark it as clean (the
         * magnitude of the -ve influences how long it will be before
         * we consider another refresh).
         * Else if it was coded (last frame 0,0) and has not already
         * been refreshed, then mark it as a candidate for cleanup
         * next time (marked 0); else mark it as dirty (1).
         */
        if (xd->mode_info_context->mbmi.segment_id) {
          cpi->cyclic_refresh_map[map_index + mb_col] = -1;
        } else if ((xd->mode_info_context->mbmi.mode == ZEROMV) &&
                   (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)) {
          if (cpi->cyclic_refresh_map[map_index + mb_col] == 1) {
            cpi->cyclic_refresh_map[map_index + mb_col] = 0;
          }
        } else {
          cpi->cyclic_refresh_map[map_index + mb_col] = 1;
        }
      }
    }

    cpi->tplist[mb_row].stop = *tp;
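
    /* tplist[mb_row].start/.stop bracket this row's tokens; the
     * multithreaded path of vp8_encode_frame() sums (stop - start) over
     * all rows to recover the frame's total token count.
     */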

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
    /* pack tokens for this MB */
    {
      int tok_count = *tp - tp_start;
      vp8_pack_tokens(w, tp_start, tok_count);
    }
#endif
    /* Increment pointer into gf usage flags structure. */
    x->gf_active_ptr++;

    /* Increment the activity mask pointers. */
    x->mb_activity_ptr++;

    /* adjust to the next column of macroblocks */
    x->src.y_buffer += 16;
    x->src.u_buffer += 8;
    x->src.v_buffer += 8;

    recon_yoffset += 16;
    recon_uvoffset += 8;

    /* Keep track of segment usage */
    segment_counts[xd->mode_info_context->mbmi.segment_id]++;

    /* skip to next mb */
    xd->mode_info_context++;
    x->partition_info++;
    xd->above_context++;
  }

  /* extend the recon for intra prediction */
  vp8_extend_mb_row(&cm->yv12_fb[dst_fb_idx], xd->dst.y_buffer + 16,
                    xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);

#if CONFIG_MULTITHREAD
  if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) != 0) {
    vpx_atomic_store_release(current_mb_col,
                             vpx_atomic_load_acquire(&rightmost_col));
  }
#endif

  /* this is to account for the border */
  xd->mode_info_context++;
  x->partition_info++;
}

static void init_encode_frame_mb_context(VP8_COMP *cpi) {
  MACROBLOCK *const x = &cpi->mb;
  VP8_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;

  /* GF active flags data structure */
  x->gf_active_ptr = (signed char *)cpi->gf_active_flags;

  /* Activity map pointer */
  x->mb_activity_ptr = cpi->mb_activity_map;

  x->act_zbin_adj = 0;

  x->partition_info = x->pi;

  xd->mode_info_context = cm->mi;
  xd->mode_info_stride = cm->mode_info_stride;

  xd->frame_type = cm->frame_type;

  /* reset intra mode contexts */
  if (cm->frame_type == KEY_FRAME) vp8_init_mbmode_probs(cm);

  /* Copy data over into macro block data structures. */
  x->src = *cpi->Source;
  xd->pre = cm->yv12_fb[cm->lst_fb_idx];
  xd->dst = cm->yv12_fb[cm->new_fb_idx];

  /* set up frame for intra coded blocks */
  vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);

  vp8_build_block_offsets(x);

  xd->mode_info_context->mbmi.mode = DC_PRED;
  xd->mode_info_context->mbmi.uv_mode = DC_PRED;

  xd->left_context = &cm->left_context;

  x->mvc = cm->fc.mvc;

  memset(cm->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);

  /* Special case treatment when GF and ARF are not sensible options
   * for reference.
   */
  if (cpi->ref_frame_flags == VP8_LAST_FRAME) {
    vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, 255,
                             128);
  } else if ((cpi->oxcf.number_of_layers > 1) &&
             (cpi->ref_frame_flags == VP8_GOLD_FRAME)) {
    vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, 1, 255);
  } else if ((cpi->oxcf.number_of_layers > 1) &&
             (cpi->ref_frame_flags == VP8_ALTR_FRAME)) {
    vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, 1, 1);
  } else {
    vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded,
                             cpi->prob_last_coded, cpi->prob_gf_coded);
  }

  xd->fullpixel_mask = 0xffffffff;
  if (cm->full_pixel) xd->fullpixel_mask = 0xfffffff8;

  vp8_zero(x->coef_counts);
  vp8_zero(x->ymode_count);
  vp8_zero(x->uv_mode_count);
  x->prediction_error = 0;
  x->intra_error = 0;
  vp8_zero(x->count_mb_ref_frame_usage);
}

#if CONFIG_MULTITHREAD
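/* Fold one worker thread's coefficient token counts into the main
 * thread's MACROBLOCK. coef_counts is indexed as
 * [BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES],
 * i.e. 4 * 8 * 3 * 11 counters.
 */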
static void sum_coef_counts(MACROBLOCK *x, MACROBLOCK *x_thread) {
  int i = 0;
  do {
    int j = 0;
    do {
      int k = 0;
      do {
        /* at every context */

        /* calc probs and branch cts for this frame only */
        int t = 0; /* token/prob index */

        do {
          x->coef_counts[i][j][k][t] += x_thread->coef_counts[i][j][k][t];
        } while (++t < ENTROPY_NODES);
      } while (++k < PREV_COEF_CONTEXTS);
    } while (++j < COEF_BANDS);
  } while (++i < BLOCK_TYPES);
}
#endif  // CONFIG_MULTITHREAD

void vp8_encode_frame(VP8_COMP *cpi) {
  int mb_row;
  MACROBLOCK *const x = &cpi->mb;
  VP8_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  TOKENEXTRA *tp = cpi->tok;
  int segment_counts[MAX_MB_SEGMENTS];
  int totalrate;
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
  BOOL_CODER *bc = &cpi->bc[1]; /* bc[0] is for control partition */
  const int num_part = (1 << cm->multi_token_partition);
#endif

  memset(segment_counts, 0, sizeof(segment_counts));
  totalrate = 0;

  if (cpi->compressor_speed == 2) {
    if (cpi->oxcf.cpu_used < 0) {
      cpi->Speed = -(cpi->oxcf.cpu_used);
    } else {
      vp8_auto_select_speed(cpi);
    }
  }

  /* Functions are set up for all frame types so we can use MC in AltRef. */
  if (!cm->use_bilinear_mc_filter) {
    xd->subpixel_predict = vp8_sixtap_predict4x4;
    xd->subpixel_predict8x4 = vp8_sixtap_predict8x4;
    xd->subpixel_predict8x8 = vp8_sixtap_predict8x8;
    xd->subpixel_predict16x16 = vp8_sixtap_predict16x16;
  } else {
    xd->subpixel_predict = vp8_bilinear_predict4x4;
    xd->subpixel_predict8x4 = vp8_bilinear_predict8x4;
    xd->subpixel_predict8x8 = vp8_bilinear_predict8x8;
    xd->subpixel_predict16x16 = vp8_bilinear_predict16x16;
  }

  cpi->mb.skip_true_count = 0;
  cpi->tok_count = 0;

#if 0
  /* Experimental code */
  cpi->frame_distortion = 0;
  cpi->last_mb_distortion = 0;
#endif

  xd->mode_info_context = cm->mi;

  vp8_zero(cpi->mb.MVcount);

  vp8cx_frame_init_quantizer(cpi);

  vp8_initialize_rd_consts(cpi, x,
                           vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));

  vp8cx_initialize_me_consts(cpi, cm->base_qindex);

  if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
    /* Initialize encode frame context. */
    init_encode_frame_mb_context(cpi);

    /* Build a frame level activity map */
    build_activity_map(cpi);
  }

  /* (Re-)initialize the encode frame context; the activity map pass
   * above (VP8_TUNE_SSIM only) advances the source buffer and activity
   * map pointers, so the context is set up again here either way.
   */
  init_encode_frame_mb_context(cpi);

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
  {
    int i;
    for (i = 0; i < num_part; ++i) {
      vp8_start_encode(&bc[i], cpi->partition_d[i + 1],
                       cpi->partition_d_end[i + 1]);
      bc[i].error = &cm->error;
    }
  }
#endif

  {
    struct vpx_usec_timer emr_timer;
    vpx_usec_timer_start(&emr_timer);

#if CONFIG_MULTITHREAD
    if (vpx_atomic_load_acquire(&cpi->b_multi_threaded)) {
      int i;

      vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei,
                                cpi->encoding_thread_count);

      for (i = 0; i < cm->mb_rows; ++i)
        vpx_atomic_store_release(&cpi->mt_current_mb_col[i], -1);

      for (i = 0; i < cpi->encoding_thread_count; ++i) {
        sem_post(&cpi->h_event_start_encoding[i]);
      }
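
      /* Interleave rows with the worker threads: with T workers the main
       * thread encodes rows 0, T + 1, 2 * (T + 1), ... and the workers
       * cover the T rows in between, which is why every per-row pointer
       * below advances by (encoding_thread_count + 1) rows.
       */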
      for (mb_row = 0; mb_row < cm->mb_rows;
           mb_row += (cpi->encoding_thread_count + 1)) {
        vp8_zero(cm->left_context);

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
        tp = cpi->tok;
#else
        tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24);
#endif

        encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);

        /* adjust to the next row of MBs */
        x->src.y_buffer +=
            16 * x->src.y_stride * (cpi->encoding_thread_count + 1) -
            16 * cm->mb_cols;
        x->src.u_buffer +=
            8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) -
            8 * cm->mb_cols;
        x->src.v_buffer +=
            8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) -
            8 * cm->mb_cols;

        xd->mode_info_context +=
            xd->mode_info_stride * cpi->encoding_thread_count;
        x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count;
        x->gf_active_ptr += cm->mb_cols * cpi->encoding_thread_count;
      }

      /* Wait for all the threads to finish. */
      for (i = 0; i < cpi->encoding_thread_count; ++i) {
        sem_wait(&cpi->h_event_end_encoding[i]);
      }

      for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
        cpi->tok_count += (unsigned int)(cpi->tplist[mb_row].stop -
                                         cpi->tplist[mb_row].start);
      }

      if (xd->segmentation_enabled) {
        int j;

        for (i = 0; i < cpi->encoding_thread_count; ++i) {
          for (j = 0; j < 4; ++j) {
            segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j];
          }
        }
      }

      for (i = 0; i < cpi->encoding_thread_count; ++i) {
        int mode_count;
        int c_idx;
        totalrate += cpi->mb_row_ei[i].totalrate;

        cpi->mb.skip_true_count += cpi->mb_row_ei[i].mb.skip_true_count;

        for (mode_count = 0; mode_count < VP8_YMODES; ++mode_count) {
          cpi->mb.ymode_count[mode_count] +=
              cpi->mb_row_ei[i].mb.ymode_count[mode_count];
        }

        for (mode_count = 0; mode_count < VP8_UV_MODES; ++mode_count) {
          cpi->mb.uv_mode_count[mode_count] +=
              cpi->mb_row_ei[i].mb.uv_mode_count[mode_count];
        }

        for (c_idx = 0; c_idx < MVvals; ++c_idx) {
          cpi->mb.MVcount[0][c_idx] += cpi->mb_row_ei[i].mb.MVcount[0][c_idx];
          cpi->mb.MVcount[1][c_idx] += cpi->mb_row_ei[i].mb.MVcount[1][c_idx];
        }

        cpi->mb.prediction_error += cpi->mb_row_ei[i].mb.prediction_error;
        cpi->mb.intra_error += cpi->mb_row_ei[i].mb.intra_error;

        for (c_idx = 0; c_idx < MAX_REF_FRAMES; ++c_idx) {
          cpi->mb.count_mb_ref_frame_usage[c_idx] +=
              cpi->mb_row_ei[i].mb.count_mb_ref_frame_usage[c_idx];
        }

        for (c_idx = 0; c_idx < MAX_ERROR_BINS; ++c_idx) {
          cpi->mb.error_bins[c_idx] += cpi->mb_row_ei[i].mb.error_bins[c_idx];
        }

        /* add up counts for each thread */
        sum_coef_counts(x, &cpi->mb_row_ei[i].mb);
      }

    } else
#endif  // CONFIG_MULTITHREAD
    {
      /* for each macroblock row in image */
      for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
        vp8_zero(cm->left_context);

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
        tp = cpi->tok;
#endif

        encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);

        /* adjust to the next row of MBs */
        x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
        x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
        x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
      }

      cpi->tok_count = (unsigned int)(tp - cpi->tok);
    }

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
    {
      int i;
      for (i = 0; i < num_part; ++i) {
        vp8_stop_encode(&bc[i]);
        cpi->partition_sz[i + 1] = bc[i].pos;
      }
    }
#endif

    vpx_usec_timer_mark(&emr_timer);
    cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
  }
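
  /* The segmentation map is coded with a two-level binary tree:
   * mb_segment_tree_probs[0] splits segments {0, 1} from {2, 3},
   * probs[1] splits 0 from 1, and probs[2] splits 2 from 3. With counts
   * of, say, {40, 10, 30, 20} out of 100 MBs, this gives
   * probs[0] = (50 * 255) / 100 = 127, probs[1] = (40 * 255) / 50 = 204,
   * and probs[2] = (30 * 255) / 50 = 153.
   */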
  // Work out the segment probabilities if segmentation is enabled
  // and needs to be updated.
  if (xd->segmentation_enabled && xd->update_mb_segmentation_map) {
    int tot_count;
    int i;

    /* Set to defaults */
    memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs));

    tot_count = segment_counts[0] + segment_counts[1] + segment_counts[2] +
                segment_counts[3];

    if (tot_count) {
      xd->mb_segment_tree_probs[0] =
          ((segment_counts[0] + segment_counts[1]) * 255) / tot_count;

      tot_count = segment_counts[0] + segment_counts[1];

      if (tot_count > 0) {
        xd->mb_segment_tree_probs[1] = (segment_counts[0] * 255) / tot_count;
      }

      tot_count = segment_counts[2] + segment_counts[3];

      if (tot_count > 0) {
        xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) / tot_count;
      }

      /* Zero probabilities are not allowed. */
      for (i = 0; i < MB_FEATURE_TREE_PROBS; ++i) {
        if (xd->mb_segment_tree_probs[i] == 0) xd->mb_segment_tree_probs[i] = 1;
      }
    }
  }

  /* projected_frame_size in units of BYTES */
  cpi->projected_frame_size = totalrate >> 8;

  /* Make a note of the percentage of MBs coded intra. */
  if (cm->frame_type == KEY_FRAME) {
    cpi->this_frame_percent_intra = 100;
  } else {
    int tot_modes;

    tot_modes = cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME] +
                cpi->mb.count_mb_ref_frame_usage[LAST_FRAME] +
                cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME] +
                cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];

    if (tot_modes) {
      cpi->this_frame_percent_intra =
          cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes;
    }
  }

#if !CONFIG_REALTIME_ONLY
  /* Adjust the projected reference frame usage probability numbers to
   * reflect what we have just seen. This may be useful when we make
   * multiple iterations of the recode loop rather than continuing to use
   * values from the previous frame.
   */
  if ((cm->frame_type != KEY_FRAME) &&
      ((cpi->oxcf.number_of_layers > 1) ||
       (!cm->refresh_alt_ref_frame && !cm->refresh_golden_frame))) {
    vp8_convert_rfct_to_prob(cpi);
  }
#endif
}

void vp8_setup_block_ptrs(MACROBLOCK *x) {
  int r, c;
  int i;

  for (r = 0; r < 4; ++r) {
    for (c = 0; c < 4; ++c) {
      x->block[r * 4 + c].src_diff = x->src_diff + r * 4 * 16 + c * 4;
    }
  }

  for (r = 0; r < 2; ++r) {
    for (c = 0; c < 2; ++c) {
      x->block[16 + r * 2 + c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4;
    }
  }

  for (r = 0; r < 2; ++r) {
    for (c = 0; c < 2; ++c) {
      x->block[20 + r * 2 + c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4;
    }
  }

  x->block[24].src_diff = x->src_diff + 384;

  for (i = 0; i < 25; ++i) {
    x->block[i].coeff = x->coeff + i * 16;
  }
}
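
/* Resulting src_diff layout: the 16 4x4 Y blocks occupy [0, 255] as a
 * 16x16 tile, the four U blocks [256, 319] and the four V blocks
 * [320, 383] as 8x8 tiles, and block 24 (the second-order Y2 block)
 * starts at 384. Each block's 16 coefficients live at coeff + i * 16.
 */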

void vp8_build_block_offsets(MACROBLOCK *x) {
  int block = 0;
  int br, bc;

  vp8_build_block_doffsets(&x->e_mbd);

  /* y blocks */
  x->thismb_ptr = &x->thismb[0];
  for (br = 0; br < 4; ++br) {
    for (bc = 0; bc < 4; ++bc) {
      BLOCK *this_block = &x->block[block];
      this_block->base_src = &x->thismb_ptr;
      this_block->src_stride = 16;
      this_block->src = 4 * br * 16 + 4 * bc;
      ++block;
    }
  }

  /* u blocks */
  for (br = 0; br < 2; ++br) {
    for (bc = 0; bc < 2; ++bc) {
      BLOCK *this_block = &x->block[block];
      this_block->base_src = &x->src.u_buffer;
      this_block->src_stride = x->src.uv_stride;
      this_block->src = 4 * br * this_block->src_stride + 4 * bc;
      ++block;
    }
  }

  /* v blocks */
  for (br = 0; br < 2; ++br) {
    for (bc = 0; bc < 2; ++bc) {
      BLOCK *this_block = &x->block[block];
      this_block->base_src = &x->src.v_buffer;
      this_block->src_stride = x->src.uv_stride;
      this_block->src = 4 * br * this_block->src_stride + 4 * bc;
      ++block;
    }
  }
}

static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x) {
  const MACROBLOCKD *xd = &x->e_mbd;
  const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
  const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;

#ifdef MODE_STATS
  const int is_key = cpi->common.frame_type == KEY_FRAME;

  ++(is_key ? uv_modes : inter_uv_modes)[uvm];

  if (m == B_PRED) {
    unsigned int *const bct = is_key ? b_modes : inter_b_modes;

    int b = 0;

    do {
      ++bct[xd->block[b].bmi.mode];
    } while (++b < 16);
  }

#else
  (void)cpi;
#endif

  ++x->ymode_count[m];
  ++x->uv_mode_count[uvm];
}

/* Experimental stub function to create a per-MB zbin adjustment based on
 * some previously calculated measure of MB activity.
 */
static void adjust_act_zbin(VP8_COMP *cpi, MACROBLOCK *x) {
#if USE_ACT_INDEX
  x->act_zbin_adj = *(x->mb_activity_ptr);
#else
  int64_t a;
  int64_t b;
  int64_t act = *(x->mb_activity_ptr);

  /* Apply the masking to the RD multiplier. */
  a = act + 4 * cpi->activity_avg;
  b = 4 * act + cpi->activity_avg;

  if (act > cpi->activity_avg) {
    x->act_zbin_adj = (int)(((int64_t)b + (a >> 1)) / a) - 1;
  } else {
    x->act_zbin_adj = 1 - (int)(((int64_t)a + (b >> 1)) / b);
  }
#endif
}
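
/* After mode selection and tokenization, the intra MB is reconstructed so
 * later blocks can predict from it: the luma inverse transform is applied
 * for 16x16 modes (B_PRED reconstructs each 4x4 block as it is encoded),
 * and the chroma planes are dequantized and added back to the prediction.
 */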
int vp8cx_encode_intra_macroblock(VP8_COMP *cpi, MACROBLOCK *x,
                                  TOKENEXTRA **t) {
  MACROBLOCKD *xd = &x->e_mbd;
  int rate;

  if (cpi->sf.RD && cpi->compressor_speed != 2) {
    vp8_rd_pick_intra_mode(x, &rate);
  } else {
    vp8_pick_intra_mode(x, &rate);
  }

  if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
    adjust_act_zbin(cpi, x);
    vp8_update_zbin_extra(cpi, x);
  }

  if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED) {
    vp8_encode_intra4x4mby(x);
  } else {
    vp8_encode_intra16x16mby(x);
  }

  vp8_encode_intra16x16mbuv(x);

  sum_intra_stats(cpi, x);

  vp8_tokenize_mb(cpi, x, t);

  if (xd->mode_info_context->mbmi.mode != B_PRED) vp8_inverse_transform_mby(xd);

  vp8_dequant_idct_add_uv_block(xd->qcoeff + 16 * 16, xd->dequant_uv,
                                xd->dst.u_buffer, xd->dst.v_buffer,
                                xd->dst.uv_stride, xd->eobs + 16);
  return rate;
}

#ifdef SPEEDSTATS
extern int cnt_pm;
#endif

extern void vp8_fix_contexts(MACROBLOCKD *x);

int vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
                                  int recon_yoffset, int recon_uvoffset,
                                  int mb_row, int mb_col) {
  MACROBLOCKD *const xd = &x->e_mbd;
  int intra_error = 0;
  int rate;
  int distortion;

  x->skip = 0;

  if (xd->segmentation_enabled) {
    x->encode_breakout =
        cpi->segment_encode_breakout[xd->mode_info_context->mbmi.segment_id];
  } else {
    x->encode_breakout = cpi->oxcf.encode_breakout;
  }

#if CONFIG_TEMPORAL_DENOISING
  /* Reset the best sse mode/mv for each macroblock. */
  x->best_reference_frame = INTRA_FRAME;
  x->best_zeromv_reference_frame = INTRA_FRAME;
  x->best_sse_inter_mode = 0;
  x->best_sse_mv.as_int = 0;
  x->need_to_clamp_best_mvs = 0;
#endif

  if (cpi->sf.RD) {
    int zbin_mode_boost_enabled = x->zbin_mode_boost_enabled;

    /* Are we using the fast quantizer for the mode selection? */
    if (cpi->sf.use_fastquant_for_pick) {
      x->quantize_b = vp8_fast_quantize_b;

      /* the fast quantizer does not use zbin_extra, so
       * do not recalculate */
      x->zbin_mode_boost_enabled = 0;
    }
    vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
                           &distortion, &intra_error, mb_row, mb_col);

    /* switch back to the regular quantizer for the encode */
    if (cpi->sf.improved_quant) {
      x->quantize_b = vp8_regular_quantize_b;
    }

    /* restore cpi->zbin_mode_boost_enabled */
    x->zbin_mode_boost_enabled = zbin_mode_boost_enabled;
  } else {
    vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
                        &distortion, &intra_error, mb_row, mb_col);
  }

  x->prediction_error += distortion;
  x->intra_error += intra_error;

  if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
    /* Adjust the zbin based on this MB's rate. */
    adjust_act_zbin(cpi, x);
  }

#if 0
  /* Experimental RD code */
  cpi->frame_distortion += distortion;
  cpi->last_mb_distortion = distortion;
#endif

  /* MB level adjustment to quantizer setup */
  if (xd->segmentation_enabled) {
    /* If cyclic update enabled */
    if (cpi->current_layer == 0 && cpi->cyclic_refresh_mode_enabled) {
      /* Clear segment_id back to 0 if not coded (last frame 0,0) */
      if ((xd->mode_info_context->mbmi.segment_id == 1) &&
          ((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) ||
           (xd->mode_info_context->mbmi.mode != ZEROMV))) {
        xd->mode_info_context->mbmi.segment_id = 0;

        /* segment_id changed, so update */
        vp8cx_mb_init_quantizer(cpi, x, 1);
      }
    }
  }

  {
    /* Experimental code.
     * Special case for gf and arf zeromv modes, for 1 temporal layer.
     * Increase zbin size to suppress noise.
     */
    x->zbin_mode_boost = 0;
    if (x->zbin_mode_boost_enabled) {
      if (xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME) {
        if (xd->mode_info_context->mbmi.mode == ZEROMV) {
          if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME &&
              cpi->oxcf.number_of_layers == 1) {
            x->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
          } else {
            x->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
          }
        } else if (xd->mode_info_context->mbmi.mode == SPLITMV) {
          x->zbin_mode_boost = 0;
        } else {
          x->zbin_mode_boost = MV_ZBIN_BOOST;
        }
      }
    }

    /* The fast quantizer doesn't use zbin_extra, so only update it when
     * the regular quantizer is in use. */
    if (cpi->sf.improved_quant) vp8_update_zbin_extra(cpi, x);
  }

  x->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame]++;

  if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
    vp8_encode_intra16x16mbuv(x);

    if (xd->mode_info_context->mbmi.mode == B_PRED) {
      vp8_encode_intra4x4mby(x);
    } else {
      vp8_encode_intra16x16mby(x);
    }

    sum_intra_stats(cpi, x);
  } else {
    int ref_fb_idx;

    if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) {
      ref_fb_idx = cpi->common.lst_fb_idx;
    } else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) {
      ref_fb_idx = cpi->common.gld_fb_idx;
    } else {
      ref_fb_idx = cpi->common.alt_fb_idx;
    }

    xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
    xd->pre.u_buffer =
        cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
    xd->pre.v_buffer =
        cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;

    if (!x->skip) {
      vp8_encode_inter16x16(x);
    } else {
      vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer, xd->dst.u_buffer,
                                         xd->dst.v_buffer, xd->dst.y_stride,
                                         xd->dst.uv_stride);
    }
  }

  if (!x->skip) {
    vp8_tokenize_mb(cpi, x, t);

    if (xd->mode_info_context->mbmi.mode != B_PRED) {
      vp8_inverse_transform_mby(xd);
    }

    vp8_dequant_idct_add_uv_block(xd->qcoeff + 16 * 16, xd->dequant_uv,
                                  xd->dst.u_buffer, xd->dst.v_buffer,
                                  xd->dst.uv_stride, xd->eobs + 16);
  } else {
    /* always set mb_skip_coeff as it is needed by the loopfilter */
    xd->mode_info_context->mbmi.mb_skip_coeff = 1;

    if (cpi->common.mb_no_coeff_skip) {
      x->skip_true_count++;
      vp8_fix_contexts(xd);
    } else {
      vp8_stuff_mb(cpi, x, t);
    }
  }

  return rate;
}