1 /* 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 3 * 4 * Use of this source code is governed by a BSD-style license 5 * that can be found in the LICENSE file in the root of the source 6 * tree. An additional intellectual property rights grant can be found 7 * in the file PATENTS. All contributing project authors may 8 * be found in the AUTHORS file in the root of the source tree. 9 */ 10 11 #include <assert.h> 12 #include <stdio.h> 13 #include <limits.h> 14 15 #include "vpx/vpx_encoder.h" 16 #include "vpx_mem/vpx_mem.h" 17 18 #include "vp9/common/vp9_entropymode.h" 19 #include "vp9/common/vp9_entropymv.h" 20 #include "vp9/common/vp9_findnearmv.h" 21 #include "vp9/common/vp9_tile_common.h" 22 #include "vp9/common/vp9_seg_common.h" 23 #include "vp9/common/vp9_pred_common.h" 24 #include "vp9/common/vp9_entropy.h" 25 #include "vp9/common/vp9_mvref_common.h" 26 #include "vp9/common/vp9_treecoder.h" 27 #include "vp9/common/vp9_systemdependent.h" 28 #include "vp9/common/vp9_pragmas.h" 29 30 #include "vp9/encoder/vp9_mcomp.h" 31 #include "vp9/encoder/vp9_encodemv.h" 32 #include "vp9/encoder/vp9_bitstream.h" 33 #include "vp9/encoder/vp9_segmentation.h" 34 #include "vp9/encoder/vp9_subexp.h" 35 #include "vp9/encoder/vp9_write_bit_buffer.h" 36 37 38 #if defined(SECTIONBITS_OUTPUT) 39 unsigned __int64 Sectionbits[500]; 40 #endif 41 42 #ifdef ENTROPY_STATS 43 int intra_mode_stats[INTRA_MODES] 44 [INTRA_MODES] 45 [INTRA_MODES]; 46 vp9_coeff_stats tree_update_hist[TX_SIZES][BLOCK_TYPES]; 47 48 extern unsigned int active_section; 49 #endif 50 51 52 #ifdef MODE_STATS 53 int64_t tx_count_32x32p_stats[TX_SIZE_CONTEXTS][TX_SIZES]; 54 int64_t tx_count_16x16p_stats[TX_SIZE_CONTEXTS][TX_SIZES - 1]; 55 int64_t tx_count_8x8p_stats[TX_SIZE_CONTEXTS][TX_SIZES - 2]; 56 int64_t switchable_interp_stats[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS]; 57 58 void init_tx_count_stats() { 59 vp9_zero(tx_count_32x32p_stats); 60 vp9_zero(tx_count_16x16p_stats); 61 vp9_zero(tx_count_8x8p_stats); 62 } 63 64 void init_switchable_interp_stats() { 65 vp9_zero(switchable_interp_stats); 66 } 67 68 static void update_tx_count_stats(VP9_COMMON *cm) { 69 int i, j; 70 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { 71 for (j = 0; j < TX_SIZES; j++) { 72 tx_count_32x32p_stats[i][j] += cm->fc.tx_count_32x32p[i][j]; 73 } 74 } 75 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { 76 for (j = 0; j < TX_SIZES - 1; j++) { 77 tx_count_16x16p_stats[i][j] += cm->fc.tx_count_16x16p[i][j]; 78 } 79 } 80 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { 81 for (j = 0; j < TX_SIZES - 2; j++) { 82 tx_count_8x8p_stats[i][j] += cm->fc.tx_count_8x8p[i][j]; 83 } 84 } 85 } 86 87 static void update_switchable_interp_stats(VP9_COMMON *cm) { 88 int i, j; 89 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) 90 for (j = 0; j < SWITCHABLE_FILTERS; ++j) 91 switchable_interp_stats[i][j] += cm->fc.switchable_interp_count[i][j]; 92 } 93 94 void write_tx_count_stats() { 95 int i, j; 96 FILE *fp = fopen("tx_count.bin", "wb"); 97 fwrite(tx_count_32x32p_stats, sizeof(tx_count_32x32p_stats), 1, fp); 98 fwrite(tx_count_16x16p_stats, sizeof(tx_count_16x16p_stats), 1, fp); 99 fwrite(tx_count_8x8p_stats, sizeof(tx_count_8x8p_stats), 1, fp); 100 fclose(fp); 101 102 printf( 103 "vp9_default_tx_count_32x32p[TX_SIZE_CONTEXTS][TX_SIZES] = {\n"); 104 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { 105 printf(" { "); 106 for (j = 0; j < TX_SIZES; j++) { 107 printf("%"PRId64", ", tx_count_32x32p_stats[i][j]); 108 } 109 printf("},\n"); 110 } 111 printf("};\n"); 112 printf( 113 "vp9_default_tx_count_16x16p[TX_SIZE_CONTEXTS][TX_SIZES-1] = {\n"); 114 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { 115 printf(" { "); 116 for (j = 0; j < TX_SIZES - 1; j++) { 117 printf("%"PRId64", ", tx_count_16x16p_stats[i][j]); 118 } 119 printf("},\n"); 120 } 121 printf("};\n"); 122 printf( 123 "vp9_default_tx_count_8x8p[TX_SIZE_CONTEXTS][TX_SIZES-2] = {\n"); 124 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { 125 printf(" { "); 126 for (j = 0; j < TX_SIZES - 2; j++) { 127 printf("%"PRId64", ", tx_count_8x8p_stats[i][j]); 128 } 129 printf("},\n"); 130 } 131 printf("};\n"); 132 } 133 134 void write_switchable_interp_stats() { 135 int i, j; 136 FILE *fp = fopen("switchable_interp.bin", "wb"); 137 fwrite(switchable_interp_stats, sizeof(switchable_interp_stats), 1, fp); 138 fclose(fp); 139 140 printf( 141 "vp9_default_switchable_filter_count[SWITCHABLE_FILTER_CONTEXTS]" 142 "[SWITCHABLE_FILTERS] = {\n"); 143 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) { 144 printf(" { "); 145 for (j = 0; j < SWITCHABLE_FILTERS; j++) { 146 printf("%"PRId64", ", switchable_interp_stats[i][j]); 147 } 148 printf("},\n"); 149 } 150 printf("};\n"); 151 } 152 #endif 153 154 static INLINE void write_be32(uint8_t *p, int value) { 155 p[0] = value >> 24; 156 p[1] = value >> 16; 157 p[2] = value >> 8; 158 p[3] = value; 159 } 160 161 void vp9_encode_unsigned_max(struct vp9_write_bit_buffer *wb, 162 int data, int max) { 163 vp9_wb_write_literal(wb, data, get_unsigned_bits(max)); 164 } 165 166 static void update_mode(vp9_writer *w, int n, vp9_tree tree, 167 vp9_prob Pcur[/* n-1 */], 168 unsigned int bct[/* n-1 */][2], 169 const unsigned int num_events[/* n */]) { 170 int i = 0; 171 172 vp9_tree_probs_from_distribution(tree, bct, num_events); 173 for (i = 0; i < n - 1; ++i) 174 vp9_cond_prob_diff_update(w, &Pcur[i], bct[i]); 175 } 176 177 static void update_mbintra_mode_probs(VP9_COMP* const cpi, 178 vp9_writer* const bc) { 179 VP9_COMMON *const cm = &cpi->common; 180 int j; 181 unsigned int bct[INTRA_MODES - 1][2]; 182 183 for (j = 0; j < BLOCK_SIZE_GROUPS; j++) 184 update_mode(bc, INTRA_MODES, vp9_intra_mode_tree, 185 cm->fc.y_mode_prob[j], bct, 186 (unsigned int *)cpi->y_mode_count[j]); 187 } 188 189 static void write_selected_tx_size(const VP9_COMP *cpi, MODE_INFO *m, 190 TX_SIZE tx_size, BLOCK_SIZE bsize, 191 vp9_writer *w) { 192 const TX_SIZE max_tx_size = max_txsize_lookup[bsize]; 193 const MACROBLOCKD *const xd = &cpi->mb.e_mbd; 194 const vp9_prob *const tx_probs = get_tx_probs2(max_tx_size, xd, 195 &cpi->common.fc.tx_probs); 196 vp9_write(w, tx_size != TX_4X4, tx_probs[0]); 197 if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) { 198 vp9_write(w, tx_size != TX_8X8, tx_probs[1]); 199 if (tx_size != TX_8X8 && max_tx_size >= TX_32X32) 200 vp9_write(w, tx_size != TX_16X16, tx_probs[2]); 201 } 202 } 203 204 static int write_skip_coeff(const VP9_COMP *cpi, int segment_id, MODE_INFO *m, 205 vp9_writer *w) { 206 const MACROBLOCKD *const xd = &cpi->mb.e_mbd; 207 if (vp9_segfeature_active(&cpi->common.seg, segment_id, SEG_LVL_SKIP)) { 208 return 1; 209 } else { 210 const int skip_coeff = m->mbmi.skip_coeff; 211 vp9_write(w, skip_coeff, vp9_get_pred_prob_mbskip(&cpi->common, xd)); 212 return skip_coeff; 213 } 214 } 215 216 void vp9_update_skip_probs(VP9_COMP *cpi, vp9_writer *w) { 217 VP9_COMMON *cm = &cpi->common; 218 int k; 219 220 for (k = 0; k < MBSKIP_CONTEXTS; ++k) 221 vp9_cond_prob_diff_update(w, &cm->fc.mbskip_probs[k], cm->counts.mbskip[k]); 222 } 223 224 static void write_intra_mode(vp9_writer *bc, int m, const vp9_prob *p) { 225 write_token(bc, vp9_intra_mode_tree, p, vp9_intra_mode_encodings + m); 226 } 227 228 static void update_switchable_interp_probs(VP9_COMP *cpi, vp9_writer *w) { 229 VP9_COMMON *const cm = &cpi->common; 230 unsigned int branch_ct[SWITCHABLE_FILTERS - 1][2]; 231 int i, j; 232 for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j) { 233 vp9_tree_probs_from_distribution(vp9_switchable_interp_tree, branch_ct, 234 cm->counts.switchable_interp[j]); 235 236 for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i) 237 vp9_cond_prob_diff_update(w, &cm->fc.switchable_interp_prob[j][i], 238 branch_ct[i]); 239 } 240 241 #ifdef MODE_STATS 242 if (!cpi->dummy_packing) 243 update_switchable_interp_stats(cm); 244 #endif 245 } 246 247 static void update_inter_mode_probs(VP9_COMMON *cm, vp9_writer *w) { 248 int i, j; 249 250 for (i = 0; i < INTER_MODE_CONTEXTS; ++i) { 251 unsigned int branch_ct[INTER_MODES - 1][2]; 252 vp9_tree_probs_from_distribution(vp9_inter_mode_tree, branch_ct, 253 cm->counts.inter_mode[i]); 254 255 for (j = 0; j < INTER_MODES - 1; ++j) 256 vp9_cond_prob_diff_update(w, &cm->fc.inter_mode_probs[i][j], 257 branch_ct[j]); 258 } 259 } 260 261 static void pack_mb_tokens(vp9_writer* const w, 262 TOKENEXTRA **tp, 263 const TOKENEXTRA *const stop) { 264 TOKENEXTRA *p = *tp; 265 266 while (p < stop && p->token != EOSB_TOKEN) { 267 const int t = p->token; 268 const struct vp9_token *const a = &vp9_coef_encodings[t]; 269 const vp9_extra_bit *const b = &vp9_extra_bits[t]; 270 int i = 0; 271 const vp9_prob *pp; 272 int v = a->value; 273 int n = a->len; 274 vp9_prob probs[ENTROPY_NODES]; 275 276 if (t >= TWO_TOKEN) { 277 vp9_model_to_full_probs(p->context_tree, probs); 278 pp = probs; 279 } else { 280 pp = p->context_tree; 281 } 282 assert(pp != 0); 283 284 /* skip one or two nodes */ 285 if (p->skip_eob_node) { 286 n -= p->skip_eob_node; 287 i = 2 * p->skip_eob_node; 288 } 289 290 do { 291 const int bb = (v >> --n) & 1; 292 vp9_write(w, bb, pp[i >> 1]); 293 i = vp9_coef_tree[i + bb]; 294 } while (n); 295 296 if (b->base_val) { 297 const int e = p->extra, l = b->len; 298 299 if (l) { 300 const unsigned char *pb = b->prob; 301 int v = e >> 1; 302 int n = l; /* number of bits in v, assumed nonzero */ 303 int i = 0; 304 305 do { 306 const int bb = (v >> --n) & 1; 307 vp9_write(w, bb, pb[i >> 1]); 308 i = b->tree[i + bb]; 309 } while (n); 310 } 311 312 vp9_write_bit(w, e & 1); 313 } 314 ++p; 315 } 316 317 *tp = p + (p->token == EOSB_TOKEN); 318 } 319 320 static void write_sb_mv_ref(vp9_writer *w, MB_PREDICTION_MODE mode, 321 const vp9_prob *p) { 322 assert(is_inter_mode(mode)); 323 write_token(w, vp9_inter_mode_tree, p, 324 &vp9_inter_mode_encodings[INTER_OFFSET(mode)]); 325 } 326 327 328 static void write_segment_id(vp9_writer *w, const struct segmentation *seg, 329 int segment_id) { 330 if (seg->enabled && seg->update_map) 331 treed_write(w, vp9_segment_tree, seg->tree_probs, segment_id, 3); 332 } 333 334 // This function encodes the reference frame 335 static void encode_ref_frame(VP9_COMP *cpi, vp9_writer *bc) { 336 VP9_COMMON *const cm = &cpi->common; 337 MACROBLOCK *const x = &cpi->mb; 338 MACROBLOCKD *const xd = &x->e_mbd; 339 MB_MODE_INFO *mi = &xd->mi_8x8[0]->mbmi; 340 const int segment_id = mi->segment_id; 341 int seg_ref_active = vp9_segfeature_active(&cm->seg, segment_id, 342 SEG_LVL_REF_FRAME); 343 // If segment level coding of this signal is disabled... 344 // or the segment allows multiple reference frame options 345 if (!seg_ref_active) { 346 // does the feature use compound prediction or not 347 // (if not specified at the frame/segment level) 348 if (cm->comp_pred_mode == HYBRID_PREDICTION) { 349 vp9_write(bc, mi->ref_frame[1] > INTRA_FRAME, 350 vp9_get_pred_prob_comp_inter_inter(cm, xd)); 351 } else { 352 assert((mi->ref_frame[1] <= INTRA_FRAME) == 353 (cm->comp_pred_mode == SINGLE_PREDICTION_ONLY)); 354 } 355 356 if (mi->ref_frame[1] > INTRA_FRAME) { 357 vp9_write(bc, mi->ref_frame[0] == GOLDEN_FRAME, 358 vp9_get_pred_prob_comp_ref_p(cm, xd)); 359 } else { 360 vp9_write(bc, mi->ref_frame[0] != LAST_FRAME, 361 vp9_get_pred_prob_single_ref_p1(cm, xd)); 362 if (mi->ref_frame[0] != LAST_FRAME) 363 vp9_write(bc, mi->ref_frame[0] != GOLDEN_FRAME, 364 vp9_get_pred_prob_single_ref_p2(cm, xd)); 365 } 366 } else { 367 assert(mi->ref_frame[1] <= INTRA_FRAME); 368 assert(vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) == 369 mi->ref_frame[0]); 370 } 371 372 // If using the prediction model we have nothing further to do because 373 // the reference frame is fully coded by the segment. 374 } 375 376 static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) { 377 VP9_COMMON *const cm = &cpi->common; 378 const nmv_context *nmvc = &cm->fc.nmvc; 379 MACROBLOCK *const x = &cpi->mb; 380 MACROBLOCKD *const xd = &x->e_mbd; 381 struct segmentation *seg = &cm->seg; 382 MB_MODE_INFO *const mi = &m->mbmi; 383 const MV_REFERENCE_FRAME rf = mi->ref_frame[0]; 384 const MB_PREDICTION_MODE mode = mi->mode; 385 const int segment_id = mi->segment_id; 386 int skip_coeff; 387 const BLOCK_SIZE bsize = mi->sb_type; 388 const int allow_hp = cm->allow_high_precision_mv; 389 390 #ifdef ENTROPY_STATS 391 active_section = 9; 392 #endif 393 394 if (seg->update_map) { 395 if (seg->temporal_update) { 396 const int pred_flag = mi->seg_id_predicted; 397 vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd); 398 vp9_write(bc, pred_flag, pred_prob); 399 if (!pred_flag) 400 write_segment_id(bc, seg, segment_id); 401 } else { 402 write_segment_id(bc, seg, segment_id); 403 } 404 } 405 406 skip_coeff = write_skip_coeff(cpi, segment_id, m, bc); 407 408 if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) 409 vp9_write(bc, rf != INTRA_FRAME, 410 vp9_get_pred_prob_intra_inter(cm, xd)); 411 412 if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT && 413 !(rf != INTRA_FRAME && 414 (skip_coeff || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) { 415 write_selected_tx_size(cpi, m, mi->tx_size, bsize, bc); 416 } 417 418 if (rf == INTRA_FRAME) { 419 #ifdef ENTROPY_STATS 420 active_section = 6; 421 #endif 422 423 if (bsize >= BLOCK_8X8) { 424 write_intra_mode(bc, mode, cm->fc.y_mode_prob[size_group_lookup[bsize]]); 425 } else { 426 int idx, idy; 427 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; 428 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; 429 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) { 430 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { 431 const MB_PREDICTION_MODE bm = m->bmi[idy * 2 + idx].as_mode; 432 write_intra_mode(bc, bm, cm->fc.y_mode_prob[0]); 433 } 434 } 435 } 436 write_intra_mode(bc, mi->uv_mode, cm->fc.uv_mode_prob[mode]); 437 } else { 438 vp9_prob *mv_ref_p; 439 encode_ref_frame(cpi, bc); 440 mv_ref_p = cpi->common.fc.inter_mode_probs[mi->mode_context[rf]]; 441 442 #ifdef ENTROPY_STATS 443 active_section = 3; 444 #endif 445 446 // If segment skip is not enabled code the mode. 447 if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) { 448 if (bsize >= BLOCK_8X8) { 449 write_sb_mv_ref(bc, mode, mv_ref_p); 450 ++cm->counts.inter_mode[mi->mode_context[rf]] 451 [INTER_OFFSET(mode)]; 452 } 453 } 454 455 if (cm->mcomp_filter_type == SWITCHABLE) { 456 const int ctx = vp9_get_pred_context_switchable_interp(xd); 457 write_token(bc, vp9_switchable_interp_tree, 458 cm->fc.switchable_interp_prob[ctx], 459 &vp9_switchable_interp_encodings[mi->interp_filter]); 460 } else { 461 assert(mi->interp_filter == cm->mcomp_filter_type); 462 } 463 464 if (bsize < BLOCK_8X8) { 465 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; 466 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; 467 int idx, idy; 468 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) { 469 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { 470 const int j = idy * 2 + idx; 471 const MB_PREDICTION_MODE blockmode = m->bmi[j].as_mode; 472 write_sb_mv_ref(bc, blockmode, mv_ref_p); 473 ++cm->counts.inter_mode[mi->mode_context[rf]] 474 [INTER_OFFSET(blockmode)]; 475 476 if (blockmode == NEWMV) { 477 #ifdef ENTROPY_STATS 478 active_section = 11; 479 #endif 480 vp9_encode_mv(cpi, bc, &m->bmi[j].as_mv[0].as_mv, 481 &mi->best_mv[0].as_mv, nmvc, allow_hp); 482 483 if (has_second_ref(mi)) 484 vp9_encode_mv(cpi, bc, &m->bmi[j].as_mv[1].as_mv, 485 &mi->best_mv[1].as_mv, nmvc, allow_hp); 486 } 487 } 488 } 489 } else if (mode == NEWMV) { 490 #ifdef ENTROPY_STATS 491 active_section = 5; 492 #endif 493 vp9_encode_mv(cpi, bc, &mi->mv[0].as_mv, 494 &mi->best_mv[0].as_mv, nmvc, allow_hp); 495 496 if (has_second_ref(mi)) 497 vp9_encode_mv(cpi, bc, &mi->mv[1].as_mv, 498 &mi->best_mv[1].as_mv, nmvc, allow_hp); 499 } 500 } 501 } 502 503 static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO **mi_8x8, 504 vp9_writer *bc) { 505 const VP9_COMMON *const cm = &cpi->common; 506 const MACROBLOCKD *const xd = &cpi->mb.e_mbd; 507 const struct segmentation *const seg = &cm->seg; 508 MODE_INFO *m = mi_8x8[0]; 509 const int ym = m->mbmi.mode; 510 const int segment_id = m->mbmi.segment_id; 511 MODE_INFO *above_mi = mi_8x8[-xd->mode_info_stride]; 512 MODE_INFO *left_mi = xd->left_available ? mi_8x8[-1] : NULL; 513 514 if (seg->update_map) 515 write_segment_id(bc, seg, m->mbmi.segment_id); 516 517 write_skip_coeff(cpi, segment_id, m, bc); 518 519 if (m->mbmi.sb_type >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT) 520 write_selected_tx_size(cpi, m, m->mbmi.tx_size, m->mbmi.sb_type, bc); 521 522 if (m->mbmi.sb_type >= BLOCK_8X8) { 523 const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, 0); 524 const MB_PREDICTION_MODE L = left_block_mode(m, left_mi, 0); 525 write_intra_mode(bc, ym, vp9_kf_y_mode_prob[A][L]); 526 } else { 527 int idx, idy; 528 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[m->mbmi.sb_type]; 529 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[m->mbmi.sb_type]; 530 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) { 531 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { 532 int i = idy * 2 + idx; 533 const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, i); 534 const MB_PREDICTION_MODE L = left_block_mode(m, left_mi, i); 535 const int bm = m->bmi[i].as_mode; 536 #ifdef ENTROPY_STATS 537 ++intra_mode_stats[A][L][bm]; 538 #endif 539 write_intra_mode(bc, bm, vp9_kf_y_mode_prob[A][L]); 540 } 541 } 542 } 543 544 write_intra_mode(bc, m->mbmi.uv_mode, vp9_kf_uv_mode_prob[ym]); 545 } 546 547 static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile, 548 vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end, 549 int mi_row, int mi_col) { 550 VP9_COMMON *const cm = &cpi->common; 551 MACROBLOCKD *const xd = &cpi->mb.e_mbd; 552 MODE_INFO *m; 553 554 xd->mi_8x8 = cm->mi_grid_visible + (mi_row * cm->mode_info_stride + mi_col); 555 m = xd->mi_8x8[0]; 556 557 set_mi_row_col(xd, tile, 558 mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type], 559 mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type], 560 cm->mi_rows, cm->mi_cols); 561 if (frame_is_intra_only(cm)) { 562 write_mb_modes_kf(cpi, xd->mi_8x8, w); 563 #ifdef ENTROPY_STATS 564 active_section = 8; 565 #endif 566 } else { 567 pack_inter_mode_mvs(cpi, m, w); 568 #ifdef ENTROPY_STATS 569 active_section = 1; 570 #endif 571 } 572 573 assert(*tok < tok_end); 574 pack_mb_tokens(w, tok, tok_end); 575 } 576 577 static void write_partition(VP9_COMP *cpi, int hbs, int mi_row, int mi_col, 578 PARTITION_TYPE p, BLOCK_SIZE bsize, vp9_writer *w) { 579 VP9_COMMON *const cm = &cpi->common; 580 const int ctx = partition_plane_context(cpi->above_seg_context, 581 cpi->left_seg_context, 582 mi_row, mi_col, bsize); 583 const vp9_prob *const probs = get_partition_probs(cm, ctx); 584 const int has_rows = (mi_row + hbs) < cm->mi_rows; 585 const int has_cols = (mi_col + hbs) < cm->mi_cols; 586 587 if (has_rows && has_cols) { 588 write_token(w, vp9_partition_tree, probs, &vp9_partition_encodings[p]); 589 } else if (!has_rows && has_cols) { 590 assert(p == PARTITION_SPLIT || p == PARTITION_HORZ); 591 vp9_write(w, p == PARTITION_SPLIT, probs[1]); 592 } else if (has_rows && !has_cols) { 593 assert(p == PARTITION_SPLIT || p == PARTITION_VERT); 594 vp9_write(w, p == PARTITION_SPLIT, probs[2]); 595 } else { 596 assert(p == PARTITION_SPLIT); 597 } 598 } 599 600 static void write_modes_sb(VP9_COMP *cpi, const TileInfo *const tile, 601 vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end, 602 int mi_row, int mi_col, BLOCK_SIZE bsize) { 603 VP9_COMMON *const cm = &cpi->common; 604 const int bsl = b_width_log2(bsize); 605 const int bs = (1 << bsl) / 4; 606 PARTITION_TYPE partition; 607 BLOCK_SIZE subsize; 608 MODE_INFO *m = cm->mi_grid_visible[mi_row * cm->mode_info_stride + mi_col]; 609 610 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) 611 return; 612 613 partition = partition_lookup[bsl][m->mbmi.sb_type]; 614 write_partition(cpi, bs, mi_row, mi_col, partition, bsize, w); 615 subsize = get_subsize(bsize, partition); 616 if (subsize < BLOCK_8X8) { 617 write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col); 618 } else { 619 switch (partition) { 620 case PARTITION_NONE: 621 write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col); 622 break; 623 case PARTITION_HORZ: 624 write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col); 625 if (mi_row + bs < cm->mi_rows) 626 write_modes_b(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col); 627 break; 628 case PARTITION_VERT: 629 write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col); 630 if (mi_col + bs < cm->mi_cols) 631 write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs); 632 break; 633 case PARTITION_SPLIT: 634 write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, subsize); 635 write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs, 636 subsize); 637 write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col, 638 subsize); 639 write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col + bs, 640 subsize); 641 break; 642 default: 643 assert(0); 644 } 645 } 646 647 // update partition context 648 if (bsize >= BLOCK_8X8 && 649 (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT)) 650 update_partition_context(cpi->above_seg_context, cpi->left_seg_context, 651 mi_row, mi_col, subsize, bsize); 652 } 653 654 static void write_modes(VP9_COMP *cpi, const TileInfo *const tile, 655 vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end) { 656 int mi_row, mi_col; 657 658 for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end; 659 mi_row += MI_BLOCK_SIZE) { 660 vp9_zero(cpi->left_seg_context); 661 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; 662 mi_col += MI_BLOCK_SIZE) 663 write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, BLOCK_64X64); 664 } 665 } 666 667 static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size) { 668 vp9_coeff_probs_model *coef_probs = cpi->frame_coef_probs[tx_size]; 669 vp9_coeff_count *coef_counts = cpi->coef_counts[tx_size]; 670 unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] = 671 cpi->common.counts.eob_branch[tx_size]; 672 vp9_coeff_stats *coef_branch_ct = cpi->frame_branch_ct[tx_size]; 673 int i, j, k, l, m; 674 675 for (i = 0; i < BLOCK_TYPES; ++i) { 676 for (j = 0; j < REF_TYPES; ++j) { 677 for (k = 0; k < COEF_BANDS; ++k) { 678 for (l = 0; l < PREV_COEF_CONTEXTS; ++l) { 679 if (l >= 3 && k == 0) 680 continue; 681 vp9_tree_probs_from_distribution(vp9_coef_tree, 682 coef_branch_ct[i][j][k][l], 683 coef_counts[i][j][k][l]); 684 coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] - 685 coef_branch_ct[i][j][k][l][0][0]; 686 for (m = 0; m < UNCONSTRAINED_NODES; ++m) 687 coef_probs[i][j][k][l][m] = get_binary_prob( 688 coef_branch_ct[i][j][k][l][m][0], 689 coef_branch_ct[i][j][k][l][m][1]); 690 #ifdef ENTROPY_STATS 691 if (!cpi->dummy_packing) { 692 int t; 693 for (t = 0; t < MAX_ENTROPY_TOKENS; ++t) 694 context_counters[tx_size][i][j][k][l][t] += 695 coef_counts[i][j][k][l][t]; 696 context_counters[tx_size][i][j][k][l][MAX_ENTROPY_TOKENS] += 697 eob_branch_ct[i][j][k][l]; 698 } 699 #endif 700 } 701 } 702 } 703 } 704 } 705 706 static void build_coeff_contexts(VP9_COMP *cpi) { 707 TX_SIZE t; 708 for (t = TX_4X4; t <= TX_32X32; t++) 709 build_tree_distribution(cpi, t); 710 } 711 712 static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi, 713 TX_SIZE tx_size) { 714 vp9_coeff_probs_model *new_frame_coef_probs = cpi->frame_coef_probs[tx_size]; 715 vp9_coeff_probs_model *old_frame_coef_probs = 716 cpi->common.fc.coef_probs[tx_size]; 717 vp9_coeff_stats *frame_branch_ct = cpi->frame_branch_ct[tx_size]; 718 const vp9_prob upd = DIFF_UPDATE_PROB; 719 const int entropy_nodes_update = UNCONSTRAINED_NODES; 720 int i, j, k, l, t; 721 switch (cpi->sf.use_fast_coef_updates) { 722 case 0: { 723 /* dry run to see if there is any udpate at all needed */ 724 int savings = 0; 725 int update[2] = {0, 0}; 726 for (i = 0; i < BLOCK_TYPES; ++i) { 727 for (j = 0; j < REF_TYPES; ++j) { 728 for (k = 0; k < COEF_BANDS; ++k) { 729 for (l = 0; l < PREV_COEF_CONTEXTS; ++l) { 730 for (t = 0; t < entropy_nodes_update; ++t) { 731 vp9_prob newp = new_frame_coef_probs[i][j][k][l][t]; 732 const vp9_prob oldp = old_frame_coef_probs[i][j][k][l][t]; 733 int s; 734 int u = 0; 735 736 if (l >= 3 && k == 0) 737 continue; 738 if (t == PIVOT_NODE) 739 s = vp9_prob_diff_update_savings_search_model( 740 frame_branch_ct[i][j][k][l][0], 741 old_frame_coef_probs[i][j][k][l], &newp, upd, i, j); 742 else 743 s = vp9_prob_diff_update_savings_search( 744 frame_branch_ct[i][j][k][l][t], oldp, &newp, upd); 745 if (s > 0 && newp != oldp) 746 u = 1; 747 if (u) 748 savings += s - (int)(vp9_cost_zero(upd)); 749 else 750 savings -= (int)(vp9_cost_zero(upd)); 751 update[u]++; 752 } 753 } 754 } 755 } 756 } 757 758 // printf("Update %d %d, savings %d\n", update[0], update[1], savings); 759 /* Is coef updated at all */ 760 if (update[1] == 0 || savings < 0) { 761 vp9_write_bit(bc, 0); 762 return; 763 } 764 vp9_write_bit(bc, 1); 765 for (i = 0; i < BLOCK_TYPES; ++i) { 766 for (j = 0; j < REF_TYPES; ++j) { 767 for (k = 0; k < COEF_BANDS; ++k) { 768 for (l = 0; l < PREV_COEF_CONTEXTS; ++l) { 769 // calc probs and branch cts for this frame only 770 for (t = 0; t < entropy_nodes_update; ++t) { 771 vp9_prob newp = new_frame_coef_probs[i][j][k][l][t]; 772 vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t; 773 const vp9_prob upd = DIFF_UPDATE_PROB; 774 int s; 775 int u = 0; 776 if (l >= 3 && k == 0) 777 continue; 778 if (t == PIVOT_NODE) 779 s = vp9_prob_diff_update_savings_search_model( 780 frame_branch_ct[i][j][k][l][0], 781 old_frame_coef_probs[i][j][k][l], &newp, upd, i, j); 782 else 783 s = vp9_prob_diff_update_savings_search( 784 frame_branch_ct[i][j][k][l][t], 785 *oldp, &newp, upd); 786 if (s > 0 && newp != *oldp) 787 u = 1; 788 vp9_write(bc, u, upd); 789 #ifdef ENTROPY_STATS 790 if (!cpi->dummy_packing) 791 ++tree_update_hist[tx_size][i][j][k][l][t][u]; 792 #endif 793 if (u) { 794 /* send/use new probability */ 795 vp9_write_prob_diff_update(bc, newp, *oldp); 796 *oldp = newp; 797 } 798 } 799 } 800 } 801 } 802 } 803 return; 804 } 805 806 case 1: 807 case 2: { 808 const int prev_coef_contexts_to_update = 809 (cpi->sf.use_fast_coef_updates == 2 ? 810 PREV_COEF_CONTEXTS >> 1 : PREV_COEF_CONTEXTS); 811 const int coef_band_to_update = 812 (cpi->sf.use_fast_coef_updates == 2 ? 813 COEF_BANDS >> 1 : COEF_BANDS); 814 int updates = 0; 815 int noupdates_before_first = 0; 816 for (i = 0; i < BLOCK_TYPES; ++i) { 817 for (j = 0; j < REF_TYPES; ++j) { 818 for (k = 0; k < COEF_BANDS; ++k) { 819 for (l = 0; l < PREV_COEF_CONTEXTS; ++l) { 820 // calc probs and branch cts for this frame only 821 for (t = 0; t < entropy_nodes_update; ++t) { 822 vp9_prob newp = new_frame_coef_probs[i][j][k][l][t]; 823 vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t; 824 int s; 825 int u = 0; 826 if (l >= 3 && k == 0) 827 continue; 828 if (l >= prev_coef_contexts_to_update || 829 k >= coef_band_to_update) { 830 u = 0; 831 } else { 832 if (t == PIVOT_NODE) 833 s = vp9_prob_diff_update_savings_search_model( 834 frame_branch_ct[i][j][k][l][0], 835 old_frame_coef_probs[i][j][k][l], &newp, upd, i, j); 836 else 837 s = vp9_prob_diff_update_savings_search( 838 frame_branch_ct[i][j][k][l][t], 839 *oldp, &newp, upd); 840 if (s > 0 && newp != *oldp) 841 u = 1; 842 } 843 updates += u; 844 if (u == 0 && updates == 0) { 845 noupdates_before_first++; 846 #ifdef ENTROPY_STATS 847 if (!cpi->dummy_packing) 848 ++tree_update_hist[tx_size][i][j][k][l][t][u]; 849 #endif 850 continue; 851 } 852 if (u == 1 && updates == 1) { 853 int v; 854 // first update 855 vp9_write_bit(bc, 1); 856 for (v = 0; v < noupdates_before_first; ++v) 857 vp9_write(bc, 0, upd); 858 } 859 vp9_write(bc, u, upd); 860 #ifdef ENTROPY_STATS 861 if (!cpi->dummy_packing) 862 ++tree_update_hist[tx_size][i][j][k][l][t][u]; 863 #endif 864 if (u) { 865 /* send/use new probability */ 866 vp9_write_prob_diff_update(bc, newp, *oldp); 867 *oldp = newp; 868 } 869 } 870 } 871 } 872 } 873 } 874 if (updates == 0) { 875 vp9_write_bit(bc, 0); // no updates 876 } 877 return; 878 } 879 880 default: 881 assert(0); 882 } 883 } 884 885 static void update_coef_probs(VP9_COMP* const cpi, vp9_writer* const bc) { 886 const TX_MODE tx_mode = cpi->common.tx_mode; 887 888 vp9_clear_system_state(); 889 890 // Build the cofficient contexts based on counts collected in encode loop 891 build_coeff_contexts(cpi); 892 893 update_coef_probs_common(bc, cpi, TX_4X4); 894 895 // do not do this if not even allowed 896 if (tx_mode > ONLY_4X4) 897 update_coef_probs_common(bc, cpi, TX_8X8); 898 899 if (tx_mode > ALLOW_8X8) 900 update_coef_probs_common(bc, cpi, TX_16X16); 901 902 if (tx_mode > ALLOW_16X16) 903 update_coef_probs_common(bc, cpi, TX_32X32); 904 } 905 906 static void encode_loopfilter(struct loopfilter *lf, 907 struct vp9_write_bit_buffer *wb) { 908 int i; 909 910 // Encode the loop filter level and type 911 vp9_wb_write_literal(wb, lf->filter_level, 6); 912 vp9_wb_write_literal(wb, lf->sharpness_level, 3); 913 914 // Write out loop filter deltas applied at the MB level based on mode or 915 // ref frame (if they are enabled). 916 vp9_wb_write_bit(wb, lf->mode_ref_delta_enabled); 917 918 if (lf->mode_ref_delta_enabled) { 919 // Do the deltas need to be updated 920 vp9_wb_write_bit(wb, lf->mode_ref_delta_update); 921 if (lf->mode_ref_delta_update) { 922 // Send update 923 for (i = 0; i < MAX_REF_LF_DELTAS; i++) { 924 const int delta = lf->ref_deltas[i]; 925 926 // Frame level data 927 if (delta != lf->last_ref_deltas[i]) { 928 lf->last_ref_deltas[i] = delta; 929 vp9_wb_write_bit(wb, 1); 930 931 assert(delta != 0); 932 vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6); 933 vp9_wb_write_bit(wb, delta < 0); 934 } else { 935 vp9_wb_write_bit(wb, 0); 936 } 937 } 938 939 // Send update 940 for (i = 0; i < MAX_MODE_LF_DELTAS; i++) { 941 const int delta = lf->mode_deltas[i]; 942 if (delta != lf->last_mode_deltas[i]) { 943 lf->last_mode_deltas[i] = delta; 944 vp9_wb_write_bit(wb, 1); 945 946 assert(delta != 0); 947 vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6); 948 vp9_wb_write_bit(wb, delta < 0); 949 } else { 950 vp9_wb_write_bit(wb, 0); 951 } 952 } 953 } 954 } 955 } 956 957 static void write_delta_q(struct vp9_write_bit_buffer *wb, int delta_q) { 958 if (delta_q != 0) { 959 vp9_wb_write_bit(wb, 1); 960 vp9_wb_write_literal(wb, abs(delta_q), 4); 961 vp9_wb_write_bit(wb, delta_q < 0); 962 } else { 963 vp9_wb_write_bit(wb, 0); 964 } 965 } 966 967 static void encode_quantization(VP9_COMMON *cm, 968 struct vp9_write_bit_buffer *wb) { 969 vp9_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS); 970 write_delta_q(wb, cm->y_dc_delta_q); 971 write_delta_q(wb, cm->uv_dc_delta_q); 972 write_delta_q(wb, cm->uv_ac_delta_q); 973 } 974 975 976 static void encode_segmentation(VP9_COMP *cpi, 977 struct vp9_write_bit_buffer *wb) { 978 int i, j; 979 980 struct segmentation *seg = &cpi->common.seg; 981 982 vp9_wb_write_bit(wb, seg->enabled); 983 if (!seg->enabled) 984 return; 985 986 // Segmentation map 987 vp9_wb_write_bit(wb, seg->update_map); 988 if (seg->update_map) { 989 // Select the coding strategy (temporal or spatial) 990 vp9_choose_segmap_coding_method(cpi); 991 // Write out probabilities used to decode unpredicted macro-block segments 992 for (i = 0; i < SEG_TREE_PROBS; i++) { 993 const int prob = seg->tree_probs[i]; 994 const int update = prob != MAX_PROB; 995 vp9_wb_write_bit(wb, update); 996 if (update) 997 vp9_wb_write_literal(wb, prob, 8); 998 } 999 1000 // Write out the chosen coding method. 1001 vp9_wb_write_bit(wb, seg->temporal_update); 1002 if (seg->temporal_update) { 1003 for (i = 0; i < PREDICTION_PROBS; i++) { 1004 const int prob = seg->pred_probs[i]; 1005 const int update = prob != MAX_PROB; 1006 vp9_wb_write_bit(wb, update); 1007 if (update) 1008 vp9_wb_write_literal(wb, prob, 8); 1009 } 1010 } 1011 } 1012 1013 // Segmentation data 1014 vp9_wb_write_bit(wb, seg->update_data); 1015 if (seg->update_data) { 1016 vp9_wb_write_bit(wb, seg->abs_delta); 1017 1018 for (i = 0; i < MAX_SEGMENTS; i++) { 1019 for (j = 0; j < SEG_LVL_MAX; j++) { 1020 const int active = vp9_segfeature_active(seg, i, j); 1021 vp9_wb_write_bit(wb, active); 1022 if (active) { 1023 const int data = vp9_get_segdata(seg, i, j); 1024 const int data_max = vp9_seg_feature_data_max(j); 1025 1026 if (vp9_is_segfeature_signed(j)) { 1027 vp9_encode_unsigned_max(wb, abs(data), data_max); 1028 vp9_wb_write_bit(wb, data < 0); 1029 } else { 1030 vp9_encode_unsigned_max(wb, data, data_max); 1031 } 1032 } 1033 } 1034 } 1035 } 1036 } 1037 1038 1039 static void encode_txfm_probs(VP9_COMP *cpi, vp9_writer *w) { 1040 VP9_COMMON *const cm = &cpi->common; 1041 1042 // Mode 1043 vp9_write_literal(w, MIN(cm->tx_mode, ALLOW_32X32), 2); 1044 if (cm->tx_mode >= ALLOW_32X32) 1045 vp9_write_bit(w, cm->tx_mode == TX_MODE_SELECT); 1046 1047 // Probabilities 1048 if (cm->tx_mode == TX_MODE_SELECT) { 1049 int i, j; 1050 unsigned int ct_8x8p[TX_SIZES - 3][2]; 1051 unsigned int ct_16x16p[TX_SIZES - 2][2]; 1052 unsigned int ct_32x32p[TX_SIZES - 1][2]; 1053 1054 1055 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { 1056 tx_counts_to_branch_counts_8x8(cm->counts.tx.p8x8[i], ct_8x8p); 1057 for (j = 0; j < TX_SIZES - 3; j++) 1058 vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p8x8[i][j], ct_8x8p[j]); 1059 } 1060 1061 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { 1062 tx_counts_to_branch_counts_16x16(cm->counts.tx.p16x16[i], ct_16x16p); 1063 for (j = 0; j < TX_SIZES - 2; j++) 1064 vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p16x16[i][j], 1065 ct_16x16p[j]); 1066 } 1067 1068 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { 1069 tx_counts_to_branch_counts_32x32(cm->counts.tx.p32x32[i], ct_32x32p); 1070 for (j = 0; j < TX_SIZES - 1; j++) 1071 vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p32x32[i][j], 1072 ct_32x32p[j]); 1073 } 1074 #ifdef MODE_STATS 1075 if (!cpi->dummy_packing) 1076 update_tx_count_stats(cm); 1077 #endif 1078 } 1079 } 1080 1081 static void write_interp_filter_type(INTERPOLATION_TYPE type, 1082 struct vp9_write_bit_buffer *wb) { 1083 const int type_to_literal[] = { 1, 0, 2, 3 }; 1084 1085 vp9_wb_write_bit(wb, type == SWITCHABLE); 1086 if (type != SWITCHABLE) 1087 vp9_wb_write_literal(wb, type_to_literal[type], 2); 1088 } 1089 1090 static void fix_mcomp_filter_type(VP9_COMP *cpi) { 1091 VP9_COMMON *const cm = &cpi->common; 1092 1093 if (cm->mcomp_filter_type == SWITCHABLE) { 1094 // Check to see if only one of the filters is actually used 1095 int count[SWITCHABLE_FILTERS]; 1096 int i, j, c = 0; 1097 for (i = 0; i < SWITCHABLE_FILTERS; ++i) { 1098 count[i] = 0; 1099 for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j) 1100 count[i] += cm->counts.switchable_interp[j][i]; 1101 c += (count[i] > 0); 1102 } 1103 if (c == 1) { 1104 // Only one filter is used. So set the filter at frame level 1105 for (i = 0; i < SWITCHABLE_FILTERS; ++i) { 1106 if (count[i]) { 1107 cm->mcomp_filter_type = i; 1108 break; 1109 } 1110 } 1111 } 1112 } 1113 } 1114 1115 static void write_tile_info(VP9_COMMON *cm, struct vp9_write_bit_buffer *wb) { 1116 int min_log2_tile_cols, max_log2_tile_cols, ones; 1117 vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols); 1118 1119 // columns 1120 ones = cm->log2_tile_cols - min_log2_tile_cols; 1121 while (ones--) 1122 vp9_wb_write_bit(wb, 1); 1123 1124 if (cm->log2_tile_cols < max_log2_tile_cols) 1125 vp9_wb_write_bit(wb, 0); 1126 1127 // rows 1128 vp9_wb_write_bit(wb, cm->log2_tile_rows != 0); 1129 if (cm->log2_tile_rows != 0) 1130 vp9_wb_write_bit(wb, cm->log2_tile_rows != 1); 1131 } 1132 1133 static int get_refresh_mask(VP9_COMP *cpi) { 1134 // Should the GF or ARF be updated using the transmitted frame or buffer 1135 #if CONFIG_MULTIPLE_ARF 1136 if (!cpi->multi_arf_enabled && cpi->refresh_golden_frame && 1137 !cpi->refresh_alt_ref_frame) { 1138 #else 1139 if (cpi->refresh_golden_frame && !cpi->refresh_alt_ref_frame && 1140 !cpi->use_svc) { 1141 #endif 1142 // Preserve the previously existing golden frame and update the frame in 1143 // the alt ref slot instead. This is highly specific to the use of 1144 // alt-ref as a forward reference, and this needs to be generalized as 1145 // other uses are implemented (like RTC/temporal scaling) 1146 // 1147 // gld_fb_idx and alt_fb_idx need to be swapped for future frames, but 1148 // that happens in vp9_onyx_if.c:update_reference_frames() so that it can 1149 // be done outside of the recode loop. 1150 return (cpi->refresh_last_frame << cpi->lst_fb_idx) | 1151 (cpi->refresh_golden_frame << cpi->alt_fb_idx); 1152 } else { 1153 int arf_idx = cpi->alt_fb_idx; 1154 #if CONFIG_MULTIPLE_ARF 1155 // Determine which ARF buffer to use to encode this ARF frame. 1156 if (cpi->multi_arf_enabled) { 1157 int sn = cpi->sequence_number; 1158 arf_idx = (cpi->frame_coding_order[sn] < 0) ? 1159 cpi->arf_buffer_idx[sn + 1] : 1160 cpi->arf_buffer_idx[sn]; 1161 } 1162 #endif 1163 return (cpi->refresh_last_frame << cpi->lst_fb_idx) | 1164 (cpi->refresh_golden_frame << cpi->gld_fb_idx) | 1165 (cpi->refresh_alt_ref_frame << arf_idx); 1166 } 1167 } 1168 1169 static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) { 1170 VP9_COMMON *const cm = &cpi->common; 1171 vp9_writer residual_bc; 1172 1173 int tile_row, tile_col; 1174 TOKENEXTRA *tok[4][1 << 6], *tok_end; 1175 size_t total_size = 0; 1176 const int tile_cols = 1 << cm->log2_tile_cols; 1177 const int tile_rows = 1 << cm->log2_tile_rows; 1178 1179 vpx_memset(cpi->above_seg_context, 0, sizeof(*cpi->above_seg_context) * 1180 mi_cols_aligned_to_sb(cm->mi_cols)); 1181 1182 tok[0][0] = cpi->tok; 1183 for (tile_row = 0; tile_row < tile_rows; tile_row++) { 1184 if (tile_row) 1185 tok[tile_row][0] = tok[tile_row - 1][tile_cols - 1] + 1186 cpi->tok_count[tile_row - 1][tile_cols - 1]; 1187 1188 for (tile_col = 1; tile_col < tile_cols; tile_col++) 1189 tok[tile_row][tile_col] = tok[tile_row][tile_col - 1] + 1190 cpi->tok_count[tile_row][tile_col - 1]; 1191 } 1192 1193 for (tile_row = 0; tile_row < tile_rows; tile_row++) { 1194 for (tile_col = 0; tile_col < tile_cols; tile_col++) { 1195 TileInfo tile; 1196 1197 vp9_tile_init(&tile, cm, tile_row, tile_col); 1198 tok_end = tok[tile_row][tile_col] + cpi->tok_count[tile_row][tile_col]; 1199 1200 if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) 1201 vp9_start_encode(&residual_bc, data_ptr + total_size + 4); 1202 else 1203 vp9_start_encode(&residual_bc, data_ptr + total_size); 1204 1205 write_modes(cpi, &tile, &residual_bc, &tok[tile_row][tile_col], tok_end); 1206 assert(tok[tile_row][tile_col] == tok_end); 1207 vp9_stop_encode(&residual_bc); 1208 if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) { 1209 // size of this tile 1210 write_be32(data_ptr + total_size, residual_bc.pos); 1211 total_size += 4; 1212 } 1213 1214 total_size += residual_bc.pos; 1215 } 1216 } 1217 1218 return total_size; 1219 } 1220 1221 static void write_display_size(VP9_COMP *cpi, struct vp9_write_bit_buffer *wb) { 1222 VP9_COMMON *const cm = &cpi->common; 1223 1224 const int scaling_active = cm->width != cm->display_width || 1225 cm->height != cm->display_height; 1226 vp9_wb_write_bit(wb, scaling_active); 1227 if (scaling_active) { 1228 vp9_wb_write_literal(wb, cm->display_width - 1, 16); 1229 vp9_wb_write_literal(wb, cm->display_height - 1, 16); 1230 } 1231 } 1232 1233 static void write_frame_size(VP9_COMP *cpi, 1234 struct vp9_write_bit_buffer *wb) { 1235 VP9_COMMON *const cm = &cpi->common; 1236 vp9_wb_write_literal(wb, cm->width - 1, 16); 1237 vp9_wb_write_literal(wb, cm->height - 1, 16); 1238 1239 write_display_size(cpi, wb); 1240 } 1241 1242 static void write_frame_size_with_refs(VP9_COMP *cpi, 1243 struct vp9_write_bit_buffer *wb) { 1244 VP9_COMMON *const cm = &cpi->common; 1245 int refs[ALLOWED_REFS_PER_FRAME] = {cpi->lst_fb_idx, cpi->gld_fb_idx, 1246 cpi->alt_fb_idx}; 1247 int i, found = 0; 1248 1249 for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) { 1250 YV12_BUFFER_CONFIG *cfg = &cm->yv12_fb[cm->ref_frame_map[refs[i]]]; 1251 found = cm->width == cfg->y_crop_width && 1252 cm->height == cfg->y_crop_height; 1253 1254 // TODO(ivan): This prevents a bug while more than 3 buffers are used. Do it 1255 // in a better way. 1256 if (cpi->use_svc) { 1257 found = 0; 1258 } 1259 vp9_wb_write_bit(wb, found); 1260 if (found) { 1261 break; 1262 } 1263 } 1264 1265 if (!found) { 1266 vp9_wb_write_literal(wb, cm->width - 1, 16); 1267 vp9_wb_write_literal(wb, cm->height - 1, 16); 1268 } 1269 1270 write_display_size(cpi, wb); 1271 } 1272 1273 static void write_sync_code(struct vp9_write_bit_buffer *wb) { 1274 vp9_wb_write_literal(wb, VP9_SYNC_CODE_0, 8); 1275 vp9_wb_write_literal(wb, VP9_SYNC_CODE_1, 8); 1276 vp9_wb_write_literal(wb, VP9_SYNC_CODE_2, 8); 1277 } 1278 1279 static void write_uncompressed_header(VP9_COMP *cpi, 1280 struct vp9_write_bit_buffer *wb) { 1281 VP9_COMMON *const cm = &cpi->common; 1282 1283 vp9_wb_write_literal(wb, VP9_FRAME_MARKER, 2); 1284 1285 // bitstream version. 1286 // 00 - profile 0. 4:2:0 only 1287 // 10 - profile 1. adds 4:4:4, 4:2:2, alpha 1288 vp9_wb_write_bit(wb, cm->version); 1289 vp9_wb_write_bit(wb, 0); 1290 1291 vp9_wb_write_bit(wb, 0); 1292 vp9_wb_write_bit(wb, cm->frame_type); 1293 vp9_wb_write_bit(wb, cm->show_frame); 1294 vp9_wb_write_bit(wb, cm->error_resilient_mode); 1295 1296 if (cm->frame_type == KEY_FRAME) { 1297 const COLOR_SPACE cs = UNKNOWN; 1298 write_sync_code(wb); 1299 vp9_wb_write_literal(wb, cs, 3); 1300 if (cs != SRGB) { 1301 vp9_wb_write_bit(wb, 0); // 0: [16, 235] (i.e. xvYCC), 1: [0, 255] 1302 if (cm->version == 1) { 1303 vp9_wb_write_bit(wb, cm->subsampling_x); 1304 vp9_wb_write_bit(wb, cm->subsampling_y); 1305 vp9_wb_write_bit(wb, 0); // has extra plane 1306 } 1307 } else { 1308 assert(cm->version == 1); 1309 vp9_wb_write_bit(wb, 0); // has extra plane 1310 } 1311 1312 write_frame_size(cpi, wb); 1313 } else { 1314 const int refs[ALLOWED_REFS_PER_FRAME] = {cpi->lst_fb_idx, cpi->gld_fb_idx, 1315 cpi->alt_fb_idx}; 1316 if (!cm->show_frame) 1317 vp9_wb_write_bit(wb, cm->intra_only); 1318 1319 if (!cm->error_resilient_mode) 1320 vp9_wb_write_literal(wb, cm->reset_frame_context, 2); 1321 1322 if (cm->intra_only) { 1323 write_sync_code(wb); 1324 1325 vp9_wb_write_literal(wb, get_refresh_mask(cpi), NUM_REF_FRAMES); 1326 write_frame_size(cpi, wb); 1327 } else { 1328 int i; 1329 vp9_wb_write_literal(wb, get_refresh_mask(cpi), NUM_REF_FRAMES); 1330 for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) { 1331 vp9_wb_write_literal(wb, refs[i], NUM_REF_FRAMES_LOG2); 1332 vp9_wb_write_bit(wb, cm->ref_frame_sign_bias[LAST_FRAME + i]); 1333 } 1334 1335 write_frame_size_with_refs(cpi, wb); 1336 1337 vp9_wb_write_bit(wb, cm->allow_high_precision_mv); 1338 1339 fix_mcomp_filter_type(cpi); 1340 write_interp_filter_type(cm->mcomp_filter_type, wb); 1341 } 1342 } 1343 1344 if (!cm->error_resilient_mode) { 1345 vp9_wb_write_bit(wb, cm->refresh_frame_context); 1346 vp9_wb_write_bit(wb, cm->frame_parallel_decoding_mode); 1347 } 1348 1349 vp9_wb_write_literal(wb, cm->frame_context_idx, NUM_FRAME_CONTEXTS_LOG2); 1350 1351 encode_loopfilter(&cm->lf, wb); 1352 encode_quantization(cm, wb); 1353 encode_segmentation(cpi, wb); 1354 1355 write_tile_info(cm, wb); 1356 } 1357 1358 static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) { 1359 VP9_COMMON *const cm = &cpi->common; 1360 MACROBLOCKD *const xd = &cpi->mb.e_mbd; 1361 FRAME_CONTEXT *const fc = &cm->fc; 1362 vp9_writer header_bc; 1363 1364 vp9_start_encode(&header_bc, data); 1365 1366 if (xd->lossless) 1367 cm->tx_mode = ONLY_4X4; 1368 else 1369 encode_txfm_probs(cpi, &header_bc); 1370 1371 update_coef_probs(cpi, &header_bc); 1372 1373 #ifdef ENTROPY_STATS 1374 active_section = 2; 1375 #endif 1376 1377 vp9_update_skip_probs(cpi, &header_bc); 1378 1379 if (!frame_is_intra_only(cm)) { 1380 int i; 1381 #ifdef ENTROPY_STATS 1382 active_section = 1; 1383 #endif 1384 1385 update_inter_mode_probs(cm, &header_bc); 1386 vp9_zero(cm->counts.inter_mode); 1387 1388 if (cm->mcomp_filter_type == SWITCHABLE) 1389 update_switchable_interp_probs(cpi, &header_bc); 1390 1391 for (i = 0; i < INTRA_INTER_CONTEXTS; i++) 1392 vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i], 1393 cpi->intra_inter_count[i]); 1394 1395 if (cm->allow_comp_inter_inter) { 1396 const int comp_pred_mode = cpi->common.comp_pred_mode; 1397 const int use_compound_pred = comp_pred_mode != SINGLE_PREDICTION_ONLY; 1398 const int use_hybrid_pred = comp_pred_mode == HYBRID_PREDICTION; 1399 1400 vp9_write_bit(&header_bc, use_compound_pred); 1401 if (use_compound_pred) { 1402 vp9_write_bit(&header_bc, use_hybrid_pred); 1403 if (use_hybrid_pred) 1404 for (i = 0; i < COMP_INTER_CONTEXTS; i++) 1405 vp9_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i], 1406 cpi->comp_inter_count[i]); 1407 } 1408 } 1409 1410 if (cm->comp_pred_mode != COMP_PREDICTION_ONLY) { 1411 for (i = 0; i < REF_CONTEXTS; i++) { 1412 vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0], 1413 cpi->single_ref_count[i][0]); 1414 vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1], 1415 cpi->single_ref_count[i][1]); 1416 } 1417 } 1418 1419 if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY) 1420 for (i = 0; i < REF_CONTEXTS; i++) 1421 vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i], 1422 cpi->comp_ref_count[i]); 1423 1424 update_mbintra_mode_probs(cpi, &header_bc); 1425 1426 for (i = 0; i < PARTITION_CONTEXTS; ++i) { 1427 unsigned int bct[PARTITION_TYPES - 1][2]; 1428 update_mode(&header_bc, PARTITION_TYPES, vp9_partition_tree, 1429 fc->partition_prob[i], bct, 1430 (unsigned int *)cpi->partition_count[i]); 1431 } 1432 1433 vp9_write_nmv_probs(cpi, cm->allow_high_precision_mv, &header_bc); 1434 } 1435 1436 vp9_stop_encode(&header_bc); 1437 assert(header_bc.pos <= 0xffff); 1438 1439 return header_bc.pos; 1440 } 1441 1442 void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) { 1443 uint8_t *data = dest; 1444 size_t first_part_size; 1445 struct vp9_write_bit_buffer wb = {data, 0}; 1446 struct vp9_write_bit_buffer saved_wb; 1447 1448 write_uncompressed_header(cpi, &wb); 1449 saved_wb = wb; 1450 vp9_wb_write_literal(&wb, 0, 16); // don't know in advance first part. size 1451 1452 data += vp9_rb_bytes_written(&wb); 1453 1454 vp9_compute_update_table(); 1455 1456 #ifdef ENTROPY_STATS 1457 if (cm->frame_type == INTER_FRAME) 1458 active_section = 0; 1459 else 1460 active_section = 7; 1461 #endif 1462 1463 vp9_clear_system_state(); // __asm emms; 1464 1465 first_part_size = write_compressed_header(cpi, data); 1466 data += first_part_size; 1467 vp9_wb_write_literal(&saved_wb, first_part_size, 16); 1468 1469 data += encode_tiles(cpi, data); 1470 1471 *size = data - dest; 1472 } 1473 1474 #ifdef ENTROPY_STATS 1475 static void print_tree_update_for_type(FILE *f, 1476 vp9_coeff_stats *tree_update_hist, 1477 int block_types, const char *header) { 1478 int i, j, k, l, m; 1479 1480 fprintf(f, "const vp9_coeff_prob %s = {\n", header); 1481 for (i = 0; i < block_types; i++) { 1482 fprintf(f, " { \n"); 1483 for (j = 0; j < REF_TYPES; j++) { 1484 fprintf(f, " { \n"); 1485 for (k = 0; k < COEF_BANDS; k++) { 1486 fprintf(f, " {\n"); 1487 for (l = 0; l < PREV_COEF_CONTEXTS; l++) { 1488 fprintf(f, " {"); 1489 for (m = 0; m < ENTROPY_NODES; m++) { 1490 fprintf(f, "%3d, ", 1491 get_binary_prob(tree_update_hist[i][j][k][l][m][0], 1492 tree_update_hist[i][j][k][l][m][1])); 1493 } 1494 fprintf(f, "},\n"); 1495 } 1496 fprintf(f, "},\n"); 1497 } 1498 fprintf(f, " },\n"); 1499 } 1500 fprintf(f, " },\n"); 1501 } 1502 fprintf(f, "};\n"); 1503 } 1504 1505 void print_tree_update_probs() { 1506 FILE *f = fopen("coefupdprob.h", "w"); 1507 fprintf(f, "\n/* Update probabilities for token entropy tree. */\n\n"); 1508 1509 print_tree_update_for_type(f, tree_update_hist[TX_4X4], BLOCK_TYPES, 1510 "vp9_coef_update_probs_4x4[BLOCK_TYPES]"); 1511 print_tree_update_for_type(f, tree_update_hist[TX_8X8], BLOCK_TYPES, 1512 "vp9_coef_update_probs_8x8[BLOCK_TYPES]"); 1513 print_tree_update_for_type(f, tree_update_hist[TX_16X16], BLOCK_TYPES, 1514 "vp9_coef_update_probs_16x16[BLOCK_TYPES]"); 1515 print_tree_update_for_type(f, tree_update_hist[TX_32X32], BLOCK_TYPES, 1516 "vp9_coef_update_probs_32x32[BLOCK_TYPES]"); 1517 1518 fclose(f); 1519 f = fopen("treeupdate.bin", "wb"); 1520 fwrite(tree_update_hist, sizeof(tree_update_hist), 1, f); 1521 fclose(f); 1522 } 1523 #endif 1524