/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "decodemv.h"
#include "treereader.h"
#include "vp8/common/entropymv.h"
#include "vp8/common/entropymode.h"
#include "onyxd_int.h"
#include "vp8/common/findnearmv.h"

static B_PREDICTION_MODE read_bmode(vp8_reader *bc, const vp8_prob *p) {
  const int i = vp8_treed_read(bc, vp8_bmode_tree, p);

  return (B_PREDICTION_MODE)i;
}

static MB_PREDICTION_MODE read_ymode(vp8_reader *bc, const vp8_prob *p) {
  const int i = vp8_treed_read(bc, vp8_ymode_tree, p);

  return (MB_PREDICTION_MODE)i;
}

static MB_PREDICTION_MODE read_kf_ymode(vp8_reader *bc, const vp8_prob *p) {
  const int i = vp8_treed_read(bc, vp8_kf_ymode_tree, p);

  return (MB_PREDICTION_MODE)i;
}

static MB_PREDICTION_MODE read_uv_mode(vp8_reader *bc, const vp8_prob *p) {
  const int i = vp8_treed_read(bc, vp8_uv_mode_tree, p);

  return (MB_PREDICTION_MODE)i;
}

static void read_kf_modes(VP8D_COMP *pbi, MODE_INFO *mi) {
  vp8_reader *const bc = &pbi->mbc[8];
  const int mis = pbi->common.mode_info_stride;

  mi->mbmi.ref_frame = INTRA_FRAME;
  mi->mbmi.mode = read_kf_ymode(bc, vp8_kf_ymode_prob);

  if (mi->mbmi.mode == B_PRED) {
    int i = 0;
    mi->mbmi.is_4x4 = 1;

    do {
      const B_PREDICTION_MODE A = above_block_mode(mi, i, mis);
      const B_PREDICTION_MODE L = left_block_mode(mi, i);

      mi->bmi[i].as_mode = read_bmode(bc, vp8_kf_bmode_prob[A][L]);
    } while (++i < 16);
  }

  mi->mbmi.uv_mode = read_uv_mode(bc, vp8_kf_uv_mode_prob);
}

static int read_mvcomponent(vp8_reader *r, const MV_CONTEXT *mvc) {
  const vp8_prob *const p = (const vp8_prob *)mvc;
  int x = 0;

  if (vp8_read(r, p[mvpis_short])) { /* Large */
    int i = 0;

    do {
      x += vp8_read(r, p[MVPbits + i]) << i;
    } while (++i < 3);

    i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */

    do {
      x += vp8_read(r, p[MVPbits + i]) << i;
    } while (--i > 3);

    if (!(x & 0xFFF0) || vp8_read(r, p[MVPbits + 3])) x += 8;
  } else { /* small */
    x = vp8_treed_read(r, vp8_small_mvtree, p + MVPshort);
  }

  if (x && vp8_read(r, p[MVPsign])) x = -x;

  return x;
}

static void read_mv(vp8_reader *r, MV *mv, const MV_CONTEXT *mvc) {
  mv->row = (short)(read_mvcomponent(r, mvc) * 2);
  mv->col = (short)(read_mvcomponent(r, ++mvc) * 2);
}
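
/* A note on the MV component coding above (our reading of the "long"
 * format; cf. RFC 6386, "Motion Vector Decoding"): a long component is
 * mvlong_width (10) bits, transmitted as bits 0-2, then bits 9 down to 4,
 * with bit 3 handled last. If bits 4-9 are all zero, the value must still
 * exceed 7 (otherwise the short form would have been used), so bit 3 is
 * implicitly 1 and is not read; that is the !(x & 0xFFF0) test. Worked
 * example: bits 0-2 decode to 2 and bits 4-9 are all zero, so x += 8
 * happens without a read and the magnitude is 10. read_mv() then doubles
 * each component because the bitstream codes quarter-pel units while the
 * decoder (and the edge clamping below) works in eighth-pel units.
 */
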
static void read_mvcontexts(vp8_reader *bc, MV_CONTEXT *mvc) {
  int i = 0;

  do {
    const vp8_prob *up = vp8_mv_update_probs[i].prob;
    vp8_prob *p = (vp8_prob *)(mvc + i);
    vp8_prob *const pstop = p + MVPcount;

    do {
      if (vp8_read(bc, *up++)) {
        const vp8_prob x = (vp8_prob)vp8_read_literal(bc, 7);

        *p = x ? x << 1 : 1;
      }
    } while (++p < pstop);
  } while (++i < 2);
}

static const unsigned char mbsplit_fill_count[4] = { 8, 8, 4, 1 };
static const unsigned char mbsplit_fill_offset[4][16] = {
  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
  { 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15 },
  { 0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15 },
  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }
};

static void mb_mode_mv_init(VP8D_COMP *pbi) {
  vp8_reader *const bc = &pbi->mbc[8];
  MV_CONTEXT *const mvc = pbi->common.fc.mvc;

#if CONFIG_ERROR_CONCEALMENT
  /* Default is that no macroblock is corrupt, therefore we initialize
   * mvs_corrupt_from_mb to something very big, which we can be sure is
   * outside the frame. */
  pbi->mvs_corrupt_from_mb = UINT_MAX;
#endif
  /* Read the mb_no_coeff_skip flag */
  pbi->common.mb_no_coeff_skip = (int)vp8_read_bit(bc);

  pbi->prob_skip_false = 0;
  if (pbi->common.mb_no_coeff_skip) {
    pbi->prob_skip_false = (vp8_prob)vp8_read_literal(bc, 8);
  }

  if (pbi->common.frame_type != KEY_FRAME) {
    pbi->prob_intra = (vp8_prob)vp8_read_literal(bc, 8);
    pbi->prob_last = (vp8_prob)vp8_read_literal(bc, 8);
    pbi->prob_gf = (vp8_prob)vp8_read_literal(bc, 8);

    if (vp8_read_bit(bc)) {
      int i = 0;

      do {
        pbi->common.fc.ymode_prob[i] = (vp8_prob)vp8_read_literal(bc, 8);
      } while (++i < 4);
    }

    if (vp8_read_bit(bc)) {
      int i = 0;

      do {
        pbi->common.fc.uv_mode_prob[i] = (vp8_prob)vp8_read_literal(bc, 8);
      } while (++i < 3);
    }

    read_mvcontexts(bc, mvc);
  }
}

const vp8_prob vp8_sub_mv_ref_prob3[8][VP8_SUBMVREFS - 1] = {
  { 147, 136, 18 }, /* SUBMVREF_NORMAL */
  { 223, 1, 34 },   /* SUBMVREF_LEFT_ABOVE_SAME */
  { 106, 145, 1 },  /* SUBMVREF_LEFT_ZED */
  { 208, 1, 1 },    /* SUBMVREF_LEFT_ABOVE_ZED */
  { 179, 121, 1 },  /* SUBMVREF_ABOVE_ZED */
  { 223, 1, 34 },   /* SUBMVREF_LEFT_ABOVE_SAME */
  { 179, 121, 1 },  /* SUBMVREF_ABOVE_ZED */
  { 208, 1, 1 }     /* SUBMVREF_LEFT_ABOVE_ZED */
};

static const vp8_prob *get_sub_mv_ref_prob(const int left, const int above) {
  int lez = (left == 0);
  int aez = (above == 0);
  int lea = (left == above);
  const vp8_prob *prob;

  prob = vp8_sub_mv_ref_prob3[(aez << 2) | (lez << 1) | (lea)];

  return prob;
}
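
/* decode_split_mv() below first reads the split configuration with three
 * hard-wired probabilities (these appear to match vp8_mbsplit_probs in
 * the common code): the first bool separates 4x4 (sixteen partitions)
 * from the coarser splits, the second separates 8x8 (four partitions)
 * from the two-partition splits, and the third picks 16x8 (s == 0)
 * versus 8x16 (s == 1). Each partition's MV is then coded relative to
 * its left and above 4x4 neighbors: get_sub_mv_ref_prob() keys the
 * context on whether those two MVs are zero and/or equal, and the three
 * probabilities it returns gate a LEFT / ABOVE / ZERO / NEW decision,
 * in that order.
 */
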
static void decode_split_mv(vp8_reader *const bc, MODE_INFO *mi,
                            const MODE_INFO *left_mb, const MODE_INFO *above_mb,
                            MB_MODE_INFO *mbmi, int_mv best_mv,
                            MV_CONTEXT *const mvc, int mb_to_left_edge,
                            int mb_to_right_edge, int mb_to_top_edge,
                            int mb_to_bottom_edge) {
  int s; /* split configuration (16x8, 8x16, 8x8, 4x4) */
  /* number of partitions in the split configuration (see vp8_mbsplit_count) */
  int num_p;
  int j = 0;

  s = 3;
  num_p = 16;
  if (vp8_read(bc, 110)) {
    s = 2;
    num_p = 4;
    if (vp8_read(bc, 111)) {
      s = vp8_read(bc, 150);
      num_p = 2;
    }
  }

  do /* for each subset j */
  {
    int_mv leftmv, abovemv;
    int_mv blockmv;
    int k; /* first block in subset j */

    const vp8_prob *prob;
    k = vp8_mbsplit_offset[s][j];

    if (!(k & 3)) {
      /* On L edge, get from MB to left of us */
      if (left_mb->mbmi.mode != SPLITMV) {
        leftmv.as_int = left_mb->mbmi.mv.as_int;
      } else {
        leftmv.as_int = (left_mb->bmi + k + 4 - 1)->mv.as_int;
      }
    } else {
      leftmv.as_int = (mi->bmi + k - 1)->mv.as_int;
    }

    if (!(k >> 2)) {
      /* On top edge, get from MB above us */
      if (above_mb->mbmi.mode != SPLITMV) {
        abovemv.as_int = above_mb->mbmi.mv.as_int;
      } else {
        abovemv.as_int = (above_mb->bmi + k + 16 - 4)->mv.as_int;
      }
    } else {
      abovemv.as_int = (mi->bmi + k - 4)->mv.as_int;
    }

    prob = get_sub_mv_ref_prob(leftmv.as_int, abovemv.as_int);

    if (vp8_read(bc, prob[0])) {
      if (vp8_read(bc, prob[1])) {
        blockmv.as_int = 0;
        if (vp8_read(bc, prob[2])) {
          blockmv.as_mv.row = read_mvcomponent(bc, &mvc[0]) * 2;
          blockmv.as_mv.row += best_mv.as_mv.row;
          blockmv.as_mv.col = read_mvcomponent(bc, &mvc[1]) * 2;
          blockmv.as_mv.col += best_mv.as_mv.col;
        }
      } else {
        blockmv.as_int = abovemv.as_int;
      }
    } else {
      blockmv.as_int = leftmv.as_int;
    }

    mbmi->need_to_clamp_mvs |=
        vp8_check_mv_bounds(&blockmv, mb_to_left_edge, mb_to_right_edge,
                            mb_to_top_edge, mb_to_bottom_edge);

    {
      /* Fill (uniform) modes, mvs of jth subset.
         Must do it here because ensuing subsets can
         refer back to us via "left" or "above". */
      const unsigned char *fill_offset;
      unsigned int fill_count = mbsplit_fill_count[s];

      fill_offset =
          &mbsplit_fill_offset[s][(unsigned char)j * mbsplit_fill_count[s]];

      do {
        mi->bmi[*fill_offset].mv.as_int = blockmv.as_int;
        fill_offset++;
      } while (--fill_count);
    }

  } while (++j < num_p);

  mbmi->partitioning = s;
}
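
/* read_mb_modes_mv() below is the decoder-side twin of
 * vp8_find_near_mvs(): it surveys the above, left, and above-left
 * neighbors, collects up to three distinct candidate MVs (sign-corrected
 * across reference frames by mv_bias()), and weights them - 2 each for
 * above and left, 1 for above-left. Inter neighbors with a zero MV add
 * their weight to cnt[CNT_INTRA] instead. The resulting counts index
 * vp8_mode_contexts[], which supplies the probabilities for the chain of
 * mode decisions: ZEROMV, then NEARESTMV, then NEARMV, and finally
 * NEWMV versus SPLITMV.
 */
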
static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi,
                             MB_MODE_INFO *mbmi) {
  vp8_reader *const bc = &pbi->mbc[8];
  mbmi->ref_frame = (MV_REFERENCE_FRAME)vp8_read(bc, pbi->prob_intra);
  if (mbmi->ref_frame) { /* inter MB */
    enum { CNT_INTRA, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
    int cnt[4];
    int *cntx = cnt;
    int_mv near_mvs[4];
    int_mv *nmv = near_mvs;
    const int mis = pbi->mb.mode_info_stride;
    const MODE_INFO *above = mi - mis;
    const MODE_INFO *left = mi - 1;
    const MODE_INFO *aboveleft = above - 1;
    int *ref_frame_sign_bias = pbi->common.ref_frame_sign_bias;

    mbmi->need_to_clamp_mvs = 0;

    if (vp8_read(bc, pbi->prob_last)) {
      mbmi->ref_frame =
          (MV_REFERENCE_FRAME)((int)(2 + vp8_read(bc, pbi->prob_gf)));
    }

    /* Zero accumulators */
    nmv[0].as_int = nmv[1].as_int = nmv[2].as_int = 0;
    cnt[0] = cnt[1] = cnt[2] = cnt[3] = 0;

    /* Process above */
    if (above->mbmi.ref_frame != INTRA_FRAME) {
      if (above->mbmi.mv.as_int) {
        (++nmv)->as_int = above->mbmi.mv.as_int;
        mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame], mbmi->ref_frame,
                nmv, ref_frame_sign_bias);
        ++cntx;
      }

      *cntx += 2;
    }

    /* Process left */
    if (left->mbmi.ref_frame != INTRA_FRAME) {
      if (left->mbmi.mv.as_int) {
        int_mv this_mv;

        this_mv.as_int = left->mbmi.mv.as_int;
        mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame], mbmi->ref_frame,
                &this_mv, ref_frame_sign_bias);

        if (this_mv.as_int != nmv->as_int) {
          (++nmv)->as_int = this_mv.as_int;
          ++cntx;
        }

        *cntx += 2;
      } else {
        cnt[CNT_INTRA] += 2;
      }
    }

    /* Process above left */
    if (aboveleft->mbmi.ref_frame != INTRA_FRAME) {
      if (aboveleft->mbmi.mv.as_int) {
        int_mv this_mv;

        this_mv.as_int = aboveleft->mbmi.mv.as_int;
        mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame], mbmi->ref_frame,
                &this_mv, ref_frame_sign_bias);

        if (this_mv.as_int != nmv->as_int) {
          (++nmv)->as_int = this_mv.as_int;
          ++cntx;
        }

        *cntx += 1;
      } else {
        cnt[CNT_INTRA] += 1;
      }
    }

    if (vp8_read(bc, vp8_mode_contexts[cnt[CNT_INTRA]][0])) {
      /* If we have three distinct MV's ... */
      /* See if above-left MV can be merged with NEAREST */
      cnt[CNT_NEAREST] += ((cnt[CNT_SPLITMV] > 0) &
                           (nmv->as_int == near_mvs[CNT_NEAREST].as_int));

      /* Swap near and nearest if necessary */
      if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
        int tmp;
        tmp = cnt[CNT_NEAREST];
        cnt[CNT_NEAREST] = cnt[CNT_NEAR];
        cnt[CNT_NEAR] = tmp;
        tmp = near_mvs[CNT_NEAREST].as_int;
        near_mvs[CNT_NEAREST].as_int = near_mvs[CNT_NEAR].as_int;
        near_mvs[CNT_NEAR].as_int = tmp;
      }

      if (vp8_read(bc, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
        if (vp8_read(bc, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {
          int mb_to_top_edge;
          int mb_to_bottom_edge;
          int mb_to_left_edge;
          int mb_to_right_edge;
          MV_CONTEXT *const mvc = pbi->common.fc.mvc;
          int near_index;

          mb_to_top_edge = pbi->mb.mb_to_top_edge;
          mb_to_bottom_edge = pbi->mb.mb_to_bottom_edge;
          mb_to_top_edge -= LEFT_TOP_MARGIN;
          mb_to_bottom_edge += RIGHT_BOTTOM_MARGIN;
          mb_to_right_edge = pbi->mb.mb_to_right_edge;
          mb_to_right_edge += RIGHT_BOTTOM_MARGIN;
          mb_to_left_edge = pbi->mb.mb_to_left_edge;
          mb_to_left_edge -= LEFT_TOP_MARGIN;

          /* Use near_mvs[0] to store the "best" MV */
          near_index = CNT_INTRA + (cnt[CNT_NEAREST] >= cnt[CNT_INTRA]);

          vp8_clamp_mv2(&near_mvs[near_index], &pbi->mb);

          cnt[CNT_SPLITMV] =
              ((above->mbmi.mode == SPLITMV) + (left->mbmi.mode == SPLITMV)) *
                  2 +
              (aboveleft->mbmi.mode == SPLITMV);

          if (vp8_read(bc, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) {
            decode_split_mv(bc, mi, left, above, mbmi, near_mvs[near_index],
                            mvc, mb_to_left_edge, mb_to_right_edge,
                            mb_to_top_edge, mb_to_bottom_edge);
            mbmi->mv.as_int = mi->bmi[15].mv.as_int;
            mbmi->mode = SPLITMV;
            mbmi->is_4x4 = 1;
          } else {
            int_mv *const mbmi_mv = &mbmi->mv;
            read_mv(bc, &mbmi_mv->as_mv, (const MV_CONTEXT *)mvc);
            mbmi_mv->as_mv.row += near_mvs[near_index].as_mv.row;
            mbmi_mv->as_mv.col += near_mvs[near_index].as_mv.col;

            /* Don't need to check this on NEARMV and NEARESTMV
             * modes since those modes clamp the MV. The NEWMV mode
             * does not, so signal to the prediction stage whether
             * special handling may be required.
             */
            mbmi->need_to_clamp_mvs =
                vp8_check_mv_bounds(mbmi_mv, mb_to_left_edge, mb_to_right_edge,
                                    mb_to_top_edge, mb_to_bottom_edge);
            mbmi->mode = NEWMV;
          }
        } else {
          mbmi->mode = NEARMV;
          mbmi->mv.as_int = near_mvs[CNT_NEAR].as_int;
          vp8_clamp_mv2(&mbmi->mv, &pbi->mb);
        }
      } else {
        mbmi->mode = NEARESTMV;
        mbmi->mv.as_int = near_mvs[CNT_NEAREST].as_int;
        vp8_clamp_mv2(&mbmi->mv, &pbi->mb);
      }
    } else {
      mbmi->mode = ZEROMV;
      mbmi->mv.as_int = 0;
    }

#if CONFIG_ERROR_CONCEALMENT
    if (pbi->ec_enabled && (mbmi->mode != SPLITMV)) {
      mi->bmi[0].mv.as_int = mi->bmi[1].mv.as_int = mi->bmi[2].mv.as_int =
          mi->bmi[3].mv.as_int = mi->bmi[4].mv.as_int = mi->bmi[5].mv.as_int =
              mi->bmi[6].mv.as_int = mi->bmi[7].mv.as_int =
                  mi->bmi[8].mv.as_int = mi->bmi[9].mv.as_int =
                      mi->bmi[10].mv.as_int = mi->bmi[11].mv.as_int =
                          mi->bmi[12].mv.as_int = mi->bmi[13].mv.as_int =
                              mi->bmi[14].mv.as_int = mi->bmi[15].mv.as_int =
                                  mbmi->mv.as_int;
    }
#endif
  } else {
    /* required for left and above block mv */
    mbmi->mv.as_int = 0;

    /* MB is intra coded */
    if ((mbmi->mode = read_ymode(bc, pbi->common.fc.ymode_prob)) == B_PRED) {
      int j = 0;
      mbmi->is_4x4 = 1;
      do {
        mi->bmi[j].as_mode = read_bmode(bc, pbi->common.fc.bmode_prob);
      } while (++j < 16);
    }

    mbmi->uv_mode = read_uv_mode(bc, pbi->common.fc.uv_mode_prob);
  }
}
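
/* The segment id read below is a two-bit value coded with a small binary
 * tree: the first bool (probs[0]) splits {0, 1} from {2, 3}, and a second
 * bool (probs[1] or probs[2]) picks within the pair - which is exactly
 * what the two branches of read_mb_features() spell out. Worked example
 * (ours): a first bool of 1 followed by a second bool of 0 decodes
 * segment_id 2.
 */
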
static void read_mb_features(vp8_reader *r, MB_MODE_INFO *mi, MACROBLOCKD *x) {
  /* Is segmentation enabled */
  if (x->segmentation_enabled && x->update_mb_segmentation_map) {
    /* If so then read the segment id. */
    if (vp8_read(r, x->mb_segment_tree_probs[0])) {
      mi->segment_id =
          (unsigned char)(2 + vp8_read(r, x->mb_segment_tree_probs[2]));
    } else {
      mi->segment_id =
          (unsigned char)(vp8_read(r, x->mb_segment_tree_probs[1]));
    }
  }
}

static void decode_mb_mode_mvs(VP8D_COMP *pbi, MODE_INFO *mi,
                               MB_MODE_INFO *mbmi) {
  (void)mbmi;

  /* Read the Macroblock segmentation map if it is being updated explicitly
   * this frame (reset to 0 above by default)
   * By default on a key frame reset all MBs to segment 0
   */
  if (pbi->mb.update_mb_segmentation_map) {
    read_mb_features(&pbi->mbc[8], &mi->mbmi, &pbi->mb);
  } else if (pbi->common.frame_type == KEY_FRAME) {
    mi->mbmi.segment_id = 0;
  }

  /* Read the macroblock coeff skip flag if this feature is in use,
   * else default to 0 */
  if (pbi->common.mb_no_coeff_skip) {
    mi->mbmi.mb_skip_coeff = vp8_read(&pbi->mbc[8], pbi->prob_skip_false);
  } else {
    mi->mbmi.mb_skip_coeff = 0;
  }

  mi->mbmi.is_4x4 = 0;
  if (pbi->common.frame_type == KEY_FRAME) {
    read_kf_modes(pbi, mi);
  } else {
    read_mb_modes_mv(pbi, mi, &mi->mbmi);
  }
}
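
/* vp8_decode_mode_mvs() below walks the frame in raster order while
 * maintaining the distance from the current macroblock to each frame
 * edge in eighth-pel units - one 16-pel macroblock is 16 << 3 units,
 * hence the shifts. These are the bounds that read_mb_modes_mv() clamps
 * NEWMV and SPLITMV vectors against. The extra mi++ at the end of each
 * row steps over the left-predictor column (mode_info_stride is one
 * entry wider than mb_cols for that reason).
 */
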
void vp8_decode_mode_mvs(VP8D_COMP *pbi) {
  MODE_INFO *mi = pbi->common.mi;
  int mb_row = -1;
  int mb_to_right_edge_start;

  mb_mode_mv_init(pbi);

  pbi->mb.mb_to_top_edge = 0;
  pbi->mb.mb_to_bottom_edge = ((pbi->common.mb_rows - 1) * 16) << 3;
  mb_to_right_edge_start = ((pbi->common.mb_cols - 1) * 16) << 3;

  while (++mb_row < pbi->common.mb_rows) {
    int mb_col = -1;

    pbi->mb.mb_to_left_edge = 0;
    pbi->mb.mb_to_right_edge = mb_to_right_edge_start;

    while (++mb_col < pbi->common.mb_cols) {
#if CONFIG_ERROR_CONCEALMENT
      int mb_num = mb_row * pbi->common.mb_cols + mb_col;
#endif

      decode_mb_mode_mvs(pbi, mi, &mi->mbmi);

#if CONFIG_ERROR_CONCEALMENT
      /* look for corruption. set mvs_corrupt_from_mb to the current
       * mb_num if the frame is corrupt from this macroblock. */
      if (vp8dx_bool_error(&pbi->mbc[8]) &&
          mb_num < (int)pbi->mvs_corrupt_from_mb) {
        pbi->mvs_corrupt_from_mb = mb_num;
        /* no need to continue since the partition is corrupt from
         * here on.
         */
        return;
      }
#endif

      pbi->mb.mb_to_left_edge -= (16 << 3);
      pbi->mb.mb_to_right_edge -= (16 << 3);
      mi++; /* next macroblock */
    }
    pbi->mb.mb_to_top_edge -= (16 << 3);
    pbi->mb.mb_to_bottom_edge -= (16 << 3);

    mi++; /* skip left predictor each row */
  }
}