/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <stdlib.h>  // qsort()

#include "./vp9_rtcd.h"
#include "./vpx_dsp_rtcd.h"
#include "./vpx_scale_rtcd.h"

#include "vpx_dsp/bitreader_buffer.h"
#include "vpx_dsp/bitreader.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"
#include "vpx_ports/mem_ops.h"
#include "vpx_scale/vpx_scale.h"
#include "vpx_util/vpx_thread.h"

#include "vp9/common/vp9_alloccommon.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_thread_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_tile_common.h"

#include "vp9/decoder/vp9_decodeframe.h"
#include "vp9/decoder/vp9_detokenize.h"
#include "vp9/decoder/vp9_decodemv.h"
#include "vp9/decoder/vp9_decoder.h"
#include "vp9/decoder/vp9_dsubexp.h"

#define MAX_VP9_HEADER_SIZE 80

static int is_compound_reference_allowed(const VP9_COMMON *cm) {
  int i;
  for (i = 1; i < REFS_PER_FRAME; ++i)
    if (cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1]) return 1;

  return 0;
}

static void setup_compound_reference_mode(VP9_COMMON *cm) {
  if (cm->ref_frame_sign_bias[LAST_FRAME] ==
      cm->ref_frame_sign_bias[GOLDEN_FRAME]) {
    cm->comp_fixed_ref = ALTREF_FRAME;
    cm->comp_var_ref[0] = LAST_FRAME;
    cm->comp_var_ref[1] = GOLDEN_FRAME;
  } else if (cm->ref_frame_sign_bias[LAST_FRAME] ==
             cm->ref_frame_sign_bias[ALTREF_FRAME]) {
    cm->comp_fixed_ref = GOLDEN_FRAME;
    cm->comp_var_ref[0] = LAST_FRAME;
    cm->comp_var_ref[1] = ALTREF_FRAME;
  } else {
    cm->comp_fixed_ref = LAST_FRAME;
    cm->comp_var_ref[0] = GOLDEN_FRAME;
    cm->comp_var_ref[1] = ALTREF_FRAME;
  }
}

static int read_is_valid(const uint8_t *start, size_t len, const uint8_t *end) {
  return len != 0 && len <= (size_t)(end - start);
}

static int decode_unsigned_max(struct vpx_read_bit_buffer *rb, int max) {
  const int data = vpx_rb_read_literal(rb, get_unsigned_bits(max));
  return data > max ? max : data;
}
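
// decode_unsigned_max() clamps rather than rejects out-of-range values: with
// max == 5, for example, the element is coded in get_unsigned_bits(5) == 3
// bits, so raw values 6 and 7 both decode as 5.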

static TX_MODE read_tx_mode(vpx_reader *r) {
  TX_MODE tx_mode = vpx_read_literal(r, 2);
  if (tx_mode == ALLOW_32X32) tx_mode += vpx_read_bit(r);
  return tx_mode;
}

static void read_tx_mode_probs(struct tx_probs *tx_probs, vpx_reader *r) {
  int i, j;

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 3; ++j)
      vp9_diff_update_prob(r, &tx_probs->p8x8[i][j]);

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 2; ++j)
      vp9_diff_update_prob(r, &tx_probs->p16x16[i][j]);

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 1; ++j)
      vp9_diff_update_prob(r, &tx_probs->p32x32[i][j]);
}

static void read_switchable_interp_probs(FRAME_CONTEXT *fc, vpx_reader *r) {
  int i, j;
  for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
    for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i)
      vp9_diff_update_prob(r, &fc->switchable_interp_prob[j][i]);
}

static void read_inter_mode_probs(FRAME_CONTEXT *fc, vpx_reader *r) {
  int i, j;
  for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
    for (j = 0; j < INTER_MODES - 1; ++j)
      vp9_diff_update_prob(r, &fc->inter_mode_probs[i][j]);
}

static REFERENCE_MODE read_frame_reference_mode(const VP9_COMMON *cm,
                                                vpx_reader *r) {
  if (is_compound_reference_allowed(cm)) {
    return vpx_read_bit(r)
               ? (vpx_read_bit(r) ? REFERENCE_MODE_SELECT : COMPOUND_REFERENCE)
               : SINGLE_REFERENCE;
  } else {
    return SINGLE_REFERENCE;
  }
}

static void read_frame_reference_mode_probs(VP9_COMMON *cm, vpx_reader *r) {
  FRAME_CONTEXT *const fc = cm->fc;
  int i;

  if (cm->reference_mode == REFERENCE_MODE_SELECT)
    for (i = 0; i < COMP_INTER_CONTEXTS; ++i)
      vp9_diff_update_prob(r, &fc->comp_inter_prob[i]);

  if (cm->reference_mode != COMPOUND_REFERENCE)
    for (i = 0; i < REF_CONTEXTS; ++i) {
      vp9_diff_update_prob(r, &fc->single_ref_prob[i][0]);
      vp9_diff_update_prob(r, &fc->single_ref_prob[i][1]);
    }

  if (cm->reference_mode != SINGLE_REFERENCE)
    for (i = 0; i < REF_CONTEXTS; ++i)
      vp9_diff_update_prob(r, &fc->comp_ref_prob[i]);
}

static void update_mv_probs(vpx_prob *p, int n, vpx_reader *r) {
  int i;
  for (i = 0; i < n; ++i)
    if (vpx_read(r, MV_UPDATE_PROB)) p[i] = (vpx_read_literal(r, 7) << 1) | 1;
}

static void read_mv_probs(nmv_context *ctx, int allow_hp, vpx_reader *r) {
  int i, j;

  update_mv_probs(ctx->joints, MV_JOINTS - 1, r);

  for (i = 0; i < 2; ++i) {
    nmv_component *const comp_ctx = &ctx->comps[i];
    update_mv_probs(&comp_ctx->sign, 1, r);
    update_mv_probs(comp_ctx->classes, MV_CLASSES - 1, r);
    update_mv_probs(comp_ctx->class0, CLASS0_SIZE - 1, r);
    update_mv_probs(comp_ctx->bits, MV_OFFSET_BITS, r);
  }

  for (i = 0; i < 2; ++i) {
    nmv_component *const comp_ctx = &ctx->comps[i];
    for (j = 0; j < CLASS0_SIZE; ++j)
      update_mv_probs(comp_ctx->class0_fp[j], MV_FP_SIZE - 1, r);
    update_mv_probs(comp_ctx->fp, 3, r);
  }

  if (allow_hp) {
    for (i = 0; i < 2; ++i) {
      nmv_component *const comp_ctx = &ctx->comps[i];
      update_mv_probs(&comp_ctx->class0_hp, 1, r);
      update_mv_probs(&comp_ctx->hp, 1, r);
    }
  }
}

static void inverse_transform_block_inter(MACROBLOCKD *xd, int plane,
                                          const TX_SIZE tx_size, uint8_t *dst,
                                          int stride, int eob) {
  struct macroblockd_plane *const pd = &xd->plane[plane];
  tran_low_t *const dqcoeff = pd->dqcoeff;
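  // Lossless blocks use the 4x4 inverse Walsh-Hadamard transform; everything
  // else dispatches on tx_size to the matching inverse DCT. Afterwards only
  // the dequantized coefficients that eob implies may have been written are
  // cleared, rather than the whole buffer.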
  assert(eob > 0);
#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    uint16_t *const dst16 = CONVERT_TO_SHORTPTR(dst);
    if (xd->lossless) {
      vp9_highbd_iwht4x4_add(dqcoeff, dst16, stride, eob, xd->bd);
    } else {
      switch (tx_size) {
        case TX_4X4:
          vp9_highbd_idct4x4_add(dqcoeff, dst16, stride, eob, xd->bd);
          break;
        case TX_8X8:
          vp9_highbd_idct8x8_add(dqcoeff, dst16, stride, eob, xd->bd);
          break;
        case TX_16X16:
          vp9_highbd_idct16x16_add(dqcoeff, dst16, stride, eob, xd->bd);
          break;
        case TX_32X32:
          vp9_highbd_idct32x32_add(dqcoeff, dst16, stride, eob, xd->bd);
          break;
        default: assert(0 && "Invalid transform size");
      }
    }
  } else {
    if (xd->lossless) {
      vp9_iwht4x4_add(dqcoeff, dst, stride, eob);
    } else {
      switch (tx_size) {
        case TX_4X4: vp9_idct4x4_add(dqcoeff, dst, stride, eob); break;
        case TX_8X8: vp9_idct8x8_add(dqcoeff, dst, stride, eob); break;
        case TX_16X16: vp9_idct16x16_add(dqcoeff, dst, stride, eob); break;
        case TX_32X32: vp9_idct32x32_add(dqcoeff, dst, stride, eob); break;
        default: assert(0 && "Invalid transform size"); return;
      }
    }
  }
#else
  if (xd->lossless) {
    vp9_iwht4x4_add(dqcoeff, dst, stride, eob);
  } else {
    switch (tx_size) {
      case TX_4X4: vp9_idct4x4_add(dqcoeff, dst, stride, eob); break;
      case TX_8X8: vp9_idct8x8_add(dqcoeff, dst, stride, eob); break;
      case TX_16X16: vp9_idct16x16_add(dqcoeff, dst, stride, eob); break;
      case TX_32X32: vp9_idct32x32_add(dqcoeff, dst, stride, eob); break;
      default: assert(0 && "Invalid transform size"); return;
    }
  }
#endif  // CONFIG_VP9_HIGHBITDEPTH

  if (eob == 1) {
    dqcoeff[0] = 0;
  } else {
    if (tx_size <= TX_16X16 && eob <= 10)
      memset(dqcoeff, 0, 4 * (4 << tx_size) * sizeof(dqcoeff[0]));
    else if (tx_size == TX_32X32 && eob <= 34)
      memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0]));
    else
      memset(dqcoeff, 0, (16 << (tx_size << 1)) * sizeof(dqcoeff[0]));
  }
}

static void inverse_transform_block_intra(MACROBLOCKD *xd, int plane,
                                          const TX_TYPE tx_type,
                                          const TX_SIZE tx_size, uint8_t *dst,
                                          int stride, int eob) {
  struct macroblockd_plane *const pd = &xd->plane[plane];
  tran_low_t *const dqcoeff = pd->dqcoeff;
  assert(eob > 0);
#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    uint16_t *const dst16 = CONVERT_TO_SHORTPTR(dst);
    if (xd->lossless) {
      vp9_highbd_iwht4x4_add(dqcoeff, dst16, stride, eob, xd->bd);
    } else {
      switch (tx_size) {
        case TX_4X4:
          vp9_highbd_iht4x4_add(tx_type, dqcoeff, dst16, stride, eob, xd->bd);
          break;
        case TX_8X8:
          vp9_highbd_iht8x8_add(tx_type, dqcoeff, dst16, stride, eob, xd->bd);
          break;
        case TX_16X16:
          vp9_highbd_iht16x16_add(tx_type, dqcoeff, dst16, stride, eob, xd->bd);
          break;
        case TX_32X32:
          vp9_highbd_idct32x32_add(dqcoeff, dst16, stride, eob, xd->bd);
          break;
        default: assert(0 && "Invalid transform size");
      }
    }
  } else {
    if (xd->lossless) {
      vp9_iwht4x4_add(dqcoeff, dst, stride, eob);
    } else {
      switch (tx_size) {
        case TX_4X4: vp9_iht4x4_add(tx_type, dqcoeff, dst, stride, eob); break;
        case TX_8X8: vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob); break;
        case TX_16X16:
          vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob);
          break;
        case TX_32X32: vp9_idct32x32_add(dqcoeff, dst, stride, eob); break;
        default: assert(0 && "Invalid transform size"); return;
      }
    }
  }
#else
  if (xd->lossless) {
    vp9_iwht4x4_add(dqcoeff, dst, stride, eob);
  } else {
    switch (tx_size) {
      case TX_4X4: vp9_iht4x4_add(tx_type, dqcoeff, dst, stride, eob); break;
      case TX_8X8: vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob); break;
      case TX_16X16:
        vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob);
        break;
      case TX_32X32: vp9_idct32x32_add(dqcoeff, dst, stride, eob); break;
      default: assert(0 && "Invalid transform size"); return;
    }
  }
#endif  // CONFIG_VP9_HIGHBITDEPTH

  if (eob == 1) {
    dqcoeff[0] = 0;
  } else {
    if (tx_type == DCT_DCT && tx_size <= TX_16X16 && eob <= 10)
      memset(dqcoeff, 0, 4 * (4 << tx_size) * sizeof(dqcoeff[0]));
    else if (tx_size == TX_32X32 && eob <= 34)
      memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0]));
    else
      memset(dqcoeff, 0, (16 << (tx_size << 1)) * sizeof(dqcoeff[0]));
  }
}

static void predict_and_reconstruct_intra_block(TileWorkerData *twd,
                                                MODE_INFO *const mi, int plane,
                                                int row, int col,
                                                TX_SIZE tx_size) {
  MACROBLOCKD *const xd = &twd->xd;
  struct macroblockd_plane *const pd = &xd->plane[plane];
  PREDICTION_MODE mode = (plane == 0) ? mi->mode : mi->uv_mode;
  uint8_t *dst;
  dst = &pd->dst.buf[4 * row * pd->dst.stride + 4 * col];

  if (mi->sb_type < BLOCK_8X8)
    if (plane == 0) mode = xd->mi[0]->bmi[(row << 1) + col].as_mode;

  vp9_predict_intra_block(xd, pd->n4_wl, tx_size, mode, dst, pd->dst.stride,
                          dst, pd->dst.stride, col, row, plane);

  if (!mi->skip) {
    const TX_TYPE tx_type =
        (plane || xd->lossless) ? DCT_DCT : intra_mode_to_tx_type_lookup[mode];
    const scan_order *sc = (plane || xd->lossless)
                               ? &vp9_default_scan_orders[tx_size]
                               : &vp9_scan_orders[tx_size][tx_type];
    const int eob = vp9_decode_block_tokens(twd, plane, sc, col, row, tx_size,
                                            mi->segment_id);
    if (eob > 0) {
      inverse_transform_block_intra(xd, plane, tx_type, tx_size, dst,
                                    pd->dst.stride, eob);
    }
  }
}

static int reconstruct_inter_block(TileWorkerData *twd, MODE_INFO *const mi,
                                   int plane, int row, int col,
                                   TX_SIZE tx_size) {
  MACROBLOCKD *const xd = &twd->xd;
  struct macroblockd_plane *const pd = &xd->plane[plane];
  const scan_order *sc = &vp9_default_scan_orders[tx_size];
  const int eob = vp9_decode_block_tokens(twd, plane, sc, col, row, tx_size,
                                          mi->segment_id);

  if (eob > 0) {
    inverse_transform_block_inter(
        xd, plane, tx_size, &pd->dst.buf[4 * row * pd->dst.stride + 4 * col],
        pd->dst.stride, eob);
  }
  return eob;
}

static void build_mc_border(const uint8_t *src, int src_stride, uint8_t *dst,
                            int dst_stride, int x, int y, int b_w, int b_h,
                            int w, int h) {
  // Get a pointer to the start of the real data for this row.
  const uint8_t *ref_row = src - x - y * src_stride;

  if (y >= h)
    ref_row += (h - 1) * src_stride;
  else if (y > 0)
    ref_row += y * src_stride;

  do {
    int right = 0, copy;
    int left = x < 0 ? -x : 0;
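    // Each output row is written in three segments: 'left' pixels replicated
    // from the first valid sample, 'copy' pixels copied from the source row,
    // and 'right' pixels replicated from the last valid sample.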

    if (left > b_w) left = b_w;

    if (x + b_w > w) right = x + b_w - w;

    if (right > b_w) right = b_w;

    copy = b_w - left - right;

    if (left) memset(dst, ref_row[0], left);

    if (copy) memcpy(dst + left, ref_row + x + left, copy);

    if (right) memset(dst + left + copy, ref_row[w - 1], right);

    dst += dst_stride;
    ++y;

    if (y > 0 && y < h) ref_row += src_stride;
  } while (--b_h);
}

#if CONFIG_VP9_HIGHBITDEPTH
static void high_build_mc_border(const uint8_t *src8, int src_stride,
                                 uint16_t *dst, int dst_stride, int x, int y,
                                 int b_w, int b_h, int w, int h) {
  // Get a pointer to the start of the real data for this row.
  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  const uint16_t *ref_row = src - x - y * src_stride;

  if (y >= h)
    ref_row += (h - 1) * src_stride;
  else if (y > 0)
    ref_row += y * src_stride;

  do {
    int right = 0, copy;
    int left = x < 0 ? -x : 0;

    if (left > b_w) left = b_w;

    if (x + b_w > w) right = x + b_w - w;

    if (right > b_w) right = b_w;

    copy = b_w - left - right;

    if (left) vpx_memset16(dst, ref_row[0], left);

    if (copy) memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t));

    if (right) vpx_memset16(dst + left + copy, ref_row[w - 1], right);

    dst += dst_stride;
    ++y;

    if (y > 0 && y < h) ref_row += src_stride;
  } while (--b_h);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

#if CONFIG_VP9_HIGHBITDEPTH
static void extend_and_predict(const uint8_t *buf_ptr1, int pre_buf_stride,
                               int x0, int y0, int b_w, int b_h,
                               int frame_width, int frame_height,
                               int border_offset, uint8_t *const dst,
                               int dst_buf_stride, int subpel_x, int subpel_y,
                               const InterpKernel *kernel,
                               const struct scale_factors *sf, MACROBLOCKD *xd,
                               int w, int h, int ref, int xs, int ys) {
  DECLARE_ALIGNED(16, uint16_t, mc_buf_high[80 * 2 * 80 * 2]);

  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    high_build_mc_border(buf_ptr1, pre_buf_stride, mc_buf_high, b_w, x0, y0,
                         b_w, b_h, frame_width, frame_height);
    highbd_inter_predictor(mc_buf_high + border_offset, b_w,
                           CONVERT_TO_SHORTPTR(dst), dst_buf_stride, subpel_x,
                           subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
  } else {
    build_mc_border(buf_ptr1, pre_buf_stride, (uint8_t *)mc_buf_high, b_w, x0,
                    y0, b_w, b_h, frame_width, frame_height);
    inter_predictor(((uint8_t *)mc_buf_high) + border_offset, b_w, dst,
                    dst_buf_stride, subpel_x, subpel_y, sf, w, h, ref, kernel,
                    xs, ys);
  }
}
#else
static void extend_and_predict(const uint8_t *buf_ptr1, int pre_buf_stride,
                               int x0, int y0, int b_w, int b_h,
                               int frame_width, int frame_height,
                               int border_offset, uint8_t *const dst,
                               int dst_buf_stride, int subpel_x, int subpel_y,
                               const InterpKernel *kernel,
                               const struct scale_factors *sf, int w, int h,
                               int ref, int xs, int ys) {
  DECLARE_ALIGNED(16, uint8_t, mc_buf[80 * 2 * 80 * 2]);
  const uint8_t *buf_ptr;

  build_mc_border(buf_ptr1, pre_buf_stride, mc_buf, b_w, x0, y0, b_w, b_h,
                  frame_width, frame_height);
  buf_ptr = mc_buf + border_offset;

  inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x, subpel_y, sf, w,
                  h, ref, kernel, xs, ys);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH
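
// Builds the inter prediction for a single plane of one block: derives the
// (possibly scaled) motion vector at 1/16-pel precision, locates the matching
// block in the reference frame, extends the reference border into a temporary
// buffer when the block reads outside the decoded frame, and then runs the
// sub-pel interpolation kernel into the destination.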

static void dec_build_inter_predictors(
    MACROBLOCKD *xd, int plane, int bw, int bh, int x, int y, int w, int h,
    int mi_x, int mi_y, const InterpKernel *kernel,
    const struct scale_factors *sf, struct buf_2d *pre_buf,
    struct buf_2d *dst_buf, const MV *mv, RefCntBuffer *ref_frame_buf,
    int is_scaled, int ref) {
  struct macroblockd_plane *const pd = &xd->plane[plane];
  uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
  MV32 scaled_mv;
  int xs, ys, x0, y0, x0_16, y0_16, frame_width, frame_height, buf_stride,
      subpel_x, subpel_y;
  uint8_t *ref_frame, *buf_ptr;

  // Get reference frame pointer, width and height.
  if (plane == 0) {
    frame_width = ref_frame_buf->buf.y_crop_width;
    frame_height = ref_frame_buf->buf.y_crop_height;
    ref_frame = ref_frame_buf->buf.y_buffer;
  } else {
    frame_width = ref_frame_buf->buf.uv_crop_width;
    frame_height = ref_frame_buf->buf.uv_crop_height;
    ref_frame =
        plane == 1 ? ref_frame_buf->buf.u_buffer : ref_frame_buf->buf.v_buffer;
  }

  if (is_scaled) {
    const MV mv_q4 = clamp_mv_to_umv_border_sb(
        xd, mv, bw, bh, pd->subsampling_x, pd->subsampling_y);
    // Co-ordinate of containing block to pixel precision.
    int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x));
    int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y));
#if 0  // CONFIG_BETTER_HW_COMPATIBILITY
    assert(xd->mi[0]->sb_type != BLOCK_4X8 &&
           xd->mi[0]->sb_type != BLOCK_8X4);
    assert(mv_q4.row == mv->row * (1 << (1 - pd->subsampling_y)) &&
           mv_q4.col == mv->col * (1 << (1 - pd->subsampling_x)));
#endif
    // Co-ordinate of the block to 1/16th pixel precision.
    x0_16 = (x_start + x) << SUBPEL_BITS;
    y0_16 = (y_start + y) << SUBPEL_BITS;

    // Co-ordinate of current block in reference frame
    // to 1/16th pixel precision.
    x0_16 = sf->scale_value_x(x0_16, sf);
    y0_16 = sf->scale_value_y(y0_16, sf);

    // Map the top left corner of the block into the reference frame.
    x0 = sf->scale_value_x(x_start + x, sf);
    y0 = sf->scale_value_y(y_start + y, sf);

    // Scale the MV and incorporate the sub-pixel offset of the block
    // in the reference frame.
    scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
    xs = sf->x_step_q4;
    ys = sf->y_step_q4;
  } else {
    // Co-ordinate of containing block to pixel precision.
    x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x;
    y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y;

    // Co-ordinate of the block to 1/16th pixel precision.
    x0_16 = x0 << SUBPEL_BITS;
    y0_16 = y0 << SUBPEL_BITS;

    scaled_mv.row = mv->row * (1 << (1 - pd->subsampling_y));
    scaled_mv.col = mv->col * (1 << (1 - pd->subsampling_x));
    xs = ys = 16;
  }
  subpel_x = scaled_mv.col & SUBPEL_MASK;
  subpel_y = scaled_mv.row & SUBPEL_MASK;

  // Calculate the top left corner of the best matching block in the
  // reference frame.
  x0 += scaled_mv.col >> SUBPEL_BITS;
  y0 += scaled_mv.row >> SUBPEL_BITS;
  x0_16 += scaled_mv.col;
  y0_16 += scaled_mv.row;

  // Get reference block pointer.
  buf_ptr = ref_frame + y0 * pre_buf->stride + x0;
  buf_stride = pre_buf->stride;

  // Do border extension if there is motion or the
  // width/height is not a multiple of 8 pixels.
  if (is_scaled || scaled_mv.col || scaled_mv.row || (frame_width & 0x7) ||
      (frame_height & 0x7)) {
    int y1 = ((y0_16 + (h - 1) * ys) >> SUBPEL_BITS) + 1;

    // Get reference block bottom right horizontal coordinate.
    int x1 = ((x0_16 + (w - 1) * xs) >> SUBPEL_BITS) + 1;
    int x_pad = 0, y_pad = 0;
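
    // The 8-tap filters read up to VP9_INTERP_EXTEND - 1 pixels before and
    // VP9_INTERP_EXTEND pixels beyond the block in each direction that is
    // sub-pel filtered or scaled, so widen the region that must be valid.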

    if (subpel_x || (sf->x_step_q4 != SUBPEL_SHIFTS)) {
      x0 -= VP9_INTERP_EXTEND - 1;
      x1 += VP9_INTERP_EXTEND;
      x_pad = 1;
    }

    if (subpel_y || (sf->y_step_q4 != SUBPEL_SHIFTS)) {
      y0 -= VP9_INTERP_EXTEND - 1;
      y1 += VP9_INTERP_EXTEND;
      y_pad = 1;
    }

    // Skip border extension if block is inside the frame.
    if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width - 1 ||
        y0 < 0 || y0 > frame_height - 1 || y1 < 0 || y1 > frame_height - 1) {
      // Extend the border.
      const uint8_t *const buf_ptr1 = ref_frame + y0 * buf_stride + x0;
      const int b_w = x1 - x0 + 1;
      const int b_h = y1 - y0 + 1;
      const int border_offset = y_pad * 3 * b_w + x_pad * 3;

      extend_and_predict(buf_ptr1, buf_stride, x0, y0, b_w, b_h, frame_width,
                         frame_height, border_offset, dst, dst_buf->stride,
                         subpel_x, subpel_y, kernel, sf,
#if CONFIG_VP9_HIGHBITDEPTH
                         xd,
#endif
                         w, h, ref, xs, ys);
      return;
    }
  }
#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    highbd_inter_predictor(CONVERT_TO_SHORTPTR(buf_ptr), buf_stride,
                           CONVERT_TO_SHORTPTR(dst), dst_buf->stride, subpel_x,
                           subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
  } else {
    inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
                    subpel_y, sf, w, h, ref, kernel, xs, ys);
  }
#else
  inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x, subpel_y,
                  sf, w, h, ref, kernel, xs, ys);
#endif  // CONFIG_VP9_HIGHBITDEPTH
}
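
// Builds the inter predictors for one decoded block. For each reference frame
// (one, or two for compound prediction) the scale factors are validated and
// the pre planes set up; sub-8x8 blocks then predict every 4x4 unit from its
// averaged split motion vector, while larger blocks use the block's single
// motion vector for all planes.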

static void dec_build_inter_predictors_sb(VP9Decoder *const pbi,
                                          MACROBLOCKD *xd, int mi_row,
                                          int mi_col) {
  int plane;
  const int mi_x = mi_col * MI_SIZE;
  const int mi_y = mi_row * MI_SIZE;
  const MODE_INFO *mi = xd->mi[0];
  const InterpKernel *kernel = vp9_filter_kernels[mi->interp_filter];
  const BLOCK_SIZE sb_type = mi->sb_type;
  const int is_compound = has_second_ref(mi);
  int ref;
  int is_scaled;

  for (ref = 0; ref < 1 + is_compound; ++ref) {
    const MV_REFERENCE_FRAME frame = mi->ref_frame[ref];
    RefBuffer *ref_buf = &pbi->common.frame_refs[frame - LAST_FRAME];
    const struct scale_factors *const sf = &ref_buf->sf;
    const int idx = ref_buf->idx;
    BufferPool *const pool = pbi->common.buffer_pool;
    RefCntBuffer *const ref_frame_buf = &pool->frame_bufs[idx];

    if (!vp9_is_valid_scale(sf))
      vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
                         "Reference frame has invalid dimensions");

    is_scaled = vp9_is_scaled(sf);
    vp9_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col,
                         is_scaled ? sf : NULL);
    xd->block_refs[ref] = ref_buf;

    if (sb_type < BLOCK_8X8) {
      for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
        struct macroblockd_plane *const pd = &xd->plane[plane];
        struct buf_2d *const dst_buf = &pd->dst;
        const int num_4x4_w = pd->n4_w;
        const int num_4x4_h = pd->n4_h;
        const int n4w_x4 = 4 * num_4x4_w;
        const int n4h_x4 = 4 * num_4x4_h;
        struct buf_2d *const pre_buf = &pd->pre[ref];
        int i = 0, x, y;
        for (y = 0; y < num_4x4_h; ++y) {
          for (x = 0; x < num_4x4_w; ++x) {
            const MV mv = average_split_mvs(pd, mi, ref, i++);
            dec_build_inter_predictors(xd, plane, n4w_x4, n4h_x4, 4 * x, 4 * y,
                                       4, 4, mi_x, mi_y, kernel, sf, pre_buf,
                                       dst_buf, &mv, ref_frame_buf, is_scaled,
                                       ref);
          }
        }
      }
    } else {
      const MV mv = mi->mv[ref].as_mv;
      for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
        struct macroblockd_plane *const pd = &xd->plane[plane];
        struct buf_2d *const dst_buf = &pd->dst;
        const int num_4x4_w = pd->n4_w;
        const int num_4x4_h = pd->n4_h;
        const int n4w_x4 = 4 * num_4x4_w;
        const int n4h_x4 = 4 * num_4x4_h;
        struct buf_2d *const pre_buf = &pd->pre[ref];
        dec_build_inter_predictors(xd, plane, n4w_x4, n4h_x4, 0, 0, n4w_x4,
                                   n4h_x4, mi_x, mi_y, kernel, sf, pre_buf,
                                   dst_buf, &mv, ref_frame_buf, is_scaled, ref);
      }
    }
  }
}

static INLINE void dec_reset_skip_context(MACROBLOCKD *xd) {
  int i;
  for (i = 0; i < MAX_MB_PLANE; i++) {
    struct macroblockd_plane *const pd = &xd->plane[i];
    memset(pd->above_context, 0, sizeof(ENTROPY_CONTEXT) * pd->n4_w);
    memset(pd->left_context, 0, sizeof(ENTROPY_CONTEXT) * pd->n4_h);
  }
}

static void set_plane_n4(MACROBLOCKD *const xd, int bw, int bh, int bwl,
                         int bhl) {
  int i;
  for (i = 0; i < MAX_MB_PLANE; i++) {
    xd->plane[i].n4_w = (bw << 1) >> xd->plane[i].subsampling_x;
    xd->plane[i].n4_h = (bh << 1) >> xd->plane[i].subsampling_y;
    xd->plane[i].n4_wl = bwl - xd->plane[i].subsampling_x;
    xd->plane[i].n4_hl = bhl - xd->plane[i].subsampling_y;
  }
}

static MODE_INFO *set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                              BLOCK_SIZE bsize, int mi_row, int mi_col, int bw,
                              int bh, int x_mis, int y_mis, int bwl, int bhl) {
  const int offset = mi_row * cm->mi_stride + mi_col;
  int x, y;
  const TileInfo *const tile = &xd->tile;

  xd->mi = cm->mi_grid_visible + offset;
  xd->mi[0] = &cm->mi[offset];
  // TODO(slavarnway): Generate sb_type based on bwl and bhl, instead of
  // passing bsize from decode_partition().
  xd->mi[0]->sb_type = bsize;
  for (y = 0; y < y_mis; ++y)
    for (x = !y; x < x_mis; ++x) {
      xd->mi[y * cm->mi_stride + x] = xd->mi[0];
    }

  set_plane_n4(xd, bw, bh, bwl, bhl);

  set_skip_context(xd, mi_row, mi_col);

  // Distance of Mb to the various image edges. These are specified to 8th pel
  // as they are always compared to values that are in 1/8th pel units
  set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);

  vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
  return xd->mi[0];
}
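
// Decodes a single block: reads its mode info, then either intra-predicts and
// reconstructs each transform block, or builds the inter predictors and adds
// the decoded residual. An inter block whose residual decodes to all zeros is
// re-marked as skipped so the loop filter treats it like a skip block.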

static void decode_block(TileWorkerData *twd, VP9Decoder *const pbi, int mi_row,
                         int mi_col, BLOCK_SIZE bsize, int bwl, int bhl) {
  VP9_COMMON *const cm = &pbi->common;
  const int less8x8 = bsize < BLOCK_8X8;
  const int bw = 1 << (bwl - 1);
  const int bh = 1 << (bhl - 1);
  const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
  const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);
  vpx_reader *r = &twd->bit_reader;
  MACROBLOCKD *const xd = &twd->xd;

  MODE_INFO *mi = set_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis,
                              y_mis, bwl, bhl);

  if (bsize >= BLOCK_8X8 && (cm->subsampling_x || cm->subsampling_y)) {
    const BLOCK_SIZE uv_subsize =
        ss_size_lookup[bsize][cm->subsampling_x][cm->subsampling_y];
    if (uv_subsize == BLOCK_INVALID)
      vpx_internal_error(xd->error_info, VPX_CODEC_CORRUPT_FRAME,
                         "Invalid block size.");
  }

  vp9_read_mode_info(twd, pbi, mi_row, mi_col, x_mis, y_mis);

  if (mi->skip) {
    dec_reset_skip_context(xd);
  }

  if (!is_inter_block(mi)) {
    int plane;
    for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
      const struct macroblockd_plane *const pd = &xd->plane[plane];
      const TX_SIZE tx_size = plane ? get_uv_tx_size(mi, pd) : mi->tx_size;
      const int num_4x4_w = pd->n4_w;
      const int num_4x4_h = pd->n4_h;
      const int step = (1 << tx_size);
      int row, col;
      const int max_blocks_wide =
          num_4x4_w + (xd->mb_to_right_edge >= 0
                           ? 0
                           : xd->mb_to_right_edge >> (5 + pd->subsampling_x));
      const int max_blocks_high =
          num_4x4_h + (xd->mb_to_bottom_edge >= 0
                           ? 0
                           : xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));

      xd->max_blocks_wide = xd->mb_to_right_edge >= 0 ? 0 : max_blocks_wide;
      xd->max_blocks_high = xd->mb_to_bottom_edge >= 0 ? 0 : max_blocks_high;

      for (row = 0; row < max_blocks_high; row += step)
        for (col = 0; col < max_blocks_wide; col += step)
          predict_and_reconstruct_intra_block(twd, mi, plane, row, col,
                                              tx_size);
    }
  } else {
    // Prediction
    dec_build_inter_predictors_sb(pbi, xd, mi_row, mi_col);

    // Reconstruction
    if (!mi->skip) {
      int eobtotal = 0;
      int plane;

      for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
        const struct macroblockd_plane *const pd = &xd->plane[plane];
        const TX_SIZE tx_size = plane ? get_uv_tx_size(mi, pd) : mi->tx_size;
        const int num_4x4_w = pd->n4_w;
        const int num_4x4_h = pd->n4_h;
        const int step = (1 << tx_size);
        int row, col;
        const int max_blocks_wide =
            num_4x4_w + (xd->mb_to_right_edge >= 0
                             ? 0
                             : xd->mb_to_right_edge >> (5 + pd->subsampling_x));
        const int max_blocks_high =
            num_4x4_h +
            (xd->mb_to_bottom_edge >= 0
                 ? 0
                 : xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));

        xd->max_blocks_wide = xd->mb_to_right_edge >= 0 ? 0 : max_blocks_wide;
        xd->max_blocks_high = xd->mb_to_bottom_edge >= 0 ? 0 : max_blocks_high;
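
        // For blocks straddling the right or bottom frame edge,
        // max_blocks_wide/high shrink the loops below so that only transform
        // blocks with at least one visible pixel are decoded.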

        for (row = 0; row < max_blocks_high; row += step)
          for (col = 0; col < max_blocks_wide; col += step)
            eobtotal +=
                reconstruct_inter_block(twd, mi, plane, row, col, tx_size);
      }

      if (!less8x8 && eobtotal == 0) mi->skip = 1;  // skip loopfilter
    }
  }

  xd->corrupted |= vpx_reader_has_error(r);

  if (cm->lf.filter_level) {
    vp9_build_mask(cm, mi, mi_row, mi_col, bw, bh);
  }
}

static INLINE int dec_partition_plane_context(TileWorkerData *twd, int mi_row,
                                              int mi_col, int bsl) {
  const PARTITION_CONTEXT *above_ctx = twd->xd.above_seg_context + mi_col;
  const PARTITION_CONTEXT *left_ctx =
      twd->xd.left_seg_context + (mi_row & MI_MASK);
  int above = (*above_ctx >> bsl) & 1, left = (*left_ctx >> bsl) & 1;

  //  assert(bsl >= 0);

  return (left * 2 + above) + bsl * PARTITION_PLOFFSET;
}

static INLINE void dec_update_partition_context(TileWorkerData *twd, int mi_row,
                                                int mi_col, BLOCK_SIZE subsize,
                                                int bw) {
  PARTITION_CONTEXT *const above_ctx = twd->xd.above_seg_context + mi_col;
  PARTITION_CONTEXT *const left_ctx =
      twd->xd.left_seg_context + (mi_row & MI_MASK);

  // Update the partition context at the end nodes. Set partition bits
  // of block sizes larger than the current one to be one, and partition
  // bits of smaller block sizes to be zero.
  memset(above_ctx, partition_context_lookup[subsize].above, bw);
  memset(left_ctx, partition_context_lookup[subsize].left, bw);
}

static PARTITION_TYPE read_partition(TileWorkerData *twd, int mi_row,
                                     int mi_col, int has_rows, int has_cols,
                                     int bsl) {
  const int ctx = dec_partition_plane_context(twd, mi_row, mi_col, bsl);
  const vpx_prob *const probs = twd->xd.partition_probs[ctx];
  FRAME_COUNTS *counts = twd->xd.counts;
  PARTITION_TYPE p;
  vpx_reader *r = &twd->bit_reader;

  if (has_rows && has_cols)
    p = (PARTITION_TYPE)vpx_read_tree(r, vp9_partition_tree, probs);
  else if (!has_rows && has_cols)
    p = vpx_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ;
  else if (has_rows && !has_cols)
    p = vpx_read(r, probs[2]) ? PARTITION_SPLIT : PARTITION_VERT;
  else
    p = PARTITION_SPLIT;

  if (counts) ++counts->partition[ctx][p];

  return p;
}
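
// decode_partition() walks the partition tree recursively: tile decoding
// enters at BLOCK_64X64 with n4x4_l2 == 4 (1 << 4 == 16 4x4 units per axis)
// and each PARTITION_SPLIT recursion halves that. At the 8x8 level (hbs == 0)
// the chosen sub-8x8 partition is decoded with a single decode_block() call.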

// TODO(slavarnway): eliminate bsize and subsize in future commits
static void decode_partition(TileWorkerData *twd, VP9Decoder *const pbi,
                             int mi_row, int mi_col, BLOCK_SIZE bsize,
                             int n4x4_l2) {
  VP9_COMMON *const cm = &pbi->common;
  const int n8x8_l2 = n4x4_l2 - 1;
  const int num_8x8_wh = 1 << n8x8_l2;
  const int hbs = num_8x8_wh >> 1;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
  const int has_rows = (mi_row + hbs) < cm->mi_rows;
  const int has_cols = (mi_col + hbs) < cm->mi_cols;
  MACROBLOCKD *const xd = &twd->xd;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;

  partition = read_partition(twd, mi_row, mi_col, has_rows, has_cols, n8x8_l2);
  subsize = subsize_lookup[partition][bsize];  // get_subsize(bsize, partition);
  if (!hbs) {
    // calculate bmode block dimensions (log 2)
    xd->bmode_blocks_wl = 1 >> !!(partition & PARTITION_VERT);
    xd->bmode_blocks_hl = 1 >> !!(partition & PARTITION_HORZ);
    decode_block(twd, pbi, mi_row, mi_col, subsize, 1, 1);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        decode_block(twd, pbi, mi_row, mi_col, subsize, n4x4_l2, n4x4_l2);
        break;
      case PARTITION_HORZ:
        decode_block(twd, pbi, mi_row, mi_col, subsize, n4x4_l2, n8x8_l2);
        if (has_rows)
          decode_block(twd, pbi, mi_row + hbs, mi_col, subsize, n4x4_l2,
                       n8x8_l2);
        break;
      case PARTITION_VERT:
        decode_block(twd, pbi, mi_row, mi_col, subsize, n8x8_l2, n4x4_l2);
        if (has_cols)
          decode_block(twd, pbi, mi_row, mi_col + hbs, subsize, n8x8_l2,
                       n4x4_l2);
        break;
      case PARTITION_SPLIT:
        decode_partition(twd, pbi, mi_row, mi_col, subsize, n8x8_l2);
        decode_partition(twd, pbi, mi_row, mi_col + hbs, subsize, n8x8_l2);
        decode_partition(twd, pbi, mi_row + hbs, mi_col, subsize, n8x8_l2);
        decode_partition(twd, pbi, mi_row + hbs, mi_col + hbs, subsize,
                         n8x8_l2);
        break;
      default: assert(0 && "Invalid partition type");
    }
  }

  // update partition context
  if (bsize >= BLOCK_8X8 &&
      (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
    dec_update_partition_context(twd, mi_row, mi_col, subsize, num_8x8_wh);
}

static void setup_token_decoder(const uint8_t *data, const uint8_t *data_end,
                                size_t read_size,
                                struct vpx_internal_error_info *error_info,
                                vpx_reader *r, vpx_decrypt_cb decrypt_cb,
                                void *decrypt_state) {
  // Validate the calculated partition length. If the buffer
  // described by the partition can't be fully read, then restrict
  // it to the portion that can be (for EC mode) or throw an error.
  if (!read_is_valid(data, read_size, data_end))
    vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
                       "Truncated packet or corrupt tile length");

  if (vpx_reader_init(r, data, read_size, decrypt_cb, decrypt_state))
    vpx_internal_error(error_info, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate bool decoder %d", 1);
}

static void read_coef_probs_common(vp9_coeff_probs_model *coef_probs,
                                   vpx_reader *r) {
  int i, j, k, l, m;

  if (vpx_read_bit(r))
    for (i = 0; i < PLANE_TYPES; ++i)
      for (j = 0; j < REF_TYPES; ++j)
        for (k = 0; k < COEF_BANDS; ++k)
          for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l)
            for (m = 0; m < UNCONSTRAINED_NODES; ++m)
              vp9_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
}

static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode, vpx_reader *r) {
  const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
  TX_SIZE tx_size;
  for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
    read_coef_probs_common(fc->coef_probs[tx_size], r);
}

static void setup_segmentation(struct segmentation *seg,
                               struct vpx_read_bit_buffer *rb) {
  int i, j;

  seg->update_map = 0;
  seg->update_data = 0;

  seg->enabled = vpx_rb_read_bit(rb);
  if (!seg->enabled) return;

  // Segmentation map update
  seg->update_map = vpx_rb_read_bit(rb);
  if (seg->update_map) {
    for (i = 0; i < SEG_TREE_PROBS; i++)
      seg->tree_probs[i] =
          vpx_rb_read_bit(rb) ? vpx_rb_read_literal(rb, 8) : MAX_PROB;

    seg->temporal_update = vpx_rb_read_bit(rb);
    if (seg->temporal_update) {
      for (i = 0; i < PREDICTION_PROBS; i++)
        seg->pred_probs[i] =
            vpx_rb_read_bit(rb) ? vpx_rb_read_literal(rb, 8) : MAX_PROB;
    } else {
      for (i = 0; i < PREDICTION_PROBS; i++) seg->pred_probs[i] = MAX_PROB;
    }
  }

  // Segmentation data update
  seg->update_data = vpx_rb_read_bit(rb);
  if (seg->update_data) {
    seg->abs_delta = vpx_rb_read_bit(rb);

    vp9_clearall_segfeatures(seg);

    for (i = 0; i < MAX_SEGMENTS; i++) {
      for (j = 0; j < SEG_LVL_MAX; j++) {
        int data = 0;
        const int feature_enabled = vpx_rb_read_bit(rb);
        if (feature_enabled) {
          vp9_enable_segfeature(seg, i, j);
          data = decode_unsigned_max(rb, vp9_seg_feature_data_max(j));
          if (vp9_is_segfeature_signed(j))
            data = vpx_rb_read_bit(rb) ? -data : data;
        }
        vp9_set_segdata(seg, i, j, data);
      }
    }
  }
}

static void setup_loopfilter(struct loopfilter *lf,
                             struct vpx_read_bit_buffer *rb) {
  lf->filter_level = vpx_rb_read_literal(rb, 6);
  lf->sharpness_level = vpx_rb_read_literal(rb, 3);

  // Read in loop filter deltas applied at the MB level based on mode or ref
  // frame.
  lf->mode_ref_delta_update = 0;

  lf->mode_ref_delta_enabled = vpx_rb_read_bit(rb);
  if (lf->mode_ref_delta_enabled) {
    lf->mode_ref_delta_update = vpx_rb_read_bit(rb);
    if (lf->mode_ref_delta_update) {
      int i;

      for (i = 0; i < MAX_REF_LF_DELTAS; i++)
        if (vpx_rb_read_bit(rb))
          lf->ref_deltas[i] = vpx_rb_read_signed_literal(rb, 6);

      for (i = 0; i < MAX_MODE_LF_DELTAS; i++)
        if (vpx_rb_read_bit(rb))
          lf->mode_deltas[i] = vpx_rb_read_signed_literal(rb, 6);
    }
  }
}

static INLINE int read_delta_q(struct vpx_read_bit_buffer *rb) {
  return vpx_rb_read_bit(rb) ? vpx_rb_read_signed_literal(rb, 4) : 0;
}
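
// Delta-Q syntax: a presence flag, then a 4-bit magnitude and a sign bit, so
// each delta lies in [-15, 15].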

static void setup_quantization(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                               struct vpx_read_bit_buffer *rb) {
  cm->base_qindex = vpx_rb_read_literal(rb, QINDEX_BITS);
  cm->y_dc_delta_q = read_delta_q(rb);
  cm->uv_dc_delta_q = read_delta_q(rb);
  cm->uv_ac_delta_q = read_delta_q(rb);
  cm->dequant_bit_depth = cm->bit_depth;
  xd->lossless = cm->base_qindex == 0 && cm->y_dc_delta_q == 0 &&
                 cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;

#if CONFIG_VP9_HIGHBITDEPTH
  xd->bd = (int)cm->bit_depth;
#endif
}

static void setup_segmentation_dequant(VP9_COMMON *const cm) {
  // Build y/uv dequant values based on segmentation.
  if (cm->seg.enabled) {
    int i;
    for (i = 0; i < MAX_SEGMENTS; ++i) {
      const int qindex = vp9_get_qindex(&cm->seg, i, cm->base_qindex);
      cm->y_dequant[i][0] =
          vp9_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
      cm->y_dequant[i][1] = vp9_ac_quant(qindex, 0, cm->bit_depth);
      cm->uv_dequant[i][0] =
          vp9_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
      cm->uv_dequant[i][1] =
          vp9_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
    }
  } else {
    const int qindex = cm->base_qindex;
    // When segmentation is disabled, only the first value is used. The
    // remaining are don't cares.
    cm->y_dequant[0][0] = vp9_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
    cm->y_dequant[0][1] = vp9_ac_quant(qindex, 0, cm->bit_depth);
    cm->uv_dequant[0][0] =
        vp9_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
    cm->uv_dequant[0][1] =
        vp9_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
  }
}

static INTERP_FILTER read_interp_filter(struct vpx_read_bit_buffer *rb) {
  const INTERP_FILTER literal_to_filter[] = { EIGHTTAP_SMOOTH, EIGHTTAP,
                                              EIGHTTAP_SHARP, BILINEAR };
  return vpx_rb_read_bit(rb) ? SWITCHABLE
                             : literal_to_filter[vpx_rb_read_literal(rb, 2)];
}

static void setup_render_size(VP9_COMMON *cm, struct vpx_read_bit_buffer *rb) {
  cm->render_width = cm->width;
  cm->render_height = cm->height;
  if (vpx_rb_read_bit(rb))
    vp9_read_frame_size(rb, &cm->render_width, &cm->render_height);
}

static void resize_mv_buffer(VP9_COMMON *cm) {
  vpx_free(cm->cur_frame->mvs);
  cm->cur_frame->mi_rows = cm->mi_rows;
  cm->cur_frame->mi_cols = cm->mi_cols;
  CHECK_MEM_ERROR(cm, cm->cur_frame->mvs,
                  (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
                                       sizeof(*cm->cur_frame->mvs)));
}

static void resize_context_buffers(VP9_COMMON *cm, int width, int height) {
#if CONFIG_SIZE_LIMIT
  if (width > DECODE_WIDTH_LIMIT || height > DECODE_HEIGHT_LIMIT)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Dimensions of %dx%d beyond allowed size of %dx%d.",
                       width, height, DECODE_WIDTH_LIMIT, DECODE_HEIGHT_LIMIT);
#endif
  if (cm->width != width || cm->height != height) {
    const int new_mi_rows =
        ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
    const int new_mi_cols =
        ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2;

    // Allocations in vp9_alloc_context_buffers() depend on individual
    // dimensions as well as the overall size.
    if (new_mi_cols > cm->mi_cols || new_mi_rows > cm->mi_rows) {
      if (vp9_alloc_context_buffers(cm, width, height))
        vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                           "Failed to allocate context buffers");
    } else {
      vp9_set_mb_mi(cm, width, height);
    }
    vp9_init_context_buffers(cm);
    cm->width = width;
    cm->height = height;
  }
  if (cm->cur_frame->mvs == NULL || cm->mi_rows > cm->cur_frame->mi_rows ||
      cm->mi_cols > cm->cur_frame->mi_cols) {
    resize_mv_buffer(cm);
  }
}

static void setup_frame_size(VP9_COMMON *cm, struct vpx_read_bit_buffer *rb) {
  int width, height;
  BufferPool *const pool = cm->buffer_pool;
  vp9_read_frame_size(rb, &width, &height);
  resize_context_buffers(cm, width, height);
  setup_render_size(cm, rb);

  if (vpx_realloc_frame_buffer(
          get_frame_new_buffer(cm), cm->width, cm->height, cm->subsampling_x,
          cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
          cm->use_highbitdepth,
#endif
          VP9_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
          &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
          pool->cb_priv)) {
    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate frame buffer");
  }

  pool->frame_bufs[cm->new_fb_idx].released = 0;
  pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
  pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
  pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
  pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space;
  pool->frame_bufs[cm->new_fb_idx].buf.color_range = cm->color_range;
  pool->frame_bufs[cm->new_fb_idx].buf.render_width = cm->render_width;
  pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height;
}

static INLINE int valid_ref_frame_img_fmt(vpx_bit_depth_t ref_bit_depth,
                                          int ref_xss, int ref_yss,
                                          vpx_bit_depth_t this_bit_depth,
                                          int this_xss, int this_yss) {
  return ref_bit_depth == this_bit_depth && ref_xss == this_xss &&
         ref_yss == this_yss;
}

static void setup_frame_size_with_refs(VP9_COMMON *cm,
                                       struct vpx_read_bit_buffer *rb) {
  int width, height;
  int found = 0, i;
  int has_valid_ref_frame = 0;
  BufferPool *const pool = cm->buffer_pool;
  for (i = 0; i < REFS_PER_FRAME; ++i) {
    if (vpx_rb_read_bit(rb)) {
      if (cm->frame_refs[i].idx != INVALID_IDX) {
        YV12_BUFFER_CONFIG *const buf = cm->frame_refs[i].buf;
        width = buf->y_crop_width;
        height = buf->y_crop_height;
        found = 1;
        break;
      } else {
        vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                           "Failed to decode frame size");
      }
    }
  }

  if (!found) vp9_read_frame_size(rb, &width, &height);

  if (width <= 0 || height <= 0)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Invalid frame size");

  // Check to make sure at least one of the frames that this frame
  // references has valid dimensions.
  for (i = 0; i < REFS_PER_FRAME; ++i) {
    RefBuffer *const ref_frame = &cm->frame_refs[i];
    has_valid_ref_frame |=
        (ref_frame->idx != INVALID_IDX &&
         valid_ref_frame_size(ref_frame->buf->y_crop_width,
                              ref_frame->buf->y_crop_height, width, height));
  }
  if (!has_valid_ref_frame)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Referenced frame has invalid size");
  for (i = 0; i < REFS_PER_FRAME; ++i) {
    RefBuffer *const ref_frame = &cm->frame_refs[i];
    if (ref_frame->idx == INVALID_IDX ||
        !valid_ref_frame_img_fmt(ref_frame->buf->bit_depth,
                                 ref_frame->buf->subsampling_x,
                                 ref_frame->buf->subsampling_y, cm->bit_depth,
                                 cm->subsampling_x, cm->subsampling_y))
      vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                         "Referenced frame has incompatible color format");
  }

  resize_context_buffers(cm, width, height);
  setup_render_size(cm, rb);

  if (vpx_realloc_frame_buffer(
          get_frame_new_buffer(cm), cm->width, cm->height, cm->subsampling_x,
          cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
          cm->use_highbitdepth,
#endif
          VP9_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
          &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
          pool->cb_priv)) {
    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate frame buffer");
  }

  pool->frame_bufs[cm->new_fb_idx].released = 0;
  pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
  pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
  pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
  pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space;
  pool->frame_bufs[cm->new_fb_idx].buf.color_range = cm->color_range;
  pool->frame_bufs[cm->new_fb_idx].buf.render_width = cm->render_width;
  pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height;
}

static void setup_tile_info(VP9_COMMON *cm, struct vpx_read_bit_buffer *rb) {
  int min_log2_tile_cols, max_log2_tile_cols, max_ones;
  vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);

  // columns
  max_ones = max_log2_tile_cols - min_log2_tile_cols;
  cm->log2_tile_cols = min_log2_tile_cols;
  while (max_ones-- && vpx_rb_read_bit(rb)) cm->log2_tile_cols++;

  if (cm->log2_tile_cols > 6)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Invalid number of tile columns");

  // rows
  cm->log2_tile_rows = vpx_rb_read_bit(rb);
  if (cm->log2_tile_rows) cm->log2_tile_rows += vpx_rb_read_bit(rb);
}
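
// Every tile except the last in the frame is prefixed with its size coded as
// a 4-byte big-endian value; the last tile simply runs to the end of the
// frame data.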
// Reads the next tile returning its size and adjusting '*data' accordingly
// based on 'is_last'.
static void get_tile_buffer(const uint8_t *const data_end, int is_last,
                            struct vpx_internal_error_info *error_info,
                            const uint8_t **data, vpx_decrypt_cb decrypt_cb,
                            void *decrypt_state, TileBuffer *buf) {
  size_t size;

  if (!is_last) {
    if (!read_is_valid(*data, 4, data_end))
      vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
                         "Truncated packet or corrupt tile length");

    if (decrypt_cb) {
      uint8_t be_data[4];
      decrypt_cb(decrypt_state, *data, be_data, 4);
      size = mem_get_be32(be_data);
    } else {
      size = mem_get_be32(*data);
    }
    *data += 4;

    if (size > (size_t)(data_end - *data))
      vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
                         "Truncated packet or corrupt tile size");
  } else {
    size = data_end - *data;
  }

  buf->data = *data;
  buf->size = size;

  *data += size;
}

static void get_tile_buffers(VP9Decoder *pbi, const uint8_t *data,
                             const uint8_t *data_end, int tile_cols,
                             int tile_rows,
                             TileBuffer (*tile_buffers)[1 << 6]) {
  int r, c;

  for (r = 0; r < tile_rows; ++r) {
    for (c = 0; c < tile_cols; ++c) {
      const int is_last = (r == tile_rows - 1) && (c == tile_cols - 1);
      TileBuffer *const buf = &tile_buffers[r][c];
      buf->col = c;
      get_tile_buffer(data_end, is_last, &pbi->common.error, &data,
                      pbi->decrypt_cb, pbi->decrypt_state, buf);
    }
  }
}

static const uint8_t *decode_tiles(VP9Decoder *pbi, const uint8_t *data,
                                   const uint8_t *data_end) {
  VP9_COMMON *const cm = &pbi->common;
  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
  const int aligned_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  TileBuffer tile_buffers[4][1 << 6];
  int tile_row, tile_col;
  int mi_row, mi_col;
  TileWorkerData *tile_data = NULL;

  if (cm->lf.filter_level && !cm->skip_loop_filter &&
      pbi->lf_worker.data1 == NULL) {
    CHECK_MEM_ERROR(cm, pbi->lf_worker.data1,
                    vpx_memalign(32, sizeof(LFWorkerData)));
    pbi->lf_worker.hook = vp9_loop_filter_worker;
    if (pbi->max_threads > 1 && !winterface->reset(&pbi->lf_worker)) {
      vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                         "Loop filter thread creation failed");
    }
  }

  if (cm->lf.filter_level && !cm->skip_loop_filter) {
    LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
    // Be sure to sync as we might be resuming after a failed frame decode.
    winterface->sync(&pbi->lf_worker);
    vp9_loop_filter_data_reset(lf_data, get_frame_new_buffer(cm), cm,
                               pbi->mb.plane);
  }

  assert(tile_rows <= 4);
  assert(tile_cols <= (1 << 6));

  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  memset(cm->above_context, 0,
         sizeof(*cm->above_context) * MAX_MB_PLANE * 2 * aligned_cols);

  memset(cm->above_seg_context, 0,
         sizeof(*cm->above_seg_context) * aligned_cols);

  vp9_reset_lfm(cm);

  get_tile_buffers(pbi, data, data_end, tile_cols, tile_rows, tile_buffers);

  // Load all tile information into tile_data.
  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      const TileBuffer *const buf = &tile_buffers[tile_row][tile_col];
      tile_data = pbi->tile_worker_data + tile_cols * tile_row + tile_col;
      tile_data->xd = pbi->mb;
      tile_data->xd.corrupted = 0;
      tile_data->xd.counts =
          cm->frame_parallel_decoding_mode ? NULL : &cm->counts;
      vp9_zero(tile_data->dqcoeff);
      vp9_tile_init(&tile_data->xd.tile, cm, tile_row, tile_col);
      setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
                          &tile_data->bit_reader, pbi->decrypt_cb,
                          pbi->decrypt_state);
      vp9_init_macroblockd(cm, &tile_data->xd, tile_data->dqcoeff);
    }
  }

  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    TileInfo tile;
    vp9_tile_set_row(&tile, cm, tile_row);
    for (mi_row = tile.mi_row_start; mi_row < tile.mi_row_end;
         mi_row += MI_BLOCK_SIZE) {
      for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
        const int col =
            pbi->inv_tile_order ? tile_cols - tile_col - 1 : tile_col;
        tile_data = pbi->tile_worker_data + tile_cols * tile_row + col;
        vp9_tile_set_col(&tile, cm, col);
        vp9_zero(tile_data->xd.left_context);
        vp9_zero(tile_data->xd.left_seg_context);
        for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end;
             mi_col += MI_BLOCK_SIZE) {
          decode_partition(tile_data, pbi, mi_row, mi_col, BLOCK_64X64, 4);
        }
        pbi->mb.corrupted |= tile_data->xd.corrupted;
        if (pbi->mb.corrupted)
          vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                             "Failed to decode tile data");
      }
      // Loopfilter one row.
      if (cm->lf.filter_level && !cm->skip_loop_filter) {
        const int lf_start = mi_row - MI_BLOCK_SIZE;
        LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;

        // delay the loopfilter by 1 macroblock row.
        if (lf_start < 0) continue;

        // decoding has completed: finish up the loop filter in this thread.
        if (mi_row + MI_BLOCK_SIZE >= cm->mi_rows) continue;

        winterface->sync(&pbi->lf_worker);
        lf_data->start = lf_start;
        lf_data->stop = mi_row;
        if (pbi->max_threads > 1) {
          winterface->launch(&pbi->lf_worker);
        } else {
          winterface->execute(&pbi->lf_worker);
        }
      }
    }
  }

  // Loopfilter remaining rows in the frame.
  if (cm->lf.filter_level && !cm->skip_loop_filter) {
    LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
    winterface->sync(&pbi->lf_worker);
    lf_data->start = lf_data->stop;
    lf_data->stop = cm->mi_rows;
    winterface->execute(&pbi->lf_worker);
  }

  // Get last tile data.
  tile_data = pbi->tile_worker_data + tile_cols * tile_rows - 1;

  return vpx_reader_find_end(&tile_data->bit_reader);
}

// On entry 'tile_data->data_end' points to the end of the input frame, on exit
// it is updated to reflect the bitreader position of the final tile column if
// present in the tile buffer group or NULL otherwise.
static int tile_worker_hook(void *arg1, void *arg2) {
  TileWorkerData *const tile_data = (TileWorkerData *)arg1;
  VP9Decoder *const pbi = (VP9Decoder *)arg2;

  TileInfo *volatile tile = &tile_data->xd.tile;
  const int final_col = (1 << pbi->common.log2_tile_cols) - 1;
  const uint8_t *volatile bit_reader_end = NULL;
  volatile int n = tile_data->buf_start;
  tile_data->error_info.setjmp = 1;

  if (setjmp(tile_data->error_info.jmp)) {
    tile_data->error_info.setjmp = 0;
    tile_data->xd.corrupted = 1;
    tile_data->data_end = NULL;
    return 0;
  }

  tile_data->xd.corrupted = 0;

  do {
    int mi_row, mi_col;
    const TileBuffer *const buf = pbi->tile_buffers + n;
    vp9_zero(tile_data->dqcoeff);
    vp9_tile_init(tile, &pbi->common, 0, buf->col);
    setup_token_decoder(buf->data, tile_data->data_end, buf->size,
                        &tile_data->error_info, &tile_data->bit_reader,
                        pbi->decrypt_cb, pbi->decrypt_state);
    vp9_init_macroblockd(&pbi->common, &tile_data->xd, tile_data->dqcoeff);
    // init resets xd.error_info
    tile_data->xd.error_info = &tile_data->error_info;

    for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
         mi_row += MI_BLOCK_SIZE) {
      vp9_zero(tile_data->xd.left_context);
      vp9_zero(tile_data->xd.left_seg_context);
      for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
           mi_col += MI_BLOCK_SIZE) {
        decode_partition(tile_data, pbi, mi_row, mi_col, BLOCK_64X64, 4);
      }
    }

    if (buf->col == final_col) {
      bit_reader_end = vpx_reader_find_end(&tile_data->bit_reader);
    }
  } while (!tile_data->xd.corrupted && ++n <= tile_data->buf_end);

  tile_data->data_end = bit_reader_end;
  return !tile_data->xd.corrupted;
}

// sorts in descending order
static int compare_tile_buffers(const void *a, const void *b) {
  const TileBuffer *const buf1 = (const TileBuffer *)a;
  const TileBuffer *const buf2 = (const TileBuffer *)b;
  return (int)(buf2->size - buf1->size);
}
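
// Multi-threaded tile decoding: tile columns are distributed over up to
// pbi->max_threads workers, each worker decoding a contiguous range of the
// size-sorted tile buffers; the final worker runs synchronously on the
// calling thread.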

static const uint8_t *decode_tiles_mt(VP9Decoder *pbi, const uint8_t *data,
                                      const uint8_t *data_end) {
  VP9_COMMON *const cm = &pbi->common;
  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
  const uint8_t *bit_reader_end = NULL;
  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  const int num_workers = VPXMIN(pbi->max_threads, tile_cols);
  int n;

  assert(tile_cols <= (1 << 6));
  assert(tile_rows == 1);
  (void)tile_rows;

  if (pbi->num_tile_workers == 0) {
    const int num_threads = pbi->max_threads;
    CHECK_MEM_ERROR(cm, pbi->tile_workers,
                    vpx_malloc(num_threads * sizeof(*pbi->tile_workers)));
    for (n = 0; n < num_threads; ++n) {
      VPxWorker *const worker = &pbi->tile_workers[n];
      ++pbi->num_tile_workers;

      winterface->init(worker);
      if (n < num_threads - 1 && !winterface->reset(worker)) {
        vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                           "Tile decoder thread creation failed");
      }
    }
  }

  // Reset tile decoding hook
  for (n = 0; n < num_workers; ++n) {
    VPxWorker *const worker = &pbi->tile_workers[n];
    TileWorkerData *const tile_data =
        &pbi->tile_worker_data[n + pbi->total_tiles];
    winterface->sync(worker);
    tile_data->xd = pbi->mb;
    tile_data->xd.counts =
        cm->frame_parallel_decoding_mode ? NULL : &tile_data->counts;
    worker->hook = tile_worker_hook;
    worker->data1 = tile_data;
    worker->data2 = pbi;
  }

  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  memset(cm->above_context, 0,
         sizeof(*cm->above_context) * MAX_MB_PLANE * 2 * aligned_mi_cols);
  memset(cm->above_seg_context, 0,
         sizeof(*cm->above_seg_context) * aligned_mi_cols);

  vp9_reset_lfm(cm);

  // Load tile data into tile_buffers
  get_tile_buffers(pbi, data, data_end, tile_cols, tile_rows,
                   &pbi->tile_buffers);

  // Sort the buffers based on size in descending order.
  qsort(pbi->tile_buffers, tile_cols, sizeof(pbi->tile_buffers[0]),
        compare_tile_buffers);

  if (num_workers == tile_cols) {
    // Rearrange the tile buffers such that the largest, and
    // presumably the most difficult, tile will be decoded in the main thread.
    // This should help minimize the number of instances where the main thread
    // is waiting for a worker to complete.
    const TileBuffer largest = pbi->tile_buffers[0];
    memmove(pbi->tile_buffers, pbi->tile_buffers + 1,
            (tile_cols - 1) * sizeof(pbi->tile_buffers[0]));
    pbi->tile_buffers[tile_cols - 1] = largest;
  } else {
    int start = 0, end = tile_cols - 2;
    TileBuffer tmp;

    // Interleave the tiles to distribute the load between threads, assuming a
    // larger tile implies it is more difficult to decode.
    while (start < end) {
      tmp = pbi->tile_buffers[start];
      pbi->tile_buffers[start] = pbi->tile_buffers[end];
      pbi->tile_buffers[end] = tmp;
      start += 2;
      end -= 2;
    }
  }

  // Initialize thread frame counts.
  if (!cm->frame_parallel_decoding_mode) {
    for (n = 0; n < num_workers; ++n) {
      TileWorkerData *const tile_data =
          (TileWorkerData *)pbi->tile_workers[n].data1;
      vp9_zero(tile_data->counts);
    }
  }

  {
    const int base = tile_cols / num_workers;
    const int remain = tile_cols % num_workers;
    int buf_start = 0;

    for (n = 0; n < num_workers; ++n) {
      const int count = base + (remain + n) / num_workers;
      VPxWorker *const worker = &pbi->tile_workers[n];
      TileWorkerData *const tile_data = (TileWorkerData *)worker->data1;

      tile_data->buf_start = buf_start;
      tile_data->buf_end = buf_start + count - 1;
      tile_data->data_end = data_end;
      buf_start += count;

      worker->had_error = 0;
      if (n == num_workers - 1) {
        assert(tile_data->buf_end == tile_cols - 1);
        winterface->execute(worker);
      } else {
        winterface->launch(worker);
      }
    }

    for (; n > 0; --n) {
      VPxWorker *const worker = &pbi->tile_workers[n - 1];
      TileWorkerData *const tile_data = (TileWorkerData *)worker->data1;
      // TODO(jzern): The tile may have specific error data associated with
      // its vpx_internal_error_info which could be propagated to the main info
      // in cm. Additionally once the threads have been synced and an error is
      // detected, there's no point in continuing to decode tiles.
      pbi->mb.corrupted |= !winterface->sync(worker);
      if (!bit_reader_end) bit_reader_end = tile_data->data_end;
    }
  }
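
  // For example, with tile_cols == 5 and num_workers == 2 the split above
  // gives base == 2 and remain == 1: worker 0 decodes 2 tiles and worker 1
  // (run synchronously on the calling thread) decodes 3.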
  if (!cm->frame_parallel_decoding_mode) {
    for (n = 0; n < num_workers; ++n) {
      TileWorkerData *const tile_data =
          (TileWorkerData *)pbi->tile_workers[n].data1;
      vp9_accumulate_frame_counts(&cm->counts, &tile_data->counts, 1);
    }
  }

  assert(bit_reader_end || pbi->mb.corrupted);
  return bit_reader_end;
}

static void error_handler(void *data) {
  VP9_COMMON *const cm = (VP9_COMMON *)data;
  vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet");
}

static void read_bitdepth_colorspace_sampling(VP9_COMMON *cm,
                                              struct vpx_read_bit_buffer *rb) {
  if (cm->profile >= PROFILE_2) {
    cm->bit_depth = vpx_rb_read_bit(rb) ? VPX_BITS_12 : VPX_BITS_10;
#if CONFIG_VP9_HIGHBITDEPTH
    cm->use_highbitdepth = 1;
#endif
  } else {
    cm->bit_depth = VPX_BITS_8;
#if CONFIG_VP9_HIGHBITDEPTH
    cm->use_highbitdepth = 0;
#endif
  }
  cm->color_space = vpx_rb_read_literal(rb, 3);
  if (cm->color_space != VPX_CS_SRGB) {
    cm->color_range = (vpx_color_range_t)vpx_rb_read_bit(rb);
    if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
      cm->subsampling_x = vpx_rb_read_bit(rb);
      cm->subsampling_y = vpx_rb_read_bit(rb);
      if (cm->subsampling_x == 1 && cm->subsampling_y == 1)
        vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                           "4:2:0 color not supported in profile 1 or 3");
      if (vpx_rb_read_bit(rb))
        vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                           "Reserved bit set");
    } else {
      cm->subsampling_y = cm->subsampling_x = 1;
    }
  } else {
    cm->color_range = VPX_CR_FULL_RANGE;
    if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
      // Note: if the color space is sRGB then 4:4:4 chroma sampling is
      // assumed; 4:2:2 or 4:4:0 chroma sampling is not allowed.
      cm->subsampling_y = cm->subsampling_x = 0;
      if (vpx_rb_read_bit(rb))
        vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                           "Reserved bit set");
    } else {
      vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                         "4:4:4 color not supported in profile 0 or 2");
    }
  }
}

static size_t read_uncompressed_header(VP9Decoder *pbi,
                                       struct vpx_read_bit_buffer *rb) {
  VP9_COMMON *const cm = &pbi->common;
  BufferPool *const pool = cm->buffer_pool;
  RefCntBuffer *const frame_bufs = pool->frame_bufs;
  int i, mask, ref_index = 0;
  size_t sz;

  cm->last_frame_type = cm->frame_type;
  cm->last_intra_only = cm->intra_only;

  if (vpx_rb_read_literal(rb, 2) != VP9_FRAME_MARKER)
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                       "Invalid frame marker");

  cm->profile = vp9_read_profile(rb);
#if CONFIG_VP9_HIGHBITDEPTH
  if (cm->profile >= MAX_PROFILES)
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                       "Unsupported bitstream profile");
#else
  if (cm->profile >= PROFILE_2)
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                       "Unsupported bitstream profile");
#endif

  cm->show_existing_frame = vpx_rb_read_bit(rb);
  if (cm->show_existing_frame) {
    // Show an existing frame directly.
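    // The 3-bit index selects one of the REF_FRAMES (8) reference slots; the
    // mapped buffer must already hold a valid decoded frame.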
    const int frame_to_show = cm->ref_frame_map[vpx_rb_read_literal(rb, 3)];
    if (frame_to_show < 0 || frame_bufs[frame_to_show].ref_count < 1) {
      vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                         "Buffer %d does not contain a decoded frame",
                         frame_to_show);
    }

    ref_cnt_fb(frame_bufs, &cm->new_fb_idx, frame_to_show);
    pbi->refresh_frame_flags = 0;
    cm->lf.filter_level = 0;
    cm->show_frame = 1;

    return 0;
  }

  cm->frame_type = (FRAME_TYPE)vpx_rb_read_bit(rb);
  cm->show_frame = vpx_rb_read_bit(rb);
  cm->error_resilient_mode = vpx_rb_read_bit(rb);

  if (cm->frame_type == KEY_FRAME) {
    if (!vp9_read_sync_code(rb))
      vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                         "Invalid frame sync code");

    read_bitdepth_colorspace_sampling(cm, rb);
    pbi->refresh_frame_flags = (1 << REF_FRAMES) - 1;

    for (i = 0; i < REFS_PER_FRAME; ++i) {
      cm->frame_refs[i].idx = INVALID_IDX;
      cm->frame_refs[i].buf = NULL;
    }

    setup_frame_size(cm, rb);
    if (pbi->need_resync) {
      memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
      pbi->need_resync = 0;
    }
  } else {
    cm->intra_only = cm->show_frame ? 0 : vpx_rb_read_bit(rb);

    cm->reset_frame_context =
        cm->error_resilient_mode ? 0 : vpx_rb_read_literal(rb, 2);

    if (cm->intra_only) {
      if (!vp9_read_sync_code(rb))
        vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                           "Invalid frame sync code");
      if (cm->profile > PROFILE_0) {
        read_bitdepth_colorspace_sampling(cm, rb);
      } else {
        // NOTE: The intra-only frame header does not include the
        // specification of either the color format or color sub-sampling in
        // profile 0. VP9 specifies that the default color format should be
        // YUV 4:2:0 in this case (normative).
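        // The assignments below apply those defaults: 8-bit BT.601 video with
        // studio-range values and 4:2:0 sub-sampling.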
        cm->color_space = VPX_CS_BT_601;
        cm->color_range = VPX_CR_STUDIO_RANGE;
        cm->subsampling_y = cm->subsampling_x = 1;
        cm->bit_depth = VPX_BITS_8;
#if CONFIG_VP9_HIGHBITDEPTH
        cm->use_highbitdepth = 0;
#endif
      }

      pbi->refresh_frame_flags = vpx_rb_read_literal(rb, REF_FRAMES);
      setup_frame_size(cm, rb);
      if (pbi->need_resync) {
        memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
        pbi->need_resync = 0;
      }
    } else if (pbi->need_resync != 1) { /* Skip if need resync */
      pbi->refresh_frame_flags = vpx_rb_read_literal(rb, REF_FRAMES);
      for (i = 0; i < REFS_PER_FRAME; ++i) {
        const int ref = vpx_rb_read_literal(rb, REF_FRAMES_LOG2);
        const int idx = cm->ref_frame_map[ref];
        RefBuffer *const ref_frame = &cm->frame_refs[i];
        ref_frame->idx = idx;
        ref_frame->buf = &frame_bufs[idx].buf;
        cm->ref_frame_sign_bias[LAST_FRAME + i] = vpx_rb_read_bit(rb);
      }

      setup_frame_size_with_refs(cm, rb);

      cm->allow_high_precision_mv = vpx_rb_read_bit(rb);
      cm->interp_filter = read_interp_filter(rb);

      for (i = 0; i < REFS_PER_FRAME; ++i) {
        RefBuffer *const ref_buf = &cm->frame_refs[i];
#if CONFIG_VP9_HIGHBITDEPTH
        vp9_setup_scale_factors_for_frame(
            &ref_buf->sf, ref_buf->buf->y_crop_width,
            ref_buf->buf->y_crop_height, cm->width, cm->height,
            cm->use_highbitdepth);
#else
        vp9_setup_scale_factors_for_frame(
            &ref_buf->sf, ref_buf->buf->y_crop_width,
            ref_buf->buf->y_crop_height, cm->width, cm->height);
#endif
      }
    }
  }
#if CONFIG_VP9_HIGHBITDEPTH
  get_frame_new_buffer(cm)->bit_depth = cm->bit_depth;
#endif
  get_frame_new_buffer(cm)->color_space = cm->color_space;
  get_frame_new_buffer(cm)->color_range = cm->color_range;
  get_frame_new_buffer(cm)->render_width = cm->render_width;
  get_frame_new_buffer(cm)->render_height = cm->render_height;

  if (pbi->need_resync) {
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Keyframe / intra-only frame required to reset decoder"
                       " state");
  }

  if (!cm->error_resilient_mode) {
    cm->refresh_frame_context = vpx_rb_read_bit(rb);
    cm->frame_parallel_decoding_mode = vpx_rb_read_bit(rb);
    if (!cm->frame_parallel_decoding_mode) vp9_zero(cm->counts);
  } else {
    cm->refresh_frame_context = 0;
    cm->frame_parallel_decoding_mode = 1;
  }

  // For intra-only and error-resilient frames this value is overridden by the
  // call to vp9_setup_past_independence() below, which forces the use of
  // frame context 0.
  cm->frame_context_idx = vpx_rb_read_literal(rb, FRAME_CONTEXTS_LOG2);

  // Generate next_ref_frame_map.
  for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
    if (mask & 1) {
      cm->next_ref_frame_map[ref_index] = cm->new_fb_idx;
      ++frame_bufs[cm->new_fb_idx].ref_count;
    } else {
      cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index];
    }
    // Current thread holds the reference frame.
    if (cm->ref_frame_map[ref_index] >= 0)
      ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count;
    ++ref_index;
  }

  for (; ref_index < REF_FRAMES; ++ref_index) {
    cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index];
    // Current thread holds the reference frame.
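    // (A negative map entry means the slot was cleared to -1 on resync and
    // holds no buffer to pin.)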
    if (cm->ref_frame_map[ref_index] >= 0)
      ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count;
  }
  pbi->hold_ref_buf = 1;

  if (frame_is_intra_only(cm) || cm->error_resilient_mode)
    vp9_setup_past_independence(cm);

  setup_loopfilter(&cm->lf, rb);
  setup_quantization(cm, &pbi->mb, rb);
  setup_segmentation(&cm->seg, rb);
  setup_segmentation_dequant(cm);

  setup_tile_info(cm, rb);
  sz = vpx_rb_read_literal(rb, 16);

  if (sz == 0)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Invalid header size");

  return sz;
}

static int read_compressed_header(VP9Decoder *pbi, const uint8_t *data,
                                  size_t partition_size) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  FRAME_CONTEXT *const fc = cm->fc;
  vpx_reader r;
  int k;

  if (vpx_reader_init(&r, data, partition_size, pbi->decrypt_cb,
                      pbi->decrypt_state))
    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate bool decoder 0");

  cm->tx_mode = xd->lossless ? ONLY_4X4 : read_tx_mode(&r);
  if (cm->tx_mode == TX_MODE_SELECT) read_tx_mode_probs(&fc->tx_probs, &r);
  read_coef_probs(fc, cm->tx_mode, &r);

  for (k = 0; k < SKIP_CONTEXTS; ++k)
    vp9_diff_update_prob(&r, &fc->skip_probs[k]);

  if (!frame_is_intra_only(cm)) {
    nmv_context *const nmvc = &fc->nmvc;
    int i, j;

    read_inter_mode_probs(fc, &r);

    if (cm->interp_filter == SWITCHABLE) read_switchable_interp_probs(fc, &r);

    for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
      vp9_diff_update_prob(&r, &fc->intra_inter_prob[i]);

    cm->reference_mode = read_frame_reference_mode(cm, &r);
    if (cm->reference_mode != SINGLE_REFERENCE)
      setup_compound_reference_mode(cm);
    read_frame_reference_mode_probs(cm, &r);

    for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
      for (i = 0; i < INTRA_MODES - 1; ++i)
        vp9_diff_update_prob(&r, &fc->y_mode_prob[j][i]);

    for (j = 0; j < PARTITION_CONTEXTS; ++j)
      for (i = 0; i < PARTITION_TYPES - 1; ++i)
        vp9_diff_update_prob(&r, &fc->partition_prob[j][i]);

    read_mv_probs(nmvc, cm->allow_high_precision_mv, &r);
  }

  return vpx_reader_has_error(&r);
}

static struct vpx_read_bit_buffer *init_read_bit_buffer(
    VP9Decoder *pbi, struct vpx_read_bit_buffer *rb, const uint8_t *data,
    const uint8_t *data_end, uint8_t clear_data[MAX_VP9_HEADER_SIZE]) {
  rb->bit_offset = 0;
  rb->error_handler = error_handler;
  rb->error_handler_data = &pbi->common;
  if (pbi->decrypt_cb) {
    const int n = (int)VPXMIN(MAX_VP9_HEADER_SIZE, data_end - data);
    pbi->decrypt_cb(pbi->decrypt_state, data, clear_data, n);
    rb->bit_buffer = clear_data;
    rb->bit_buffer_end = clear_data + n;
  } else {
    rb->bit_buffer = data;
    rb->bit_buffer_end = data_end;
  }
  return rb;
}

//------------------------------------------------------------------------------

int vp9_read_sync_code(struct vpx_read_bit_buffer *const rb) {
  return vpx_rb_read_literal(rb, 8) == VP9_SYNC_CODE_0 &&
         vpx_rb_read_literal(rb, 8) == VP9_SYNC_CODE_1 &&
         vpx_rb_read_literal(rb, 8) == VP9_SYNC_CODE_2;
}

void vp9_read_frame_size(struct vpx_read_bit_buffer *rb, int *width,
                         int *height) {
  *width = vpx_rb_read_literal(rb, 16) + 1;
  *height = vpx_rb_read_literal(rb, 16) + 1;
}

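// The profile is coded as two raw bits, low bit first. A third bit follows
// for values above 2; it doubles as the reserved-zero bit, so a set bit
// yields profile 4, which is rejected as unsupported.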
BITSTREAM_PROFILE vp9_read_profile(struct vpx_read_bit_buffer *rb) {
  int profile = vpx_rb_read_bit(rb);
  profile |= vpx_rb_read_bit(rb) << 1;
  if (profile > 2) profile += vpx_rb_read_bit(rb);
  return (BITSTREAM_PROFILE)profile;
}

void vp9_decode_frame(VP9Decoder *pbi, const uint8_t *data,
                      const uint8_t *data_end, const uint8_t **p_data_end) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  struct vpx_read_bit_buffer rb;
  int context_updated = 0;
  uint8_t clear_data[MAX_VP9_HEADER_SIZE];
  const size_t first_partition_size = read_uncompressed_header(
      pbi, init_read_bit_buffer(pbi, &rb, data, data_end, clear_data));
  const int tile_rows = 1 << cm->log2_tile_rows;
  const int tile_cols = 1 << cm->log2_tile_cols;
  YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm);
  xd->cur_buf = new_fb;

  if (!first_partition_size) {
    // Showing an existing frame directly: the entire frame header fits in a
    // single byte for profiles 0-2 and two bytes for profile 3.
    *p_data_end = data + (cm->profile <= PROFILE_2 ? 1 : 2);
    return;
  }

  data += vpx_rb_bytes_read(&rb);
  if (!read_is_valid(data, first_partition_size, data_end))
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Truncated packet or corrupt header length");

  cm->use_prev_frame_mvs =
      !cm->error_resilient_mode && cm->width == cm->last_width &&
      cm->height == cm->last_height && !cm->last_intra_only &&
      cm->last_show_frame && (cm->last_frame_type != KEY_FRAME);

  vp9_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);

  *cm->fc = cm->frame_contexts[cm->frame_context_idx];
  if (!cm->fc->initialized)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Uninitialized entropy context.");

  xd->corrupted = 0;
  new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size);
  if (new_fb->corrupted)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Decode failed. Frame data header is corrupted.");

  if (cm->lf.filter_level && !cm->skip_loop_filter) {
    vp9_loop_filter_frame_init(cm, cm->lf.filter_level);
  }

  if (pbi->tile_worker_data == NULL ||
      (tile_cols * tile_rows) != pbi->total_tiles) {
    const int num_tile_workers =
        tile_cols * tile_rows + ((pbi->max_threads > 1) ? pbi->max_threads : 0);
    const size_t twd_size = num_tile_workers * sizeof(*pbi->tile_worker_data);
    // Ensure tile data offsets will be properly aligned. This may fail on
    // platforms without DECLARE_ALIGNED().
    assert((sizeof(*pbi->tile_worker_data) % 16) == 0);
    vpx_free(pbi->tile_worker_data);
    CHECK_MEM_ERROR(cm, pbi->tile_worker_data, vpx_memalign(32, twd_size));
    pbi->total_tiles = tile_rows * tile_cols;
  }

  if (pbi->max_threads > 1 && tile_rows == 1 && tile_cols > 1) {
    // Multi-threaded tile decoder.
    *p_data_end = decode_tiles_mt(pbi, data + first_partition_size, data_end);
    if (!xd->corrupted) {
      if (!cm->skip_loop_filter) {
        // If multiple threads are used to decode tiles, then we use those
        // threads to do parallel loopfiltering.
        vp9_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane, cm->lf.filter_level,
                                 0, 0, pbi->tile_workers,
                                 pbi->num_tile_workers, &pbi->lf_row_sync);
      }
    } else {
      vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                         "Decode failed. Frame data is corrupted.");
    }
  } else {
    *p_data_end = decode_tiles(pbi, data + first_partition_size, data_end);
  }

  if (!xd->corrupted) {
    if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) {
      vp9_adapt_coef_probs(cm);

      if (!frame_is_intra_only(cm)) {
        vp9_adapt_mode_probs(cm);
        vp9_adapt_mv_probs(cm, cm->allow_high_precision_mv);
      }
    }
  } else {
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Decode failed. Frame data is corrupted.");
  }

  // When not decoding frame-parallel, update the frame context here.
  if (cm->refresh_frame_context && !context_updated)
    cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
}