/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "vpx_config.h"
#include "vp8_rtcd.h"
#if !defined(WIN32) && CONFIG_OS_SUPPORT == 1
#include <unistd.h>
#endif
#include "onyxd_int.h"
#include "vpx_mem/vpx_mem.h"
#include "vp8/common/threading.h"

#include "vp8/common/loopfilter.h"
#include "vp8/common/extend.h"
#include "vpx_ports/vpx_timer.h"
#include "decoderthreading.h"
#include "detokenize.h"
#include "vp8/common/reconintra4x4.h"
#include "vp8/common/reconinter.h"
#include "vp8/common/reconintra.h"
#include "vp8/common/setupintrarecon.h"
#if CONFIG_ERROR_CONCEALMENT
#include "error_concealment.h"
#endif

#define CALLOC_ARRAY(p, n) CHECK_MEM_ERROR((p), vpx_calloc(sizeof(*(p)), (n)))
#define CALLOC_ARRAY_ALIGNED(p, n, algn)                            \
  do {                                                              \
    CHECK_MEM_ERROR((p), vpx_memalign((algn), sizeof(*(p)) * (n))); \
    memset((p), 0, (n) * sizeof(*(p)));                             \
  } while (0)

static void setup_decoding_thread_data(VP8D_COMP *pbi, MACROBLOCKD *xd,
                                       MB_ROW_DEC *mbrd, int count) {
  VP8_COMMON *const pc = &pbi->common;
  int i;

  for (i = 0; i < count; ++i) {
    MACROBLOCKD *mbd = &mbrd[i].mbd;
    mbd->subpixel_predict = xd->subpixel_predict;
    mbd->subpixel_predict8x4 = xd->subpixel_predict8x4;
    mbd->subpixel_predict8x8 = xd->subpixel_predict8x8;
    mbd->subpixel_predict16x16 = xd->subpixel_predict16x16;

    mbd->frame_type = pc->frame_type;
    mbd->pre = xd->pre;
    mbd->dst = xd->dst;

    mbd->segmentation_enabled = xd->segmentation_enabled;
    mbd->mb_segement_abs_delta = xd->mb_segement_abs_delta;
    memcpy(mbd->segment_feature_data, xd->segment_feature_data,
           sizeof(xd->segment_feature_data));

    /*signed char ref_lf_deltas[MAX_REF_LF_DELTAS];*/
    memcpy(mbd->ref_lf_deltas, xd->ref_lf_deltas, sizeof(xd->ref_lf_deltas));
    /*signed char mode_lf_deltas[MAX_MODE_LF_DELTAS];*/
    memcpy(mbd->mode_lf_deltas, xd->mode_lf_deltas, sizeof(xd->mode_lf_deltas));
    /*unsigned char mode_ref_lf_delta_enabled;
    unsigned char mode_ref_lf_delta_update;*/
    mbd->mode_ref_lf_delta_enabled = xd->mode_ref_lf_delta_enabled;
    mbd->mode_ref_lf_delta_update = xd->mode_ref_lf_delta_update;

    mbd->current_bc = &pbi->mbc[0];

    memcpy(mbd->dequant_y1_dc, xd->dequant_y1_dc, sizeof(xd->dequant_y1_dc));
    memcpy(mbd->dequant_y1, xd->dequant_y1, sizeof(xd->dequant_y1));
    memcpy(mbd->dequant_y2, xd->dequant_y2, sizeof(xd->dequant_y2));
    memcpy(mbd->dequant_uv, xd->dequant_uv, sizeof(xd->dequant_uv));

    mbd->fullpixel_mask = 0xffffffff;

    if (pc->full_pixel) mbd->fullpixel_mask = 0xfffffff8;
  }

  for (i = 0; i < pc->mb_rows; ++i)
    vpx_atomic_store_release(&pbi->mt_current_mb_col[i], -1);
}

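/* Decode one macroblock: read the residual tokens (unless the MB is
 * skipped), build the intra or inter prediction, then add the
 * dequantized, inverse-transformed residual. With error concealment
 * enabled, a corrupt MB falls back to prediction-only reconstruction.
 */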
static void mt_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
                                 unsigned int mb_idx) {
  MB_PREDICTION_MODE mode;
  int i;
#if CONFIG_ERROR_CONCEALMENT
  int corruption_detected = 0;
#else
  (void)mb_idx;
#endif

  if (xd->mode_info_context->mbmi.mb_skip_coeff) {
    vp8_reset_mb_tokens_context(xd);
  } else if (!vp8dx_bool_error(xd->current_bc)) {
    int eobtotal;
    eobtotal = vp8_decode_mb_tokens(pbi, xd);

    /* Special case: force the loopfilter to skip when eobtotal is zero */
    xd->mode_info_context->mbmi.mb_skip_coeff = (eobtotal == 0);
  }

  mode = xd->mode_info_context->mbmi.mode;

  if (xd->segmentation_enabled) vp8_mb_init_dequantizer(pbi, xd);

#if CONFIG_ERROR_CONCEALMENT

  if (pbi->ec_active) {
    int throw_residual;
    /* When we have independent partitions we can apply residual even
     * though other partitions within the frame are corrupt.
     */
    throw_residual =
        (!pbi->independent_partitions && pbi->frame_corrupt_residual);
    throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc));

    if ((mb_idx >= pbi->mvs_corrupt_from_mb || throw_residual)) {
      /* MB with corrupt residuals or corrupt mode/motion vectors.
       * Better to use the predictor as reconstruction.
       */
      pbi->frame_corrupt_residual = 1;
      memset(xd->qcoeff, 0, sizeof(xd->qcoeff));

      corruption_detected = 1;

      /* force idct to be skipped for B_PRED and use the
       * prediction only for reconstruction
       */
      memset(xd->eobs, 0, 25);
    }
  }
#endif

  /* do prediction */
  if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
    vp8_build_intra_predictors_mbuv_s(
        xd, xd->recon_above[1], xd->recon_above[2], xd->recon_left[1],
        xd->recon_left[2], xd->recon_left_stride[1], xd->dst.u_buffer,
        xd->dst.v_buffer, xd->dst.uv_stride);

    if (mode != B_PRED) {
      vp8_build_intra_predictors_mby_s(
          xd, xd->recon_above[0], xd->recon_left[0], xd->recon_left_stride[0],
          xd->dst.y_buffer, xd->dst.y_stride);
    } else {
      short *DQC = xd->dequant_y1;
      int dst_stride = xd->dst.y_stride;

      /* clear out residual eob info */
      if (xd->mode_info_context->mbmi.mb_skip_coeff) memset(xd->eobs, 0, 25);

      intra_prediction_down_copy(xd, xd->recon_above[0] + 16);

      for (i = 0; i < 16; ++i) {
        BLOCKD *b = &xd->block[i];
        unsigned char *dst = xd->dst.y_buffer + b->offset;
        B_PREDICTION_MODE b_mode = xd->mode_info_context->bmi[i].as_mode;
        unsigned char *Above;
        unsigned char *yleft;
        int left_stride;
        unsigned char top_left;

        /* Caution: some b_modes need 8 above pixels (4 above + 4
         * above-right).
         */
        if (i < 4 && pbi->common.filter_level) {
          Above = xd->recon_above[0] + b->offset;
        } else {
          Above = dst - dst_stride;
        }

        if (i % 4 == 0 && pbi->common.filter_level) {
          yleft = xd->recon_left[0] + i;
          left_stride = 1;
        } else {
          yleft = dst - 1;
          left_stride = dst_stride;
        }

        if ((i == 4 || i == 8 || i == 12) && pbi->common.filter_level) {
          top_left = *(xd->recon_left[0] + i - 1);
        } else {
          top_left = Above[-1];
        }

        vp8_intra4x4_predict(Above, yleft, left_stride, b_mode, dst, dst_stride,
                             top_left);

        if (xd->eobs[i]) {
          if (xd->eobs[i] > 1) {
            vp8_dequant_idct_add(b->qcoeff, DQC, dst, dst_stride);
          } else {
            vp8_dc_only_idct_add(b->qcoeff[0] * DQC[0], dst, dst_stride, dst,
                                 dst_stride);
            memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
          }
        }
      }
    }
  } else {
    vp8_build_inter_predictors_mb(xd);
  }

#if CONFIG_ERROR_CONCEALMENT
  if (corruption_detected) {
    return;
  }
#endif
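
  /* Residual reconstruction. For whole-MB modes the 16 luma DC
   * coefficients are carried in a separate second-order block (block
   * 24): it is inverse Walsh-Hadamard transformed below and its
   * outputs are written back into xd->qcoeff as the DC terms of the 16
   * Y blocks, with DQC then switched to dequant_y1_dc so those DC
   * components are preserved through the per-block dequant/IDCT.
   */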
  if (!xd->mode_info_context->mbmi.mb_skip_coeff) {
    /* dequantization and idct */
    if (mode != B_PRED) {
      short *DQC = xd->dequant_y1;

      if (mode != SPLITMV) {
        BLOCKD *b = &xd->block[24];

        /* do 2nd order transform on the dc block */
        if (xd->eobs[24] > 1) {
          vp8_dequantize_b(b, xd->dequant_y2);

          vp8_short_inv_walsh4x4(&b->dqcoeff[0], xd->qcoeff);
          memset(b->qcoeff, 0, 16 * sizeof(b->qcoeff[0]));
        } else {
          b->dqcoeff[0] = b->qcoeff[0] * xd->dequant_y2[0];
          vp8_short_inv_walsh4x4_1(&b->dqcoeff[0], xd->qcoeff);
          memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
        }

        /* override the dc dequant constant in order to preserve the
         * dc components
         */
        DQC = xd->dequant_y1_dc;
      }

      vp8_dequant_idct_add_y_block(xd->qcoeff, DQC, xd->dst.y_buffer,
                                   xd->dst.y_stride, xd->eobs);
    }

    vp8_dequant_idct_add_uv_block(xd->qcoeff + 16 * 16, xd->dequant_uv,
                                  xd->dst.u_buffer, xd->dst.v_buffer,
                                  xd->dst.uv_stride, xd->eobs + 16);
  }
}

static void mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd,
                              int start_mb_row) {
  const vpx_atomic_int *last_row_current_mb_col;
  vpx_atomic_int *current_mb_col;
  int mb_row;
  VP8_COMMON *pc = &pbi->common;
  const int nsync = pbi->sync_range;
  const vpx_atomic_int first_row_no_sync_above =
      VPX_ATOMIC_INIT(pc->mb_cols + nsync);
  int num_part = 1 << pbi->common.multi_token_partition;
  int last_mb_row = start_mb_row;

  YV12_BUFFER_CONFIG *yv12_fb_new = pbi->dec_fb_ref[INTRA_FRAME];
  YV12_BUFFER_CONFIG *yv12_fb_lst = pbi->dec_fb_ref[LAST_FRAME];

  int recon_y_stride = yv12_fb_new->y_stride;
  int recon_uv_stride = yv12_fb_new->uv_stride;

  unsigned char *ref_buffer[MAX_REF_FRAMES][3];
  unsigned char *dst_buffer[3];
  int i;
  int ref_fb_corrupted[MAX_REF_FRAMES];

  ref_fb_corrupted[INTRA_FRAME] = 0;

  for (i = 1; i < MAX_REF_FRAMES; ++i) {
    YV12_BUFFER_CONFIG *this_fb = pbi->dec_fb_ref[i];

    ref_buffer[i][0] = this_fb->y_buffer;
    ref_buffer[i][1] = this_fb->u_buffer;
    ref_buffer[i][2] = this_fb->v_buffer;

    ref_fb_corrupted[i] = this_fb->corrupted;
  }

  dst_buffer[0] = yv12_fb_new->y_buffer;
  dst_buffer[1] = yv12_fb_new->u_buffer;
  dst_buffer[2] = yv12_fb_new->v_buffer;

  xd->up_available = (start_mb_row != 0);

  xd->mode_info_context = pc->mi + pc->mode_info_stride * start_mb_row;
  xd->mode_info_stride = pc->mode_info_stride;

  for (mb_row = start_mb_row; mb_row < pc->mb_rows;
       mb_row += (pbi->decoding_thread_count + 1)) {
    int recon_yoffset, recon_uvoffset;
    int mb_col;
    int filter_level;
    loop_filter_info_n *lfi_n = &pc->lf_info;

    /* save last row processed by this thread */
    last_mb_row = mb_row;
    /* select bool coder for current partition */
    xd->current_bc = &pbi->mbc[mb_row % num_part];

    if (mb_row > 0) {
      last_row_current_mb_col = &pbi->mt_current_mb_col[mb_row - 1];
    } else {
      last_row_current_mb_col = &first_row_no_sync_above;
    }

    current_mb_col = &pbi->mt_current_mb_col[mb_row];

    recon_yoffset = mb_row * recon_y_stride * 16;
    recon_uvoffset = mb_row * recon_uv_stride * 8;

    /* reset contexts */
    xd->above_context = pc->above_context;
    memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));

    xd->left_available = 0;

    xd->mb_to_top_edge = -((mb_row * 16) << 3);
    xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
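
    /* Pick the sources of intra prediction context. When the loop
     * filter is enabled it runs interleaved with decoding and
     * overwrites the frame buffer, so prediction reads the unfiltered
     * pixels saved in the mt_*above_row / mt_*left_col scratch
     * buffers; with the filter off, the frame buffer itself still
     * holds unfiltered reconstruction.
     */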
    if (pbi->common.filter_level) {
      xd->recon_above[0] = pbi->mt_yabove_row[mb_row] + 0 * 16 + 32;
      xd->recon_above[1] = pbi->mt_uabove_row[mb_row] + 0 * 8 + 16;
      xd->recon_above[2] = pbi->mt_vabove_row[mb_row] + 0 * 8 + 16;

      xd->recon_left[0] = pbi->mt_yleft_col[mb_row];
      xd->recon_left[1] = pbi->mt_uleft_col[mb_row];
      xd->recon_left[2] = pbi->mt_vleft_col[mb_row];

      /* TODO: move outside the row loop */
      xd->recon_left_stride[0] = 1;
      xd->recon_left_stride[1] = 1;
    } else {
      xd->recon_above[0] = dst_buffer[0] + recon_yoffset;
      xd->recon_above[1] = dst_buffer[1] + recon_uvoffset;
      xd->recon_above[2] = dst_buffer[2] + recon_uvoffset;

      xd->recon_left[0] = xd->recon_above[0] - 1;
      xd->recon_left[1] = xd->recon_above[1] - 1;
      xd->recon_left[2] = xd->recon_above[2] - 1;

      xd->recon_above[0] -= xd->dst.y_stride;
      xd->recon_above[1] -= xd->dst.uv_stride;
      xd->recon_above[2] -= xd->dst.uv_stride;

      /* TODO: move outside the row loop */
      xd->recon_left_stride[0] = xd->dst.y_stride;
      xd->recon_left_stride[1] = xd->dst.uv_stride;

      setup_intra_recon_left(xd->recon_left[0], xd->recon_left[1],
                             xd->recon_left[2], xd->dst.y_stride,
                             xd->dst.uv_stride);
    }
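
    /* Row synchronization: every nsync columns a thread publishes its
     * progress in mt_current_mb_col[mb_row]; before starting a new
     * group of columns it spin-waits until the row above is at least
     * nsync columns ahead, so the above and above-right context it
     * needs has already been decoded.
     */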
    for (mb_col = 0; mb_col < pc->mb_cols; ++mb_col) {
      if (((mb_col - 1) % nsync) == 0) {
        vpx_atomic_store_release(current_mb_col, mb_col - 1);
      }

      if (mb_row && !(mb_col & (nsync - 1))) {
        vp8_atomic_spin_wait(mb_col, last_row_current_mb_col, nsync);
      }

      /* Distance of MB to the various image edges.
       * These are specified to 8th pel as they are always
       * compared to values that are in 1/8th pel units.
       */
      xd->mb_to_left_edge = -((mb_col * 16) << 3);
      xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;

#if CONFIG_ERROR_CONCEALMENT
      {
        int corrupt_residual =
            (!pbi->independent_partitions && pbi->frame_corrupt_residual) ||
            vp8dx_bool_error(xd->current_bc);
        if (pbi->ec_active &&
            (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) &&
            corrupt_residual) {
          /* We have an intra block with corrupt coefficients; better to
           * conceal with an inter block. Interpolate MVs from
           * neighboring MBs.
           *
           * Note that for the first MB with corrupt residual in a
           * frame, we might not discover the corruption until after
           * decoding the residual. That happens after this check, so no
           * inter concealment will be done in that case.
           */
          vp8_interpolate_motion(xd, mb_row, mb_col, pc->mb_rows, pc->mb_cols);
        }
      }
#endif

      xd->dst.y_buffer = dst_buffer[0] + recon_yoffset;
      xd->dst.u_buffer = dst_buffer[1] + recon_uvoffset;
      xd->dst.v_buffer = dst_buffer[2] + recon_uvoffset;

      xd->pre.y_buffer =
          ref_buffer[xd->mode_info_context->mbmi.ref_frame][0] + recon_yoffset;
      xd->pre.u_buffer =
          ref_buffer[xd->mode_info_context->mbmi.ref_frame][1] + recon_uvoffset;
      xd->pre.v_buffer =
          ref_buffer[xd->mode_info_context->mbmi.ref_frame][2] + recon_uvoffset;

      /* propagate errors from reference frames */
      xd->corrupted |= ref_fb_corrupted[xd->mode_info_context->mbmi.ref_frame];

      mt_decode_macroblock(pbi, xd, 0);

      xd->left_available = 1;

      /* check if the boolean decoder has suffered an error */
      xd->corrupted |= vp8dx_bool_error(xd->current_bc);

      xd->recon_above[0] += 16;
      xd->recon_above[1] += 8;
      xd->recon_above[2] += 8;

      if (!pbi->common.filter_level) {
        xd->recon_left[0] += 16;
        xd->recon_left[1] += 8;
        xd->recon_left[2] += 8;
      }
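
      /* Loop filter path: first save this MB's unfiltered bottom row
       * and right column for the neighbors that will predict from
       * them, then filter. Interior (sub-block) edges are skipped for
       * whole-MB modes with no residual, since those MBs have no
       * transform-block discontinuities to smooth.
       */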
      if (pbi->common.filter_level) {
        int skip_lf = (xd->mode_info_context->mbmi.mode != B_PRED &&
                       xd->mode_info_context->mbmi.mode != SPLITMV &&
                       xd->mode_info_context->mbmi.mb_skip_coeff);

        const int mode_index =
            lfi_n->mode_lf_lut[xd->mode_info_context->mbmi.mode];
        const int seg = xd->mode_info_context->mbmi.segment_id;
        const int ref_frame = xd->mode_info_context->mbmi.ref_frame;

        filter_level = lfi_n->lvl[seg][ref_frame][mode_index];

        if (mb_row != pc->mb_rows - 1) {
          /* Save this MB's bottom row of decoded pixels for next-row
           * decoding */
          memcpy((pbi->mt_yabove_row[mb_row + 1] + 32 + mb_col * 16),
                 (xd->dst.y_buffer + 15 * recon_y_stride), 16);
          memcpy((pbi->mt_uabove_row[mb_row + 1] + 16 + mb_col * 8),
                 (xd->dst.u_buffer + 7 * recon_uv_stride), 8);
          memcpy((pbi->mt_vabove_row[mb_row + 1] + 16 + mb_col * 8),
                 (xd->dst.v_buffer + 7 * recon_uv_stride), 8);
        }

        /* save left_col for next MB decoding */
        if (mb_col != pc->mb_cols - 1) {
          MODE_INFO *next = xd->mode_info_context + 1;

          if (next->mbmi.ref_frame == INTRA_FRAME) {
            for (i = 0; i < 16; ++i) {
              pbi->mt_yleft_col[mb_row][i] =
                  xd->dst.y_buffer[i * recon_y_stride + 15];
            }
            for (i = 0; i < 8; ++i) {
              pbi->mt_uleft_col[mb_row][i] =
                  xd->dst.u_buffer[i * recon_uv_stride + 7];
              pbi->mt_vleft_col[mb_row][i] =
                  xd->dst.v_buffer[i * recon_uv_stride + 7];
            }
          }
        }

        /* loopfilter on this macroblock. */
        if (filter_level) {
          if (pc->filter_type == NORMAL_LOOPFILTER) {
            loop_filter_info lfi;
            FRAME_TYPE frame_type = pc->frame_type;
            const int hev_index = lfi_n->hev_thr_lut[frame_type][filter_level];
            lfi.mblim = lfi_n->mblim[filter_level];
            lfi.blim = lfi_n->blim[filter_level];
            lfi.lim = lfi_n->lim[filter_level];
            lfi.hev_thr = lfi_n->hev_thr[hev_index];

            if (mb_col > 0)
              vp8_loop_filter_mbv(xd->dst.y_buffer, xd->dst.u_buffer,
                                  xd->dst.v_buffer, recon_y_stride,
                                  recon_uv_stride, &lfi);

            if (!skip_lf)
              vp8_loop_filter_bv(xd->dst.y_buffer, xd->dst.u_buffer,
                                 xd->dst.v_buffer, recon_y_stride,
                                 recon_uv_stride, &lfi);

            /* don't apply across umv border */
            if (mb_row > 0)
              vp8_loop_filter_mbh(xd->dst.y_buffer, xd->dst.u_buffer,
                                  xd->dst.v_buffer, recon_y_stride,
                                  recon_uv_stride, &lfi);

            if (!skip_lf)
              vp8_loop_filter_bh(xd->dst.y_buffer, xd->dst.u_buffer,
                                 xd->dst.v_buffer, recon_y_stride,
                                 recon_uv_stride, &lfi);
          } else {
            if (mb_col > 0)
              vp8_loop_filter_simple_mbv(xd->dst.y_buffer, recon_y_stride,
                                         lfi_n->mblim[filter_level]);

            if (!skip_lf)
              vp8_loop_filter_simple_bv(xd->dst.y_buffer, recon_y_stride,
                                        lfi_n->blim[filter_level]);

            /* don't apply across umv border */
            if (mb_row > 0)
              vp8_loop_filter_simple_mbh(xd->dst.y_buffer, recon_y_stride,
                                         lfi_n->mblim[filter_level]);

            if (!skip_lf)
              vp8_loop_filter_simple_bh(xd->dst.y_buffer, recon_y_stride,
                                        lfi_n->blim[filter_level]);
          }
        }
      }

      recon_yoffset += 16;
      recon_uvoffset += 8;

      ++xd->mode_info_context; /* next mb */

      xd->above_context++;
    }

    /* adjust to the next row of mbs */
    if (pbi->common.filter_level) {
      if (mb_row != pc->mb_rows - 1) {
        int lasty = yv12_fb_lst->y_width + VP8BORDERINPIXELS;
        int lastuv = (yv12_fb_lst->y_width >> 1) + (VP8BORDERINPIXELS >> 1);

        for (i = 0; i < 4; ++i) {
          pbi->mt_yabove_row[mb_row + 1][lasty + i] =
              pbi->mt_yabove_row[mb_row + 1][lasty - 1];
          pbi->mt_uabove_row[mb_row + 1][lastuv + i] =
              pbi->mt_uabove_row[mb_row + 1][lastuv - 1];
          pbi->mt_vabove_row[mb_row + 1][lastuv + i] =
              pbi->mt_vabove_row[mb_row + 1][lastuv - 1];
        }
      }
    } else {
      vp8_extend_mb_row(yv12_fb_new, xd->dst.y_buffer + 16,
                        xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
    }

    /* last MB of row is ready just after extension is done */
    vpx_atomic_store_release(current_mb_col, mb_col + nsync);

    ++xd->mode_info_context; /* skip prediction column */
    xd->up_available = 1;

    /* skip the rows owned by the other decoding threads */
    xd->mode_info_context += xd->mode_info_stride * pbi->decoding_thread_count;
  }

  /* signal end of frame decoding if this thread processed the last mb_row */
  if (last_mb_row == (pc->mb_rows - 1)) sem_post(&pbi->h_event_end_decoding);
}

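/* Worker thread body. MB rows are dealt out round-robin: the main
 * thread decodes rows 0, N + 1, 2 * (N + 1), ... and worker i decodes
 * rows i + 1, i + 1 + (N + 1), ..., where N is decoding_thread_count.
 * Each worker parks on its start semaphore between frames and exits
 * once b_multithreaded_rd is cleared.
 */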
static THREAD_FUNCTION thread_decoding_proc(void *p_data) {
  int ithread = ((DECODETHREAD_DATA *)p_data)->ithread;
  VP8D_COMP *pbi = (VP8D_COMP *)(((DECODETHREAD_DATA *)p_data)->ptr1);
  MB_ROW_DEC *mbrd = (MB_ROW_DEC *)(((DECODETHREAD_DATA *)p_data)->ptr2);
  ENTROPY_CONTEXT_PLANES mb_row_left_context;

  while (1) {
    if (vpx_atomic_load_acquire(&pbi->b_multithreaded_rd) == 0) break;

    if (sem_wait(&pbi->h_event_start_decoding[ithread]) == 0) {
      if (vpx_atomic_load_acquire(&pbi->b_multithreaded_rd) == 0) {
        break;
      } else {
        MACROBLOCKD *xd = &mbrd->mbd;
        xd->left_context = &mb_row_left_context;

        mt_decode_mb_rows(pbi, xd, ithread + 1);
      }
    }
  }

  return 0;
}

void vp8_decoder_create_threads(VP8D_COMP *pbi) {
  int core_count = 0;
  unsigned int ithread;

  vpx_atomic_init(&pbi->b_multithreaded_rd, 0);
  pbi->allocated_decoding_thread_count = 0;

  /* limit decoding threads to the max number of token partitions */
  core_count = (pbi->max_threads > 8) ? 8 : pbi->max_threads;

  /* limit decoding threads to the available cores */
  if (core_count > pbi->common.processor_core_count) {
    core_count = pbi->common.processor_core_count;
  }

  if (core_count > 1) {
    vpx_atomic_init(&pbi->b_multithreaded_rd, 1);
    pbi->decoding_thread_count = core_count - 1;

    CALLOC_ARRAY(pbi->h_decoding_thread, pbi->decoding_thread_count);
    CALLOC_ARRAY(pbi->h_event_start_decoding, pbi->decoding_thread_count);
    CALLOC_ARRAY_ALIGNED(pbi->mb_row_di, pbi->decoding_thread_count, 32);
    CALLOC_ARRAY(pbi->de_thread_data, pbi->decoding_thread_count);

    if (sem_init(&pbi->h_event_end_decoding, 0, 0)) {
      vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR,
                         "Failed to initialize semaphore");
    }

    for (ithread = 0; ithread < pbi->decoding_thread_count; ++ithread) {
      if (sem_init(&pbi->h_event_start_decoding[ithread], 0, 0)) break;

      vp8_setup_block_dptrs(&pbi->mb_row_di[ithread].mbd);

      pbi->de_thread_data[ithread].ithread = ithread;
      pbi->de_thread_data[ithread].ptr1 = (void *)pbi;
      pbi->de_thread_data[ithread].ptr2 = (void *)&pbi->mb_row_di[ithread];

      if (pthread_create(&pbi->h_decoding_thread[ithread], 0,
                         thread_decoding_proc, &pbi->de_thread_data[ithread])) {
        sem_destroy(&pbi->h_event_start_decoding[ithread]);
        break;
      }
    }

    pbi->allocated_decoding_thread_count = ithread;
    if (pbi->allocated_decoding_thread_count !=
        (int)pbi->decoding_thread_count) {
      /* the remainder of the cleanup cases will be handled in
       * vp8_decoder_remove_threads(). */
      if (pbi->allocated_decoding_thread_count == 0) {
        sem_destroy(&pbi->h_event_end_decoding);
      }
      vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR,
                         "Failed to create threads");
    }
  }
}

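/* The per-row scratch buffers are sized from the frame geometry, so
 * the caller must pass the mb_rows value the buffers were allocated
 * with (the previous frame's row count when reallocating after a
 * resize).
 */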
void vp8mt_de_alloc_temp_buffers(VP8D_COMP *pbi, int mb_rows) {
  int i;

  vpx_free(pbi->mt_current_mb_col);
  pbi->mt_current_mb_col = NULL;

  /* Free above_row buffers. */
  if (pbi->mt_yabove_row) {
    for (i = 0; i < mb_rows; ++i) {
      vpx_free(pbi->mt_yabove_row[i]);
      pbi->mt_yabove_row[i] = NULL;
    }
    vpx_free(pbi->mt_yabove_row);
    pbi->mt_yabove_row = NULL;
  }

  if (pbi->mt_uabove_row) {
    for (i = 0; i < mb_rows; ++i) {
      vpx_free(pbi->mt_uabove_row[i]);
      pbi->mt_uabove_row[i] = NULL;
    }
    vpx_free(pbi->mt_uabove_row);
    pbi->mt_uabove_row = NULL;
  }

  if (pbi->mt_vabove_row) {
    for (i = 0; i < mb_rows; ++i) {
      vpx_free(pbi->mt_vabove_row[i]);
      pbi->mt_vabove_row[i] = NULL;
    }
    vpx_free(pbi->mt_vabove_row);
    pbi->mt_vabove_row = NULL;
  }

  /* Free left_col buffers. */
  if (pbi->mt_yleft_col) {
    for (i = 0; i < mb_rows; ++i) {
      vpx_free(pbi->mt_yleft_col[i]);
      pbi->mt_yleft_col[i] = NULL;
    }
    vpx_free(pbi->mt_yleft_col);
    pbi->mt_yleft_col = NULL;
  }

  if (pbi->mt_uleft_col) {
    for (i = 0; i < mb_rows; ++i) {
      vpx_free(pbi->mt_uleft_col[i]);
      pbi->mt_uleft_col[i] = NULL;
    }
    vpx_free(pbi->mt_uleft_col);
    pbi->mt_uleft_col = NULL;
  }

  if (pbi->mt_vleft_col) {
    for (i = 0; i < mb_rows; ++i) {
      vpx_free(pbi->mt_vleft_col[i]);
      pbi->mt_vleft_col[i] = NULL;
    }
    vpx_free(pbi->mt_vleft_col);
    pbi->mt_vleft_col = NULL;
  }
}

void vp8mt_alloc_temp_buffers(VP8D_COMP *pbi, int width, int prev_mb_rows) {
  VP8_COMMON *const pc = &pbi->common;
  int i;
  int uv_width;

  if (vpx_atomic_load_acquire(&pbi->b_multithreaded_rd)) {
    vp8mt_de_alloc_temp_buffers(pbi, prev_mb_rows);

    /* our internal buffers are always multiples of 16 */
    if ((width & 0xf) != 0) width += 16 - (width & 0xf);

    if (width < 640) {
      pbi->sync_range = 1;
    } else if (width <= 1280) {
      pbi->sync_range = 8;
    } else if (width <= 2560) {
      pbi->sync_range = 16;
    } else {
      pbi->sync_range = 32;
    }

    uv_width = width >> 1;

    /* Allocate a vpx_atomic_int for each mb row. */
    CHECK_MEM_ERROR(pbi->mt_current_mb_col,
                    vpx_malloc(sizeof(*pbi->mt_current_mb_col) * pc->mb_rows));
    for (i = 0; i < pc->mb_rows; ++i)
      vpx_atomic_init(&pbi->mt_current_mb_col[i], 0);

    /* Allocate memory for above_row buffers. */
    CALLOC_ARRAY(pbi->mt_yabove_row, pc->mb_rows);
    for (i = 0; i < pc->mb_rows; ++i)
      CHECK_MEM_ERROR(
          pbi->mt_yabove_row[i],
          vpx_memalign(
              16, sizeof(unsigned char) * (width + (VP8BORDERINPIXELS << 1))));

    CALLOC_ARRAY(pbi->mt_uabove_row, pc->mb_rows);
    for (i = 0; i < pc->mb_rows; ++i)
      CHECK_MEM_ERROR(
          pbi->mt_uabove_row[i],
          vpx_memalign(16,
                       sizeof(unsigned char) * (uv_width + VP8BORDERINPIXELS)));

    CALLOC_ARRAY(pbi->mt_vabove_row, pc->mb_rows);
    for (i = 0; i < pc->mb_rows; ++i)
      CHECK_MEM_ERROR(
          pbi->mt_vabove_row[i],
          vpx_memalign(16,
                       sizeof(unsigned char) * (uv_width + VP8BORDERINPIXELS)));

    /* Allocate memory for left_col buffers. */
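    /* Each left_col buffer holds one MB edge column (16 luma / 8
     * chroma pixels) per MB row; the columns are re-initialized to 129
     * at the start of every frame that enables the loop filter.
     */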
    CALLOC_ARRAY(pbi->mt_yleft_col, pc->mb_rows);
    for (i = 0; i < pc->mb_rows; ++i)
      CHECK_MEM_ERROR(pbi->mt_yleft_col[i],
                      vpx_calloc(sizeof(unsigned char) * 16, 1));

    CALLOC_ARRAY(pbi->mt_uleft_col, pc->mb_rows);
    for (i = 0; i < pc->mb_rows; ++i)
      CHECK_MEM_ERROR(pbi->mt_uleft_col[i],
                      vpx_calloc(sizeof(unsigned char) * 8, 1));

    CALLOC_ARRAY(pbi->mt_vleft_col, pc->mb_rows);
    for (i = 0; i < pc->mb_rows; ++i)
      CHECK_MEM_ERROR(pbi->mt_vleft_col[i],
                      vpx_calloc(sizeof(unsigned char) * 8, 1));
  }
}

void vp8_decoder_remove_threads(VP8D_COMP *pbi) {
  /* shut down the MB decoding threads */
  if (vpx_atomic_load_acquire(&pbi->b_multithreaded_rd)) {
    int i;
    vpx_atomic_store_release(&pbi->b_multithreaded_rd, 0);

    /* allow all threads to exit */
    for (i = 0; i < pbi->allocated_decoding_thread_count; ++i) {
      sem_post(&pbi->h_event_start_decoding[i]);
      pthread_join(pbi->h_decoding_thread[i], NULL);
    }

    for (i = 0; i < pbi->allocated_decoding_thread_count; ++i) {
      sem_destroy(&pbi->h_event_start_decoding[i]);
    }

    if (pbi->allocated_decoding_thread_count) {
      sem_destroy(&pbi->h_event_end_decoding);
    }

    vpx_free(pbi->h_decoding_thread);
    pbi->h_decoding_thread = NULL;

    vpx_free(pbi->h_event_start_decoding);
    pbi->h_event_start_decoding = NULL;

    vpx_free(pbi->mb_row_di);
    pbi->mb_row_di = NULL;

    vpx_free(pbi->de_thread_data);
    pbi->de_thread_data = NULL;

    vp8mt_de_alloc_temp_buffers(pbi, pbi->common.mb_rows);
  }
}

void vp8mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd) {
  VP8_COMMON *pc = &pbi->common;
  unsigned int i;
  int j;

  int filter_level = pc->filter_level;
  YV12_BUFFER_CONFIG *yv12_fb_new = pbi->dec_fb_ref[INTRA_FRAME];

  if (filter_level) {
    /* Set above_row buffer to 127 for decoding the first MB row */
    memset(pbi->mt_yabove_row[0] + VP8BORDERINPIXELS - 1, 127,
           yv12_fb_new->y_width + 5);
    memset(pbi->mt_uabove_row[0] + (VP8BORDERINPIXELS >> 1) - 1, 127,
           (yv12_fb_new->y_width >> 1) + 5);
    memset(pbi->mt_vabove_row[0] + (VP8BORDERINPIXELS >> 1) - 1, 127,
           (yv12_fb_new->y_width >> 1) + 5);

    for (j = 1; j < pc->mb_rows; ++j) {
      memset(pbi->mt_yabove_row[j] + VP8BORDERINPIXELS - 1, (unsigned char)129,
             1);
      memset(pbi->mt_uabove_row[j] + (VP8BORDERINPIXELS >> 1) - 1,
             (unsigned char)129, 1);
      memset(pbi->mt_vabove_row[j] + (VP8BORDERINPIXELS >> 1) - 1,
             (unsigned char)129, 1);
    }

    /* Set left_col to 129 initially */
    for (j = 0; j < pc->mb_rows; ++j) {
      memset(pbi->mt_yleft_col[j], (unsigned char)129, 16);
      memset(pbi->mt_uleft_col[j], (unsigned char)129, 8);
      memset(pbi->mt_vleft_col[j], (unsigned char)129, 8);
    }

    /* Initialize the loop filter for this frame. */
    vp8_loop_filter_frame_init(pc, &pbi->mb, filter_level);
  } else {
    vp8_setup_intra_recon_top_line(yv12_fb_new);
  }

  setup_decoding_thread_data(pbi, xd, pbi->mb_row_di,
                             pbi->decoding_thread_count);

  for (i = 0; i < pbi->decoding_thread_count; ++i) {
    sem_post(&pbi->h_event_start_decoding[i]);
  }

  mt_decode_mb_rows(pbi, xd, 0);

  /* wait until the last mb row of the frame has been decoded */
  sem_wait(&pbi->h_event_end_decoding);
}