/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */


#include "vpx_ports/config.h"
#include "vp8/common/recon.h"
#include "vp8/common/reconintra.h"
#include "vpx_mem/vpx_mem.h"
#include "onyxd_int.h"

/* For skip_recon_mb(), add vp8_build_intra_predictors_mby_s(MACROBLOCKD *x) and
 * vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *x).
 */

/* Build the 16x16 luma intra prediction for one macroblock into
 * x->predictor (rows are 16 bytes apart).
 *
 * Multithreaded-decoder twist: when the loop filter is enabled, the
 * above-row / left-column reference pixels are taken from the per-row
 * border buffers (pbi->mt_yabove_row[mb_row], pbi->mt_yleft_col[mb_row])
 * rather than from the reconstructed frame; presumably those buffers
 * hold the pre-loop-filter edge pixels -- confirm against their producer.
 */
void vp8mt_build_intra_predictors_mby(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
{
    unsigned char *yabove_row;   /* row of 16 pixels above this MB */
    unsigned char *yleft_col;    /* column of 16 pixels left of this MB */
    unsigned char yleft_buf[16]; /* scratch for gathering the left column */
    unsigned char ytop_left;     /* pixel above-left of the MB */
    unsigned char *ypred_ptr = x->predictor;
    int r, c, i;

    if (pbi->common.filter_level)
    {
        /* +32 skips the border area at the start of the saved above row. */
        yabove_row = pbi->mt_yabove_row[mb_row] + mb_col*16 +32;
        yleft_col = pbi->mt_yleft_col[mb_row];
    } else
    {
        yabove_row = x->dst.y_buffer - x->dst.y_stride;

        /* Gather the left column (y_stride apart in the frame) into a
         * contiguous scratch array.
         */
        for (i = 0; i < 16; i++)
            yleft_buf[i] = x->dst.y_buffer [i* x->dst.y_stride -1];
        yleft_col = yleft_buf;
    }

    ytop_left = yabove_row[-1];

    /* for Y */
    switch (x->mode_info_context->mbmi.mode)
    {
    case DC_PRED:
    {
        int expected_dc;
        int i;      /* NOTE: shadows the outer i */
        int shift;
        int average = 0;

        if (x->up_available || x->left_available)
        {
            if (x->up_available)
            {
                for (i = 0; i < 16; i++)
                {
                    average += yabove_row[i];
                }
            }

            if (x->left_available)
            {
                for (i = 0; i < 16; i++)
                {
                    average += yleft_col[i];
                }
            }

            /* Rounded divide by 16 (one edge available) or 32 (both). */
            shift = 3 + x->up_available + x->left_available;
            expected_dc = (average + (1 << (shift - 1))) >> shift;
        }
        else
        {
            expected_dc = 128;   /* no neighbors: mid-gray */
        }

        vpx_memset(ypred_ptr, expected_dc, 256);
    }
    break;
    case V_PRED:
    {
        /* Replicate the above row into all 16 rows, four 32-bit words at
         * a time.  NOTE(review): the int-pointer copies assume 4-byte
         * alignment of both buffers -- confirm on strict-alignment
         * targets.
         */
        for (r = 0; r < 16; r++)
        {
            ((int *)ypred_ptr)[0] = ((int *)yabove_row)[0];
            ((int *)ypred_ptr)[1] = ((int *)yabove_row)[1];
            ((int *)ypred_ptr)[2] = ((int *)yabove_row)[2];
            ((int *)ypred_ptr)[3] = ((int *)yabove_row)[3];
            ypred_ptr += 16;
        }
    }
    break;
    case H_PRED:
    {
        /* Each row is its left-neighbor pixel replicated 16 times. */
        for (r = 0; r < 16; r++)
        {
            vpx_memset(ypred_ptr, yleft_col[r], 16);
            ypred_ptr += 16;
        }
    }
    break;
    case TM_PRED:
    {
        /* True-motion: left + above - top_left, clamped to [0,255]. */
        for (r = 0; r < 16; r++)
        {
            for (c = 0; c < 16; c++)
            {
                int pred = yleft_col[r] + yabove_row[ c] - ytop_left;

                if (pred < 0)
                    pred = 0;

                if (pred > 255)
                    pred = 255;

                ypred_ptr[c] = pred;
            }

            ypred_ptr += 16;
        }
    }
    break;
    case B_PRED:
    case NEARESTMV:
    case NEARMV:
    case ZEROMV:
    case NEWMV:
    case SPLITMV:
    case MB_MODE_COUNT:
        /* Not 16x16 intra modes: nothing to build here. */
        break;
    }
}

/* Same prediction as vp8mt_build_intra_predictors_mby(), but written
 * directly into the destination frame (x->dst.y_buffer, rows y_stride
 * apart) instead of x->predictor -- the "_s" variant referenced by the
 * skip_recon_mb() note at the top of this file.
 */
void vp8mt_build_intra_predictors_mby_s(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
{
    unsigned char *yabove_row;   /* row of 16 pixels above this MB */
    unsigned char *yleft_col;    /* column of 16 pixels left of this MB */
    unsigned char yleft_buf[16];
    unsigned char ytop_left;     /* pixel above-left of the MB */
    unsigned char *ypred_ptr = x->predictor;
    int r, c, i;

    int y_stride = x->dst.y_stride;
    ypred_ptr = x->dst.y_buffer;   /* write in place, not into x->predictor */

    if (pbi->common.filter_level)
    {
        yabove_row = pbi->mt_yabove_row[mb_row] + mb_col*16 +32;
        yleft_col = pbi->mt_yleft_col[mb_row];
    } else
    {
        yabove_row = x->dst.y_buffer - x->dst.y_stride;

        for (i = 0; i < 16; i++)
            yleft_buf[i] = x->dst.y_buffer [i* x->dst.y_stride -1];
        yleft_col = yleft_buf;
    }

    ytop_left = yabove_row[-1];

    /* for Y */
    switch (x->mode_info_context->mbmi.mode)
    {
    case DC_PRED:
    {
        int expected_dc;
        int i;      /* NOTE: shadows the outer i */
        int shift;
        int average = 0;

        if (x->up_available || x->left_available)
        {
            if (x->up_available)
            {
                for (i = 0; i < 16; i++)
                {
                    average += yabove_row[i];
                }
            }

            if (x->left_available)
            {
                for (i = 0; i < 16; i++)
                {
                    average += yleft_col[i];
                }
            }

            /* Rounded divide by 16 (one edge available) or 32 (both). */
            shift = 3 + x->up_available + x->left_available;
            expected_dc = (average + (1 << (shift - 1))) >> shift;
        }
        else
        {
            expected_dc = 128;
        }

        /* Row-by-row memset because destination rows are y_stride apart. */
        for (r = 0; r < 16; r++)
        {
            vpx_memset(ypred_ptr, expected_dc, 16);
            ypred_ptr += y_stride;
        }
    }
    break;
    case V_PRED:
    {
        /* NOTE(review): 32-bit copies assume 4-byte alignment -- confirm
         * on strict-alignment targets.
         */
        for (r = 0; r < 16; r++)
        {
            ((int *)ypred_ptr)[0] = ((int *)yabove_row)[0];
            ((int *)ypred_ptr)[1] = ((int *)yabove_row)[1];
            ((int *)ypred_ptr)[2] = ((int *)yabove_row)[2];
            ((int *)ypred_ptr)[3] = ((int *)yabove_row)[3];
            ypred_ptr += y_stride;
        }
    }
    break;
    case H_PRED:
    {
        for (r = 0; r < 16; r++)
        {
            vpx_memset(ypred_ptr, yleft_col[r], 16);
            ypred_ptr += y_stride;
        }
    }
    break;
    case TM_PRED:
    {
        /* True-motion: left + above - top_left, clamped to [0,255]. */
        for (r = 0; r < 16; r++)
        {
            for (c = 0; c < 16; c++)
            {
                int pred = yleft_col[r] + yabove_row[ c] - ytop_left;

                if (pred < 0)
                    pred = 0;

                if (pred > 255)
                    pred = 255;

                ypred_ptr[c] = pred;
            }

            ypred_ptr += y_stride;
        }
    }
    break;
    case B_PRED:
    case NEARESTMV:
    case NEARMV:
    case ZEROMV:
    case NEWMV:
    case SPLITMV:
    case MB_MODE_COUNT:
        break;
    }
}

/* Build the 8x8 U and V intra predictions for one macroblock into
 * x->predictor[256] and x->predictor[320] (rows are 8 bytes apart).
 * Reference-pixel selection mirrors the luma routines above; chroma
 * border buffers use a +16 offset where luma uses +32.
 */
void vp8mt_build_intra_predictors_mbuv(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
{
    unsigned char *uabove_row;   /* 8 U pixels above this MB */
    unsigned char *uleft_col;    /* 8 U pixels left of this MB */
    unsigned char uleft_buf[8];
    unsigned char utop_left;     /* U pixel above-left */
    unsigned char *vabove_row;   /* 8 V pixels above this MB */
    unsigned char *vleft_col;    /* 8 V pixels left of this MB */
    unsigned char vleft_buf[8];
    unsigned char vtop_left;     /* V pixel above-left */
    unsigned char *upred_ptr = &x->predictor[256];
    unsigned char *vpred_ptr = &x->predictor[320];
    int i, j;

    if (pbi->common.filter_level)
    {
        uabove_row = pbi->mt_uabove_row[mb_row] + mb_col*8 +16;
        vabove_row = pbi->mt_vabove_row[mb_row] + mb_col*8 +16;
        uleft_col = pbi->mt_uleft_col[mb_row];
        vleft_col = pbi->mt_vleft_col[mb_row];
    } else
    {
        uabove_row = x->dst.u_buffer - x->dst.uv_stride;
        vabove_row = x->dst.v_buffer - x->dst.uv_stride;

        /* Gather both left columns into contiguous scratch arrays. */
        for (i = 0; i < 8; i++)
        {
            uleft_buf[i] = x->dst.u_buffer [i* x->dst.uv_stride -1];
            vleft_buf[i] = x->dst.v_buffer [i* x->dst.uv_stride -1];
        }
        uleft_col = uleft_buf;
        vleft_col = vleft_buf;
    }
    utop_left = uabove_row[-1];
    vtop_left = vabove_row[-1];

    switch (x->mode_info_context->mbmi.uv_mode)
    {
    case DC_PRED:
    {
        int expected_udc;
        int expected_vdc;
        int i;      /* NOTE: shadows the outer i */
        int shift;
        int Uaverage = 0;
        int Vaverage = 0;

        if (x->up_available)
        {
            for (i = 0; i < 8; i++)
            {
                Uaverage += uabove_row[i];
                Vaverage += vabove_row[i];
            }
        }

        if (x->left_available)
        {
            for (i = 0; i < 8; i++)
            {
                Uaverage += uleft_col[i];
                Vaverage += vleft_col[i];
            }
        }

        if (!x->up_available && !x->left_available)
        {
            expected_udc = 128;
            expected_vdc = 128;
        }
        else
        {
            /* Rounded divide by 8 (one edge) or 16 (both). */
            shift = 2 + x->up_available + x->left_available;
            expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
            expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
        }

        vpx_memset(upred_ptr, expected_udc, 64);
        vpx_memset(vpred_ptr, expected_vdc, 64);
    }
    break;
    case V_PRED:
    {
        int i;

        /* Replicate the above rows downwards. */
        for (i = 0; i < 8; i++)
        {
            vpx_memcpy(upred_ptr, uabove_row, 8);
            vpx_memcpy(vpred_ptr, vabove_row, 8);
            upred_ptr += 8;
            vpred_ptr += 8;
        }
    }
    break;
    case H_PRED:
    {
        int i;

        /* Replicate the left pixels across each row. */
        for (i = 0; i < 8; i++)
        {
            vpx_memset(upred_ptr, uleft_col[i], 8);
            vpx_memset(vpred_ptr, vleft_col[i], 8);
            upred_ptr += 8;
            vpred_ptr += 8;
        }
    }
    break;
    case TM_PRED:
    {
        int i;

        /* True-motion: left + above - top_left, clamped to [0,255]. */
        for (i = 0; i < 8; i++)
        {
            for (j = 0; j < 8; j++)
            {
                int predu = uleft_col[i] + uabove_row[j] - utop_left;
                int predv = vleft_col[i] + vabove_row[j] - vtop_left;

                if (predu < 0)
                    predu = 0;

                if (predu > 255)
                    predu = 255;

                if (predv < 0)
                    predv = 0;

                if (predv > 255)
                    predv = 255;

                upred_ptr[j] = predu;
                vpred_ptr[j] = predv;
            }

            upred_ptr += 8;
            vpred_ptr += 8;
        }
    }
    break;
    case B_PRED:
    case NEARESTMV:
    case NEARMV:
    case ZEROMV:
    case NEWMV:
    case SPLITMV:
    case MB_MODE_COUNT:
        break;
    }
}

/* Same prediction as vp8mt_build_intra_predictors_mbuv(), but written
 * directly into the destination frame (x->dst.u_buffer / v_buffer, rows
 * uv_stride apart) -- the "_s" variant.
 */
void vp8mt_build_intra_predictors_mbuv_s(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
{
    unsigned char *uabove_row;
    unsigned char *uleft_col;
    unsigned char uleft_buf[8];
    unsigned char utop_left;
    unsigned char *vabove_row;
    unsigned char *vleft_col;
    unsigned char vleft_buf[8];
    unsigned char vtop_left;
    unsigned char *upred_ptr = x->dst.u_buffer;   /* write in place */
    unsigned char *vpred_ptr = x->dst.v_buffer;   /* write in place */
    int uv_stride = x->dst.uv_stride;
    int i, j;

    if (pbi->common.filter_level)
    {
        uabove_row = pbi->mt_uabove_row[mb_row] + mb_col*8 +16;
        vabove_row = pbi->mt_vabove_row[mb_row] + mb_col*8 +16;
        uleft_col = pbi->mt_uleft_col[mb_row];
        vleft_col = pbi->mt_vleft_col[mb_row];
    } else
    {
        uabove_row = x->dst.u_buffer - x->dst.uv_stride;
        vabove_row = x->dst.v_buffer - x->dst.uv_stride;

        for (i = 0; i < 8; i++)
        {
            uleft_buf[i] = x->dst.u_buffer [i* x->dst.uv_stride -1];
            vleft_buf[i] = x->dst.v_buffer [i* x->dst.uv_stride -1];
        }
        uleft_col = uleft_buf;
        vleft_col = vleft_buf;
    }
    utop_left = uabove_row[-1];
    vtop_left = vabove_row[-1];

    switch (x->mode_info_context->mbmi.uv_mode)
    {
    case DC_PRED:
    {
        int expected_udc;
        int expected_vdc;
        int i;      /* NOTE: shadows the outer i */
        int shift;
        int Uaverage = 0;
        int Vaverage = 0;

        if (x->up_available)
        {
            for (i = 0; i < 8; i++)
            {
                Uaverage += uabove_row[i];
                Vaverage += vabove_row[i];
            }
        }

        if (x->left_available)
        {
            for (i = 0; i < 8; i++)
            {
                Uaverage += uleft_col[i];
                Vaverage += vleft_col[i];
            }
        }

        if (!x->up_available && !x->left_available)
        {
            expected_udc = 128;
            expected_vdc = 128;
        }
        else
        {
            /* Rounded divide by 8 (one edge) or 16 (both). */
            shift = 2 + x->up_available + x->left_available;
            expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
            expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
        }

        /* Row-by-row memset because destination rows are uv_stride apart. */
        for (i = 0; i < 8; i++)
        {
            vpx_memset(upred_ptr, expected_udc, 8);
            vpx_memset(vpred_ptr, expected_vdc, 8);
            upred_ptr += uv_stride;
            vpred_ptr += uv_stride;
        }
    }
    break;
    case V_PRED:
    {
        int i;

        for (i = 0; i < 8; i++)
        {
            vpx_memcpy(upred_ptr, uabove_row, 8);
            vpx_memcpy(vpred_ptr, vabove_row, 8);
            upred_ptr += uv_stride;
            vpred_ptr += uv_stride;
        }
    }
    break;
    case H_PRED:
    {
        int i;

        for (i = 0; i < 8; i++)
        {
            vpx_memset(upred_ptr, uleft_col[i], 8);
            vpx_memset(vpred_ptr, vleft_col[i], 8);
            upred_ptr += uv_stride;
            vpred_ptr += uv_stride;
        }
    }
    break;
    case TM_PRED:
    {
        int i;

        /* True-motion: left + above - top_left, clamped to [0,255]. */
        for (i = 0; i < 8; i++)
        {
            for (j = 0; j < 8; j++)
            {
                int predu = uleft_col[i] + uabove_row[j] - utop_left;
                int predv = vleft_col[i] + vabove_row[j] - vtop_left;

                if (predu < 0)
                    predu = 0;

                if (predu > 255)
                    predu = 255;

                if (predv < 0)
                    predv = 0;

                if (predv > 255)
                    predv = 255;

                upred_ptr[j] = predu;
                vpred_ptr[j] = predv;
            }

            upred_ptr += uv_stride;
            vpred_ptr += uv_stride;
        }
    }
    break;
    case B_PRED:
    case NEARESTMV:
    case NEARMV:
    case ZEROMV:
    case NEWMV:
    case SPLITMV:
    case MB_MODE_COUNT:
        break;
    }
}


/* Build one 4x4 luma sub-block prediction (block index num, 0..15, in
 * raster order within the MB) for mode b_mode into predictor (rows are
 * 16 bytes apart).
 *
 * Border handling when the loop filter is enabled:
 *   - blocks in MB row 0 (num < 4) take Above from mt_yabove_row;
 *   - blocks in MB column 0 (num % 4 == 0) take Left from mt_yleft_col;
 *   - blocks 4, 8 and 12 also take top_left from mt_yleft_col.
 * All other reference pixels are read through the BLOCKD destination
 * pointers.
 */
void vp8mt_predict_intra4x4(VP8D_COMP *pbi,
                            MACROBLOCKD *xd,
                            int b_mode,
                            unsigned char *predictor,
                            int mb_row,
                            int mb_col,
                            int num)
{
    int i, r, c;

    unsigned char *Above;     /* row above the block (B_VE/B_LD also read above-right) */
    unsigned char Left[4];    /* column left of the block */
    unsigned char top_left;   /* pixel above-left of the block */

    BLOCKD *x = &xd->block[num];

    /*Caution: For some b_mode, it needs 8 pixels (4 above + 4 above-right).*/
    if (num < 4 && pbi->common.filter_level)
        Above = pbi->mt_yabove_row[mb_row] + mb_col*16 + num*4 + 32;
    else
        Above = *(x->base_dst) + x->dst - x->dst_stride;

    if (num%4==0 && pbi->common.filter_level)
    {
        for (i=0; i<4; i++)
            Left[i] = pbi->mt_yleft_col[mb_row][num + i];
    }else
    {
        Left[0] = (*(x->base_dst))[x->dst - 1];
        Left[1] = (*(x->base_dst))[x->dst - 1 + x->dst_stride];
        Left[2] = (*(x->base_dst))[x->dst - 1 + 2 * x->dst_stride];
        Left[3] = (*(x->base_dst))[x->dst - 1 + 3 * x->dst_stride];
    }

    if ((num==4 || num==8 || num==12) && pbi->common.filter_level)
        top_left = pbi->mt_yleft_col[mb_row][num-1];
    else
        top_left = Above[-1];

    switch (b_mode)
    {
    case B_DC_PRED:
    {
        /* Average of the 4 above and 4 left pixels, rounded. */
        int expected_dc = 0;

        for (i = 0; i < 4; i++)
        {
            expected_dc += Above[i];
            expected_dc += Left[i];
        }

        expected_dc = (expected_dc + 4) >> 3;

        for (r = 0; r < 4; r++)
        {
            for (c = 0; c < 4; c++)
            {
                predictor[c] = expected_dc;
            }

            predictor += 16;
        }
    }
    break;
    case B_TM_PRED:
    {
        /* prediction similar to true_motion prediction */
        for (r = 0; r < 4; r++)
        {
            for (c = 0; c < 4; c++)
            {
                int pred = Above[c] - top_left + Left[r];

                if (pred < 0)
                    pred = 0;

                if (pred > 255)
                    pred = 255;

                predictor[c] = pred;
            }

            predictor += 16;
        }
    }
    break;

    case B_VE_PRED:
    {
        /* Vertical with a 1-2-1 smoothing of the above row; ap[3] reads
         * Above[4] (above-right), hence the caution note above.
         */
        unsigned int ap[4];
        ap[0] = (top_left + 2 * Above[0] + Above[1] + 2) >> 2;
        ap[1] = (Above[0] + 2 * Above[1] + Above[2] + 2) >> 2;
        ap[2] = (Above[1] + 2 * Above[2] + Above[3] + 2) >> 2;
        ap[3] = (Above[2] + 2 * Above[3] + Above[4] + 2) >> 2;

        for (r = 0; r < 4; r++)
        {
            for (c = 0; c < 4; c++)
            {
                predictor[c] = ap[c];
            }

            predictor += 16;
        }
    }
    break;

    case B_HE_PRED:
    {
        /* Horizontal with 1-2-1 smoothing of the left column; lp[3]
         * repeats Left[3] by design (no below-left pixel exists).
         */
        unsigned int lp[4];
        lp[0] = (top_left + 2 * Left[0] + Left[1] + 2) >> 2;
        lp[1] = (Left[0] + 2 * Left[1] + Left[2] + 2) >> 2;
        lp[2] = (Left[1] + 2 * Left[2] + Left[3] + 2) >> 2;
        lp[3] = (Left[2] + 2 * Left[3] + Left[3] + 2) >> 2;

        for (r = 0; r < 4; r++)
        {
            for (c = 0; c < 4; c++)
            {
                predictor[c] = lp[r];
            }

            predictor += 16;
        }
    }
    break;
    case B_LD_PRED:
    {
        /* Down-left diagonal; reads ptr[0..7] (above + above-right). */
        unsigned char *ptr = Above;
        predictor[0 * 16 + 0] = (ptr[0] + ptr[1] * 2 + ptr[2] + 2) >> 2;
        predictor[0 * 16 + 1] =
            predictor[1 * 16 + 0] = (ptr[1] + ptr[2] * 2 + ptr[3] + 2) >> 2;
        predictor[0 * 16 + 2] =
            predictor[1 * 16 + 1] =
                predictor[2 * 16 + 0] = (ptr[2] + ptr[3] * 2 + ptr[4] + 2) >> 2;
        predictor[0 * 16 + 3] =
            predictor[1 * 16 + 2] =
                predictor[2 * 16 + 1] =
                    predictor[3 * 16 + 0] = (ptr[3] + ptr[4] * 2 + ptr[5] + 2) >> 2;
        predictor[1 * 16 + 3] =
            predictor[2 * 16 + 2] =
                predictor[3 * 16 + 1] = (ptr[4] + ptr[5] * 2 + ptr[6] + 2) >> 2;
        predictor[2 * 16 + 3] =
            predictor[3 * 16 + 2] = (ptr[5] + ptr[6] * 2 + ptr[7] + 2) >> 2;
        predictor[3 * 16 + 3] = (ptr[6] + ptr[7] * 2 + ptr[7] + 2) >> 2;
    }
    break;
    case B_RD_PRED:
    {
        /* Down-right diagonal over the combined left/top-left/above edge. */
        unsigned char pp[9];

        pp[0] = Left[3];
        pp[1] = Left[2];
        pp[2] = Left[1];
        pp[3] = Left[0];
        pp[4] = top_left;
        pp[5] = Above[0];
        pp[6] = Above[1];
        pp[7] = Above[2];
        pp[8] = Above[3];

        predictor[3 * 16 + 0] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
        predictor[3 * 16 + 1] =
            predictor[2 * 16 + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
        predictor[3 * 16 + 2] =
            predictor[2 * 16 + 1] =
                predictor[1 * 16 + 0] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
        predictor[3 * 16 + 3] =
            predictor[2 * 16 + 2] =
                predictor[1 * 16 + 1] =
                    predictor[0 * 16 + 0] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
        predictor[2 * 16 + 3] =
            predictor[1 * 16 + 2] =
                predictor[0 * 16 + 1] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
        predictor[1 * 16 + 3] =
            predictor[0 * 16 + 2] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
        predictor[0 * 16 + 3] = (pp[6] + pp[7] * 2 + pp[8] + 2) >> 2;
    }
    break;
    case B_VR_PRED:
    {
        /* Vertical-right diagonal. */
        unsigned char pp[9];

        pp[0] = Left[3];
        pp[1] = Left[2];
        pp[2] = Left[1];
        pp[3] = Left[0];
        pp[4] = top_left;
        pp[5] = Above[0];
        pp[6] = Above[1];
        pp[7] = Above[2];
        pp[8] = Above[3];

        predictor[3 * 16 + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
        predictor[2 * 16 + 0] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
        predictor[3 * 16 + 1] =
            predictor[1 * 16 + 0] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
        predictor[2 * 16 + 1] =
            predictor[0 * 16 + 0] = (pp[4] + pp[5] + 1) >> 1;
        predictor[3 * 16 + 2] =
            predictor[1 * 16 + 1] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
        predictor[2 * 16 + 2] =
            predictor[0 * 16 + 1] = (pp[5] + pp[6] + 1) >> 1;
        predictor[3 * 16 + 3] =
            predictor[1 * 16 + 2] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
        predictor[2 * 16 + 3] =
            predictor[0 * 16 + 2] = (pp[6] + pp[7] + 1) >> 1;
        predictor[1 * 16 + 3] = (pp[6] + pp[7] * 2 + pp[8] + 2) >> 2;
        predictor[0 * 16 + 3] = (pp[7] + pp[8] + 1) >> 1;
    }
    break;
    case B_VL_PRED:
    {
        /* Vertical-left diagonal; reads pp[0..7] (above + above-right). */
        unsigned char *pp = Above;

        predictor[0 * 16 + 0] = (pp[0] + pp[1] + 1) >> 1;
        predictor[1 * 16 + 0] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
        predictor[2 * 16 + 0] =
            predictor[0 * 16 + 1] = (pp[1] + pp[2] + 1) >> 1;
        predictor[1 * 16 + 1] =
            predictor[3 * 16 + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
        predictor[2 * 16 + 1] =
            predictor[0 * 16 + 2] = (pp[2] + pp[3] + 1) >> 1;
        predictor[3 * 16 + 1] =
            predictor[1 * 16 + 2] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
        predictor[0 * 16 + 3] =
            predictor[2 * 16 + 2] = (pp[3] + pp[4] + 1) >> 1;
        predictor[1 * 16 + 3] =
            predictor[3 * 16 + 2] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
        predictor[2 * 16 + 3] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
        predictor[3 * 16 + 3] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
    }
    break;

    case B_HD_PRED:
    {
        /* Horizontal-down diagonal. */
        unsigned char pp[9];
        pp[0] = Left[3];
        pp[1] = Left[2];
        pp[2] = Left[1];
        pp[3] = Left[0];
        pp[4] = top_left;
        pp[5] = Above[0];
        pp[6] = Above[1];
        pp[7] = Above[2];
        pp[8] = Above[3];

        predictor[3 * 16 + 0] = (pp[0] + pp[1] + 1) >> 1;
        predictor[3 * 16 + 1] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
        predictor[2 * 16 + 0] =
            predictor[3 * 16 + 2] = (pp[1] + pp[2] + 1) >> 1;
        predictor[2 * 16 + 1] =
            predictor[3 * 16 + 3] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
        predictor[2 * 16 + 2] =
            predictor[1 * 16 + 0] = (pp[2] + pp[3] + 1) >> 1;
        predictor[2 * 16 + 3] =
            predictor[1 * 16 + 1] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
        predictor[1 * 16 + 2] =
            predictor[0 * 16 + 0] = (pp[3] + pp[4] + 1) >> 1;
        predictor[1 * 16 + 3] =
            predictor[0 * 16 + 1] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
        predictor[0 * 16 + 2] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
        predictor[0 * 16 + 3] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
    }
    break;

    case B_HU_PRED:
    {
        /* Horizontal-up; repeated pp[3] taps and the constant fill of the
         * bottom-right pixels are by design (no below-left data).
         * NOTE(review): no default case -- other b_mode values fall
         * through with the predictor untouched.
         */
        unsigned char *pp = Left;
        predictor[0 * 16 + 0] = (pp[0] + pp[1] + 1) >> 1;
        predictor[0 * 16 + 1] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
        predictor[0 * 16 + 2] =
            predictor[1 * 16 + 0] = (pp[1] + pp[2] + 1) >> 1;
        predictor[0 * 16 + 3] =
            predictor[1 * 16 + 1] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
        predictor[1 * 16 + 2] =
            predictor[2 * 16 + 0] = (pp[2] + pp[3] + 1) >> 1;
        predictor[1 * 16 + 3] =
            predictor[2 * 16 + 1] = (pp[2] + pp[3] * 2 + pp[3] + 2) >> 2;
        predictor[2 * 16 + 2] =
            predictor[2 * 16 + 3] =
                predictor[3 * 16 + 0] =
                    predictor[3 * 16 + 1] =
                        predictor[3 * 16 + 2] =
                            predictor[3 * 16 + 3] = pp[3];
    }
    break;

    }
}

/* copy 4 bytes from the above right down so that the 4x4 prediction modes using pixels above and
 * to the right prediction have filled in pixels to use.
913 */ 914 void vp8mt_intra_prediction_down_copy(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col) 915 { 916 unsigned char *above_right; /* = *(x->block[0].base_dst) + x->block[0].dst - x->block[0].dst_stride + 16; */ 917 unsigned int *src_ptr; 918 unsigned int *dst_ptr0; 919 unsigned int *dst_ptr1; 920 unsigned int *dst_ptr2; 921 922 if (pbi->common.filter_level) 923 above_right = pbi->mt_yabove_row[mb_row] + mb_col*16 + 32 +16; 924 else 925 above_right = *(x->block[0].base_dst) + x->block[0].dst - x->block[0].dst_stride + 16; 926 927 src_ptr = (unsigned int *)above_right; 928 /*dst_ptr0 = (unsigned int *)(above_right + 4 * x->block[0].dst_stride); 929 dst_ptr1 = (unsigned int *)(above_right + 8 * x->block[0].dst_stride); 930 dst_ptr2 = (unsigned int *)(above_right + 12 * x->block[0].dst_stride);*/ 931 dst_ptr0 = (unsigned int *)(*(x->block[0].base_dst) + x->block[0].dst + 16 + 3 * x->block[0].dst_stride); 932 dst_ptr1 = (unsigned int *)(*(x->block[0].base_dst) + x->block[0].dst + 16 + 7 * x->block[0].dst_stride); 933 dst_ptr2 = (unsigned int *)(*(x->block[0].base_dst) + x->block[0].dst + 16 + 11 * x->block[0].dst_stride); 934 *dst_ptr0 = *src_ptr; 935 *dst_ptr1 = *src_ptr; 936 *dst_ptr2 = *src_ptr; 937 } 938