/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */


#include "vp8/common/onyxc_int.h"
#include "onyx_int.h"
#include "vp8/common/systemdependent.h"
#include "quantize.h"
#include "vp8/common/alloccommon.h"
#include "mcomp.h"
#include "firstpass.h"
#include "vpx_scale/vpx_scale.h"
#include "vp8/common/extend.h"
#include "ratectrl.h"
#include "vp8/common/quant_common.h"
#include "segmentation.h"
#include "vpx_mem/vpx_mem.h"
#include "vp8/common/swapyv12buffer.h"
#include "vp8/common/threading.h"
#include "vpx_ports/vpx_timer.h"

#include <math.h>
#include <limits.h>

#define ALT_REF_MC_ENABLED 1      /* dis/enable MC in AltRef filtering */
#define ALT_REF_SUBPEL_ENABLED 1  /* dis/enable subpel in MC AltRef filtering */

#if VP8_TEMPORAL_ALT_REF

static void vp8_temporal_filter_predictors_mb_c
(
    MACROBLOCKD *x,
    unsigned char *y_mb_ptr,
    unsigned char *u_mb_ptr,
    unsigned char *v_mb_ptr,
    int stride,
    int mv_row,
    int mv_col,
    unsigned char *pred
)
{
    int offset;
    unsigned char *yptr, *uptr, *vptr;

    /* Y */
    yptr = y_mb_ptr + (mv_row >> 3) * stride + (mv_col >> 3);

    if ((mv_row | mv_col) & 7)
    {
        x->subpixel_predict16x16(yptr, stride,
                                 mv_col & 7, mv_row & 7, &pred[0], 16);
    }
    else
    {
        vp8_copy_mem16x16(yptr, stride, &pred[0], 16);
    }

    /* U & V */
    mv_row >>= 1;
    mv_col >>= 1;
    stride = (stride + 1) >> 1;
    offset = (mv_row >> 3) * stride + (mv_col >> 3);
    uptr = u_mb_ptr + offset;
    vptr = v_mb_ptr + offset;

    if ((mv_row | mv_col) & 7)
    {
        x->subpixel_predict8x8(uptr, stride,
                               mv_col & 7, mv_row & 7, &pred[256], 8);
        x->subpixel_predict8x8(vptr, stride,
                               mv_col & 7, mv_row & 7, &pred[320], 8);
    }
    else
    {
        vp8_copy_mem8x8(uptr, stride, &pred[256], 8);
        vp8_copy_mem8x8(vptr, stride, &pred[320], 8);
    }
}

void vp8_temporal_filter_apply_c
(
    unsigned char *frame1,
    unsigned int stride,
    unsigned char *frame2,
    unsigned int block_size,
    int strength,
    int filter_weight,
    unsigned int *accumulator,
    unsigned short *count
)
{
    unsigned int i, j, k;
    int modifier;
    int byte = 0;

    for (i = 0, k = 0; i < block_size; i++)
    {
        for (j = 0; j < block_size; j++, k++)
        {
            int src_byte = frame1[byte];
            int pixel_value = *frame2++;

            modifier = src_byte - pixel_value;
            /* This is an integer approximation of:
             * float coeff = (3.0 * modifier * modifier) / pow(2, strength);
             * modifier = (int)roundf(coeff > 16 ? 0 : 16 - coeff);
             */
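            /* The (1 << (strength - 1)) term below rounds the shift to the
             * nearest integer, matching the roundf() in the comment above;
             * the clamp and subtraction then turn the squared pixel
             * difference into a weight in [0, 16] that shrinks as the
             * source and predictor pixels diverge.
             */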
            modifier *= modifier;
            modifier *= 3;
            modifier += 1 << (strength - 1);
            modifier >>= strength;

            if (modifier > 16)
                modifier = 16;

            modifier = 16 - modifier;
            modifier *= filter_weight;

            count[k] += modifier;
            accumulator[k] += modifier * pixel_value;

            byte++;
        }

        byte += stride - block_size;
    }
}

#if ALT_REF_MC_ENABLED

static int vp8_temporal_filter_find_matching_mb_c
(
    VP8_COMP *cpi,
    YV12_BUFFER_CONFIG *arf_frame,
    YV12_BUFFER_CONFIG *frame_ptr,
    int mb_offset,
    int error_thresh
)
{
    MACROBLOCK *x = &cpi->mb;
    int step_param;
    int sadpb = x->sadperbit16;
    int bestsme = INT_MAX;

    BLOCK *b = &x->block[0];
    BLOCKD *d = &x->e_mbd.block[0];
    int_mv best_ref_mv1;
    int_mv best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */

    /* Save input state */
    unsigned char **base_src = b->base_src;
    int src = b->src;
    int src_stride = b->src_stride;
    unsigned char *base_pre = x->e_mbd.pre.y_buffer;
    int pre = d->offset;
    int pre_stride = x->e_mbd.pre.y_stride;

    best_ref_mv1.as_int = 0;
    best_ref_mv1_full.as_mv.col = best_ref_mv1.as_mv.col >> 3;
    best_ref_mv1_full.as_mv.row = best_ref_mv1.as_mv.row >> 3;

    /* Setup frame pointers */
    b->base_src = &arf_frame->y_buffer;
    b->src_stride = arf_frame->y_stride;
    b->src = mb_offset;

    x->e_mbd.pre.y_buffer = frame_ptr->y_buffer;
    x->e_mbd.pre.y_stride = frame_ptr->y_stride;
    d->offset = mb_offset;

    (void)error_thresh;

    /* Further step/diamond searches as necessary */
    if (cpi->Speed < 8)
    {
        step_param = cpi->sf.first_step + (cpi->Speed > 5);
    }
    else
    {
        step_param = cpi->sf.first_step + 2;
    }

    /* TODO Check that the 16x16 vf & sdf are selected here */
    /* Ignore mv costing by sending NULL cost arrays */
    bestsme = vp8_hex_search(x, b, d, &best_ref_mv1_full, &d->bmi.mv,
                             step_param, sadpb,
                             &cpi->fn_ptr[BLOCK_16X16],
                             NULL, NULL, &best_ref_mv1);

#if ALT_REF_SUBPEL_ENABLED
    /* Try sub-pixel MC? */
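    /* Refine the full-pel hex search result; d->bmi.mv is updated in place
     * and bestsme is overwritten with the refined error score, which the
     * caller compares against THRESH_LOW / THRESH_HIGH.
     */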
    {
        int distortion;
        unsigned int sse;
        /* Ignore mv costing by sending NULL cost array */
        bestsme = cpi->find_fractional_mv_step(x, b, d,
                                               &d->bmi.mv,
                                               &best_ref_mv1,
                                               x->errorperbit,
                                               &cpi->fn_ptr[BLOCK_16X16],
                                               NULL, &distortion, &sse);
    }
#endif

    /* Restore input state */
    b->base_src = base_src;
    b->src = src;
    b->src_stride = src_stride;
    x->e_mbd.pre.y_buffer = base_pre;
    d->offset = pre;
    x->e_mbd.pre.y_stride = pre_stride;

    return bestsme;
}
#endif

static void vp8_temporal_filter_iterate_c
(
    VP8_COMP *cpi,
    int frame_count,
    int alt_ref_index,
    int strength
)
{
    int byte;
    int frame;
    int mb_col, mb_row;
    unsigned int filter_weight;
    int mb_cols = cpi->common.mb_cols;
    int mb_rows = cpi->common.mb_rows;
    int mb_y_offset = 0;
    int mb_uv_offset = 0;
    DECLARE_ALIGNED_ARRAY(16, unsigned int, accumulator, 16*16 + 8*8 + 8*8);
    DECLARE_ALIGNED_ARRAY(16, unsigned short, count, 16*16 + 8*8 + 8*8);
    MACROBLOCKD *mbd = &cpi->mb.e_mbd;
    YV12_BUFFER_CONFIG *f = cpi->frames[alt_ref_index];
    unsigned char *dst1, *dst2;
    DECLARE_ALIGNED_ARRAY(16, unsigned char, predictor, 16*16 + 8*8 + 8*8);

    /* Save input state */
    unsigned char *y_buffer = mbd->pre.y_buffer;
    unsigned char *u_buffer = mbd->pre.u_buffer;
    unsigned char *v_buffer = mbd->pre.v_buffer;

    for (mb_row = 0; mb_row < mb_rows; mb_row++)
    {
#if ALT_REF_MC_ENABLED
        /* Source frames are extended to 16 pixels. This is different from
         * the L/A/G reference frames, which have a border of 32
         * (VP8BORDERINPIXELS). A 6-tap filter is used for the motion
         * search, which requires 2 pixels before and 3 pixels after the
         * filtered position, so the largest Y mv on a border would be
         * 16 - 3. The UV blocks are half the size of the Y blocks and are
         * therefore only extended by 8, so the largest mv a UV block can
         * support is 8 - 3. A UV mv is half of a Y mv:
         * (16 - 3) >> 1 == 6, which is greater than 8 - 3.
         * To keep the mv in range for both the Y and UV planes, the
         * maximum on a border is therefore 16 - 5.
         */
        cpi->mb.mv_row_min = -((mb_row * 16) + (16 - 5));
        cpi->mb.mv_row_max = ((cpi->common.mb_rows - 1 - mb_row) * 16)
                             + (16 - 5);
#endif

        for (mb_col = 0; mb_col < mb_cols; mb_col++)
        {
            int i, j, k;
            int stride;

            vpx_memset(accumulator, 0, 384*sizeof(unsigned int));
            vpx_memset(count, 0, 384*sizeof(unsigned short));

#if ALT_REF_MC_ENABLED
            cpi->mb.mv_col_min = -((mb_col * 16) + (16 - 5));
            cpi->mb.mv_col_max = ((cpi->common.mb_cols - 1 - mb_col) * 16)
                                 + (16 - 5);
#endif

            for (frame = 0; frame < frame_count; frame++)
            {
                if (cpi->frames[frame] == NULL)
                    continue;

                mbd->block[0].bmi.mv.as_mv.row = 0;
                mbd->block[0].bmi.mv.as_mv.col = 0;

                if (frame == alt_ref_index)
                {
                    filter_weight = 2;
                }
                else
                {
                    int err = 0;
#if ALT_REF_MC_ENABLED
#define THRESH_LOW   10000
#define THRESH_HIGH  20000
                    /* Find best match in this frame by MC */
                    err = vp8_temporal_filter_find_matching_mb_c
                          (cpi,
                           cpi->frames[alt_ref_index],
                           cpi->frames[frame],
                           mb_y_offset,
                           THRESH_LOW);
#endif
                    /* Assign a higher weight to the matching MB if its
                     * error score is lower. If MC is not applied, the
                     * default behavior is to weight all MBs equally.
                     */
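                    /* err < THRESH_LOW                -> filter_weight 2
                     * THRESH_LOW <= err < THRESH_HIGH -> filter_weight 1
                     * err >= THRESH_HIGH              -> filter_weight 0
                     *                                     (MB excluded)
                     */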
                    filter_weight = err < THRESH_LOW
                                    ? 2 : err < THRESH_HIGH ? 1 : 0;
                }

                if (filter_weight != 0)
                {
                    /* Construct the predictors */
                    vp8_temporal_filter_predictors_mb_c
                    (mbd,
                     cpi->frames[frame]->y_buffer + mb_y_offset,
                     cpi->frames[frame]->u_buffer + mb_uv_offset,
                     cpi->frames[frame]->v_buffer + mb_uv_offset,
                     cpi->frames[frame]->y_stride,
                     mbd->block[0].bmi.mv.as_mv.row,
                     mbd->block[0].bmi.mv.as_mv.col,
                     predictor);

                    /* Apply the filter (YUV) */
                    vp8_temporal_filter_apply
                    (f->y_buffer + mb_y_offset,
                     f->y_stride,
                     predictor,
                     16,
                     strength,
                     filter_weight,
                     accumulator,
                     count);

                    vp8_temporal_filter_apply
                    (f->u_buffer + mb_uv_offset,
                     f->uv_stride,
                     predictor + 256,
                     8,
                     strength,
                     filter_weight,
                     accumulator + 256,
                     count + 256);

                    vp8_temporal_filter_apply
                    (f->v_buffer + mb_uv_offset,
                     f->uv_stride,
                     predictor + 320,
                     8,
                     strength,
                     filter_weight,
                     accumulator + 320,
                     count + 320);
                }
            }

            /* Normalize filter output to produce AltRef frame */
            dst1 = cpi->alt_ref_buffer.y_buffer;
            stride = cpi->alt_ref_buffer.y_stride;
            byte = mb_y_offset;
            for (i = 0, k = 0; i < 16; i++)
            {
                for (j = 0; j < 16; j++, k++)
                {
                    unsigned int pval = accumulator[k] + (count[k] >> 1);
                    pval *= cpi->fixed_divide[count[k]];
                    pval >>= 19;

                    dst1[byte] = (unsigned char)pval;

                    /* move to next pixel */
                    byte++;
                }

                byte += stride - 16;
            }

            dst1 = cpi->alt_ref_buffer.u_buffer;
            dst2 = cpi->alt_ref_buffer.v_buffer;
            stride = cpi->alt_ref_buffer.uv_stride;
            byte = mb_uv_offset;
            for (i = 0, k = 256; i < 8; i++)
            {
                for (j = 0; j < 8; j++, k++)
                {
                    int m = k + 64;

                    /* U */
                    unsigned int pval = accumulator[k] + (count[k] >> 1);
                    pval *= cpi->fixed_divide[count[k]];
                    pval >>= 19;
                    dst1[byte] = (unsigned char)pval;

                    /* V */
                    pval = accumulator[m] + (count[m] >> 1);
                    pval *= cpi->fixed_divide[count[m]];
                    pval >>= 19;
                    dst2[byte] = (unsigned char)pval;

                    /* move to next pixel */
                    byte++;
                }

                byte += stride - 8;
            }

            mb_y_offset += 16;
            mb_uv_offset += 8;
        }

        mb_y_offset += 16*(f->y_stride - mb_cols);
        mb_uv_offset += 8*(f->uv_stride - mb_cols);
    }

    /* Restore input state */
    mbd->pre.y_buffer = y_buffer;
    mbd->pre.u_buffer = u_buffer;
    mbd->pre.v_buffer = v_buffer;
}

void vp8_temporal_filter_prepare_c
(
    VP8_COMP *cpi,
    int distance
)
{
    int frame = 0;

    int num_frames_backward = 0;
    int num_frames_forward = 0;
    int frames_to_blur_backward = 0;
    int frames_to_blur_forward = 0;
    int frames_to_blur = 0;
    int start_frame = 0;

    int strength = cpi->oxcf.arnr_strength;

    int blur_type = cpi->oxcf.arnr_type;

    int max_frames = cpi->active_arnr_frames;

    num_frames_backward = distance;
    num_frames_forward = vp8_lookahead_depth(cpi->lookahead)
                         - (num_frames_backward + 1);

    switch (blur_type)
    {
    case 1:
        /* Backward Blur */

        frames_to_blur_backward = num_frames_backward;

        if (frames_to_blur_backward >= max_frames)
            frames_to_blur_backward = max_frames - 1;

        frames_to_blur = frames_to_blur_backward + 1;
        break;

    case 2:
        /* Forward Blur */

        frames_to_blur_forward = num_frames_forward;

        if (frames_to_blur_forward >= max_frames)
            frames_to_blur_forward = max_frames - 1;

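        /* The +1 includes the frame at the ARF position itself. */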
        frames_to_blur = frames_to_blur_forward + 1;
        break;

    case 3:
    default:
        /* Center Blur */
        frames_to_blur_forward = num_frames_forward;
        frames_to_blur_backward = num_frames_backward;

        if (frames_to_blur_forward > frames_to_blur_backward)
            frames_to_blur_forward = frames_to_blur_backward;

        if (frames_to_blur_backward > frames_to_blur_forward)
            frames_to_blur_backward = frames_to_blur_forward;

        /* When max_frames is even we have 1 more frame backward than forward */
        if (frames_to_blur_forward > (max_frames - 1) / 2)
            frames_to_blur_forward = ((max_frames - 1) / 2);

        if (frames_to_blur_backward > (max_frames / 2))
            frames_to_blur_backward = (max_frames / 2);

        frames_to_blur = frames_to_blur_backward + frames_to_blur_forward + 1;
        break;
    }

    start_frame = distance + frames_to_blur_forward;

    /* Setup frame pointers, NULL indicates frame not included in filter */
    vpx_memset(cpi->frames, 0, max_frames*sizeof(YV12_BUFFER_CONFIG *));
    for (frame = 0; frame < frames_to_blur; frame++)
    {
        int which_buffer = start_frame - frame;
        struct lookahead_entry *buf = vp8_lookahead_peek(cpi->lookahead,
                                                         which_buffer,
                                                         PEEK_FORWARD);
        cpi->frames[frames_to_blur - 1 - frame] = &buf->img;
    }

    vp8_temporal_filter_iterate_c(
        cpi,
        frames_to_blur,
        frames_to_blur_backward,
        strength);
}
#endif