/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <math.h>
#include <assert.h>

#include "config/aom_dsp_rtcd.h"

#include "av1/encoder/global_motion.h"

#include "av1/common/convolve.h"
#include "av1/common/resize.h"
#include "av1/common/warped_motion.h"

#include "av1/encoder/segmentation.h"
#include "av1/encoder/corner_detect.h"
#include "av1/encoder/corner_match.h"
#include "av1/encoder/ransac.h"

#define MAX_CORNERS 4096
#define MIN_INLIER_PROB 0.1

#define MIN_TRANS_THRESH (1 * GM_TRANS_DECODE_FACTOR)

// Border over which to compute the global motion
#define ERRORADV_BORDER 0

// Number of pyramid levels in disflow computation
#define N_LEVELS 2
// Size of square patches in the disflow dense grid
#define PATCH_SIZE 8
// Center point of square patch
#define PATCH_CENTER ((PATCH_SIZE + 1) >> 1)
// Step size between patches, lower value means greater patch overlap
#define PATCH_STEP 1
// Minimum size of border padding for disflow
#define MIN_PAD 7
// Warp error convergence threshold for disflow
#define DISFLOW_ERROR_TR 0.01
// Max number of iterations if warp convergence is not found
#define DISFLOW_MAX_ITR 10

// Struct for an image pyramid
typedef struct {
  int n_levels;
  int pad_size;
  int has_gradient;
  int widths[N_LEVELS];
  int heights[N_LEVELS];
  int strides[N_LEVELS];
  int level_loc[N_LEVELS];
  unsigned char *level_buffer;
  double *level_dx_buffer;
  double *level_dy_buffer;
} ImagePyramid;

static const double erroradv_tr[] = { 0.65, 0.60, 0.55 };
static const double erroradv_prod_tr[] = { 20000, 18000, 16000 };

int av1_is_enough_erroradvantage(double best_erroradvantage, int params_cost,
                                 int erroradv_type) {
  assert(erroradv_type < GM_ERRORADV_TR_TYPES);
  return best_erroradvantage < erroradv_tr[erroradv_type] &&
         best_erroradvantage * params_cost < erroradv_prod_tr[erroradv_type];
}

static void convert_to_params(const double *params, int32_t *model) {
  int i;
  int alpha_present = 0;
  model[0] = (int32_t)floor(params[0] * (1 << GM_TRANS_PREC_BITS) + 0.5);
  model[1] = (int32_t)floor(params[1] * (1 << GM_TRANS_PREC_BITS) + 0.5);
  model[0] = (int32_t)clamp(model[0], GM_TRANS_MIN, GM_TRANS_MAX) *
             GM_TRANS_DECODE_FACTOR;
  model[1] = (int32_t)clamp(model[1], GM_TRANS_MIN, GM_TRANS_MAX) *
             GM_TRANS_DECODE_FACTOR;

  for (i = 2; i < 6; ++i) {
    const int diag_value = ((i == 2 || i == 5) ? (1 << GM_ALPHA_PREC_BITS) : 0);
    model[i] = (int32_t)floor(params[i] * (1 << GM_ALPHA_PREC_BITS) + 0.5);
    model[i] =
        (int32_t)clamp(model[i] - diag_value, GM_ALPHA_MIN, GM_ALPHA_MAX);
    alpha_present |= (model[i] != 0);
    model[i] = (model[i] + diag_value) * GM_ALPHA_DECODE_FACTOR;
  }
  for (; i < 8; ++i) {
    model[i] = (int32_t)floor(params[i] * (1 << GM_ROW3HOMO_PREC_BITS) + 0.5);
    model[i] = (int32_t)clamp(model[i], GM_ROW3HOMO_MIN, GM_ROW3HOMO_MAX) *
               GM_ROW3HOMO_DECODE_FACTOR;
    alpha_present |= (model[i] != 0);
  }

  if (!alpha_present) {
    if (abs(model[0]) < MIN_TRANS_THRESH && abs(model[1]) < MIN_TRANS_THRESH) {
      model[0] = 0;
      model[1] = 0;
    }
  }
}

void av1_convert_model_to_params(const double *params,
                                 WarpedMotionParams *model) {
  convert_to_params(params, model->wmmat);
  model->wmtype = get_wmtype(model);
  model->invalid = 0;
}
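
/* Worked example of the conversion above (assuming GM_TRANS_PREC_BITS == 6,
 * as defined in av1/common/mv.h): a translation of 0.75 pel rounds to
 * floor(0.75 * 64 + 0.5) = 48, is clamped to [GM_TRANS_MIN, GM_TRANS_MAX],
 * and is then multiplied by GM_TRANS_DECODE_FACTOR so the stored value is
 * expressed at WARPEDMODEL_PREC_BITS precision. Diagonal terms (indices 2
 * and 5) are zero-centered before clamping and re-centered afterwards. */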

// Adds some offset to a global motion parameter and handles
// all of the necessary precision shifts, clamping, and
// zero-centering.
static int32_t add_param_offset(int param_index, int32_t param_value,
                                int32_t offset) {
  const int scale_vals[3] = { GM_TRANS_PREC_DIFF, GM_ALPHA_PREC_DIFF,
                              GM_ROW3HOMO_PREC_DIFF };
  const int clamp_vals[3] = { GM_TRANS_MAX, GM_ALPHA_MAX, GM_ROW3HOMO_MAX };
  // type of param: 0 - translation, 1 - affine, 2 - homography
  const int param_type = (param_index < 2 ? 0 : (param_index < 6 ? 1 : 2));
  const int is_one_centered = (param_index == 2 || param_index == 5);

  // Make parameter zero-centered and offset the shift that was done to make
  // it compatible with the warped model
  param_value = (param_value - (is_one_centered << WARPEDMODEL_PREC_BITS)) >>
                scale_vals[param_type];
  // Add desired offset to the rescaled/zero-centered parameter
  param_value += offset;
  // Clamp the parameter so it does not overflow the number of bits allotted
  // to it in the bitstream
  param_value = (int32_t)clamp(param_value, -clamp_vals[param_type],
                               clamp_vals[param_type]);
  // Rescale the parameter to WARPEDMODEL_PRECISION_BITS so it is compatible
  // with the warped motion library
  param_value *= (1 << scale_vals[param_type]);

  // Undo the zero-centering step if necessary
  return param_value + (is_one_centered << WARPEDMODEL_PREC_BITS);
}

static void force_wmtype(WarpedMotionParams *wm, TransformationType wmtype) {
  switch (wmtype) {
    case IDENTITY:
      wm->wmmat[0] = 0;
      wm->wmmat[1] = 0;
      AOM_FALLTHROUGH_INTENDED;
    case TRANSLATION:
      wm->wmmat[2] = 1 << WARPEDMODEL_PREC_BITS;
      wm->wmmat[3] = 0;
      AOM_FALLTHROUGH_INTENDED;
    case ROTZOOM:
      wm->wmmat[4] = -wm->wmmat[3];
      wm->wmmat[5] = wm->wmmat[2];
      AOM_FALLTHROUGH_INTENDED;
    case AFFINE: wm->wmmat[6] = wm->wmmat[7] = 0; break;
    default: assert(0);
  }
  wm->wmtype = wmtype;
}

int64_t av1_refine_integerized_param(WarpedMotionParams *wm,
                                     TransformationType wmtype, int use_hbd,
                                     int bd, uint8_t *ref, int r_width,
                                     int r_height, int r_stride, uint8_t *dst,
                                     int d_width, int d_height, int d_stride,
                                     int n_refinements,
                                     int64_t best_frame_error) {
  static const int max_trans_model_params[TRANS_TYPES] = { 0, 2, 4, 6 };
  const int border = ERRORADV_BORDER;
  int i = 0, p;
  int n_params = max_trans_model_params[wmtype];
  int32_t *param_mat = wm->wmmat;
  int64_t step_error, best_error;
  int32_t step;
  int32_t *param;
  int32_t curr_param;
  int32_t best_param;

  force_wmtype(wm, wmtype);
  best_error =
      av1_warp_error(wm, use_hbd, bd, ref, r_width, r_height, r_stride,
                     dst + border * d_stride + border, border, border,
                     d_width - 2 * border, d_height - 2 * border, d_stride, 0,
                     0, best_frame_error);
  best_error = AOMMIN(best_error, best_frame_error);
  step = 1 << (n_refinements - 1);
  for (i = 0; i < n_refinements; i++, step >>= 1) {
    for (p = 0; p < n_params; ++p) {
      int step_dir = 0;
      // Searches are skipped for parameters that are forced to be 0, since
      // n_params limits the loop to the parameters the model actually uses
      param = param_mat + p;
      curr_param = *param;
      best_param = curr_param;
      // look to the left
      *param = add_param_offset(p, curr_param, -step);
      step_error =
          av1_warp_error(wm, use_hbd, bd, ref, r_width, r_height, r_stride,
                         dst + border * d_stride + border, border, border,
                         d_width - 2 * border, d_height - 2 * border, d_stride,
                         0, 0, best_error);
      if (step_error < best_error) {
        best_error = step_error;
        best_param = *param;
        step_dir = -1;
      }

      // look to the right
      *param = add_param_offset(p, curr_param, step);
      step_error =
          av1_warp_error(wm, use_hbd, bd, ref, r_width, r_height, r_stride,
                         dst + border * d_stride + border, border, border,
                         d_width - 2 * border, d_height - 2 * border, d_stride,
                         0, 0, best_error);
      if (step_error < best_error) {
        best_error = step_error;
        best_param = *param;
        step_dir = 1;
      }
      *param = best_param;

      // look in the direction chosen above repeatedly until error increases
      // for the biggest step size
      while (step_dir) {
        *param = add_param_offset(p, best_param, step * step_dir);
        step_error =
            av1_warp_error(wm, use_hbd, bd, ref, r_width, r_height, r_stride,
                           dst + border * d_stride + border, border, border,
                           d_width - 2 * border, d_height - 2 * border,
                           d_stride, 0, 0, best_error);
        if (step_error < best_error) {
          best_error = step_error;
          best_param = *param;
        } else {
          *param = best_param;
          step_dir = 0;
        }
      }
    }
  }
  force_wmtype(wm, wmtype);
  wm->wmtype = get_wmtype(wm);
  return best_error;
}
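
/* The search above is a per-parameter coordinate descent: step starts at
 * 1 << (n_refinements - 1) and halves each round (e.g. 16, 8, 4, 2, 1 for
 * n_refinements == 5), and whichever direction first lowers the warp error
 * is followed until the error stops improving at that step size. */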

static INLINE RansacFunc get_ransac_type(TransformationType type) {
  switch (type) {
    case AFFINE: return ransac_affine;
    case ROTZOOM: return ransac_rotzoom;
    case TRANSLATION: return ransac_translation;
    default: assert(0); return NULL;
  }
}

static unsigned char *downconvert_frame(YV12_BUFFER_CONFIG *frm,
                                        int bit_depth) {
  int i, j;
  uint16_t *orig_buf = CONVERT_TO_SHORTPTR(frm->y_buffer);
  uint8_t *buf_8bit = frm->y_buffer_8bit;
  assert(buf_8bit);
  if (!frm->buf_8bit_valid) {
    for (i = 0; i < frm->y_height; ++i) {
      for (j = 0; j < frm->y_width; ++j) {
        buf_8bit[i * frm->y_stride + j] =
            orig_buf[i * frm->y_stride + j] >> (bit_depth - 8);
      }
    }
    frm->buf_8bit_valid = 1;
  }
  return buf_8bit;
}

static int compute_global_motion_feature_based(
    TransformationType type, YV12_BUFFER_CONFIG *frm, YV12_BUFFER_CONFIG *ref,
    int bit_depth, int *num_inliers_by_motion, double *params_by_motion,
    int num_motions) {
  int i;
  int num_frm_corners, num_ref_corners;
  int num_correspondences;
  int *correspondences;
  int frm_corners[2 * MAX_CORNERS], ref_corners[2 * MAX_CORNERS];
  unsigned char *frm_buffer = frm->y_buffer;
  unsigned char *ref_buffer = ref->y_buffer;
  RansacFunc ransac = get_ransac_type(type);

  if (frm->flags & YV12_FLAG_HIGHBITDEPTH) {
    // The frame buffer is 16-bit, so we need to convert to 8 bits for the
    // following code. We cache the result until the frame is released.
    frm_buffer = downconvert_frame(frm, bit_depth);
  }
  if (ref->flags & YV12_FLAG_HIGHBITDEPTH) {
    ref_buffer = downconvert_frame(ref, bit_depth);
  }

  // compute interest points in images using FAST features
  num_frm_corners = fast_corner_detect(frm_buffer, frm->y_width, frm->y_height,
                                       frm->y_stride, frm_corners, MAX_CORNERS);
  num_ref_corners = fast_corner_detect(ref_buffer, ref->y_width, ref->y_height,
                                       ref->y_stride, ref_corners, MAX_CORNERS);

  // find correspondences between the two images
  correspondences =
      (int *)malloc(num_frm_corners * 4 * sizeof(*correspondences));
  num_correspondences = determine_correspondence(
      frm_buffer, (int *)frm_corners, num_frm_corners, ref_buffer,
      (int *)ref_corners, num_ref_corners, frm->y_width, frm->y_height,
      frm->y_stride, ref->y_stride, correspondences);

  ransac(correspondences, num_correspondences, num_inliers_by_motion,
         params_by_motion, num_motions);

  free(correspondences);

  // Set num_inliers = 0 for motions with too few inliers so they are ignored.
  for (i = 0; i < num_motions; ++i) {
    if (num_inliers_by_motion[i] < MIN_INLIER_PROB * num_correspondences) {
      num_inliers_by_motion[i] = 0;
    }
  }

  // Return true if any one of the motions has inliers.
  for (i = 0; i < num_motions; ++i) {
    if (num_inliers_by_motion[i] > 0) return 1;
  }
  return 0;
}
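
/* Inlier gating example for the function above: with MIN_INLIER_PROB == 0.1
 * and, say, 250 correspondences, any candidate motion supported by fewer
 * than 25 inliers has its inlier count reset to 0 and is ignored by the
 * caller. */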

static INLINE RansacFuncDouble
get_ransac_double_prec_type(TransformationType type) {
  switch (type) {
    case AFFINE: return ransac_affine_double_prec;
    case ROTZOOM: return ransac_rotzoom_double_prec;
    case TRANSLATION: return ransac_translation_double_prec;
    default: assert(0); return NULL;
  }
}

// Don't use points around the frame border since they are less reliable
static INLINE int valid_point(int x, int y, int width, int height) {
  return (x > (PATCH_SIZE + PATCH_CENTER)) &&
         (x < (width - PATCH_SIZE - PATCH_CENTER)) &&
         (y > (PATCH_SIZE + PATCH_CENTER)) &&
         (y < (height - PATCH_SIZE - PATCH_CENTER));
}

static int determine_disflow_correspondence(int *frm_corners,
                                            int num_frm_corners, double *flow_u,
                                            double *flow_v, int width,
                                            int height, int stride,
                                            double *correspondences) {
  int num_correspondences = 0;
  int x, y;
  for (int i = 0; i < num_frm_corners; ++i) {
    x = frm_corners[2 * i];
    y = frm_corners[2 * i + 1];
    if (valid_point(x, y, width, height)) {
      correspondences[4 * num_correspondences] = x;
      correspondences[4 * num_correspondences + 1] = y;
      correspondences[4 * num_correspondences + 2] = x + flow_u[y * stride + x];
      correspondences[4 * num_correspondences + 3] = y + flow_v[y * stride + x];
      num_correspondences++;
    }
  }
  return num_correspondences;
}

double getCubicValue(double p[4], double x) {
  return p[1] + 0.5 * x *
                    (p[2] - p[0] +
                     x * (2.0 * p[0] - 5.0 * p[1] + 4.0 * p[2] - p[3] +
                          x * (3.0 * (p[1] - p[2]) + p[3] - p[0])));
}

void get_subcolumn(unsigned char *ref, double col[4], int stride, int x,
                   int y_start) {
  int i;
  for (i = 0; i < 4; ++i) {
    col[i] = ref[(i + y_start) * stride + x];
  }
}

double bicubic(unsigned char *ref, double x, double y, int stride) {
  double arr[4];
  int k;
  int i = (int)x;
  int j = (int)y;
  for (k = 0; k < 4; ++k) {
    double arr_temp[4];
    get_subcolumn(ref, arr_temp, stride, i + k - 1, j - 1);
    arr[k] = getCubicValue(arr_temp, y - j);
  }
  return getCubicValue(arr, x - i);
}
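
/* getCubicValue() above evaluates the Catmull-Rom form of cubic
 * interpolation: for samples p[0..3] at positions -1, 0, 1, 2 it returns
 * p[1] at x == 0 and p[2] at x == 1, so bicubic() interpolates smoothly
 * between the two center samples of each 4-tap column and row. */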

// Interpolate a warped block using bicubic interpolation when possible
unsigned char interpolate(unsigned char *ref, double x, double y, int width,
                          int height, int stride) {
  if (x < 0 && y < 0)
    return ref[0];
  else if (x < 0 && y > height - 1)
    return ref[(height - 1) * stride];
  else if (x > width - 1 && y < 0)
    return ref[width - 1];
  else if (x > width - 1 && y > height - 1)
    return ref[(height - 1) * stride + (width - 1)];
  else if (x < 0) {
    int v;
    int i = (int)y;
    double a = y - i;
    if (y > 1 && y < height - 2) {
      double arr[4];
      get_subcolumn(ref, arr, stride, 0, i - 1);
      return clamp((int)(getCubicValue(arr, a) + 0.5), 0, 255);
    }
    v = (int)(ref[i * stride] * (1 - a) + ref[(i + 1) * stride] * a + 0.5);
    return clamp(v, 0, 255);
  } else if (y < 0) {
    int v;
    int j = (int)x;
    double b = x - j;
    if (x > 1 && x < width - 2) {
      double arr[4] = { ref[j - 1], ref[j], ref[j + 1], ref[j + 2] };
      return clamp((int)(getCubicValue(arr, b) + 0.5), 0, 255);
    }
    v = (int)(ref[j] * (1 - b) + ref[j + 1] * b + 0.5);
    return clamp(v, 0, 255);
  } else if (x > width - 1) {
    int v;
    int i = (int)y;
    double a = y - i;
    if (y > 1 && y < height - 2) {
      double arr[4];
      get_subcolumn(ref, arr, stride, width - 1, i - 1);
      return clamp((int)(getCubicValue(arr, a) + 0.5), 0, 255);
    }
    v = (int)(ref[i * stride + width - 1] * (1 - a) +
              ref[(i + 1) * stride + width - 1] * a + 0.5);
    return clamp(v, 0, 255);
  } else if (y > height - 1) {
    int v;
    int j = (int)x;
    double b = x - j;
    if (x > 1 && x < width - 2) {
      int row = (height - 1) * stride;
      double arr[4] = { ref[row + j - 1], ref[row + j], ref[row + j + 1],
                        ref[row + j + 2] };
      return clamp((int)(getCubicValue(arr, b) + 0.5), 0, 255);
    }
    v = (int)(ref[(height - 1) * stride + j] * (1 - b) +
              ref[(height - 1) * stride + j + 1] * b + 0.5);
    return clamp(v, 0, 255);
  } else if (x > 1 && y > 1 && x < width - 2 && y < height - 2) {
    return clamp((int)(bicubic(ref, x, y, stride) + 0.5), 0, 255);
  } else {
    int i = (int)y;
    int j = (int)x;
    double a = y - i;
    double b = x - j;
    int v = (int)(ref[i * stride + j] * (1 - a) * (1 - b) +
                  ref[i * stride + j + 1] * (1 - a) * b +
                  ref[(i + 1) * stride + j] * a * (1 - b) +
                  ref[(i + 1) * stride + j + 1] * a * b);
    return clamp(v, 0, 255);
  }
}

// Warps a block using flow vector [u, v] and computes the mse
double compute_warp_and_error(unsigned char *ref, unsigned char *frm, int width,
                              int height, int stride, int x, int y, double u,
                              double v, int16_t *dt) {
  int i, j;
  unsigned char warped;
  double x_w, y_w;
  double mse = 0;
  int16_t err = 0;
  for (i = y; i < y + PATCH_SIZE; ++i)
    for (j = x; j < x + PATCH_SIZE; ++j) {
      x_w = (double)j + u;
      y_w = (double)i + v;
      warped = interpolate(ref, x_w, y_w, width, height, stride);
      err = warped - frm[j + i * stride];
      mse += err * err;
      dt[(i - y) * PATCH_SIZE + (j - x)] = err;
    }

  mse /= (PATCH_SIZE * PATCH_SIZE);
  return mse;
}
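
/* compute_warp_and_error() serves two purposes: the returned MSE is the
 * convergence test for the iterative flow solver, and dt receives the signed
 * per-pixel error (warped reference minus source) that becomes the b vector
 * of the flow system below. */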

// Computes the components of the system of equations used to solve for
// a flow vector. This includes:
// 1.) The Hessian matrix for optical flow. This matrix is in the
// form of:
//
//       M = |sum(dx * dx)  sum(dx * dy)|
//           |sum(dx * dy)  sum(dy * dy)|
//
// 2.)   b = |sum(dx * dt)|
//           |sum(dy * dt)|
// Where the sums are computed over a square window of PATCH_SIZE.
static INLINE void compute_flow_system(const double *dx, int dx_stride,
                                       const double *dy, int dy_stride,
                                       const int16_t *dt, int dt_stride,
                                       double *M, double *b) {
  for (int i = 0; i < PATCH_SIZE; i++) {
    for (int j = 0; j < PATCH_SIZE; j++) {
      M[0] += dx[i * dx_stride + j] * dx[i * dx_stride + j];
      M[1] += dx[i * dx_stride + j] * dy[i * dy_stride + j];
      M[3] += dy[i * dy_stride + j] * dy[i * dy_stride + j];

      b[0] += dx[i * dx_stride + j] * dt[i * dt_stride + j];
      b[1] += dy[i * dy_stride + j] * dt[i * dt_stride + j];
    }
  }

  M[2] = M[1];
}

// Solves a general Mx = b where M is a 2x2 matrix and b is a 2x1 matrix
static INLINE void solve_2x2_system(const double *M, const double *b,
                                    double *output_vec) {
  double M_0 = M[0];
  double M_3 = M[3];
  double det = (M_0 * M_3) - (M[1] * M[2]);
  if (det < 1e-5) {
    // Handle singular matrix
    // TODO(sarahparker) compare results using pseudo inverse instead
    M_0 += 1e-10;
    M_3 += 1e-10;
    det = (M_0 * M_3) - (M[1] * M[2]);
  }
  const double det_inv = 1 / det;
  const double mult_b0 = det_inv * b[0];
  const double mult_b1 = det_inv * b[1];
  output_vec[0] = M_3 * mult_b0 - M[1] * mult_b1;
  output_vec[1] = -M[2] * mult_b0 + M_0 * mult_b1;
}
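
/* solve_2x2_system() above is Cramer's rule written out:
 *
 *     | M0 M1 | |x0|   |b0|       x0 = (M3 * b0 - M1 * b1) / det
 *     | M2 M3 | |x1| = |b1|,      x1 = (M0 * b1 - M2 * b0) / det
 *
 * with det = M0 * M3 - M1 * M2, lightly regularized when the matrix is
 * near-singular. */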

/*
static INLINE void image_difference(const uint8_t *src, int src_stride,
                                    const uint8_t *ref, int ref_stride,
                                    int16_t *dst, int dst_stride, int height,
                                    int width) {
  const int block_unit = 8;
  // Take difference in 8x8 blocks to make use of optimized diff function
  for (int i = 0; i < height; i += block_unit) {
    for (int j = 0; j < width; j += block_unit) {
      aom_subtract_block(block_unit, block_unit, dst + i * dst_stride + j,
                         dst_stride, src + i * src_stride + j, src_stride,
                         ref + i * ref_stride + j, ref_stride);
    }
  }
}
*/

// Compute an image gradient using a sobel filter.
// If dir == 1, compute the x gradient. If dir == 0, compute y. This function
// assumes the images have been padded so that they can be processed in units
// of 8.
static INLINE void sobel_xy_image_gradient(const uint8_t *src, int src_stride,
                                           double *dst, int dst_stride,
                                           int height, int width, int dir) {
  double norm = 1.0;
  // TODO(sarahparker) experiment with doing this over larger block sizes
  const int block_unit = 8;
  // Filter in 8x8 blocks to eventually make use of optimized convolve function
  for (int i = 0; i < height; i += block_unit) {
    for (int j = 0; j < width; j += block_unit) {
      av1_convolve_2d_sobel_y_c(src + i * src_stride + j, src_stride,
                                dst + i * dst_stride + j, dst_stride,
                                block_unit, block_unit, dir, norm);
    }
  }
}

static ImagePyramid *alloc_pyramid(int width, int height, int pad_size,
                                   int compute_gradient) {
  ImagePyramid *pyr = aom_malloc(sizeof(*pyr));
  pyr->has_gradient = compute_gradient;
  // 2 * width * height is the upper bound for a buffer that fits
  // all pyramid levels + padding for each level
  const int buffer_size = sizeof(*pyr->level_buffer) * 2 * width * height +
                          (width + 2 * pad_size) * 2 * pad_size * N_LEVELS;
  pyr->level_buffer = aom_malloc(buffer_size);
  memset(pyr->level_buffer, 0, buffer_size);

  if (compute_gradient) {
    const int gradient_size =
        sizeof(*pyr->level_dx_buffer) * 2 * width * height +
        (width + 2 * pad_size) * 2 * pad_size * N_LEVELS;
    pyr->level_dx_buffer = aom_malloc(gradient_size);
    pyr->level_dy_buffer = aom_malloc(gradient_size);
    memset(pyr->level_dx_buffer, 0, gradient_size);
    memset(pyr->level_dy_buffer, 0, gradient_size);
  }
  return pyr;
}
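
/* The factor of 2 above is a safe upper bound: each pyramid level holds a
 * quarter of the previous level's pixels, so all levels together need less
 * than width * height * (1 + 1/4 + 1/16 + ...) = (4/3) * width * height,
 * and the remaining headroom absorbs the per-level padding rows. */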

static void free_pyramid(ImagePyramid *pyr) {
  aom_free(pyr->level_buffer);
  if (pyr->has_gradient) {
    aom_free(pyr->level_dx_buffer);
    aom_free(pyr->level_dy_buffer);
  }
  aom_free(pyr);
}

static INLINE void update_level_dims(ImagePyramid *frm_pyr, int level) {
  frm_pyr->widths[level] = frm_pyr->widths[level - 1] >> 1;
  frm_pyr->heights[level] = frm_pyr->heights[level - 1] >> 1;
  frm_pyr->strides[level] = frm_pyr->widths[level] + 2 * frm_pyr->pad_size;
  // Point the beginning of the next level buffer to the correct location
  // inside the padded border
  frm_pyr->level_loc[level] =
      frm_pyr->level_loc[level - 1] +
      frm_pyr->strides[level - 1] *
          (2 * frm_pyr->pad_size + frm_pyr->heights[level - 1]);
}

// Compute coarse to fine pyramids for a frame
static void compute_flow_pyramids(unsigned char *frm, const int frm_width,
                                  const int frm_height, const int frm_stride,
                                  int n_levels, int pad_size, int compute_grad,
                                  ImagePyramid *frm_pyr) {
  int cur_width, cur_height, cur_stride, cur_loc;
  assert((frm_width >> n_levels) > 0);
  assert((frm_height >> n_levels) > 0);

  // Initialize first level
  frm_pyr->n_levels = n_levels;
  frm_pyr->pad_size = pad_size;
  frm_pyr->widths[0] = frm_width;
  frm_pyr->heights[0] = frm_height;
  frm_pyr->strides[0] = frm_width + 2 * frm_pyr->pad_size;
  // Point the beginning of the level buffer to the location inside
  // the padded border
  frm_pyr->level_loc[0] =
      frm_pyr->strides[0] * frm_pyr->pad_size + frm_pyr->pad_size;
  // This essentially copies the original buffer into the pyramid buffer
  // without the original padding
  av1_resize_plane(frm, frm_height, frm_width, frm_stride,
                   frm_pyr->level_buffer + frm_pyr->level_loc[0],
                   frm_pyr->heights[0], frm_pyr->widths[0],
                   frm_pyr->strides[0]);

  if (compute_grad) {
    cur_width = frm_pyr->widths[0];
    cur_height = frm_pyr->heights[0];
    cur_stride = frm_pyr->strides[0];
    cur_loc = frm_pyr->level_loc[0];
    assert(frm_pyr->has_gradient && frm_pyr->level_dx_buffer != NULL &&
           frm_pyr->level_dy_buffer != NULL);
    // Compute the x gradient
    sobel_xy_image_gradient(frm_pyr->level_buffer + cur_loc, cur_stride,
                            frm_pyr->level_dx_buffer + cur_loc, cur_stride,
                            cur_height, cur_width, 1);

    // Compute the y gradient
    sobel_xy_image_gradient(frm_pyr->level_buffer + cur_loc, cur_stride,
                            frm_pyr->level_dy_buffer + cur_loc, cur_stride,
                            cur_height, cur_width, 0);
  }

  // Start at the finest level and resize down to the coarsest level
  for (int level = 1; level < n_levels; ++level) {
    update_level_dims(frm_pyr, level);
    cur_width = frm_pyr->widths[level];
    cur_height = frm_pyr->heights[level];
    cur_stride = frm_pyr->strides[level];
    cur_loc = frm_pyr->level_loc[level];

    av1_resize_plane(frm_pyr->level_buffer + frm_pyr->level_loc[level - 1],
                     frm_pyr->heights[level - 1], frm_pyr->widths[level - 1],
                     frm_pyr->strides[level - 1],
                     frm_pyr->level_buffer + cur_loc, cur_height, cur_width,
                     cur_stride);

    if (compute_grad) {
      assert(frm_pyr->has_gradient && frm_pyr->level_dx_buffer != NULL &&
             frm_pyr->level_dy_buffer != NULL);
      // Compute the x gradient
      sobel_xy_image_gradient(frm_pyr->level_buffer + cur_loc, cur_stride,
                              frm_pyr->level_dx_buffer + cur_loc, cur_stride,
                              cur_height, cur_width, 1);

      // Compute the y gradient
      sobel_xy_image_gradient(frm_pyr->level_buffer + cur_loc, cur_stride,
                              frm_pyr->level_dy_buffer + cur_loc, cur_stride,
                              cur_height, cur_width, 0);
    }
  }
}

static INLINE void compute_flow_at_point(unsigned char *frm, unsigned char *ref,
                                         double *dx, double *dy, int x, int y,
                                         int width, int height, int stride,
                                         double *u, double *v) {
  double M[4] = { 0 };
  double b[2] = { 0 };
  double tmp_output_vec[2] = { 0 };
  double error = 0;
  int16_t dt[PATCH_SIZE * PATCH_SIZE];
  double o_u = *u;
  double o_v = *v;

  for (int itr = 0; itr < DISFLOW_MAX_ITR; itr++) {
    error = compute_warp_and_error(ref, frm, width, height, stride, x, y, *u,
                                   *v, dt);
    if (error <= DISFLOW_ERROR_TR) break;
    compute_flow_system(dx, stride, dy, stride, dt, PATCH_SIZE, M, b);
    solve_2x2_system(M, b, tmp_output_vec);
    *u += tmp_output_vec[0];
    *v += tmp_output_vec[1];
  }
  // Treat excessively large updates as divergence and fall back to the
  // initial flow estimate
  if (fabs(*u - o_u) > PATCH_SIZE || fabs(*v - o_v) > PATCH_SIZE) {
    *u = o_u;
    *v = o_v;
  }
}
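
/* Each call to compute_flow_at_point() above runs at most DISFLOW_MAX_ITR
 * Lucas-Kanade style updates: warp the patch with the current (u, v), build
 * the system M * delta = b from the spatial gradients and the temporal error
 * dt, and apply delta, stopping early once the patch MSE drops to
 * DISFLOW_ERROR_TR. Updates that move more than PATCH_SIZE in either
 * direction are discarded as divergent. */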

// Note: flow_u and flow_v must be zero-initialized by the caller
static void compute_flow_field(ImagePyramid *frm_pyr, ImagePyramid *ref_pyr,
                               double *flow_u, double *flow_v) {
  int cur_width, cur_height, cur_stride, cur_loc, patch_loc, patch_center;
  double *u_upscale =
      aom_malloc(frm_pyr->strides[0] * frm_pyr->heights[0] * sizeof(*flow_u));
  double *v_upscale =
      aom_malloc(frm_pyr->strides[0] * frm_pyr->heights[0] * sizeof(*flow_v));

  assert(frm_pyr->n_levels == ref_pyr->n_levels);

  // Compute flow field from coarsest to finest level of the pyramid
  for (int level = frm_pyr->n_levels - 1; level >= 0; --level) {
    cur_width = frm_pyr->widths[level];
    cur_height = frm_pyr->heights[level];
    cur_stride = frm_pyr->strides[level];
    cur_loc = frm_pyr->level_loc[level];

    for (int i = PATCH_SIZE; i < cur_height - PATCH_SIZE; i += PATCH_STEP) {
      for (int j = PATCH_SIZE; j < cur_width - PATCH_SIZE; j += PATCH_STEP) {
        patch_loc = i * cur_stride + j;
        patch_center = patch_loc + PATCH_CENTER * cur_stride + PATCH_CENTER;
        compute_flow_at_point(frm_pyr->level_buffer + cur_loc,
                              ref_pyr->level_buffer + cur_loc,
                              frm_pyr->level_dx_buffer + cur_loc + patch_loc,
                              frm_pyr->level_dy_buffer + cur_loc + patch_loc, j,
                              i, cur_width, cur_height, cur_stride,
                              flow_u + patch_center, flow_v + patch_center);
      }
    }
    // TODO(sarahparker) Replace this with upscale function in resize.c
    if (level > 0) {
      int h_upscale = frm_pyr->heights[level - 1];
      int w_upscale = frm_pyr->widths[level - 1];
      int s_upscale = frm_pyr->strides[level - 1];
      for (int i = 0; i < h_upscale; ++i) {
        for (int j = 0; j < w_upscale; ++j) {
          u_upscale[j + i * s_upscale] =
              flow_u[(int)(j >> 1) + (int)(i >> 1) * cur_stride];
          v_upscale[j + i * s_upscale] =
              flow_v[(int)(j >> 1) + (int)(i >> 1) * cur_stride];
        }
      }
      memcpy(flow_u, u_upscale,
             frm_pyr->strides[0] * frm_pyr->heights[0] * sizeof(*flow_u));
      memcpy(flow_v, v_upscale,
             frm_pyr->strides[0] * frm_pyr->heights[0] * sizeof(*flow_v));
    }
  }
  aom_free(u_upscale);
  aom_free(v_upscale);
}
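
/* Disflow-based estimation below: build image pyramids for both frames,
 * compute a dense flow field coarse-to-fine, sample the flow at FAST corner
 * locations to form point correspondences, and fit the global model with
 * double-precision RANSAC. */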
static int compute_global_motion_disflow_based(
    TransformationType type, YV12_BUFFER_CONFIG *frm, YV12_BUFFER_CONFIG *ref,
    int bit_depth, int *num_inliers_by_motion, double *params_by_motion,
    int num_motions) {
  unsigned char *frm_buffer = frm->y_buffer;
  unsigned char *ref_buffer = ref->y_buffer;
  const int frm_width = frm->y_width;
  const int frm_height = frm->y_height;
  const int ref_width = ref->y_width;
  const int ref_height = ref->y_height;
  const int pad_size = AOMMAX(PATCH_SIZE, MIN_PAD);
  int num_frm_corners;
  int num_correspondences;
  double *correspondences;
  int frm_corners[2 * MAX_CORNERS];
  RansacFuncDouble ransac = get_ransac_double_prec_type(type);
  assert(frm_width == ref_width);
  assert(frm_height == ref_height);

  // Ensure the number of pyramid levels will work with the frame resolution
  const int msb =
      frm_width < frm_height ? get_msb(frm_width) : get_msb(frm_height);
  const int n_levels = AOMMIN(msb, N_LEVELS);

  if (frm->flags & YV12_FLAG_HIGHBITDEPTH) {
    // The frame buffer is 16-bit, so we need to convert to 8 bits for the
    // following code. We cache the result until the frame is released.
    frm_buffer = downconvert_frame(frm, bit_depth);
  }
  if (ref->flags & YV12_FLAG_HIGHBITDEPTH) {
    ref_buffer = downconvert_frame(ref, bit_depth);
  }

  // TODO(sarahparker) We will want to do the source pyramid computation
  // outside of this function so it doesn't get recomputed for every
  // reference. We also don't need to compute every pyramid level for the
  // reference in advance, since lower levels can be overwritten once their
  // flow field is computed and upscaled. I'll add these optimizations
  // once the full implementation is working.
  // Allocate frm image pyramids
  int compute_gradient = 1;
  ImagePyramid *frm_pyr =
      alloc_pyramid(frm_width, frm_height, pad_size, compute_gradient);
  compute_flow_pyramids(frm_buffer, frm_width, frm_height, frm->y_stride,
                        n_levels, pad_size, compute_gradient, frm_pyr);
  // Allocate ref image pyramids
  compute_gradient = 0;
  ImagePyramid *ref_pyr =
      alloc_pyramid(ref_width, ref_height, pad_size, compute_gradient);
  compute_flow_pyramids(ref_buffer, ref_width, ref_height, ref->y_stride,
                        n_levels, pad_size, compute_gradient, ref_pyr);

  double *flow_u =
      aom_malloc(frm_pyr->strides[0] * frm_pyr->heights[0] * sizeof(*flow_u));
  double *flow_v =
      aom_malloc(frm_pyr->strides[0] * frm_pyr->heights[0] * sizeof(*flow_v));

  memset(flow_u, 0,
         frm_pyr->strides[0] * frm_pyr->heights[0] * sizeof(*flow_u));
  memset(flow_v, 0,
         frm_pyr->strides[0] * frm_pyr->heights[0] * sizeof(*flow_v));

  compute_flow_field(frm_pyr, ref_pyr, flow_u, flow_v);

  // compute interest points in images using FAST features
  num_frm_corners = fast_corner_detect(frm_buffer, frm_width, frm_height,
                                       frm->y_stride, frm_corners, MAX_CORNERS);
  // find correspondences between the two images using the flow field
  correspondences = aom_malloc(num_frm_corners * 4 * sizeof(*correspondences));
  num_correspondences = determine_disflow_correspondence(
      frm_corners, num_frm_corners, flow_u, flow_v, frm_width, frm_height,
      frm_pyr->strides[0], correspondences);
  ransac(correspondences, num_correspondences, num_inliers_by_motion,
         params_by_motion, num_motions);

  free_pyramid(frm_pyr);
  free_pyramid(ref_pyr);
  aom_free(correspondences);
  aom_free(flow_u);
  aom_free(flow_v);
  // Set num_inliers = 0 for motions with too few inliers so they are ignored.
  for (int i = 0; i < num_motions; ++i) {
    if (num_inliers_by_motion[i] < MIN_INLIER_PROB * num_correspondences) {
      num_inliers_by_motion[i] = 0;
    }
  }

  // Return true if any one of the motions has inliers.
  for (int i = 0; i < num_motions; ++i) {
    if (num_inliers_by_motion[i] > 0) return 1;
  }
  return 0;
}

int av1_compute_global_motion(TransformationType type, YV12_BUFFER_CONFIG *frm,
                              YV12_BUFFER_CONFIG *ref, int bit_depth,
                              GlobalMotionEstimationType gm_estimation_type,
                              int *num_inliers_by_motion,
                              double *params_by_motion, int num_motions) {
  switch (gm_estimation_type) {
    case GLOBAL_MOTION_FEATURE_BASED:
      return compute_global_motion_feature_based(type, frm, ref, bit_depth,
                                                 num_inliers_by_motion,
                                                 params_by_motion, num_motions);
    case GLOBAL_MOTION_DISFLOW_BASED:
      return compute_global_motion_disflow_based(type, frm, ref, bit_depth,
                                                 num_inliers_by_motion,
                                                 params_by_motion, num_motions);
    default: assert(0 && "Unknown global motion estimation type");
  }
  return 0;
}
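
/* Untested usage sketch (kept as a comment, like image_difference() above):
   how a caller might drive av1_compute_global_motion(). It assumes
   RANSAC_NUM_MOTIONS and MAX_PARAMDIM are visible from ransac.h / mv.h and
   that default_warp_params is available from av1/common/mv.h.

static void example_estimate_rotzoom(YV12_BUFFER_CONFIG *frm,
                                     YV12_BUFFER_CONFIG *ref, int bit_depth) {
  int num_inliers_by_motion[RANSAC_NUM_MOTIONS];
  double params_by_motion[RANSAC_NUM_MOTIONS * (MAX_PARAMDIM - 1)];
  if (av1_compute_global_motion(ROTZOOM, frm, ref, bit_depth,
                                GLOBAL_MOTION_FEATURE_BASED,
                                num_inliers_by_motion, params_by_motion,
                                RANSAC_NUM_MOTIONS)) {
    // Integerize the first candidate motion; av1_refine_integerized_param()
    // can then polish it against the reference frame.
    WarpedMotionParams wm = default_warp_params;
    av1_convert_model_to_params(params_by_motion, &wm);
  }
}
*/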