/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */


#include "vp9/common/vp9_common.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_systemdependent.h"

#include <math.h>

#ifdef ENTROPY_STATS
extern unsigned int active_section;
#endif

static void encode_mv_component(vp9_writer* w, int comp,
                                const nmv_component* mvcomp, int usehp) {
  int offset;
  const int sign = comp < 0;
  const int mag = sign ? -comp : comp;
  const int mv_class = vp9_get_mv_class(mag - 1, &offset);
  const int d = offset >> 3;         // int mv data
  const int fr = (offset >> 1) & 3;  // fractional mv data
  const int hp = offset & 1;         // high precision mv data
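  // Note: vp9_get_mv_class() splits the coded magnitude (mag - 1) into a
  // class and an in-class offset. The offset packs, from LSB up, the
  // high-precision bit, two fractional-pel bits and the integer position
  // within the class, which is what the shift/mask extraction of hp, fr
  // and d above relies on.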

  assert(comp != 0);

  // Sign
  vp9_write(w, sign, mvcomp->sign);

  // Class
  write_token(w, vp9_mv_class_tree, mvcomp->classes,
              &vp9_mv_class_encodings[mv_class]);

  // Integer bits
  if (mv_class == MV_CLASS_0) {
    write_token(w, vp9_mv_class0_tree, mvcomp->class0,
                &vp9_mv_class0_encodings[d]);
  } else {
    int i;
    const int n = mv_class + CLASS0_BITS - 1;  // number of bits
    for (i = 0; i < n; ++i)
      vp9_write(w, (d >> i) & 1, mvcomp->bits[i]);
  }

  // Fractional bits
  write_token(w, vp9_mv_fp_tree,
              mv_class == MV_CLASS_0 ? mvcomp->class0_fp[d] : mvcomp->fp,
              &vp9_mv_fp_encodings[fr]);

  // High precision bit
  if (usehp)
    vp9_write(w, hp,
              mv_class == MV_CLASS_0 ? mvcomp->class0_hp : mvcomp->hp);
}


static void build_nmv_component_cost_table(int *mvcost,
                                           const nmv_component* const mvcomp,
                                           int usehp) {
  int i, v;
  int sign_cost[2], class_cost[MV_CLASSES], class0_cost[CLASS0_SIZE];
  int bits_cost[MV_OFFSET_BITS][2];
  int class0_fp_cost[CLASS0_SIZE][4], fp_cost[4];
  int class0_hp_cost[2], hp_cost[2];

  sign_cost[0] = vp9_cost_zero(mvcomp->sign);
  sign_cost[1] = vp9_cost_one(mvcomp->sign);
  vp9_cost_tokens(class_cost, mvcomp->classes, vp9_mv_class_tree);
  vp9_cost_tokens(class0_cost, mvcomp->class0, vp9_mv_class0_tree);
  for (i = 0; i < MV_OFFSET_BITS; ++i) {
    bits_cost[i][0] = vp9_cost_zero(mvcomp->bits[i]);
    bits_cost[i][1] = vp9_cost_one(mvcomp->bits[i]);
  }

  for (i = 0; i < CLASS0_SIZE; ++i)
    vp9_cost_tokens(class0_fp_cost[i], mvcomp->class0_fp[i], vp9_mv_fp_tree);
  vp9_cost_tokens(fp_cost, mvcomp->fp, vp9_mv_fp_tree);

  if (usehp) {
    class0_hp_cost[0] = vp9_cost_zero(mvcomp->class0_hp);
    class0_hp_cost[1] = vp9_cost_one(mvcomp->class0_hp);
    hp_cost[0] = vp9_cost_zero(mvcomp->hp);
    hp_cost[1] = vp9_cost_one(mvcomp->hp);
  }
  mvcost[0] = 0;
  for (v = 1; v <= MV_MAX; ++v) {
    int z, c, o, d, e, f, cost = 0;
    z = v - 1;
    c = vp9_get_mv_class(z, &o);
    cost += class_cost[c];
    d = (o >> 3);      /* int mv data */
    f = (o >> 1) & 3;  /* fractional pel mv data */
    e = (o & 1);       /* high precision mv data */
    if (c == MV_CLASS_0) {
      cost += class0_cost[d];
    } else {
      int i, b;
      b = c + CLASS0_BITS - 1;  /* number of bits */
      for (i = 0; i < b; ++i)
        cost += bits_cost[i][((d >> i) & 1)];
    }
    if (c == MV_CLASS_0) {
      cost += class0_fp_cost[d][f];
    } else {
      cost += fp_cost[f];
    }
    if (usehp) {
      if (c == MV_CLASS_0) {
        cost += class0_hp_cost[e];
      } else {
        cost += hp_cost[e];
      }
    }
    mvcost[v] = cost + sign_cost[0];
    mvcost[-v] = cost + sign_cost[1];
  }
}

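// Decides whether transmitting an updated probability for one branch pays
// off. The savings cur_b - mod_b (in 1/256-bit units) must exceed the side
// cost of the update itself: a 7-bit literal (7 * 256) plus the extra cost
// of coding the update flag as a one rather than the zero that would be
// written anyway. The low bit of the candidate probability is forced to 1
// so only its top 7 bits need to be sent.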
static int update_mv(vp9_writer *w, const unsigned int ct[2],
                     vp9_prob *cur_p, vp9_prob new_p, vp9_prob upd_p) {
  vp9_prob mod_p = new_p | 1;
  const int cur_b = cost_branch256(ct, *cur_p);
  const int mod_b = cost_branch256(ct, mod_p);
  const int cost = 7 * 256 + (vp9_cost_one(upd_p) - vp9_cost_zero(upd_p));
  if (cur_b - mod_b > cost) {
    *cur_p = mod_p;
    vp9_write(w, 1, upd_p);
    vp9_write_literal(w, mod_p >> 1, 7);
    return 1;
  } else {
    vp9_write(w, 0, upd_p);
    return 0;
  }
}

static void counts_to_nmv_context(
    nmv_context_counts *nmv_count,
    nmv_context *prob,
    int usehp,
    unsigned int (*branch_ct_joint)[2],
    unsigned int (*branch_ct_sign)[2],
    unsigned int (*branch_ct_classes)[MV_CLASSES - 1][2],
    unsigned int (*branch_ct_class0)[CLASS0_SIZE - 1][2],
    unsigned int (*branch_ct_bits)[MV_OFFSET_BITS][2],
    unsigned int (*branch_ct_class0_fp)[CLASS0_SIZE][4 - 1][2],
    unsigned int (*branch_ct_fp)[4 - 1][2],
    unsigned int (*branch_ct_class0_hp)[2],
    unsigned int (*branch_ct_hp)[2]) {
  int i, j, k;
  vp9_tree_probs_from_distribution(vp9_mv_joint_tree,
                                   prob->joints,
                                   branch_ct_joint,
                                   nmv_count->joints, 0);
  for (i = 0; i < 2; ++i) {
    const uint32_t s0 = nmv_count->comps[i].sign[0];
    const uint32_t s1 = nmv_count->comps[i].sign[1];

    prob->comps[i].sign = get_binary_prob(s0, s1);
    branch_ct_sign[i][0] = s0;
    branch_ct_sign[i][1] = s1;
    vp9_tree_probs_from_distribution(vp9_mv_class_tree,
                                     prob->comps[i].classes,
                                     branch_ct_classes[i],
                                     nmv_count->comps[i].classes, 0);
    vp9_tree_probs_from_distribution(vp9_mv_class0_tree,
                                     prob->comps[i].class0,
                                     branch_ct_class0[i],
                                     nmv_count->comps[i].class0, 0);
    for (j = 0; j < MV_OFFSET_BITS; ++j) {
      const uint32_t b0 = nmv_count->comps[i].bits[j][0];
      const uint32_t b1 = nmv_count->comps[i].bits[j][1];

      prob->comps[i].bits[j] = get_binary_prob(b0, b1);
      branch_ct_bits[i][j][0] = b0;
      branch_ct_bits[i][j][1] = b1;
    }
  }
  for (i = 0; i < 2; ++i) {
    for (k = 0; k < CLASS0_SIZE; ++k) {
      vp9_tree_probs_from_distribution(vp9_mv_fp_tree,
                                       prob->comps[i].class0_fp[k],
                                       branch_ct_class0_fp[i][k],
                                       nmv_count->comps[i].class0_fp[k], 0);
    }
    vp9_tree_probs_from_distribution(vp9_mv_fp_tree,
                                     prob->comps[i].fp,
                                     branch_ct_fp[i],
                                     nmv_count->comps[i].fp, 0);
  }
  if (usehp) {
    for (i = 0; i < 2; ++i) {
      const uint32_t c0_hp0 = nmv_count->comps[i].class0_hp[0];
      const uint32_t c0_hp1 = nmv_count->comps[i].class0_hp[1];
      const uint32_t hp0 = nmv_count->comps[i].hp[0];
      const uint32_t hp1 = nmv_count->comps[i].hp[1];

      prob->comps[i].class0_hp = get_binary_prob(c0_hp0, c0_hp1);
      branch_ct_class0_hp[i][0] = c0_hp0;
      branch_ct_class0_hp[i][1] = c0_hp1;

      prob->comps[i].hp = get_binary_prob(hp0, hp1);
      branch_ct_hp[i][0] = hp0;
      branch_ct_hp[i][1] = hp1;
    }
  }
}

void vp9_write_nmv_probs(VP9_COMP* const cpi, int usehp, vp9_writer* const bc) {
  int i, j;
  nmv_context prob;
  unsigned int branch_ct_joint[MV_JOINTS - 1][2];
  unsigned int branch_ct_sign[2][2];
  unsigned int branch_ct_classes[2][MV_CLASSES - 1][2];
  unsigned int branch_ct_class0[2][CLASS0_SIZE - 1][2];
  unsigned int branch_ct_bits[2][MV_OFFSET_BITS][2];
  unsigned int branch_ct_class0_fp[2][CLASS0_SIZE][4 - 1][2];
  unsigned int branch_ct_fp[2][4 - 1][2];
  unsigned int branch_ct_class0_hp[2][2];
  unsigned int branch_ct_hp[2][2];
  nmv_context *mvc = &cpi->common.fc.nmvc;

  counts_to_nmv_context(&cpi->NMVcount, &prob, usehp,
                        branch_ct_joint, branch_ct_sign, branch_ct_classes,
                        branch_ct_class0, branch_ct_bits,
                        branch_ct_class0_fp, branch_ct_fp,
                        branch_ct_class0_hp, branch_ct_hp);

  for (j = 0; j < MV_JOINTS - 1; ++j)
    update_mv(bc, branch_ct_joint[j], &mvc->joints[j], prob.joints[j],
              NMV_UPDATE_PROB);

  for (i = 0; i < 2; ++i) {
    update_mv(bc, branch_ct_sign[i], &mvc->comps[i].sign,
              prob.comps[i].sign, NMV_UPDATE_PROB);
    for (j = 0; j < MV_CLASSES - 1; ++j)
      update_mv(bc, branch_ct_classes[i][j], &mvc->comps[i].classes[j],
                prob.comps[i].classes[j], NMV_UPDATE_PROB);

    for (j = 0; j < CLASS0_SIZE - 1; ++j)
      update_mv(bc, branch_ct_class0[i][j], &mvc->comps[i].class0[j],
                prob.comps[i].class0[j], NMV_UPDATE_PROB);

    for (j = 0; j < MV_OFFSET_BITS; ++j)
      update_mv(bc, branch_ct_bits[i][j], &mvc->comps[i].bits[j],
                prob.comps[i].bits[j], NMV_UPDATE_PROB);
  }

  for (i = 0; i < 2; ++i) {
    for (j = 0; j < CLASS0_SIZE; ++j) {
      int k;
      for (k = 0; k < 3; ++k)
        update_mv(bc, branch_ct_class0_fp[i][j][k],
                  &mvc->comps[i].class0_fp[j][k],
                  prob.comps[i].class0_fp[j][k], NMV_UPDATE_PROB);
    }

    for (j = 0; j < 3; ++j)
      update_mv(bc, branch_ct_fp[i][j], &mvc->comps[i].fp[j],
                prob.comps[i].fp[j], NMV_UPDATE_PROB);
  }

  if (usehp) {
    for (i = 0; i < 2; ++i) {
      update_mv(bc, branch_ct_class0_hp[i], &mvc->comps[i].class0_hp,
                prob.comps[i].class0_hp, NMV_UPDATE_PROB);
      update_mv(bc, branch_ct_hp[i], &mvc->comps[i].hp,
                prob.comps[i].hp, NMV_UPDATE_PROB);
    }
  }
}

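// Writes one motion vector difference as (joint, [row component],
// [col component]): the joint symbol signals which of the two components
// are nonzero, and only those components are coded, each against its own
// nmv_component context.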
void vp9_encode_mv(VP9_COMP* cpi, vp9_writer* w,
                   const MV* mv, const MV* ref,
                   const nmv_context* mvctx, int usehp) {
  const MV diff = {mv->row - ref->row,
                   mv->col - ref->col};
  const MV_JOINT_TYPE j = vp9_get_mv_joint(&diff);
  usehp = usehp && vp9_use_mv_hp(ref);

  write_token(w, vp9_mv_joint_tree, mvctx->joints, &vp9_mv_joint_encodings[j]);
  if (mv_joint_vertical(j))
    encode_mv_component(w, diff.row, &mvctx->comps[0], usehp);

  if (mv_joint_horizontal(j))
    encode_mv_component(w, diff.col, &mvctx->comps[1], usehp);

  // If auto_mv_step_size is enabled then keep track of the largest
  // motion vector component used.
  if (!cpi->dummy_packing && cpi->sf.auto_mv_step_size) {
    unsigned int maxv = MAX(abs(mv->row), abs(mv->col)) >> 3;
    cpi->max_mv_magnitude = MAX(maxv, cpi->max_mv_magnitude);
  }
}

void vp9_build_nmv_cost_table(int *mvjoint,
                              int *mvcost[2],
                              const nmv_context* const mvctx,
                              int usehp,
                              int mvc_flag_v,
                              int mvc_flag_h) {
  vp9_clear_system_state();
  vp9_cost_tokens(mvjoint, mvctx->joints, vp9_mv_joint_tree);
  if (mvc_flag_v)
    build_nmv_component_cost_table(mvcost[0], &mvctx->comps[0], usehp);
  if (mvc_flag_h)
    build_nmv_component_cost_table(mvcost[1], &mvctx->comps[1], usehp);
}

void vp9_update_nmv_count(VP9_COMP *cpi, MACROBLOCK *x,
                          int_mv *best_ref_mv, int_mv *second_best_ref_mv) {
  MODE_INFO *mi = x->e_mbd.mi_8x8[0];
  MB_MODE_INFO *const mbmi = &mi->mbmi;
  MV diff;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[mbmi->sb_type];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[mbmi->sb_type];
  int idx, idy;

  if (mbmi->sb_type < BLOCK_8X8) {
    PARTITION_INFO *pi = x->partition_info;
    for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
      for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
        const int i = idy * 2 + idx;
        if (pi->bmi[i].mode == NEWMV) {
          diff.row = mi->bmi[i].as_mv[0].as_mv.row - best_ref_mv->as_mv.row;
          diff.col = mi->bmi[i].as_mv[0].as_mv.col - best_ref_mv->as_mv.col;
          vp9_inc_mv(&diff, &cpi->NMVcount);

          if (mi->mbmi.ref_frame[1] > INTRA_FRAME) {
            diff.row = mi->bmi[i].as_mv[1].as_mv.row -
                       second_best_ref_mv->as_mv.row;
            diff.col = mi->bmi[i].as_mv[1].as_mv.col -
                       second_best_ref_mv->as_mv.col;
            vp9_inc_mv(&diff, &cpi->NMVcount);
          }
        }
      }
    }
  } else if (mbmi->mode == NEWMV) {
    diff.row = mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row;
    diff.col = mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col;
    vp9_inc_mv(&diff, &cpi->NMVcount);

    if (mbmi->ref_frame[1] > INTRA_FRAME) {
      diff.row = mbmi->mv[1].as_mv.row - second_best_ref_mv->as_mv.row;
      diff.col = mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col;
      vp9_inc_mv(&diff, &cpi->NMVcount);
    }
  }
}