/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <assert.h>
#include <math.h>
#include <string.h>

#include "config/aom_scale_rtcd.h"

#include "aom/aom_integer.h"
#include "av1/common/cdef.h"
#include "av1/common/onyxc_int.h"
#include "av1/common/reconinter.h"
#include "av1/encoder/encoder.h"

#define REDUCED_PRI_STRENGTHS 8
#define REDUCED_TOTAL_STRENGTHS (REDUCED_PRI_STRENGTHS * CDEF_SEC_STRENGTHS)
#define TOTAL_STRENGTHS (CDEF_PRI_STRENGTHS * CDEF_SEC_STRENGTHS)

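/* Fast search only tries REDUCED_PRI_STRENGTHS primary strengths; priconv
   maps a reduced primary-strength index back to its full-range value. */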
static int priconv[REDUCED_PRI_STRENGTHS] = { 0, 1, 2, 3, 5, 7, 10, 13 };

/* Search for the best strength to add as an option, knowing we
   already selected nb_strengths options. */
static uint64_t search_one(int *lev, int nb_strengths,
                           uint64_t mse[][TOTAL_STRENGTHS], int sb_count,
                           int fast) {
  uint64_t tot_mse[TOTAL_STRENGTHS];
  const int total_strengths = fast ? REDUCED_TOTAL_STRENGTHS : TOTAL_STRENGTHS;
  int i, j;
  uint64_t best_tot_mse = (uint64_t)1 << 63;
  int best_id = 0;
  memset(tot_mse, 0, sizeof(tot_mse));
  for (i = 0; i < sb_count; i++) {
    int gi;
    uint64_t best_mse = (uint64_t)1 << 63;
    /* Find best mse among already selected options. */
    for (gi = 0; gi < nb_strengths; gi++) {
      if (mse[i][lev[gi]] < best_mse) {
        best_mse = mse[i][lev[gi]];
      }
    }
    /* Find best mse when adding each possible new option. */
    for (j = 0; j < total_strengths; j++) {
      uint64_t best = best_mse;
      if (mse[i][j] < best) best = mse[i][j];
      tot_mse[j] += best;
    }
  }
  for (j = 0; j < total_strengths; j++) {
    if (tot_mse[j] < best_tot_mse) {
      best_tot_mse = tot_mse[j];
      best_id = j;
    }
  }
  lev[nb_strengths] = best_id;
  return best_tot_mse;
}

/* Search for the best luma+chroma strength to add as an option, knowing we
   already selected nb_strengths options. */
static uint64_t search_one_dual(int *lev0, int *lev1, int nb_strengths,
                                uint64_t (**mse)[TOTAL_STRENGTHS], int sb_count,
                                int fast) {
  uint64_t tot_mse[TOTAL_STRENGTHS][TOTAL_STRENGTHS];
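  /* tot_mse[j][k] accumulates the frame-level cost of adding the pair
     (luma strength j, chroma strength k) as one extra joint preset. */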
  int i, j;
  uint64_t best_tot_mse = (uint64_t)1 << 63;
  int best_id0 = 0;
  int best_id1 = 0;
  const int total_strengths = fast ? REDUCED_TOTAL_STRENGTHS : TOTAL_STRENGTHS;
  memset(tot_mse, 0, sizeof(tot_mse));
  for (i = 0; i < sb_count; i++) {
    int gi;
    uint64_t best_mse = (uint64_t)1 << 63;
    /* Find best mse among already selected options. */
    for (gi = 0; gi < nb_strengths; gi++) {
      uint64_t curr = mse[0][i][lev0[gi]];
      curr += mse[1][i][lev1[gi]];
      if (curr < best_mse) {
        best_mse = curr;
      }
    }
    /* Find best mse when adding each possible new option. */
    for (j = 0; j < total_strengths; j++) {
      int k;
      for (k = 0; k < total_strengths; k++) {
        uint64_t best = best_mse;
        uint64_t curr = mse[0][i][j];
        curr += mse[1][i][k];
        if (curr < best) best = curr;
        tot_mse[j][k] += best;
      }
    }
  }
  for (j = 0; j < total_strengths; j++) {
    int k;
    for (k = 0; k < total_strengths; k++) {
      if (tot_mse[j][k] < best_tot_mse) {
        best_tot_mse = tot_mse[j][k];
        best_id0 = j;
        best_id1 = k;
      }
    }
  }
  lev0[nb_strengths] = best_id0;
  lev1[nb_strengths] = best_id1;
  return best_tot_mse;
}

    117 static uint64_t joint_strength_search(int *best_lev, int nb_strengths,
    118                                       uint64_t mse[][TOTAL_STRENGTHS],
    119                                       int sb_count, int fast) {
    120   uint64_t best_tot_mse;
    121   int i;
    122   best_tot_mse = (uint64_t)1 << 63;
    123   /* Greedy search: add one strength options at a time. */
    124   for (i = 0; i < nb_strengths; i++) {
    125     best_tot_mse = search_one(best_lev, i, mse, sb_count, fast);
    126   }
    127   /* Trying to refine the greedy search by reconsidering each
    128      already-selected option. */
    129   if (!fast) {
    130     for (i = 0; i < 4 * nb_strengths; i++) {
    131       int j;
    132       for (j = 0; j < nb_strengths - 1; j++) best_lev[j] = best_lev[j + 1];
    133       best_tot_mse =
    134           search_one(best_lev, nb_strengths - 1, mse, sb_count, fast);
    135     }
    136   }
    137   return best_tot_mse;
    138 }

/* Search for the set of luma+chroma strengths that minimizes mse. */
static uint64_t joint_strength_search_dual(int *best_lev0, int *best_lev1,
                                           int nb_strengths,
                                           uint64_t (**mse)[TOTAL_STRENGTHS],
                                           int sb_count, int fast) {
  uint64_t best_tot_mse;
  int i;
  best_tot_mse = (uint64_t)1 << 63;
  /* Greedy search: add one strength option at a time. */
  for (i = 0; i < nb_strengths; i++) {
    best_tot_mse =
        search_one_dual(best_lev0, best_lev1, i, mse, sb_count, fast);
  }
  /* Try to refine the greedy search by reconsidering each
     already-selected option. */
  for (i = 0; i < 4 * nb_strengths; i++) {
    int j;
    for (j = 0; j < nb_strengths - 1; j++) {
      best_lev0[j] = best_lev0[j + 1];
      best_lev1[j] = best_lev1[j + 1];
    }
    best_tot_mse = search_one_dual(best_lev0, best_lev1, nb_strengths - 1, mse,
                                   sb_count, fast);
  }
  return best_tot_mse;
}

/* FIXME: SSE-optimize this. */
static void copy_sb16_16(uint16_t *dst, int dstride, const uint16_t *src,
                         int src_voffset, int src_hoffset, int sstride,
                         int vsize, int hsize) {
  int r, c;
  const uint16_t *base = &src[src_voffset * sstride + src_hoffset];
  for (r = 0; r < vsize; r++) {
    for (c = 0; c < hsize; c++) {
      dst[r * dstride + c] = base[r * sstride + c];
    }
  }
}

static INLINE uint64_t dist_8x8_16bit(uint16_t *dst, int dstride, uint16_t *src,
                                      int sstride, int coeff_shift) {
  uint64_t svar = 0;
  uint64_t dvar = 0;
  uint64_t sum_s = 0;
  uint64_t sum_d = 0;
  uint64_t sum_s2 = 0;
  uint64_t sum_d2 = 0;
  uint64_t sum_sd = 0;
  int i, j;
  for (i = 0; i < 8; i++) {
    for (j = 0; j < 8; j++) {
      sum_s += src[i * sstride + j];
      sum_d += dst[i * dstride + j];
      sum_s2 += src[i * sstride + j] * src[i * sstride + j];
      sum_d2 += dst[i * dstride + j] * dst[i * dstride + j];
      sum_sd += src[i * sstride + j] * dst[i * dstride + j];
    }
  }
  /* Compute the variance -- the calculation cannot go negative. */
  svar = sum_s2 - ((sum_s * sum_s + 32) >> 6);
  dvar = sum_d2 - ((sum_d * sum_d + 32) >> 6);
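  /* The distortion is the block SSE weighted by an activity factor derived
     from the two variances (roughly the ratio of their arithmetic to their
     geometric mean), with the constants scaled to the bit depth through
     coeff_shift. */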
  return (uint64_t)floor(
      .5 + (sum_d2 + sum_s2 - 2 * sum_sd) * .5 *
               (svar + dvar + (400 << 2 * coeff_shift)) /
               (sqrt((20000 << 4 * coeff_shift) + svar * (double)dvar)));
}

static INLINE uint64_t mse_8x8_16bit(uint16_t *dst, int dstride, uint16_t *src,
                                     int sstride) {
  uint64_t sum = 0;
  int i, j;
  for (i = 0; i < 8; i++) {
    for (j = 0; j < 8; j++) {
      int e = dst[i * dstride + j] - src[i * sstride + j];
      sum += e * e;
    }
  }
  return sum;
}

static INLINE uint64_t mse_4x4_16bit(uint16_t *dst, int dstride, uint16_t *src,
                                     int sstride) {
  uint64_t sum = 0;
  int i, j;
  for (i = 0; i < 4; i++) {
    for (j = 0; j < 4; j++) {
      int e = dst[i * dstride + j] - src[i * sstride + j];
      sum += e * e;
    }
  }
  return sum;
}

/* Compute MSE only on the blocks we filtered. */
uint64_t compute_cdef_dist(uint16_t *dst, int dstride, uint16_t *src,
                           cdef_list *dlist, int cdef_count, BLOCK_SIZE bsize,
                           int coeff_shift, int pli) {
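  /* Luma 8x8 units are scored with the variance-weighted distortion above;
     chroma planes and sub-8x8 unit sizes fall back to plain MSE. */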
  uint64_t sum = 0;
  int bi, bx, by;
  if (bsize == BLOCK_8X8) {
    for (bi = 0; bi < cdef_count; bi++) {
      by = dlist[bi].by;
      bx = dlist[bi].bx;
      if (pli == 0) {
        sum += dist_8x8_16bit(&dst[(by << 3) * dstride + (bx << 3)], dstride,
                              &src[bi << (3 + 3)], 8, coeff_shift);
      } else {
        sum += mse_8x8_16bit(&dst[(by << 3) * dstride + (bx << 3)], dstride,
                             &src[bi << (3 + 3)], 8);
      }
    }
  } else if (bsize == BLOCK_4X8) {
    for (bi = 0; bi < cdef_count; bi++) {
      by = dlist[bi].by;
      bx = dlist[bi].bx;
      sum += mse_4x4_16bit(&dst[(by << 3) * dstride + (bx << 2)], dstride,
                           &src[bi << (3 + 2)], 4);
      sum += mse_4x4_16bit(&dst[((by << 3) + 4) * dstride + (bx << 2)], dstride,
                           &src[(bi << (3 + 2)) + 4 * 4], 4);
    }
  } else if (bsize == BLOCK_8X4) {
    for (bi = 0; bi < cdef_count; bi++) {
      by = dlist[bi].by;
      bx = dlist[bi].bx;
      sum += mse_4x4_16bit(&dst[(by << 2) * dstride + (bx << 3)], dstride,
                           &src[bi << (2 + 3)], 8);
      sum += mse_4x4_16bit(&dst[(by << 2) * dstride + (bx << 3) + 4], dstride,
                           &src[(bi << (2 + 3)) + 4], 8);
    }
  } else {
    assert(bsize == BLOCK_4X4);
    for (bi = 0; bi < cdef_count; bi++) {
      by = dlist[bi].by;
      bx = dlist[bi].bx;
      sum += mse_4x4_16bit(&dst[(by << 2) * dstride + (bx << 2)], dstride,
                           &src[bi << (2 + 2)], 4);
    }
  }
  return sum >> 2 * coeff_shift;
}

void av1_cdef_search(YV12_BUFFER_CONFIG *frame, const YV12_BUFFER_CONFIG *ref,
                     AV1_COMMON *cm, MACROBLOCKD *xd, int fast) {
  CdefInfo *const cdef_info = &cm->cdef_info;
  int r, c;
  int fbr, fbc;
  uint16_t *src[3];
  uint16_t *ref_coeff[3];
  static cdef_list dlist[MI_SIZE_128X128 * MI_SIZE_128X128];
  int dir[CDEF_NBLOCKS][CDEF_NBLOCKS] = { { 0 } };
  int var[CDEF_NBLOCKS][CDEF_NBLOCKS] = { { 0 } };
  int stride[3];
  int bsize[3];
  int mi_wide_l2[3];
  int mi_high_l2[3];
  int xdec[3];
  int ydec[3];
  int pli;
  int cdef_count;
  int coeff_shift = AOMMAX(cm->seq_params.bit_depth - 8, 0);
  uint64_t best_tot_mse = (uint64_t)1 << 63;
  uint64_t tot_mse;
  int sb_count;
  int nvfb = (cm->mi_rows + MI_SIZE_64X64 - 1) / MI_SIZE_64X64;
  int nhfb = (cm->mi_cols + MI_SIZE_64X64 - 1) / MI_SIZE_64X64;
  int *sb_index = aom_malloc(nvfb * nhfb * sizeof(*sb_index));
  int *selected_strength =
      aom_malloc(nvfb * nhfb * sizeof(*selected_strength));
  uint64_t(*mse[2])[TOTAL_STRENGTHS];
  int pri_damping = 3 + (cm->base_qindex >> 6);
  int sec_damping = 3 + (cm->base_qindex >> 6);
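  /* Both damping values grow with the base quantizer index, from 3 at low
     qindex up to 6 at the top of the range. */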
  int i;
  int nb_strengths;
  int nb_strength_bits;
  int quantizer;
  double lambda;
  const int num_planes = av1_num_planes(cm);
  const int total_strengths = fast ? REDUCED_TOTAL_STRENGTHS : TOTAL_STRENGTHS;
  DECLARE_ALIGNED(32, uint16_t, inbuf[CDEF_INBUF_SIZE]);
  uint16_t *in;
  DECLARE_ALIGNED(32, uint16_t, tmp_dst[1 << (MAX_SB_SIZE_LOG2 * 2)]);
  quantizer = av1_ac_quant_Q3(cm->base_qindex, 0, cm->seq_params.bit_depth) >>
              (cm->seq_params.bit_depth - 8);
  lambda = .12 * quantizer * quantizer / 256.;
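  /* Rate-distortion lambda used to weigh signalling bits against MSE; it
     scales with the square of the AC quantizer step. */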

  av1_setup_dst_planes(xd->plane, cm->seq_params.sb_size, frame, 0, 0, 0,
                       num_planes);
  mse[0] = aom_malloc(sizeof(**mse) * nvfb * nhfb);
  mse[1] = aom_malloc(sizeof(**mse) * nvfb * nhfb);
  for (pli = 0; pli < num_planes; pli++) {
    uint8_t *ref_buffer;
    int ref_stride;
    switch (pli) {
      case 0:
        ref_buffer = ref->y_buffer;
        ref_stride = ref->y_stride;
        break;
      case 1:
        ref_buffer = ref->u_buffer;
        ref_stride = ref->uv_stride;
        break;
      case 2:
        ref_buffer = ref->v_buffer;
        ref_stride = ref->uv_stride;
        break;
    }
    src[pli] = aom_memalign(
        32, sizeof(**src) * cm->mi_rows * cm->mi_cols * MI_SIZE * MI_SIZE);
    ref_coeff[pli] = aom_memalign(32, sizeof(**ref_coeff) * cm->mi_rows *
                                          cm->mi_cols * MI_SIZE * MI_SIZE);
    xdec[pli] = xd->plane[pli].subsampling_x;
    ydec[pli] = xd->plane[pli].subsampling_y;
    bsize[pli] = ydec[pli] ? (xdec[pli] ? BLOCK_4X4 : BLOCK_8X4)
                           : (xdec[pli] ? BLOCK_4X8 : BLOCK_8X8);
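    /* Per-plane CDEF unit size: 8x8 when the plane is not subsampled, 4x4 for
       4:2:0 chroma, and 4x8 or 8x4 when only one dimension is subsampled. */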
    stride[pli] = cm->mi_cols << MI_SIZE_LOG2;
    mi_wide_l2[pli] = MI_SIZE_LOG2 - xd->plane[pli].subsampling_x;
    mi_high_l2[pli] = MI_SIZE_LOG2 - xd->plane[pli].subsampling_y;

    const int frame_height =
        (cm->mi_rows * MI_SIZE) >> xd->plane[pli].subsampling_y;
    const int frame_width =
        (cm->mi_cols * MI_SIZE) >> xd->plane[pli].subsampling_x;

    for (r = 0; r < frame_height; ++r) {
      for (c = 0; c < frame_width; ++c) {
        if (cm->seq_params.use_highbitdepth) {
          src[pli][r * stride[pli] + c] = CONVERT_TO_SHORTPTR(
              xd->plane[pli].dst.buf)[r * xd->plane[pli].dst.stride + c];
          ref_coeff[pli][r * stride[pli] + c] =
              CONVERT_TO_SHORTPTR(ref_buffer)[r * ref_stride + c];
        } else {
          src[pli][r * stride[pli] + c] =
              xd->plane[pli].dst.buf[r * xd->plane[pli].dst.stride + c];
          ref_coeff[pli][r * stride[pli] + c] = ref_buffer[r * ref_stride + c];
        }
      }
    }
  }
  in = inbuf + CDEF_VBORDER * CDEF_BSTRIDE + CDEF_HBORDER;
  sb_count = 0;
  for (fbr = 0; fbr < nvfb; ++fbr) {
    for (fbc = 0; fbc < nhfb; ++fbc) {
      int nvb, nhb;
      int gi;
      int dirinit = 0;
      nhb = AOMMIN(MI_SIZE_64X64, cm->mi_cols - MI_SIZE_64X64 * fbc);
      nvb = AOMMIN(MI_SIZE_64X64, cm->mi_rows - MI_SIZE_64X64 * fbr);
      int hb_step = 1;
      int vb_step = 1;
      BLOCK_SIZE bs = BLOCK_64X64;
      MB_MODE_INFO *const mbmi =
          cm->mi_grid_visible[MI_SIZE_64X64 * fbr * cm->mi_stride +
                              MI_SIZE_64X64 * fbc];
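      /* 128-wide or 128-tall superblocks are handled once, from their
         top-left 64x64 filter unit; the other 64x64 units they cover are
         skipped below. */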
      if (((fbc & 1) &&
           (mbmi->sb_type == BLOCK_128X128 || mbmi->sb_type == BLOCK_128X64)) ||
          ((fbr & 1) &&
           (mbmi->sb_type == BLOCK_128X128 || mbmi->sb_type == BLOCK_64X128)))
        continue;
      if (mbmi->sb_type == BLOCK_128X128 || mbmi->sb_type == BLOCK_128X64 ||
          mbmi->sb_type == BLOCK_64X128)
        bs = mbmi->sb_type;
      if (bs == BLOCK_128X128 || bs == BLOCK_128X64) {
        nhb = AOMMIN(MI_SIZE_128X128, cm->mi_cols - MI_SIZE_64X64 * fbc);
        hb_step = 2;
      }
      if (bs == BLOCK_128X128 || bs == BLOCK_64X128) {
        nvb = AOMMIN(MI_SIZE_128X128, cm->mi_rows - MI_SIZE_64X64 * fbr);
        vb_step = 2;
      }
      // No filtering if the entire filter block is skipped
      if (sb_all_skip(cm, fbr * MI_SIZE_64X64, fbc * MI_SIZE_64X64)) continue;
      cdef_count = sb_compute_cdef_list(cm, fbr * MI_SIZE_64X64,
                                        fbc * MI_SIZE_64X64, dlist, bs);
      for (pli = 0; pli < num_planes; pli++) {
        for (i = 0; i < CDEF_INBUF_SIZE; i++) inbuf[i] = CDEF_VERY_LARGE;
        for (gi = 0; gi < total_strengths; gi++) {
          int threshold;
          uint64_t curr_mse;
          int sec_strength;
          threshold = gi / CDEF_SEC_STRENGTHS;
          if (fast) threshold = priconv[threshold];
          /* We avoid filtering the pixels for which some of the pixels to
             average are outside the frame. We could change the filter instead,
             but it would add special cases for any future vectorization. */
          int yoff = CDEF_VBORDER * (fbr != 0);
          int xoff = CDEF_HBORDER * (fbc != 0);
          int ysize = (nvb << mi_high_l2[pli]) +
                      CDEF_VBORDER * (fbr + vb_step < nvfb) + yoff;
          int xsize = (nhb << mi_wide_l2[pli]) +
                      CDEF_HBORDER * (fbc + hb_step < nhfb) + xoff;
          sec_strength = gi % CDEF_SEC_STRENGTHS;
          copy_sb16_16(&in[(-yoff * CDEF_BSTRIDE - xoff)], CDEF_BSTRIDE,
                       src[pli],
                       (fbr * MI_SIZE_64X64 << mi_high_l2[pli]) - yoff,
                       (fbc * MI_SIZE_64X64 << mi_wide_l2[pli]) - xoff,
                       stride[pli], ysize, xsize);
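          /* A secondary strength index of 3 is applied as strength 4: CDEF
             does not use a secondary strength of 3. */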
          cdef_filter_fb(NULL, tmp_dst, CDEF_BSTRIDE, in, xdec[pli], ydec[pli],
                         dir, &dirinit, var, pli, dlist, cdef_count, threshold,
                         sec_strength + (sec_strength == 3), pri_damping,
                         sec_damping, coeff_shift);
          curr_mse = compute_cdef_dist(
              ref_coeff[pli] +
                  (fbr * MI_SIZE_64X64 << mi_high_l2[pli]) * stride[pli] +
                  (fbc * MI_SIZE_64X64 << mi_wide_l2[pli]),
              stride[pli], tmp_dst, dlist, cdef_count, bsize[pli], coeff_shift,
              pli);
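          /* Luma MSE is stored in mse[0]; both chroma planes accumulate into
             mse[1], so one strength is picked jointly for U and V. */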
          if (pli < 2)
            mse[pli][sb_count][gi] = curr_mse;
          else
            mse[1][sb_count][gi] += curr_mse;
          sb_index[sb_count] =
              MI_SIZE_64X64 * fbr * cm->mi_stride + MI_SIZE_64X64 * fbc;
        }
      }
      sb_count++;
    }
  }
  nb_strength_bits = 0;
  /* Search over the number of signalled strength presets: try 1, 2, 4 and 8
     (0 to 3 signalling bits) and keep the one with the best RD cost. */
  for (i = 0; i <= 3; i++) {
    int j;
    int best_lev0[CDEF_MAX_STRENGTHS];
    int best_lev1[CDEF_MAX_STRENGTHS] = { 0 };
    nb_strengths = 1 << i;
    if (num_planes >= 3)
      tot_mse = joint_strength_search_dual(best_lev0, best_lev1, nb_strengths,
                                           mse, sb_count, fast);
    else
      tot_mse = joint_strength_search(best_lev0, nb_strengths, mse[0], sb_count,
                                      fast);
    /* Count superblock signalling cost. */
    tot_mse += (uint64_t)(sb_count * lambda * i);
    /* Count header signalling cost. */
    tot_mse += (uint64_t)(nb_strengths * lambda * CDEF_STRENGTH_BITS);
    if (tot_mse < best_tot_mse) {
      best_tot_mse = tot_mse;
      nb_strength_bits = i;
      for (j = 0; j < 1 << nb_strength_bits; j++) {
        cdef_info->cdef_strengths[j] = best_lev0[j];
        cdef_info->cdef_uv_strengths[j] = best_lev1[j];
      }
    }
  }
  nb_strengths = 1 << nb_strength_bits;

  cdef_info->cdef_bits = nb_strength_bits;
  cdef_info->nb_cdef_strengths = nb_strengths;
  for (i = 0; i < sb_count; i++) {
    int gi;
    int best_gi;
    uint64_t best_mse = (uint64_t)1 << 63;
    best_gi = 0;
    for (gi = 0; gi < cdef_info->nb_cdef_strengths; gi++) {
      uint64_t curr = mse[0][i][cdef_info->cdef_strengths[gi]];
      if (num_planes >= 3) curr += mse[1][i][cdef_info->cdef_uv_strengths[gi]];
      if (curr < best_mse) {
        best_gi = gi;
        best_mse = curr;
      }
    }
    selected_strength[i] = best_gi;
    cm->mi_grid_visible[sb_index[i]]->cdef_strength = best_gi;
  }

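  /* In fast mode the search indexed the reduced strength set, so map the
     selected presets back to full-range primary strengths before they are
     written to the frame header. */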
  if (fast) {
    for (int j = 0; j < nb_strengths; j++) {
      cdef_info->cdef_strengths[j] =
          priconv[cdef_info->cdef_strengths[j] / CDEF_SEC_STRENGTHS] *
              CDEF_SEC_STRENGTHS +
          (cdef_info->cdef_strengths[j] % CDEF_SEC_STRENGTHS);
      cdef_info->cdef_uv_strengths[j] =
          priconv[cdef_info->cdef_uv_strengths[j] / CDEF_SEC_STRENGTHS] *
              CDEF_SEC_STRENGTHS +
          (cdef_info->cdef_uv_strengths[j] % CDEF_SEC_STRENGTHS);
    }
  }
  cdef_info->cdef_pri_damping = pri_damping;
  cdef_info->cdef_sec_damping = sec_damping;
  aom_free(mse[0]);
  aom_free(mse[1]);
  for (pli = 0; pli < num_planes; pli++) {
    aom_free(src[pli]);
    aom_free(ref_coeff[pli]);
  }
  aom_free(sb_index);
  aom_free(selected_strength);
}