Home | History | Annotate | Download | only in generic
      1 /*
      2  *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
      3  *
      4  *  Use of this source code is governed by a BSD-style license
      5  *  that can be found in the LICENSE file in the root of the source
      6  *  tree. An additional intellectual property rights grant can be found
      7  *  in the file PATENTS.  All contributing project authors may
      8  *  be found in the AUTHORS file in the root of the source tree.
      9  */
     10 
     11 
     12 #include "vpx_ports/config.h"
     13 #include "variance.h"
     14 #include "onyx_int.h"
     15 
     16 
/* Architecture-specific initializers; each overrides the C defaults set in
 * vp8_cmachine_specific_config() with optimized variants when available. */
void vp8_arch_x86_encoder_init(VP8_COMP *cpi);
void vp8_arch_arm_encoder_init(VP8_COMP *cpi);


/* Global function pointer for the fast quantizer; defaults to the plain-C
 * implementation and may be repointed by the arch-specific init above. */
void (*vp8_fast_quantize_b)(BLOCK *b, BLOCKD *d);
extern void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d);

/* Global function pointer for partial-frame copy (used by recode-loop
 * screening); bound to the C version in vp8_cmachine_specific_config(). */
void (*vp8_yv12_copy_partial_frame_ptr)(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
extern void vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
     26 
     27 void vp8_cmachine_specific_config(VP8_COMP *cpi)
     28 {
     29 #if CONFIG_RUNTIME_CPU_DETECT
     30     cpi->rtcd.common                    = &cpi->common.rtcd;
     31     cpi->rtcd.variance.sad16x16              = vp8_sad16x16_c;
     32     cpi->rtcd.variance.sad16x8               = vp8_sad16x8_c;
     33     cpi->rtcd.variance.sad8x16               = vp8_sad8x16_c;
     34     cpi->rtcd.variance.sad8x8                = vp8_sad8x8_c;
     35     cpi->rtcd.variance.sad4x4                = vp8_sad4x4_c;
     36 
     37     cpi->rtcd.variance.sad16x16x3            = vp8_sad16x16x3_c;
     38     cpi->rtcd.variance.sad16x8x3             = vp8_sad16x8x3_c;
     39     cpi->rtcd.variance.sad8x16x3             = vp8_sad8x16x3_c;
     40     cpi->rtcd.variance.sad8x8x3              = vp8_sad8x8x3_c;
     41     cpi->rtcd.variance.sad4x4x3              = vp8_sad4x4x3_c;
     42 
     43     cpi->rtcd.variance.sad16x16x8            = vp8_sad16x16x8_c;
     44     cpi->rtcd.variance.sad16x8x8             = vp8_sad16x8x8_c;
     45     cpi->rtcd.variance.sad8x16x8             = vp8_sad8x16x8_c;
     46     cpi->rtcd.variance.sad8x8x8              = vp8_sad8x8x8_c;
     47     cpi->rtcd.variance.sad4x4x8              = vp8_sad4x4x8_c;
     48 
     49     cpi->rtcd.variance.sad16x16x4d           = vp8_sad16x16x4d_c;
     50     cpi->rtcd.variance.sad16x8x4d            = vp8_sad16x8x4d_c;
     51     cpi->rtcd.variance.sad8x16x4d            = vp8_sad8x16x4d_c;
     52     cpi->rtcd.variance.sad8x8x4d             = vp8_sad8x8x4d_c;
     53     cpi->rtcd.variance.sad4x4x4d             = vp8_sad4x4x4d_c;
     54 
     55     cpi->rtcd.variance.var4x4                = vp8_variance4x4_c;
     56     cpi->rtcd.variance.var8x8                = vp8_variance8x8_c;
     57     cpi->rtcd.variance.var8x16               = vp8_variance8x16_c;
     58     cpi->rtcd.variance.var16x8               = vp8_variance16x8_c;
     59     cpi->rtcd.variance.var16x16              = vp8_variance16x16_c;
     60 
     61     cpi->rtcd.variance.subpixvar4x4          = vp8_sub_pixel_variance4x4_c;
     62     cpi->rtcd.variance.subpixvar8x8          = vp8_sub_pixel_variance8x8_c;
     63     cpi->rtcd.variance.subpixvar8x16         = vp8_sub_pixel_variance8x16_c;
     64     cpi->rtcd.variance.subpixvar16x8         = vp8_sub_pixel_variance16x8_c;
     65     cpi->rtcd.variance.subpixvar16x16        = vp8_sub_pixel_variance16x16_c;
     66     cpi->rtcd.variance.halfpixvar16x16_h     = vp8_variance_halfpixvar16x16_h_c;
     67     cpi->rtcd.variance.halfpixvar16x16_v     = vp8_variance_halfpixvar16x16_v_c;
     68     cpi->rtcd.variance.halfpixvar16x16_hv    = vp8_variance_halfpixvar16x16_hv_c;
     69     cpi->rtcd.variance.subpixmse16x16        = vp8_sub_pixel_mse16x16_c;
     70 
     71     cpi->rtcd.variance.mse16x16              = vp8_mse16x16_c;
     72     cpi->rtcd.variance.getmbss               = vp8_get_mb_ss_c;
     73 
     74     cpi->rtcd.variance.get16x16prederror     = vp8_get16x16pred_error_c;
     75     cpi->rtcd.variance.get8x8var             = vp8_get8x8var_c;
     76     cpi->rtcd.variance.get16x16var           = vp8_get16x16var_c;;
     77     cpi->rtcd.variance.get4x4sse_cs          = vp8_get4x4sse_cs_c;
     78 
     79     cpi->rtcd.fdct.short4x4                  = vp8_short_fdct4x4_c;
     80     cpi->rtcd.fdct.short8x4                  = vp8_short_fdct8x4_c;
     81     cpi->rtcd.fdct.fast4x4                   = vp8_short_fdct4x4_c;
     82     cpi->rtcd.fdct.fast8x4                   = vp8_short_fdct8x4_c;
     83     cpi->rtcd.fdct.walsh_short4x4            = vp8_short_walsh4x4_c;
     84 
     85     cpi->rtcd.encodemb.berr                  = vp8_block_error_c;
     86     cpi->rtcd.encodemb.mberr                 = vp8_mbblock_error_c;
     87     cpi->rtcd.encodemb.mbuverr               = vp8_mbuverror_c;
     88     cpi->rtcd.encodemb.subb                  = vp8_subtract_b_c;
     89     cpi->rtcd.encodemb.submby                = vp8_subtract_mby_c;
     90     cpi->rtcd.encodemb.submbuv               = vp8_subtract_mbuv_c;
     91 
     92     cpi->rtcd.quantize.quantb                = vp8_regular_quantize_b;
     93     cpi->rtcd.quantize.fastquantb            = vp8_fast_quantize_b_c;
     94 
     95     cpi->rtcd.search.full_search             = vp8_full_search_sad;
     96     cpi->rtcd.search.diamond_search          = vp8_diamond_search_sad;
     97 #endif
     98 
     99     // Pure C:
    100     vp8_yv12_copy_partial_frame_ptr = vp8_yv12_copy_partial_frame;
    101 
    102 
    103 #if ARCH_X86 || ARCH_X86_64
    104     vp8_arch_x86_encoder_init(cpi);
    105 #endif
    106 
    107 #if ARCH_ARM
    108     vp8_arch_arm_encoder_init(cpi);
    109 #endif
    110 
    111 }
    112