/* Home | History | Annotate | Download | only in arm  (source-browser navigation residue, kept as a comment) */
      1 /*
      2  *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
      3  *
      4  *  Use of this source code is governed by a BSD-style license
      5  *  that can be found in the LICENSE file in the root of the source
      6  *  tree. An additional intellectual property rights grant can be found
      7  *  in the file PATENTS.  All contributing project authors may
      8  *  be found in the AUTHORS file in the root of the source tree.
      9  */
     10 
     11 
     12 #include "vpx_ports/config.h"
     13 #include "vpx_ports/arm.h"
     14 #include "variance.h"
     15 #include "onyx_int.h"
     16 
     17 extern void (*vp8_yv12_copy_partial_frame_ptr)(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
     18 extern void vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
     19 extern void vpxyv12_copy_partial_frame_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
     20 
     21 void vp8_arch_arm_encoder_init(VP8_COMP *cpi)
     22 {
     23 #if CONFIG_RUNTIME_CPU_DETECT
     24     int flags = cpi->common.rtcd.flags;
     25     int has_edsp = flags & HAS_EDSP;
     26     int has_media = flags & HAS_MEDIA;
     27     int has_neon = flags & HAS_NEON;
     28 
     29 #if HAVE_ARMV6
     30     if (has_media)
     31     {
     32         /*cpi->rtcd.variance.sad16x16              = vp8_sad16x16_c;
     33         cpi->rtcd.variance.sad16x8               = vp8_sad16x8_c;
     34         cpi->rtcd.variance.sad8x16               = vp8_sad8x16_c;
     35         cpi->rtcd.variance.sad8x8                = vp8_sad8x8_c;
     36         cpi->rtcd.variance.sad4x4                = vp8_sad4x4_c;*/
     37 
     38         /*cpi->rtcd.variance.var4x4                = vp8_variance4x4_c;
     39         cpi->rtcd.variance.var8x8                = vp8_variance8x8_c;
     40         cpi->rtcd.variance.var8x16               = vp8_variance8x16_c;
     41         cpi->rtcd.variance.var16x8               = vp8_variance16x8_c;
     42         cpi->rtcd.variance.var16x16              = vp8_variance16x16_c;*/
     43 
     44         /*cpi->rtcd.variance.subpixvar4x4          = vp8_sub_pixel_variance4x4_c;
     45         cpi->rtcd.variance.subpixvar8x8          = vp8_sub_pixel_variance8x8_c;
     46         cpi->rtcd.variance.subpixvar8x16         = vp8_sub_pixel_variance8x16_c;
     47         cpi->rtcd.variance.subpixvar16x8         = vp8_sub_pixel_variance16x8_c;
     48         cpi->rtcd.variance.subpixvar16x16        = vp8_sub_pixel_variance16x16_c;*/
     49 
     50         /*cpi->rtcd.variance.mse16x16              = vp8_mse16x16_c;
     51         cpi->rtcd.variance.getmbss               = vp8_get_mb_ss_c;*/
     52 
     53         /*cpi->rtcd.variance.get16x16prederror     = vp8_get16x16pred_error_c;
     54         cpi->rtcd.variance.get8x8var             = vp8_get8x8var_c;
     55         cpi->rtcd.variance.get16x16var           = vp8_get16x16var_c;;
     56         cpi->rtcd.variance.get4x4sse_cs          = vp8_get4x4sse_cs_c;*/
     57 
     58         /*cpi->rtcd.fdct.short4x4                  = vp8_short_fdct4x4_c;
     59         cpi->rtcd.fdct.short8x4                  = vp8_short_fdct8x4_c;
     60         cpi->rtcd.fdct.fast4x4                   = vp8_fast_fdct4x4_c;
     61         cpi->rtcd.fdct.fast8x4                   = vp8_fast_fdct8x4_c;*/
     62         cpi->rtcd.fdct.walsh_short4x4            = vp8_short_walsh4x4_armv6;
     63 
     64         /*cpi->rtcd.encodemb.berr                  = vp8_block_error_c;
     65         cpi->rtcd.encodemb.mberr                 = vp8_mbblock_error_c;
     66         cpi->rtcd.encodemb.mbuverr               = vp8_mbuverror_c;
     67         cpi->rtcd.encodemb.subb                  = vp8_subtract_b_c;
     68         cpi->rtcd.encodemb.submby                = vp8_subtract_mby_c;
     69         cpi->rtcd.encodemb.submbuv               = vp8_subtract_mbuv_c;*/
     70 
     71         /*cpi->rtcd.quantize.quantb                = vp8_regular_quantize_b;
     72         cpi->rtcd.quantize.fastquantb            = vp8_fast_quantize_b_c;*/
     73     }
     74 #endif
     75 
     76 #if HAVE_ARMV7
     77     if (has_neon)
     78     {
     79         cpi->rtcd.variance.sad16x16              = vp8_sad16x16_neon;
     80         cpi->rtcd.variance.sad16x8               = vp8_sad16x8_neon;
     81         cpi->rtcd.variance.sad8x16               = vp8_sad8x16_neon;
     82         cpi->rtcd.variance.sad8x8                = vp8_sad8x8_neon;
     83         cpi->rtcd.variance.sad4x4                = vp8_sad4x4_neon;
     84 
     85         /*cpi->rtcd.variance.var4x4                = vp8_variance4x4_c;*/
     86         cpi->rtcd.variance.var8x8                = vp8_variance8x8_neon;
     87         cpi->rtcd.variance.var8x16               = vp8_variance8x16_neon;
     88         cpi->rtcd.variance.var16x8               = vp8_variance16x8_neon;
     89         cpi->rtcd.variance.var16x16              = vp8_variance16x16_neon;
     90 
     91         /*cpi->rtcd.variance.subpixvar4x4          = vp8_sub_pixel_variance4x4_c;*/
     92         cpi->rtcd.variance.subpixvar8x8          = vp8_sub_pixel_variance8x8_neon;
     93         /*cpi->rtcd.variance.subpixvar8x16         = vp8_sub_pixel_variance8x16_c;
     94         cpi->rtcd.variance.subpixvar16x8         = vp8_sub_pixel_variance16x8_c;*/
     95         cpi->rtcd.variance.subpixvar16x16        = vp8_sub_pixel_variance16x16_neon;
     96         cpi->rtcd.variance.halfpixvar16x16_h     = vp8_variance_halfpixvar16x16_h_neon;
     97         cpi->rtcd.variance.halfpixvar16x16_v     = vp8_variance_halfpixvar16x16_v_neon;
     98         cpi->rtcd.variance.halfpixvar16x16_hv    = vp8_variance_halfpixvar16x16_hv_neon;
     99 
    100         cpi->rtcd.variance.mse16x16              = vp8_mse16x16_neon;
    101         /*cpi->rtcd.variance.getmbss               = vp8_get_mb_ss_c;*/
    102 
    103         cpi->rtcd.variance.get16x16prederror     = vp8_get16x16pred_error_neon;
    104         /*cpi->rtcd.variance.get8x8var             = vp8_get8x8var_c;
    105         cpi->rtcd.variance.get16x16var           = vp8_get16x16var_c;*/
    106         cpi->rtcd.variance.get4x4sse_cs          = vp8_get4x4sse_cs_neon;
    107 
    108         cpi->rtcd.fdct.short4x4                  = vp8_short_fdct4x4_neon;
    109         cpi->rtcd.fdct.short8x4                  = vp8_short_fdct8x4_neon;
    110         cpi->rtcd.fdct.fast4x4                   = vp8_fast_fdct4x4_neon;
    111         cpi->rtcd.fdct.fast8x4                   = vp8_fast_fdct8x4_neon;
    112         cpi->rtcd.fdct.walsh_short4x4            = vp8_short_walsh4x4_neon;
    113 
    114         /*cpi->rtcd.encodemb.berr                  = vp8_block_error_c;
    115         cpi->rtcd.encodemb.mberr                 = vp8_mbblock_error_c;
    116         cpi->rtcd.encodemb.mbuverr               = vp8_mbuverror_c;*/
    117         cpi->rtcd.encodemb.subb                  = vp8_subtract_b_neon;
    118         cpi->rtcd.encodemb.submby                = vp8_subtract_mby_neon;
    119         cpi->rtcd.encodemb.submbuv               = vp8_subtract_mbuv_neon;
    120 
    121         /*cpi->rtcd.quantize.quantb                = vp8_regular_quantize_b;
    122         cpi->rtcd.quantize.fastquantb            = vp8_fast_quantize_b_c;*/
    123         /* The neon quantizer has not been updated to match the new exact
    124          * quantizer introduced in commit e04e2935
    125          */
    126         /*cpi->rtcd.quantize.fastquantb            = vp8_fast_quantize_b_neon;*/
    127     }
    128 #endif
    129 
    130 #if HAVE_ARMV7
    131 #if CONFIG_RUNTIME_CPU_DETECT
    132     if (has_neon)
    133 #endif
    134     {
    135         vp8_yv12_copy_partial_frame_ptr = vpxyv12_copy_partial_frame_neon;
    136     }
    137 #endif
    138 #endif
    139 }
    140