/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */


#include "vp8/encoder/variance.h"
#include "vp8/encoder/onyx_int.h"

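/*
 * Run-time dispatch table: each pointer below is bound to either a generic C
 * routine or a PowerPC routine by vp8_cmachine_specific_config().
 */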
SADFunction *vp8_sad16x16;
SADFunction *vp8_sad16x8;
SADFunction *vp8_sad8x16;
SADFunction *vp8_sad8x8;
SADFunction *vp8_sad4x4;

variance_function *vp8_variance4x4;
variance_function *vp8_variance8x8;
variance_function *vp8_variance8x16;
variance_function *vp8_variance16x8;
variance_function *vp8_variance16x16;

variance_function *vp8_mse16x16;

sub_pixel_variance_function *vp8_sub_pixel_variance4x4;
sub_pixel_variance_function *vp8_sub_pixel_variance8x8;
sub_pixel_variance_function *vp8_sub_pixel_variance8x16;
sub_pixel_variance_function *vp8_sub_pixel_variance16x8;
sub_pixel_variance_function *vp8_sub_pixel_variance16x16;

int (*vp8_block_error)(short *coeff, short *dqcoeff);
int (*vp8_mbblock_error)(MACROBLOCK *mb, int dc);

int (*vp8_mbuverror)(MACROBLOCK *mb);
unsigned int (*vp8_get_mb_ss)(short *);
void (*vp8_short_fdct4x4)(short *input, short *output, int pitch);
void (*vp8_short_fdct8x4)(short *input, short *output, int pitch);
void (*vp8_fast_fdct4x4)(short *input, short *output, int pitch);
void (*vp8_fast_fdct8x4)(short *input, short *output, int pitch);
void (*short_walsh4x4)(short *input, short *output, int pitch);

void (*vp8_subtract_b)(BLOCK *be, BLOCKD *bd, int pitch);
void (*vp8_subtract_mby)(short *diff, unsigned char *src, unsigned char *pred, int stride);
void (*vp8_subtract_mbuv)(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride);
void (*vp8_fast_quantize_b)(BLOCK *b, BLOCKD *d);

unsigned int (*vp8_get16x16pred_error)(unsigned char *src_ptr, int src_stride, unsigned char *ref_ptr, int ref_stride);
unsigned int (*vp8_get8x8var)(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride, unsigned int *SSE, int *Sum);
unsigned int (*vp8_get16x16var)(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride, unsigned int *SSE, int *Sum);
unsigned int (*vp8_get4x4sse_cs)(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride);

// Generic C reference implementations
extern int block_error_c(short *coeff, short *dqcoeff);
extern int vp8_mbblock_error_c(MACROBLOCK *mb, int dc);

extern int vp8_mbuverror_c(MACROBLOCK *mb);
extern void short_fdct4x4_c(short *input, short *output, int pitch);
extern void short_fdct8x4_c(short *input, short *output, int pitch);
extern void vp8_short_walsh4x4_c(short *input, short *output, int pitch);

extern void vp8_subtract_b_c(BLOCK *be, BLOCKD *bd, int pitch);
extern void subtract_mby_c(short *diff, unsigned char *src, unsigned char *pred, int stride);
extern void subtract_mbuv_c(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride);
extern void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d);

extern SADFunction sad16x16_c;
extern SADFunction sad16x8_c;
extern SADFunction sad8x16_c;
extern SADFunction sad8x8_c;
extern SADFunction sad4x4_c;

extern variance_function variance16x16_c;
extern variance_function variance8x16_c;
extern variance_function variance16x8_c;
extern variance_function variance8x8_c;
extern variance_function variance4x4_c;
extern variance_function mse16x16_c;

extern sub_pixel_variance_function sub_pixel_variance4x4_c;
extern sub_pixel_variance_function sub_pixel_variance8x8_c;
extern sub_pixel_variance_function sub_pixel_variance8x16_c;
extern sub_pixel_variance_function sub_pixel_variance16x8_c;
extern sub_pixel_variance_function sub_pixel_variance16x16_c;

extern unsigned int vp8_get_mb_ss_c(short *);
extern unsigned int vp8_get16x16pred_error_c(unsigned char *src_ptr, int src_stride, unsigned char *ref_ptr, int ref_stride);
extern unsigned int vp8_get8x8var_c(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride, unsigned int *SSE, int *Sum);
extern unsigned int vp8_get16x16var_c(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride, unsigned int *SSE, int *Sum);
extern unsigned int vp8_get4x4sse_cs_c(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride);

// PowerPC-specific implementations
extern int vp8_block_error_ppc(short *coeff, short *dqcoeff);

extern void vp8_short_fdct4x4_ppc(short *input, short *output, int pitch);
extern void vp8_short_fdct8x4_ppc(short *input, short *output, int pitch);

extern void vp8_subtract_mby_ppc(short *diff, unsigned char *src, unsigned char *pred, int stride);
extern void vp8_subtract_mbuv_ppc(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride);

extern SADFunction vp8_sad16x16_ppc;
extern SADFunction vp8_sad16x8_ppc;
extern SADFunction vp8_sad8x16_ppc;
extern SADFunction vp8_sad8x8_ppc;
extern SADFunction vp8_sad4x4_ppc;

extern variance_function vp8_variance16x16_ppc;
extern variance_function vp8_variance8x16_ppc;
extern variance_function vp8_variance16x8_ppc;
extern variance_function vp8_variance8x8_ppc;
extern variance_function vp8_variance4x4_ppc;
extern variance_function vp8_mse16x16_ppc;

extern sub_pixel_variance_function vp8_sub_pixel_variance4x4_ppc;
extern sub_pixel_variance_function vp8_sub_pixel_variance8x8_ppc;
extern sub_pixel_variance_function vp8_sub_pixel_variance8x16_ppc;
extern sub_pixel_variance_function vp8_sub_pixel_variance16x8_ppc;
extern sub_pixel_variance_function vp8_sub_pixel_variance16x16_ppc;

extern unsigned int vp8_get8x8var_ppc(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride, unsigned int *SSE, int *Sum);
extern unsigned int vp8_get16x16var_ppc(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride, unsigned int *SSE, int *Sum);

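/*
 * Bind the run-time function pointers for this target.  SAD, variance, MSE,
 * the forward DCTs, per-block error and the macroblock subtract routines use
 * the PowerPC versions; the remaining routines fall back to generic C.
 */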
void vp8_cmachine_specific_config(void)
{
    // Plain C fallbacks
    vp8_mbuverror               = vp8_mbuverror_c;
    vp8_fast_quantize_b         = vp8_fast_quantize_b_c;

    // Forward transforms: the PPC short DCTs also serve as the "fast" variants
    vp8_short_fdct4x4           = vp8_short_fdct4x4_ppc;
    vp8_short_fdct8x4           = vp8_short_fdct8x4_ppc;
    vp8_fast_fdct4x4            = vp8_short_fdct4x4_ppc;
    vp8_fast_fdct8x4            = vp8_short_fdct8x4_ppc;
    short_walsh4x4              = vp8_short_walsh4x4_c;

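    // Variance and MSE (PPC)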
    vp8_variance4x4             = vp8_variance4x4_ppc;
    vp8_variance8x8             = vp8_variance8x8_ppc;
    vp8_variance8x16            = vp8_variance8x16_ppc;
    vp8_variance16x8            = vp8_variance16x8_ppc;
    vp8_variance16x16           = vp8_variance16x16_ppc;
    vp8_mse16x16                = vp8_mse16x16_ppc;

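    // Sub-pixel variance (PPC)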
    vp8_sub_pixel_variance4x4   = vp8_sub_pixel_variance4x4_ppc;
    vp8_sub_pixel_variance8x8   = vp8_sub_pixel_variance8x8_ppc;
    vp8_sub_pixel_variance8x16  = vp8_sub_pixel_variance8x16_ppc;
    vp8_sub_pixel_variance16x8  = vp8_sub_pixel_variance16x8_ppc;
    vp8_sub_pixel_variance16x16 = vp8_sub_pixel_variance16x16_ppc;

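    // Sum-of-squares and prediction-error helpers (mixed C and PPC)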
    vp8_get_mb_ss               = vp8_get_mb_ss_c;
    vp8_get16x16pred_error      = vp8_get16x16pred_error_c;
    vp8_get8x8var               = vp8_get8x8var_ppc;
    vp8_get16x16var             = vp8_get16x16var_ppc;
    vp8_get4x4sse_cs            = vp8_get4x4sse_cs_c;

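    // Sum of absolute differences (PPC)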
    vp8_sad16x16                = vp8_sad16x16_ppc;
    vp8_sad16x8                 = vp8_sad16x8_ppc;
    vp8_sad8x16                 = vp8_sad8x16_ppc;
    vp8_sad8x8                  = vp8_sad8x8_ppc;
    vp8_sad4x4                  = vp8_sad4x4_ppc;

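    // Coefficient block error: PPC per block, C for the whole macroblock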
    vp8_block_error             = vp8_block_error_ppc;
    vp8_mbblock_error           = vp8_mbblock_error_c;

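    // Residual subtraction: C per block, PPC for macroblock Y and UV planes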
    vp8_subtract_b              = vp8_subtract_b_c;
    vp8_subtract_mby            = vp8_subtract_mby_ppc;
    vp8_subtract_mbuv           = vp8_subtract_mbuv_ppc;
}
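
/*
 * Usage sketch: the encoder is expected to call vp8_cmachine_specific_config()
 * once during initialisation; hot paths then dispatch through the pointers
 * above, e.g. (*vp8_sad16x16)(...) with the argument list given by the
 * SADFunction typedef in vp8/encoder/variance.h.
 */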