/* Home | History | Annotate | Download | only in x86 */
      1 /*
      2  *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
      3  *
      4  *  Use of this source code is governed by a BSD-style license
      5  *  that can be found in the LICENSE file in the root of the source
      6  *  tree. An additional intellectual property rights grant can be found
      7  *  in the file PATENTS.  All contributing project authors may
      8  *  be found in the AUTHORS file in the root of the source tree.
      9  */
     10 
     11 
     12 #include "vpx_ports/config.h"
     13 #include "vpx_ports/x86.h"
     14 #include "g_common.h"
     15 #include "subpixel.h"
     16 #include "loopfilter.h"
     17 #include "recon.h"
     18 #include "idct.h"
     19 #include "pragmas.h"
     20 #include "onyxc_int.h"
     21 
     22 void vp8_arch_x86_common_init(VP8_COMMON *ctx)
     23 {
     24 #if CONFIG_RUNTIME_CPU_DETECT
     25     VP8_COMMON_RTCD *rtcd = &ctx->rtcd;
     26     int flags = x86_simd_caps();
     27     int mmx_enabled = flags & HAS_MMX;
     28     int xmm_enabled = flags & HAS_SSE;
     29     int wmt_enabled = flags & HAS_SSE2;
     30     int SSSE3Enabled = flags & HAS_SSSE3;
     31 
     32     /* Note:
     33      *
     34      * This platform can be built without runtime CPU detection as well. If
     35      * you modify any of the function mappings present in this file, be sure
     36      * to also update them in static mapings (<arch>/filename_<arch>.h)
     37      */
     38 
     39     /* Override default functions with fastest ones for this CPU. */
     40 #if HAVE_MMX
     41 
     42     if (mmx_enabled)
     43     {
     44         rtcd->idct.idct1        = vp8_short_idct4x4llm_1_mmx;
     45         rtcd->idct.idct16       = vp8_short_idct4x4llm_mmx;
     46         rtcd->idct.idct1_scalar_add = vp8_dc_only_idct_add_mmx;
     47         rtcd->idct.iwalsh16     = vp8_short_inv_walsh4x4_mmx;
     48         rtcd->idct.iwalsh1     = vp8_short_inv_walsh4x4_1_mmx;
     49 
     50 
     51 
     52         rtcd->recon.recon       = vp8_recon_b_mmx;
     53         rtcd->recon.copy8x8     = vp8_copy_mem8x8_mmx;
     54         rtcd->recon.copy8x4     = vp8_copy_mem8x4_mmx;
     55         rtcd->recon.copy16x16   = vp8_copy_mem16x16_mmx;
     56 
     57         rtcd->subpix.sixtap16x16   = vp8_sixtap_predict16x16_mmx;
     58         rtcd->subpix.sixtap8x8     = vp8_sixtap_predict8x8_mmx;
     59         rtcd->subpix.sixtap8x4     = vp8_sixtap_predict8x4_mmx;
     60         rtcd->subpix.sixtap4x4     = vp8_sixtap_predict4x4_mmx;
     61         rtcd->subpix.bilinear16x16 = vp8_bilinear_predict16x16_mmx;
     62         rtcd->subpix.bilinear8x8   = vp8_bilinear_predict8x8_mmx;
     63         rtcd->subpix.bilinear8x4   = vp8_bilinear_predict8x4_mmx;
     64         rtcd->subpix.bilinear4x4   = vp8_bilinear_predict4x4_mmx;
     65 
     66         rtcd->loopfilter.normal_mb_v = vp8_loop_filter_mbv_mmx;
     67         rtcd->loopfilter.normal_b_v  = vp8_loop_filter_bv_mmx;
     68         rtcd->loopfilter.normal_mb_h = vp8_loop_filter_mbh_mmx;
     69         rtcd->loopfilter.normal_b_h  = vp8_loop_filter_bh_mmx;
     70         rtcd->loopfilter.simple_mb_v = vp8_loop_filter_mbvs_mmx;
     71         rtcd->loopfilter.simple_b_v  = vp8_loop_filter_bvs_mmx;
     72         rtcd->loopfilter.simple_mb_h = vp8_loop_filter_mbhs_mmx;
     73         rtcd->loopfilter.simple_b_h  = vp8_loop_filter_bhs_mmx;
     74 
     75 #if CONFIG_POSTPROC
     76         rtcd->postproc.down        = vp8_mbpost_proc_down_mmx;
     77         /*rtcd->postproc.across      = vp8_mbpost_proc_across_ip_c;*/
     78         rtcd->postproc.downacross  = vp8_post_proc_down_and_across_mmx;
     79         rtcd->postproc.addnoise    = vp8_plane_add_noise_mmx;
     80 #endif
     81     }
     82 
     83 #endif
     84 #if HAVE_SSE2
     85 
     86     if (wmt_enabled)
     87     {
     88         rtcd->recon.recon2      = vp8_recon2b_sse2;
     89         rtcd->recon.recon4      = vp8_recon4b_sse2;
     90         rtcd->recon.copy16x16   = vp8_copy_mem16x16_sse2;
     91 
     92         rtcd->idct.iwalsh16     = vp8_short_inv_walsh4x4_sse2;
     93 
     94         rtcd->subpix.sixtap16x16   = vp8_sixtap_predict16x16_sse2;
     95         rtcd->subpix.sixtap8x8     = vp8_sixtap_predict8x8_sse2;
     96         rtcd->subpix.sixtap8x4     = vp8_sixtap_predict8x4_sse2;
     97         rtcd->subpix.bilinear16x16 = vp8_bilinear_predict16x16_sse2;
     98         rtcd->subpix.bilinear8x8   = vp8_bilinear_predict8x8_sse2;
     99 
    100         rtcd->loopfilter.normal_mb_v = vp8_loop_filter_mbv_sse2;
    101         rtcd->loopfilter.normal_b_v  = vp8_loop_filter_bv_sse2;
    102         rtcd->loopfilter.normal_mb_h = vp8_loop_filter_mbh_sse2;
    103         rtcd->loopfilter.normal_b_h  = vp8_loop_filter_bh_sse2;
    104         rtcd->loopfilter.simple_mb_v = vp8_loop_filter_mbvs_sse2;
    105         rtcd->loopfilter.simple_b_v  = vp8_loop_filter_bvs_sse2;
    106         rtcd->loopfilter.simple_mb_h = vp8_loop_filter_mbhs_sse2;
    107         rtcd->loopfilter.simple_b_h  = vp8_loop_filter_bhs_sse2;
    108 
    109 #if CONFIG_POSTPROC
    110         rtcd->postproc.down        = vp8_mbpost_proc_down_xmm;
    111         rtcd->postproc.across      = vp8_mbpost_proc_across_ip_xmm;
    112         rtcd->postproc.downacross  = vp8_post_proc_down_and_across_xmm;
    113         rtcd->postproc.addnoise    = vp8_plane_add_noise_wmt;
    114 #endif
    115     }
    116 
    117 #endif
    118 
    119 #if HAVE_SSSE3
    120 
    121     if (SSSE3Enabled)
    122     {
    123         rtcd->subpix.sixtap16x16   = vp8_sixtap_predict16x16_ssse3;
    124         rtcd->subpix.sixtap8x8     = vp8_sixtap_predict8x8_ssse3;
    125         rtcd->subpix.sixtap8x4     = vp8_sixtap_predict8x4_ssse3;
    126         rtcd->subpix.sixtap4x4     = vp8_sixtap_predict4x4_ssse3;
    127         rtcd->subpix.bilinear16x16 = vp8_bilinear_predict16x16_ssse3;
    128         rtcd->subpix.bilinear8x8   = vp8_bilinear_predict8x8_ssse3;
    129     }
    130 #endif
    131 
    132 #endif
    133 }
    134