/************************************************************************
 * Copyright (C) 2002-2009, Xiph.org Foundation
 * Copyright (C) 2010, Robin Watts for Pinknoise Productions Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the names of the Xiph.org Foundation nor Pinknoise
 * Productions Ltd nor the names of its contributors may be used to
 * endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 ************************************************************************

 function: miscellaneous math and prototypes

 ************************************************************************/

#ifndef _V_RANDOM_H_
#define _V_RANDOM_H_
#include "ivorbiscodec.h"
#include "os_types.h"

/*#define _VDBG_GRAPHFILE "_0.m"*/


#ifdef _VDBG_GRAPHFILE
extern void *_VDBG_malloc(void *ptr,long bytes,char *file,long line);
extern void _VDBG_free(void *ptr,char *file,long line);

#undef _ogg_malloc
#undef _ogg_calloc
#undef _ogg_realloc
#undef _ogg_free

#define _ogg_malloc(x) _VDBG_malloc(NULL,(x),__FILE__,__LINE__)
#define _ogg_calloc(x,y) _VDBG_malloc(NULL,(x)*(y),__FILE__,__LINE__)
#define _ogg_realloc(x,y) _VDBG_malloc((x),(y),__FILE__,__LINE__)
#define _ogg_free(x) _VDBG_free((x),__FILE__,__LINE__)
#endif
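
/*
 * Illustrative note (not part of the original source): when _VDBG_GRAPHFILE
 * is defined, the macros above route every allocation through the debugging
 * hooks, so a call such as
 *
 *   p = _ogg_malloc(1024);
 *
 * expands to
 *
 *   p = _VDBG_malloc(NULL, 1024, __FILE__, __LINE__);
 *
 * presumably so the debug build can record the call site of each block
 * (_ogg_realloc passes the old pointer as the first argument).
 */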
     60 
     61 #include "asm_arm.h"
     62 
     63 #ifndef _V_WIDE_MATH
     64 #define _V_WIDE_MATH
     65 
     66 #ifndef  _LOW_ACCURACY_
     67 /* 64 bit multiply */
     68 
     69 #include <endian.h>
     70 #include <sys/types.h>
     71 
     72 #if BYTE_ORDER==LITTLE_ENDIAN
     73 union magic {
     74   struct {
     75     ogg_int32_t lo;
     76     ogg_int32_t hi;
     77   } halves;
     78   ogg_int64_t whole;
     79 };
     80 #endif
     81 
     82 #if BYTE_ORDER==BIG_ENDIAN
     83 union magic {
     84   struct {
     85     ogg_int32_t hi;
     86     ogg_int32_t lo;
     87   } halves;
     88   ogg_int64_t whole;
     89 };
     90 #endif
     91 
     92 static inline ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
     93   union magic magic;
     94   magic.whole = (ogg_int64_t)x * y;
     95   return magic.halves.hi;
     96 }
     97 
     98 static inline ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
     99   return MULT32(x,y)<<1;
    100 }
    101 
    102 static inline ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
    103   union magic magic;
    104   magic.whole  = (ogg_int64_t)x * y;
    105   return ((ogg_uint32_t)(magic.halves.lo)>>15) | ((magic.halves.hi)<<17);
    106 }
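
/*
 * Illustrative note (not from the original source): viewing both operands
 * as signed Q31 fractions, MULT32 returns the top half of the 64-bit
 * product, i.e. (x*y)>>32, and MULT31 rescales that back to Q31.  For
 * example, with x = y = 0x40000000 (0.5 in Q31):
 *
 *   MULT32(x, y) == 0x10000000;   -- 0.25 in Q30
 *   MULT31(x, y) == 0x20000000;   -- 0.25 in Q31
 *
 * MULT31_SHIFT15 returns the 64-bit product shifted right by only 15 bits;
 * the caller is responsible for ensuring the result fits in 32 bits.
 */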

#else
/* 32 bit multiply, more portable but less accurate */

/*
 * Note: Precision is biased towards the first argument, so ordering is
 * important.  Shift values were chosen for the best sound quality after
 * many listening tests.
 */

/*
 * For MULT32 and MULT31: The second argument is always a lookup table
 * value already preshifted from 31 to 8 bits.  We therefore take the
 * opportunity to save on text space and use unsigned char for those
 * tables in this case.
 */

static inline ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
  return (x >> 9) * y;  /* y preshifted >>23 */
}

static inline ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
  return (x >> 8) * y;  /* y preshifted >>23 */
}

static inline ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
  return (x >> 6) * y;  /* y preshifted >>9 */
}
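
/*
 * Illustrative note (not from the original source): the two shifts together
 * approximate the 64-bit product, e.g. for MULT31
 *
 *   (x >> 8) * (y_full >> 23)  ~=  ((ogg_int64_t)x * y_full) >> 31
 *
 * where y_full is the unshifted Q31 table value.  The low 8 bits of x and
 * the low 23 bits of y_full are discarded, which is where the accuracy loss
 * relative to the 64-bit path comes from.
 */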

#endif

/*
 * This should be used as a memory barrier, forcing all cached values in
 * registers to be written back to memory.  Might or might not be beneficial
 * depending on the architecture and compiler.
 */
#define MB()
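
/*
 * Illustrative sketch (an assumption, not part of this file): a port that
 * wants a real compiler barrier here could define, for GCC/Clang,
 *
 *   #define MB() __asm__ __volatile__("" ::: "memory")
 *
 * which forces register-cached values to be flushed to, and reloaded from,
 * memory around the barrier without emitting any instructions.
 */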

/*
 * The XPROD functions are meant to optimize the cross products found all
 * over the place in mdct.c by forcing memory operation ordering to avoid
 * unnecessary register reloads as soon as memory is written to.  However,
 * this is only beneficial on CPUs with a sane number of general purpose
 * registers, which excludes the Intel x86.  On Intel it is better to let
 * the compiler reload registers directly from the original memory by
 * using macros.
 */

#ifdef __i386__

#define XPROD32(_a, _b, _t, _v, _x, _y)		\
  { *(_x)=MULT32(_a,_t)+MULT32(_b,_v);		\
    *(_y)=MULT32(_b,_t)-MULT32(_a,_v); }
#define XPROD31(_a, _b, _t, _v, _x, _y)		\
  { *(_x)=MULT31(_a,_t)+MULT31(_b,_v);		\
    *(_y)=MULT31(_b,_t)-MULT31(_a,_v); }
#define XNPROD31(_a, _b, _t, _v, _x, _y)	\
  { *(_x)=MULT31(_a,_t)-MULT31(_b,_v);		\
    *(_y)=MULT31(_b,_t)+MULT31(_a,_v); }

#else

static inline void XPROD32(ogg_int32_t  a, ogg_int32_t  b,
			   ogg_int32_t  t, ogg_int32_t  v,
			   ogg_int32_t *x, ogg_int32_t *y)
{
  *x = MULT32(a, t) + MULT32(b, v);
  *y = MULT32(b, t) - MULT32(a, v);
}

static inline void XPROD31(ogg_int32_t  a, ogg_int32_t  b,
			   ogg_int32_t  t, ogg_int32_t  v,
			   ogg_int32_t *x, ogg_int32_t *y)
{
  *x = MULT31(a, t) + MULT31(b, v);
  *y = MULT31(b, t) - MULT31(a, v);
}

static inline void XNPROD31(ogg_int32_t  a, ogg_int32_t  b,
			    ogg_int32_t  t, ogg_int32_t  v,
			    ogg_int32_t *x, ogg_int32_t *y)
{
  *x = MULT31(a, t) - MULT31(b, v);
  *y = MULT31(b, t) + MULT31(a, v);
}

#endif
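
/*
 * Illustrative usage (an assumption about a typical caller, not code taken
 * from mdct.c): XPROD31 applies a fixed-point rotate-and-scale butterfly of
 * the kind the comment above refers to, e.g.
 *
 *   ogg_int32_t re, im;
 *   XPROD31(a, b, t, v, &re, &im);   -- re = a*t + b*v, im = b*t - a*v
 *
 * with t and v typically a sine/cosine pair from a lookup table.  XNPROD31
 * is the same butterfly with the sign of v negated.
 */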

#endif

#ifndef _V_CLIP_MATH
#define _V_CLIP_MATH

static inline ogg_int32_t CLIP_TO_15(ogg_int32_t x) {
  int ret=x;
  ret-= ((x<=32767)-1)&(x-32767);
  ret-= ((x>=-32768)-1)&(x+32768);
  return(ret);
}
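
/*
 * Illustrative note (not from the original source): CLIP_TO_15 clamps x to
 * the signed 16-bit range without branching.  (x<=32767) is 1 when x is in
 * range and 0 when it overflows, so ((x<=32767)-1) is either 0 or an
 * all-ones mask; ANDing that with (x-32767) yields exactly the overshoot to
 * subtract.  For example, CLIP_TO_15(40000) subtracts 40000-32767 and
 * returns 32767, while CLIP_TO_15(-40000) returns -32768.
 */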

#endif

#endif