    Searched refs: MULT16_32_Q16 (Results 1 - 10 of 10)

  /external/libopus/celt/arm/
fixed_armv4.h 31 #undef MULT16_32_Q16
37 "#MULT16_32_Q16\n\t"
44 #define MULT16_32_Q16(a, b) (MULT16_32_Q16_armv4(a, b))
74 #define MAC16_32_Q16(c, a, b) ADD32(c, MULT16_32_Q16(a, b))
fixed_armv5e.h 36 #undef MULT16_32_Q16
41 "#MULT16_32_Q16\n\t"
48 #define MULT16_32_Q16(a, b) (MULT16_32_Q16_armv5e(a, b))
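Both ARM headers above follow the same override pattern: undefine the generic macro, define an inline-assembly helper, and point the macro at it. A minimal sketch of that pattern is below; the SMULWB instruction and the helper body are my assumption of the shape suggested by the fixed_armv5e.h hits, not the verbatim source.

    /* Sketch only: assumed shape of the ARMv5E override implied by the hits above. */
    typedef short opus_val16;   /* 16-bit fixed-point value */
    typedef int   opus_val32;   /* 32-bit fixed-point value */
    #define ADD32(a, b) ((opus_val32)(a) + (opus_val32)(b))  /* generic form */

    #undef MULT16_32_Q16
    static inline opus_val32 MULT16_32_Q16_armv5e(opus_val16 a, opus_val32 b)
    {
        int res;
        __asm__(
            "#MULT16_32_Q16\n\t"
            "smulwb %0, %1, %2\n\t"   /* top 32 bits of the 48-bit product b*a */
            : "=r"(res)
            : "r"(b), "r"(a)
        );
        return res;   /* behaves like (opus_val32)(((int64_t)a * b) >> 16) */
    }
    #define MULT16_32_Q16(a, b) (MULT16_32_Q16_armv5e(a, b))

    /* fixed_armv4.h also builds a multiply-accumulate from it (line 74 above): */
    #define MAC16_32_Q16(c, a, b) ADD32(c, MULT16_32_Q16(a, b))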
  /external/libopus/celt/mips/
fixed_generic_mipsr1.h 73 #undef MULT16_32_Q16
74 static inline int MULT16_32_Q16(int a, int b)
mdct_mipsr1.h 71 /* Allows us to scale with MULT16_32_Q16(), which is faster than
153 yc.r = PSHR32(MULT16_32_Q16(scale, yc.r), scale_shift);
154 yc.i = PSHR32(MULT16_32_Q16(scale, yc.i), scale_shift);
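On MIPS the same operation is exposed as a static inline function rather than a macro, and the MDCT code uses it to apply a Q16 scale factor followed by a rounding shift. A portable sketch of that scaling idiom follows; the helper names, the struct, and the rounding-shift body are illustrative assumptions that mirror the generic definitions, not the MIPS-specific code.

    /* Portable illustration of the scaling step seen in mdct_mipsr1.h lines
       153-154 above; names below are hypothetical. */
    #include <stdint.h>

    /* Generic behaviour of MULT16_32_Q16: (a*b) >> 16 via a 64-bit intermediate. */
    static inline int32_t mult16_32_q16(int16_t a, int32_t b)
    {
        return (int32_t)(((int64_t)a * b) >> 16);
    }

    /* Rounding right shift, mirroring the generic PSHR32. */
    static inline int32_t pshr32(int32_t x, int shift)
    {
        return (x + ((1 << shift) >> 1)) >> shift;
    }

    struct cpx32 { int32_t r, i; };   /* stand-in for the complex sample type */

    static void scale_sample(struct cpx32 *yc, int16_t scale, int scale_shift)
    {
        yc->r = pshr32(mult16_32_q16(scale, yc->r), scale_shift);
        yc->i = pshr32(mult16_32_q16(scale, yc->i), scale_shift);
    }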
  /external/libopus/celt/
fixed_generic.h 41 #define MULT16_32_Q16(a,b) ((opus_val32)SHR((opus_int64)((opus_val16)(a))*(b),16))
43 #define MULT16_32_Q16(a,b) ADD32(MULT16_16((a),SHR((b),16)), SHR(MULT16_16SU((a),((b)&0x0000ffff)),16))
mdct.c 130 /* Allows us to scale with MULT16_32_Q16(), which is faster than
208 yc.r = PSHR32(MULT16_32_Q16(scale, yc.r), scale_shift);
209 yc.i = PSHR32(MULT16_32_Q16(scale, yc.i), scale_shift);
arch.h 217 #define MULT16_32_Q16(a,b) ((a)*(b))
kiss_fft.c 574 /* Allows us to scale with MULT16_32_Q16(), which is faster than
585 fout[st->bitrev[i]].r = SHR32(MULT16_32_Q16(scale, x.r), scale_shift);
586 fout[st->bitrev[i]].i = SHR32(MULT16_32_Q16(scale, x.i), scale_shift);
vq.c 228 rcp = EXTRACT16(MULT16_32_Q16(K-1, celt_rcp(sum)));
fixed_debug.h 48 #define MULT16_32_Q16(a,b) ADD32(MULT16_16((a),SHR32((b),16)), SHR32(MULT16_16SU((a),((b)&0x0000ffff)),16))
499 #define MAC16_32_Q16(c,a,b) (celt_mips-=2,ADD32((c),MULT16_32_Q16((a),(b))))
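fixed_generic.h carries both generic forms of the macro: a single multiply through a 64-bit intermediate (line 41 above) and a split into the high and low 16-bit halves of b (line 43, also used by fixed_debug.h). A small self-contained check that the two formulations agree is sketched below; the function names are my own, only the arithmetic mirrors the definitions shown above.

    /* Sketch comparing the two generic MULT16_32_Q16 definitions; hypothetical
       helper names, arithmetic follows fixed_generic.h. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Form 1 (64-bit path): ((opus_int64)(opus_val16)a * b) >> 16 */
    static int32_t q16_wide(int16_t a, int32_t b)
    {
        return (int32_t)(((int64_t)a * b) >> 16);
    }

    /* Form 2 (32-bit path): MULT16_16(a, b>>16) + (MULT16_16SU(a, b & 0xffff) >> 16) */
    static int32_t q16_split(int16_t a, int32_t b)
    {
        int32_t hi = (int32_t)a * (b >> 16);                        /* MULT16_16   */
        int32_t lo = (int32_t)a * (int32_t)(uint16_t)(b & 0xffff);  /* MULT16_16SU */
        return hi + (lo >> 16);                                     /* ADD32 + SHR */
    }

    int main(void)
    {
        int16_t a = 0x4000;    /* example 16-bit operand */
        int32_t b = 0x34000;   /* example 32-bit operand */
        assert(q16_wide(a, b) == q16_split(a, b));
        printf("%d\n", q16_wide(a, b));   /* prints 53248, i.e. (a*b) >> 16 */
        return 0;
    }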
