    Searched refs:vtrnq_s16 (Results 1 - 16 of 16)

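For reference, vtrnq_s16 is the 128-bit NEON transpose intrinsic for 16-bit lanes: it interleaves the even lanes of its two operands into val[0] and the odd lanes into val[1], which is why it shows up in every transpose helper below. A minimal standalone demo of that lane behaviour (hypothetical, not taken from any of the files listed):

#include <arm_neon.h>
#include <stdio.h>

/* Hypothetical demo: print the two halves returned by vtrnq_s16 so the
 * even/odd lane interleaving is visible. */
int main(void) {
  const int16_t a_in[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  const int16_t b_in[8] = {10, 11, 12, 13, 14, 15, 16, 17};
  const int16x8_t a = vld1q_s16(a_in);
  const int16x8_t b = vld1q_s16(b_in);

  /* val[0] = {a0,b0,a2,b2,a4,b4,a6,b6}; val[1] = {a1,b1,a3,b3,a5,b5,a7,b7} */
  const int16x8x2_t t = vtrnq_s16(a, b);

  int16_t out[8];
  vst1q_s16(out, t.val[0]);
  for (int i = 0; i < 8; ++i) printf("%d ", out[i]);  /* 0 10 2 12 4 14 6 16 */
  printf("\n");
  vst1q_s16(out, t.val[1]);
  for (int i = 0; i < 8; ++i) printf("%d ", out[i]);  /* 1 11 3 13 5 15 7 17 */
  printf("\n");
  return 0;
}
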
  /external/libhevc/common/arm/
ihevc_cmn_utils_neon.h 154 const int16x8x2_t b0 = vtrnq_s16(*a0, *a1);
155 const int16x8x2_t b1 = vtrnq_s16(*a2, *a3);
205 b0 = vtrnq_s16(*a0, *a1);
206 b1 = vtrnq_s16(*a2, *a3);
207 b2 = vtrnq_s16(*a4, *a5);
208 b3 = vtrnq_s16(*a6, *a7);
  /external/libaom/libaom/av1/common/arm/
transpose_neon.h 344 const int16x8x2_t b0 = vtrnq_s16(*a0, *a1);
345 const int16x8x2_t b1 = vtrnq_s16(*a2, *a3);
346 const int16x8x2_t b2 = vtrnq_s16(*a4, *a5);
347 const int16x8x2_t b3 = vtrnq_s16(*a6, *a7);
418 const int16x8x2_t b0 = vtrnq_s16(*a0, *(a0 + 1));
419 const int16x8x2_t b1 = vtrnq_s16(*(a0 + 2), *(a0 + 3));
420 const int16x8x2_t b2 = vtrnq_s16(*(a0 + 4), *(a0 + 5));
421 const int16x8x2_t b3 = vtrnq_s16(*(a0 + 6), *(a0 + 7));
warp_plane_neon.c 322 b0 = vtrnq_s16(src[0], src[1]);
323 b1 = vtrnq_s16(src[2], src[3]);
324 b2 = vtrnq_s16(src[4], src[5]);
325 b3 = vtrnq_s16(src[6], src[7]);
    [all...]
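
The transpose_neon.h hits above are the classic three-stage 8x8 int16 transpose. A condensed sketch of that pattern follows; the helper name is made up and this is not the library's exact code:

#include <arm_neon.h>

/* Sketch of the three-stage 8x8 int16 transpose built on vtrnq_s16:
 * swap 16-bit neighbours, then 32-bit pairs, then 64-bit halves.
 * rows[] is transposed in place. */
static inline void transpose_s16_8x8_sketch(int16x8_t rows[8]) {
  /* Stage 1: transpose adjacent 16-bit lanes within each row pair. */
  const int16x8x2_t b0 = vtrnq_s16(rows[0], rows[1]);
  const int16x8x2_t b1 = vtrnq_s16(rows[2], rows[3]);
  const int16x8x2_t b2 = vtrnq_s16(rows[4], rows[5]);
  const int16x8x2_t b3 = vtrnq_s16(rows[6], rows[7]);

  /* Stage 2: transpose 32-bit pairs across row pairs. */
  const int32x4x2_t c0 = vtrnq_s32(vreinterpretq_s32_s16(b0.val[0]),
                                   vreinterpretq_s32_s16(b1.val[0]));
  const int32x4x2_t c1 = vtrnq_s32(vreinterpretq_s32_s16(b0.val[1]),
                                   vreinterpretq_s32_s16(b1.val[1]));
  const int32x4x2_t c2 = vtrnq_s32(vreinterpretq_s32_s16(b2.val[0]),
                                   vreinterpretq_s32_s16(b3.val[0]));
  const int32x4x2_t c3 = vtrnq_s32(vreinterpretq_s32_s16(b2.val[1]),
                                   vreinterpretq_s32_s16(b3.val[1]));

  /* Stage 3: swap 64-bit halves between the upper and lower 4x8 blocks. */
  rows[0] = vreinterpretq_s16_s32(vcombine_s32(vget_low_s32(c0.val[0]), vget_low_s32(c2.val[0])));
  rows[1] = vreinterpretq_s16_s32(vcombine_s32(vget_low_s32(c1.val[0]), vget_low_s32(c3.val[0])));
  rows[2] = vreinterpretq_s16_s32(vcombine_s32(vget_low_s32(c0.val[1]), vget_low_s32(c2.val[1])));
  rows[3] = vreinterpretq_s16_s32(vcombine_s32(vget_low_s32(c1.val[1]), vget_low_s32(c3.val[1])));
  rows[4] = vreinterpretq_s16_s32(vcombine_s32(vget_high_s32(c0.val[0]), vget_high_s32(c2.val[0])));
  rows[5] = vreinterpretq_s16_s32(vcombine_s32(vget_high_s32(c1.val[0]), vget_high_s32(c3.val[0])));
  rows[6] = vreinterpretq_s16_s32(vcombine_s32(vget_high_s32(c0.val[1]), vget_high_s32(c2.val[1])));
  rows[7] = vreinterpretq_s16_s32(vcombine_s32(vget_high_s32(c1.val[1]), vget_high_s32(c3.val[1])));
}
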
  /external/libaom/libaom/aom_dsp/arm/
fwd_txfm_neon.c 145 vtrnq_s16(vreinterpretq_s16_s32(r02_s32.val[0]),
148 vtrnq_s16(vreinterpretq_s16_s32(r02_s32.val[1]),
151 vtrnq_s16(vreinterpretq_s16_s32(r46_s32.val[0]),
154 vtrnq_s16(vreinterpretq_s16_s32(r46_s32.val[1]),
  /external/libvpx/libvpx/vp8/common/arm/neon/
idct_blk_neon.c 160 q2tmp2 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[0]),
162 q2tmp3 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[1]),
195 q2tmp2 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[0]),
197 q2tmp3 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[1]),
  /external/libvpx/libvpx/vp8/encoder/arm/neon/
shortfdct_neon.c 143 v2tmp0 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[0]), // q0
145 v2tmp1 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[1]), // q2
194 v2tmp0 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[0]), // q0
196 v2tmp1 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[1]), // q2
  /external/libvpx/libvpx/vpx_dsp/arm/
fwd_txfm_neon.c 150 vtrnq_s16(vreinterpretq_s16_s32(r02_s32.val[0]),
153 vtrnq_s16(vreinterpretq_s16_s32(r02_s32.val[1]),
156 vtrnq_s16(vreinterpretq_s16_s32(r46_s32.val[0]),
159 vtrnq_s16(vreinterpretq_s16_s32(r46_s32.val[1]),
fdct16x16_neon.c 196 const int16x8x2_t c0 = vtrnq_s16(a[0], a[1]);
197 const int16x8x2_t c1 = vtrnq_s16(a[2], a[3]);
198 const int16x8x2_t c2 = vtrnq_s16(a[4], a[5]);
199 const int16x8x2_t c3 = vtrnq_s16(a[6], a[7]);
transpose_neon.h 154 vtrnq_s16(vreinterpretq_s16_s32(c0), vreinterpretq_s16_s32(c1));
594 const int16x8x2_t b0 = vtrnq_s16(*a0, *a1);
595 const int16x8x2_t b1 = vtrnq_s16(*a2, *a3);
596 const int16x8x2_t b2 = vtrnq_s16(*a4, *a5);
597 const int16x8x2_t b3 = vtrnq_s16(*a6, *a7);
    [all...]
fdct32x32_neon.c     [all...]
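
The fwd_txfm/idct/fdct hits in the vp8 and vpx_dsp directories apply the stages in the opposite order: vtrnq_s32 on reinterpreted registers first, with vtrnq_s16 finishing the job. A minimal sketch of that ordering for a 4x4 int16 tile held two rows per register (again a made-up helper, not the exact library code):

#include <arm_neon.h>

/* Sketch only: transposes a 4x4 int16 tile stored as
 *   *r0 = {00 01 02 03 | 10 11 12 13}, *r1 = {20 21 22 23 | 30 31 32 33}.
 * On return, *r0 holds columns 0 and 2, *r1 holds columns 1 and 3. */
static inline void transpose_s16_4x4_two_rows_per_reg(int16x8_t *r0, int16x8_t *r1) {
  /* Swap 32-bit pairs: val[0] = {00 01 20 21 | 10 11 30 31},
   *                    val[1] = {02 03 22 23 | 12 13 32 33}. */
  const int32x4x2_t b =
      vtrnq_s32(vreinterpretq_s32_s16(*r0), vreinterpretq_s32_s16(*r1));

  /* Regroup 64-bit halves: c0 = {00 01 20 21 | 02 03 22 23},
   *                        c1 = {10 11 30 31 | 12 13 32 33}. */
  const int32x4_t c0 = vcombine_s32(vget_low_s32(b.val[0]), vget_low_s32(b.val[1]));
  const int32x4_t c1 = vcombine_s32(vget_high_s32(b.val[0]), vget_high_s32(b.val[1]));

  /* Final 16-bit swap: d.val[0] = {00 10 20 30 | 02 12 22 32},
   *                    d.val[1] = {01 11 21 31 | 03 13 23 33}. */
  const int16x8x2_t d =
      vtrnq_s16(vreinterpretq_s16_s32(c0), vreinterpretq_s16_s32(c1));

  *r0 = d.val[0];
  *r1 = d.val[1];
}
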
  /external/gemmlowp/internal/
output_neon.h 346 const int16x8x2_t t0 = vtrnq_s16(src.buf.reg[0], src.buf.reg[1]);
347 const int16x8x2_t t1 = vtrnq_s16(src.buf.reg[2], src.buf.reg[3]);
640 a[0] = vtrnq_s16(src.buf.reg[0], src.buf.reg[1]);
641 a[1] = vtrnq_s16(src.buf.reg[2], src.buf.reg[3]);
642 a[2] = vtrnq_s16(src.buf.reg[4], src.buf.reg[5]);
643 a[3] = vtrnq_s16(src.buf.reg[6], src.buf.reg[7]);
  /external/webp/src/dsp/
enc_neon.c 569 const int16x8x2_t q2_tmp0 = vtrnq_s16(q4_in.val[0], q4_in.val[1]);
570 const int16x8x2_t q2_tmp1 = vtrnq_s16(q4_in.val[2], q4_in.val[3]);
    [all...]
  /external/clang/test/CodeGen/
aarch64-neon-perm.c     [all...]
arm_neon_intrinsics.c     [all...]
  /external/tensorflow/tensorflow/lite/kernels/internal/optimized/
depthwiseconv_uint8_3x3_filter.h 119 r16x8 = vtrnq_s16(vreinterpretq_s16_s8(*a), vreinterpretq_s16_s8(*b));
    [all...]
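
In the TensorFlow Lite hit, vtrnq_s16 operates on reinterpreted 8-bit data as the middle stage of a byte-level transpose. A generic sketch of that idea (not the file's actual code): transposing two side-by-side 8x8 int8 blocks, one in the low half and one in the high half of each q register, using vtrnq_s8, then vtrnq_s16, then vtrnq_s32.

#include <arm_neon.h>

/* Sketch only: rows[i] holds row i of block A in lanes 0-7 and row i of
 * block B in lanes 8-15; both 8x8 blocks are transposed in place. */
static inline void transpose_two_s8_8x8_sketch(int8x16_t rows[8]) {
  /* Stage 1: swap adjacent 8-bit lanes within each row pair. */
  const int8x16x2_t b0 = vtrnq_s8(rows[0], rows[1]);
  const int8x16x2_t b1 = vtrnq_s8(rows[2], rows[3]);
  const int8x16x2_t b2 = vtrnq_s8(rows[4], rows[5]);
  const int8x16x2_t b3 = vtrnq_s8(rows[6], rows[7]);

  /* Stage 2: swap 16-bit pairs across row pairs. */
  const int16x8x2_t c0 = vtrnq_s16(vreinterpretq_s16_s8(b0.val[0]), vreinterpretq_s16_s8(b1.val[0]));
  const int16x8x2_t c1 = vtrnq_s16(vreinterpretq_s16_s8(b0.val[1]), vreinterpretq_s16_s8(b1.val[1]));
  const int16x8x2_t c2 = vtrnq_s16(vreinterpretq_s16_s8(b2.val[0]), vreinterpretq_s16_s8(b3.val[0]));
  const int16x8x2_t c3 = vtrnq_s16(vreinterpretq_s16_s8(b2.val[1]), vreinterpretq_s16_s8(b3.val[1]));

  /* Stage 3: swap 32-bit quads to finish both 8x8 transposes. */
  const int32x4x2_t d0 = vtrnq_s32(vreinterpretq_s32_s16(c0.val[0]), vreinterpretq_s32_s16(c2.val[0]));
  const int32x4x2_t d1 = vtrnq_s32(vreinterpretq_s32_s16(c1.val[0]), vreinterpretq_s32_s16(c3.val[0]));
  const int32x4x2_t d2 = vtrnq_s32(vreinterpretq_s32_s16(c0.val[1]), vreinterpretq_s32_s16(c2.val[1]));
  const int32x4x2_t d3 = vtrnq_s32(vreinterpretq_s32_s16(c1.val[1]), vreinterpretq_s32_s16(c3.val[1]));

  rows[0] = vreinterpretq_s8_s32(d0.val[0]);
  rows[1] = vreinterpretq_s8_s32(d1.val[0]);
  rows[2] = vreinterpretq_s8_s32(d2.val[0]);
  rows[3] = vreinterpretq_s8_s32(d3.val[0]);
  rows[4] = vreinterpretq_s8_s32(d0.val[1]);
  rows[5] = vreinterpretq_s8_s32(d1.val[1]);
  rows[6] = vreinterpretq_s8_s32(d2.val[1]);
  rows[7] = vreinterpretq_s8_s32(d3.val[1]);
}
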
  /external/neon_2_sse/
NEON_2_SSE.h 15730 _NEON2SSE_INLINE int16x8x2_t vtrnq_s16(int16x8_t a, int16x8_t b) // VTRN.16 q0,q0 function
    [all...]
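
NEON_2_SSE.h maps the intrinsic onto x86 vectors. One plausible SSE2 emulation that reproduces the even/odd-lane result is sketched below; this is illustrative only and not necessarily the code the header actually emits:

#include <emmintrin.h>  /* SSE2 */

typedef struct { __m128i val[2]; } int16x8x2_sse_t;  /* stand-in for int16x8x2_t */

/* Hypothetical vtrnq_s16 emulation:
 * val[0] = {a0,b0,a2,b2,a4,b4,a6,b6}, val[1] = {a1,b1,a3,b3,a5,b5,a7,b7}. */
static inline int16x8x2_sse_t vtrnq_s16_sse2(__m128i a, __m128i b) {
  /* lo = {a0,b0,a1,b1,a2,b2,a3,b3}, hi = {a4,b4,a5,b5,a6,b6,a7,b7} */
  const __m128i lo = _mm_unpacklo_epi16(a, b);
  const __m128i hi = _mm_unpackhi_epi16(a, b);
  int16x8x2_sse_t r;
  /* Pick the even 16-bit pairs (32-bit lanes 0 and 2 of lo/hi) for val[0],
   * the odd pairs (32-bit lanes 1 and 3) for val[1]. */
  r.val[0] = _mm_castps_si128(_mm_shuffle_ps(
      _mm_castsi128_ps(lo), _mm_castsi128_ps(hi), _MM_SHUFFLE(2, 0, 2, 0)));
  r.val[1] = _mm_castps_si128(_mm_shuffle_ps(
      _mm_castsi128_ps(lo), _mm_castsi128_ps(hi), _MM_SHUFFLE(3, 1, 3, 1)));
  return r;
}
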

Completed in 1381 milliseconds