    Searched full:vmla (Results 1 - 25 of 56)


  /external/llvm/test/CodeGen/ARM/
a15-mla.ll 8 ; CHECK: vmla
14 ; This test checks that VMLA FP patterns can be matched in instruction selection when targeting
18 ; CHECK: vmla.f32
24 ; This test checks that FP VMLA instructions are not expanded into separate multiply/addition
28 ; CHECK: vmla.f32
31 ; CHECK: vmla.f32
fmacs.ll 10 ; VFP2: vmla.f32
13 ; NEON: vmla.f32
26 ; VFP2: vmla.f64
29 ; NEON: vmla.f64
42 ; VFP2: vmla.f32
45 ; NEON: vmla.f32
55 ; It's possible to make use of fp vmla / vmls on Cortex-A9.
65 ; Two vmla with no RAW hazard
67 ; A9: vmla.f32
68 ; A9: vmla.f3
    [all...]
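
The a15-mla.ll and fmacs.ll hits above check that a separate floating-point multiply and add are selected as a single VMLA, and that two accumulations without a read-after-write dependency may stay as back-to-back VMLAs on Cortex-A9. A minimal C sketch of the source pattern being tested (function names are invented for illustration, not taken from the tests):

/* acc + a * b is exactly the chained multiply-add VMLA performs, so a
 * VFP2/NEON-targeting compiler can select one vmla.f32 here instead of
 * separate vmul.f32 and vadd.f32 instructions. */
float mul_then_add(float acc, float a, float b) {
    return acc + a * b;
}

/* Two accumulators with no RAW hazard between them: back-to-back
 * vmla.f32 instructions are acceptable on Cortex-A9 in this case. */
float two_mlas(float acc0, float acc1, float a, float b, float c, float d) {
    acc0 += a * b;
    acc1 += c * d;
    return acc0 + acc1;
}
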
vmla.ll 5 ;CHECK: vmla.i8
16 ;CHECK: vmla.i16
27 ;CHECK: vmla.i32
38 ;CHECK: vmla.f32
49 ;CHECK: vmla.i8
60 ;CHECK: vmla.i16
71 ;CHECK: vmla.i32
82 ;CHECK: vmla.f32
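
vmla.ll exercises the vector forms of the instruction for each element type. The same operations are reachable from C through the NEON intrinsics in arm_neon.h; the wrappers below are a sketch with names of my choosing, each computing acc + x * y per lane, which compilers normally lower to the vmla encodings checked above.

#include <arm_neon.h>

int8x8_t    mla_i8 (int8x8_t acc, int8x8_t x, int8x8_t y)          { return vmla_s8(acc, x, y);   }  /* vmla.i8  on d registers */
int16x8_t   mla_i16(int16x8_t acc, int16x8_t x, int16x8_t y)       { return vmlaq_s16(acc, x, y); }  /* vmla.i16 on q registers */
int32x4_t   mla_i32(int32x4_t acc, int32x4_t x, int32x4_t y)       { return vmlaq_s32(acc, x, y); }  /* vmla.i32 on q registers */
float32x4_t mla_f32(float32x4_t acc, float32x4_t x, float32x4_t y) { return vmlaq_f32(acc, x, y); }  /* vmla.f32 on q registers */
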
  /external/llvm/test/MC/ARM/
neon-mul-accum-encoding.s 3 vmla.i8 d16, d18, d17
4 vmla.i16 d16, d18, d17
5 vmla.i32 d16, d18, d17
6 vmla.f32 d16, d18, d17
7 vmla.i8 q9, q8, q10
8 vmla.i16 q9, q8, q10
9 vmla.i32 q9, q8, q10
10 vmla.f32 q9, q8, q10
11 vmla.i32 q12, q8, d3[0]
13 @ CHECK: vmla.i8 d16, d18, d17 @ encoding: [0xa1,0x09,0x42,0xf2
    [all...]
neont2-mul-accum-encoding.s 5 vmla.i8 d16, d18, d17
6 vmla.i16 d16, d18, d17
7 vmla.i32 d16, d18, d17
8 vmla.f32 d16, d18, d17
9 vmla.i8 q9, q8, q10
10 vmla.i16 q9, q8, q10
11 vmla.i32 q9, q8, q10
12 vmla.f32 q9, q8, q10
13 vmla.i32 q12, q8, d3[0]
15 @ CHECK: vmla.i8 d16, d18, d17 @ encoding: [0x42,0xef,0xa1,0x09
    [all...]
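
Both encoding tests also cover the by-scalar form, vmla.i32 q12, q8, d3[0], where every lane of the second operand is multiplied by a single lane of a D register before accumulation. A hedged intrinsics equivalent (identifiers are mine, not from the tests):

#include <arm_neon.h>

int32x4_t mla_by_scalar(int32x4_t acc, int32x4_t x, int32x2_t v) {
    /* acc[i] += x[i] * v[0]; maps onto vmla.i32 qN, qM, dK[0] */
    return vmlaq_lane_s32(acc, x, v, 0);
}
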
  /frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/
armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe_s.S 29 VMLA.I16 d10,d2,d31
41 VMLA.I16 d12,d2,d31
53 VMLA.I16 d14,d2,d31
65 VMLA.I16 d16,d2,d31
77 VMLA.I16 d18,d2,d31
89 VMLA.I16 d20,d2,d31
101 VMLA.I16 d22,d2,d31
113 VMLA.I16 d24,d2,d31
124 VMLA.I16 d26,d2,d31
132 VMLA.I32 q5,q1,q1
    [all...]
armVCM4P10_InterpolateLuma_HalfHor4x4_unsafe_s.S 27 VMLA.I16 d22,d8,d31
39 VMLA.I16 d24,d8,d31
51 VMLA.I16 d26,d8,d31
62 VMLA.I16 d28,d8,d31
armVCM4P10_InterpolateLuma_HalfVer4x4_unsafe_s.S 49 VMLA.I16 d6,d18,d31
  /frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/
armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe_s.s 111 VMLA dRes0, dSrcC, dCoeff20 ;// Acc += 20*(c+d)
128 VMLA dRes1, dSrcC, dCoeff20 ;// Acc += 20*(c+d)
145 VMLA dRes2, dSrcC, dCoeff20 ;// Acc += 20*(c+d)
162 VMLA dRes3, dSrcC, dCoeff20 ;// Acc += 20*(c+d)
179 VMLA dRes4, dSrcC, dCoeff20 ;// Acc += 20*(c+d)
196 VMLA dRes5, dSrcC, dCoeff20 ;// Acc += 20*(c+d)
213 VMLA dRes6, dSrcC, dCoeff20 ;// Acc += 20*(c+d)
230 VMLA dRes7, dSrcC, dCoeff20 ;// Acc += 20*(c+d)
246 VMLA dRes8, dSrcC, dCoeff20 ;// Acc += 20*(c+d)
260 VMLA qAcc01, qSumCD, qCoeff20 ;// Acc += 20*(c+d
    [all...]
armVCM4P10_InterpolateLuma_HalfVer4x4_unsafe_s.s 91 ; VMLA dRes0, dSumCD0, dCoeff20 ;// Acc += 20*(c+d)
99 ; VMLA dRes1, dSumCD0, dCoeff20 ;// Acc += 20*(c+d)
107 ; VMLA dRes2, dSumCD0, dCoeff20 ;// Acc += 20*(c+d)
118 VMLA dRes3, dSumCD0, dCoeff20 ;// Acc += 20*(c+d)
armVCM4P10_InterpolateLuma_HalfHor4x4_unsafe_s.s 103 VMLA dRes0, dTemp0, dCoeff20 ;// Acc += 20*(c+d)
122 VMLA dRes2, dTemp0, dCoeff20 ;// Acc += 20*(c+d)
140 VMLA dRes4, dTemp0, dCoeff20 ;// Acc += 20*(c+d)
156 VMLA dRes6, dTemp0, dCoeff20 ;// Acc += 20*(c+d)
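
The "Acc += 20*(c+d)" comments in these interpolation kernels are the middle taps of the H.264 6-tap luma half-pel filter (1, -5, 20, 20, -5, 1), which the NEON code builds from multiply-accumulate steps. A scalar sketch, with names of my own, of what one output sample works out to:

#include <stdint.h>

static uint8_t half_pel(uint8_t a, uint8_t b, uint8_t c,
                        uint8_t d, uint8_t e, uint8_t f) {
    int acc = (a + f) - 5 * (b + e) + 20 * (c + d);  /* the VMLA/VMLS steps */
    acc = (acc + 16) >> 5;                           /* round and normalize */
    if (acc < 0)   acc = 0;                          /* clip to 8-bit range */
    if (acc > 255) acc = 255;
    return (uint8_t)acc;
}
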
  /external/chromium_org/third_party/skia/src/opts/
SkBitmapProcState_filter_neon.h 43 "vmla.i16 d4, d1, d5 \n\t" // d4 += a11 * x
44 "vmla.i16 d4, d6, d3 \n\t" // d4 += a00 * (16-x)
45 "vmla.i16 d4, d0, d3 \n\t" // d4 += a10 * (16-x)
76 "vmla.i16 d4, d1, d5 \n\t" // d4 += a11 * x
77 "vmla.i16 d4, d6, d3 \n\t" // d4 += a00 * (16-x)
78 "vmla.i16 d4, d0, d3 \n\t" // d4 += a10 * (16-x)
  /external/skia/src/opts/
SkBitmapProcState_filter_neon.h 43 "vmla.i16 d4, d1, d5 \n\t" // d4 += a11 * x
44 "vmla.i16 d4, d6, d3 \n\t" // d4 += a00 * (16-x)
45 "vmla.i16 d4, d0, d3 \n\t" // d4 += a10 * (16-x)
76 "vmla.i16 d4, d1, d5 \n\t" // d4 += a11 * x
77 "vmla.i16 d4, d6, d3 \n\t" // d4 += a00 * (16-x)
78 "vmla.i16 d4, d0, d3 \n\t" // d4 += a10 * (16-x)
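
In both copies of Skia, the vmla.i16 lines accumulate a bilinear blend of four neighbouring pixels with 4-bit sub-pixel weights (x and y in 0..16). A scalar model of the full two-axis blend, assuming that weight range (names are illustrative, one 8-bit channel shown):

#include <stdint.h>

static uint8_t bilerp(uint8_t a00, uint8_t a01, uint8_t a10, uint8_t a11,
                      unsigned x, unsigned y) {
    unsigned acc = a00 * (16 - x) * (16 - y)
                 + a01 * x        * (16 - y)
                 + a10 * (16 - x) * y
                 + a11 * x        * y;
    return (uint8_t)(acc >> 8);   /* the four weights sum to 256 */
}
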
  /external/chromium_org/third_party/WebKit/Source/core/platform/graphics/transforms/
TransformationMatrix.cpp     [all...]
  /hardware/samsung_slsi/exynos5/libswconverter/
csc_ARGB8888_to_YUV420SP_NEON.s 83 vmla.u16 q8,q6,q13 @112 * B[k]
89 vmla.u16 q7,q4,q13 @112 * R[k]
107 vmla.u16 q7,q5,q15 @q0 += 129 *G[k]
108 vmla.u16 q7,q6,q8 @q0 += 25 *B[k]
125 vmla.u16 q0,q5,q15 @q0 += 129 *G[k]
126 vmla.u16 q0,q6,q8 @q0 += 25 *B[k]
158 vmla.u16 q7,q5,q15 @q0 += 129 *G[k]
159 vmla.u16 q7,q6,q8 @q0 += 25 *B[k]
175 vmla.u16 q0,q5,q15 @q0 += 129 *G[k]
176 vmla.u16 q0,q6,q8 @q0 += 25 *B[k
    [all...]
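
The 129*G, 25*B, 112*R and 112*B terms visible in these hits match the common 8-bit fixed-point BT.601 coefficients for RGB-to-YUV conversion, which the kernel accumulates with vmla.u16. A scalar model inferred from those coefficients rather than transcribed from the assembly (names are mine):

#include <stdint.h>

static void rgb_to_yuv(uint8_t r, uint8_t g, uint8_t b,
                       uint8_t *y, uint8_t *u, uint8_t *v) {
    *y = (uint8_t)((( 66 * r + 129 * g +  25 * b + 128) >> 8) +  16);
    *u = (uint8_t)(((-38 * r -  74 * g + 112 * b + 128) >> 8) + 128);
    *v = (uint8_t)(((112 * r -  94 * g -  18 * b + 128) >> 8) + 128);
}
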
  /external/speex/libspeex/
resample_neon.h 159 " vmla.f32 q0, q4, q8\n"
160 " vmla.f32 q1, q5, q9\n"
161 " vmla.f32 q2, q6, q10\n"
162 " vmla.f32 q3, q7, q11\n"
174 " vmla.f32 q0, q6, q10\n"
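
The resampler's inner loop is a dot product of sinc filter taps with input samples, unrolled over four q-register accumulators so consecutive vmla.f32 instructions do not depend on each other. The scalar equivalent is just a running multiply-accumulate (names are illustrative, not taken from resample_neon.h):

static float fir_dot(const float *sinc, const float *x, int n) {
    float acc = 0.0f;
    for (int i = 0; i < n; i++)
        acc += sinc[i] * x[i];   /* one lane of a vmla.f32 per step */
    return acc;
}
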
  /external/llvm/lib/Target/ARM/
ARMHazardRecognizer.cpp 41 // Look for special VMLA / VMLS hazards. A VMUL / VADD / VSUB following
42 // a VMLA / VMLS will cause a 4-cycle stall.
MLxExpansionPass.cpp 221 // r0 = vmla
222 // r3 = vmla r0, r1, r2
225 // r0 = vmla
242 // If a VMLA.F is followed by a VADD.F or VMUL.F with no RAW hazard, the
246 // then the scheduler can't *fix* this, we'd better break up the VMLA.
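
The pass described by these comments splits a chained multiply-add back into a multiply and an add when keeping the VMLA would stall a dependent FP instruction on Cortex-A9. A conceptual C sketch of the two shapes (the pass works on machine instructions, not source; names are mine):

float keep_mla(float acc, float a, float b, float c) {
    acc = acc + a * b;   /* one vmla.f32 */
    return acc + c;      /* dependent vadd.f32 right after the vmla: potential stall */
}

float expanded(float acc, float a, float b, float c) {
    float t = a * b;     /* vmul.f32 */
    acc = acc + t;       /* vadd.f32: the VMLA-specific forwarding restriction no longer applies */
    return acc + c;
}
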
  /external/valgrind/main/none/tests/arm/
vfp.stdout.exp 117 ---- VMLA (fp) ----
118 vmla.f64 d0, d11, d12 :: Qd 0x7ff80000 0x00000000 Qm 0xfff00000 00000000 Qn 0x7ff80000 00000000
119 vmla.f64 d7, d1, d6 :: Qd 0x7ff80000 0x00000000 Qm 0x7ff00000 00000000 Qn 0x7ff80000 00000000
120 vmla.f64 d0, d5, d2 :: Qd 0x7ff80000 0x00000000 Qm 0x7ff80000 00000000 Qn 0xbff00000 00000000
121 vmla.f64 d10, d13, d15 :: Qd 0x7ff80000 0x00000000 Qm 0x7ff80000 00000000 Qn 0x00000000 00000000
122 vmla.f64 d10, d13, d15 :: Qd 0x7ff80000 0x00000000 Qm 0x7ff80000 00000000 Qn 0x7ff80000 00000000
123 vmla.f64 d20, d25, d22 :: Qd 0xc0906794 0x842f8549 Qm 0x40370a3d 70a3d70a Qn 0xc046c8cb 295e9e1b
124 vmla.f64 d23, d24, d25 :: Qd 0xc1bbe864 0x1f579999 Qm 0xc1153b41 e6666666 Qn 0x40950800 00000000
125 vmla.f64 d20, d31, d12 :: Qd 0xc1e0a1cf 0xd2abe8f6 Qm 0x40e7ce60 00000000 Qn 0xc0e65b4f 3b645a1d
126 vmla.f64 d19, d25, d27 :: Qd 0x41d860c7 0xf71a1999 Qm 0x40f767bc 28f5c28f Qn 0x40d0aa40 0000000
    [all...]
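
These rows exercise the scalar double-precision form. VMLA.F64 Dd, Dn, Dm computes Dd = Dd + Dn * Dm as a chained multiply-add, rounding the product before the addition, which plain C arithmetic models directly (function name is mine):

double vmla_f64_model(double dd, double dn, double dm) {
    return dd + dn * dm;   /* product rounded, then sum rounded: not a fused MLA */
}
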
vfp.c     [all...]
neon128.c     [all...]
neon64.c     [all...]
  /frameworks/rs/cpu_ref/
rsCpuIntrinsics_neon.S 145 vmla.f32 q10, q3, d0[0]
146 vmla.f32 q11, q4, d0[0]
194 vmla.f32 q0, q1, d6[0]
195 vmla.f32 q0, q2, d6[1]
231 vmla.f32 q0, q1, d4[0]
759 vmla.i16 q12, q8, q6
760 vmla.i16 q13, q9, q6
761 vmla.i16 q14, q10, q6
762 vmla.i16 q15, q11, q6
834 vmla.i16 q8, q12, q
    [all...]
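
The by-lane vmla.f32 qN, qM, dK[x] lines here follow the classic pattern for accumulating a matrix-times-vector product one column at a time. A hedged intrinsics sketch of that general pattern (identifiers are mine, not a reconstruction of the RenderScript kernel):

#include <arm_neon.h>

float32x4_t mat4_mul_vec4(const float32x4_t col[4], float32x4_t v) {
    float32x4_t acc = vmulq_lane_f32(col[0], vget_low_f32(v), 0);   /*  x * col0   */
    acc = vmlaq_lane_f32(acc, col[1], vget_low_f32(v), 1);          /* += y * col1 */
    acc = vmlaq_lane_f32(acc, col[2], vget_high_f32(v), 0);         /* += z * col2 */
    acc = vmlaq_lane_f32(acc, col[3], vget_high_f32(v), 1);         /* += w * col3 */
    return acc;
}
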
  /external/chromium_org/third_party/libwebp/dsp/
enc_neon.c 556 "vmla.u32 q1, q12, q15 \n"
557 "vmla.u32 q9, q14, q15 \n"
564 "vmla.u32 q1, q0, q13 \n"
565 "vmla.u32 q9, q8, q13 \n"
568 "vmla.u32 q1, q2, q15 \n"
569 "vmla.u32 q9, q3, q15 \n"
  /external/webp/src/dsp/
enc_neon.c 556 "vmla.u32 q1, q12, q15 \n"
557 "vmla.u32 q9, q14, q15 \n"
564 "vmla.u32 q1, q0, q13 \n"
565 "vmla.u32 q9, q8, q13 \n"
568 "vmla.u32 q1, q2, q15 \n"
569 "vmla.u32 q9, q3, q15 \n"
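
Here the unsigned 32-bit form accumulates per-lane products inside the encoder's NEON routines. As a generic illustration only, not a reconstruction of the WebP computation, the matching intrinsic is:

#include <arm_neon.h>

uint32x4_t weighted_accumulate(uint32x4_t acc, uint32x4_t a, uint32x4_t w) {
    return vmlaq_u32(acc, a, w);   /* acc[i] += a[i] * w[i], i.e. vmla.u32 */
}
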
