/external/eigen/bench/ |
benchVecAdd.cpp |
    107 // internal::pstore(&a[i], internal::padd(a0, b0));
    111 // internal::pstore(&a[i+1*PacketSize], internal::padd(a1, b1));
    115 // internal::pstore(&a[i+2*PacketSize], internal::padd(a2, b2));
    119 // internal::pstore(&a[i+3*PacketSize], internal::padd(a3, b3));
    123 // internal::pstore(&a[i+4*PacketSize], internal::padd(a0, b0));
    124 // internal::pstore(&a[i+5*PacketSize], internal::padd(a1, b1));
    125 // internal::pstore(&a[i+6*PacketSize], internal::padd(a2, b2));
    126 // internal::pstore(&a[i+7*PacketSize], internal::padd(a3, b3));
    128 internal::pstore(&a[i+2*PacketSize], internal::padd(internal::ploadu(&a[i+2*PacketSize]), internal::ploadu(&b[i+2*PacketSize])));
    129 internal::pstore(&a[i+3*PacketSize], internal::padd(internal::ploadu(&a[i+3*PacketSize]), internal::ploadu(&b[i+3*PacketSize])))
    [all...]
bench_norm.cpp |
    159 // pabig = internal::padd(pabig, internal::pand(maskBig, ax));
    160 // pasml = internal::padd(pasml, internal::pand(maskSml, ax));
    161 // pamed = internal::padd(pamed, internal::pandnot(ax,maskMed));
    164 pabig = internal::padd(pabig, internal::pand(maskBig, internal::pmul(ax_s2m,ax_s2m)));
    165 pasml = internal::padd(pasml, internal::pand(maskSml, internal::pmul(ax_s1m,ax_s1m)));
    166 pamed = internal::padd(pamed, internal::pandnot(internal::pmul(ax,ax),internal::pand(maskSml,maskBig)));
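The benchVecAdd.cpp hits above use Eigen's internal packet primitives (ploadu, padd, pstore) to spell out an explicit SIMD vector add. A minimal sketch of that pattern, assuming Eigen's internal packet API; the function name, the unaligned stores and the scalar tail are ours, not the benchmark's:

    #include <Eigen/Core>

    // Add two float arrays packet by packet with Eigen's internal primitives.
    void packet_vec_add(float* a, const float* b, int n)
    {
      using namespace Eigen::internal;
      typedef packet_traits<float>::type Packet;
      const int PacketSize = packet_traits<float>::size;
      int i = 0;
      for (; i + PacketSize <= n; i += PacketSize)
        pstoreu(&a[i], padd(ploadu<Packet>(&a[i]), ploadu<Packet>(&b[i])));
      for (; i < n; ++i)   // scalar tail for the remainder
        a[i] += b[i];
    }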
|
/external/llvm/test/CodeGen/X86/ |
2011-10-30-padd.ll | 4 ;CHECK: padd 14 ;CHECK: padd
|
mmx-bitcast.ll |
    11 %u = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %t, x86_mmx %t)
    24 %u = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %t, x86_mmx %t)
    37 %u = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %t, x86_mmx %t)
    50 %u = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %t, x86_mmx %t)
    103 declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
    104 declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
    105 declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
    106 declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
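The llvm.x86.mmx.padd.{b,w,d,q} intrinsics exercised here (and in mmx-arith.ll and mmx-fold-load.ll below) are element-wise wrapping adds on a 64-bit MMX value at 8-, 16-, 32- and 64-bit lane widths. A hypothetical C-level equivalent using the corresponding compiler intrinsics, assuming an x86 target with MMX and SSE2 enabled (not part of the tests):

    #include <mmintrin.h>    // MMX: _mm_add_pi8 / _mm_add_pi16 / _mm_add_pi32
    #include <emmintrin.h>   // SSE2 header declares _mm_add_si64 for __m64

    __m64 add8 (__m64 a, __m64 b) { return _mm_add_pi8 (a, b); }   // paddb
    __m64 add16(__m64 a, __m64 b) { return _mm_add_pi16(a, b); }   // paddw
    __m64 add32(__m64 a, __m64 b) { return _mm_add_pi32(a, b); }   // paddd
    __m64 add64(__m64 a, __m64 b) { return _mm_add_si64(a, b); }   // paddq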
|
pr18014.ll | 3 ; Ensure PSRAD is generated as the condition is consumed by both PADD and
|
mmx-arith.ll |
    249 %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %tmp1, x86_mmx %tmp2)
    261 %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %tmp1, x86_mmx %tmp2)
    273 %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %tmp1, x86_mmx %tmp2)
    285 %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %tmp1, x86_mmx %tmp2)
    290 declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
    291 declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
    292 declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
    293 declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
|
mmx-fold-load.ll |
    148 %u = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %t, x86_mmx %v)
    153 declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
    165 %u = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %t, x86_mmx %v)
    170 declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
    181 %u = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %t, x86_mmx %v)
    186 declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
    197 %u = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %t, x86_mmx %v)
    202 declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
|
viabs.ll |
    10 ; SSE2-NEXT: padd
    35 ; SSE2-NEXT: padd
    60 ; SSE2-NEXT: padd
    85 ; SSE2-NEXT: padd
    110 ; SSE2-NEXT: padd
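viabs.ll tests vector integer abs(); on plain SSE2 (no SSSE3 pabs*) the usual lowering is the sign-shift/add/xor idiom, which is where these padd checks come from. A sketch of that sequence for 32-bit lanes, our reconstruction rather than the test's expected output:

    #include <emmintrin.h>

    // abs(x) per 32-bit lane without pabsd: (x + (x >> 31)) ^ (x >> 31).
    __m128i abs_epi32_sse2(__m128i x)
    {
      __m128i sign = _mm_srai_epi32(x, 31);         // psrad: 0 or -1 per lane
      return _mm_xor_si128(_mm_add_epi32(x, sign),  // padd
                           sign);                   // pxor
    }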
|
unaligned-spill-folding.ll | 33 ; We can't fold the spill into a padd unless the stack is aligned. Just spilling
|
widen_arith-2.ll | 2 ; CHECK: padd
|
/toolchain/binutils/binutils-2.25/gas/testsuite/gas/sh/ |
dsp.s | 25 padd x0,y0,a0 27 padd x0,y0,a0
|
dsp.d | 25 0+020 <[^>]*> f8 00 b1 07 [ ]padd x0,y0,a0 27 0+028 <[^>]*> f8 00 b1 07 [ ]padd x0,y0,a0
|
/external/eigen/Eigen/src/Core/arch/SSE/ |
MathFunctions.h |
    66 Packet4f e = padd(_mm_cvtepi32_ps(emm0), p4f_1);
    78 x = padd(x, tmp);
    96 y = padd(y, y1);
    99 x = padd(x, y);
    100 x = padd(x, y2);
    163 y = padd(y, p4f_1);
    324 x = padd(x, xmm1);
    325 x = padd(x, xmm2);
    326 x = padd(x, xmm3);
    338 y = padd(y, p4f_1)
    [all...]
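In MathFunctions.h the padd calls accumulate polynomial terms inside the vectorized log/exp/sin kernels. A generic Horner-style sketch built from the same pmul/padd primitives; the coefficients and the degree are illustrative only, not Eigen's actual minimax polynomials:

    #include <Eigen/Core>

    // Evaluate c2*x^2 + c1*x + c0 on a packet via Horner's rule.
    template<typename Packet>
    Packet horner2(const Packet& x)
    {
      using namespace Eigen::internal;
      typedef typename unpacket_traits<Packet>::type Scalar;
      const Packet c0 = pset1<Packet>(Scalar(1));
      const Packet c1 = pset1<Packet>(Scalar(0.5));
      const Packet c2 = pset1<Packet>(Scalar(0.25));
      Packet y = pmul(c2, x);   // c2*x
      y = padd(y, c1);          // c2*x + c1
      y = pmul(y, x);           // (c2*x + c1)*x
      return padd(y, c0);       // c2*x^2 + c1*x + c0
    }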
Complex.h |
    48 template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_add_ps(a.v,b.v)); } function in namespace:Eigen::internal
    160 { return padd(pmul(x,y),c); }
    178 { return padd(pmul(x,y),c); }
    196 { return padd(pmul(x,y),c); }
    214 { return padd(c, pmul(x,y)); }
    223 { return padd(c, pmul(x,y)); }
    274 template<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_add_pd(a.v,b.v)); } function in namespace:Eigen::internal
    356 { return padd(pmul(x,y),c); }
    374 { return padd(pmul(x,y),c); }
    392 { return padd(pmul(x,y),c);
    [all...]
/external/eigen/Eigen/src/Geometry/arch/ |
Geometry_SSE.h |
    84 t1 = padd(pmul(a_ww, b_xy), pmul(a_yy, b_zw));
    90 pstore(&res.x(), padd(t1, pxor(mask,preverse(t2))));
    99 t2 = padd(pmul(a_zz, b_zw), pmul(a_xx, b_xy));
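Geometry_SSE.h vectorizes the quaternion (Hamilton) product: the pmul/padd pairs build the four sums of products and the pxor mask applies the sign flips. For reference, a plain scalar version of the product those packets compute; the struct is illustrative, not Eigen's Quaternion type:

    struct Quat { float w, x, y, z; };   // illustrative type

    // Scalar Hamilton product r = a * b.
    Quat quat_mul(const Quat& a, const Quat& b)
    {
      Quat r;
      r.w = a.w*b.w - a.x*b.x - a.y*b.y - a.z*b.z;
      r.x = a.w*b.x + a.x*b.w + a.y*b.z - a.z*b.y;
      r.y = a.w*b.y - a.x*b.z + a.y*b.w + a.z*b.x;
      r.z = a.w*b.z + a.x*b.y - a.y*b.x + a.z*b.w;
      return r;
    }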
|
/external/eigen/Eigen/src/Core/arch/AltiVec/ |
Complex.h |
    68 template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vec_add(a.v,b.v)); } function in namespace:Eigen::internal
    129 b = padd(a.v, b);
    140 b2 = padd(b1, b2);
    170 { return padd(pmul(x,y),c); }
    181 { return padd(pmul(x,y),c); }
    192 { return padd(pmul(x,y),c); }
|
/external/eigen/Eigen/src/Core/arch/NEON/ |
Complex.h |
    59 template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(padd<Packet4f>(a.v,b.v)); } function in namespace:Eigen::internal
    206 { return padd(pmul(x,y),c); }
    217 { return padd(pmul(x,y),c); }
    228 { return padd(pmul(x,y),c); }
|
/external/eigen/unsupported/Eigen/src/MoreVectorization/ |
MathFunctions.h | 64 z1=padd(z1,z1);
|
/external/eigen/Eigen/src/Jacobi/ |
Jacobi.h |
    353 pstore(px, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
    368 pstoreu(px, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
    369 pstoreu(px+PacketSize, padd(pmul(pc,xi1),pcj.pmul(ps,yi1)));
    379 pstoreu(x+peelingEnd, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
    407 pstore(px, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
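These Jacobi.h hits come from the vectorized apply_rotation_in_the_plane: each padd(pmul(pc,xi), pcj.pmul(ps,yi)) is the packet form of c*x + s*y, with pcj handling an optional conjugation of s in the complex case. The scalar real-valued operation, as a sketch:

    // Apply a plane (Givens) rotation to one pair of entries, real case.
    inline void apply_plane_rotation(float& x, float& y, float c, float s)
    {
      const float xi = x, yi = y;
      x =  c * xi + s * yi;   // the padd(pmul(pc,xi), ...) line
      y = -s * xi + c * yi;   // the matching psub-based counterpart
    }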
|
/external/libavc/decoder/ |
ih264d_deblocking.c | [all...] |
/toolchain/binutils/binutils-2.25/binutils/ |
objcopy.c | 1789 struct section_add *padd; local 2132 struct section_add *padd; local [all...] |
/toolchain/binutils/binutils-2.25/gold/ |
target-reloc.h |
    775 unsigned char* padd = view + offset; local
    793 Relocate_functions<size, big_endian>::rel8(padd, object,
    798 Relocate_functions<size, big_endian>::rel16(padd, object,
    803 Relocate_functions<size, big_endian>::rel32(padd, object,
    808 Relocate_functions<size, big_endian>::rel64(padd, object,
    813 Relocate_functions<size, big_endian>::rel32_unaligned(padd,
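In target-reloc.h, padd is simply a pointer into the mapped output view (view + offset) at which an addend of the width selected by the relocation type is applied through Relocate_functions<size, big_endian>::relN. A rough, endian-naive illustration of what a single 32-bit application amounts to; gold's real code goes through its Swap helpers and object bookkeeping, so this is an assumption-laden sketch, not gold's implementation:

    #include <cstdint>
    #include <cstring>

    // Add 'value' into the 32-bit word already stored at 'padd' (host byte
    // order only; gold handles target endianness via its Swap templates).
    static void rel32_in_place(unsigned char* padd, uint32_t value)
    {
      uint32_t word;
      std::memcpy(&word, padd, sizeof word);
      word += value;
      std::memcpy(padd, &word, sizeof word);
    }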
|
/external/eigen/Eigen/src/Core/products/ |
GeneralBlockPanelKernel.h |
    121 t = b; t = cj.pmul(a,t); c = padd(c,t);
    132 // #define MADD(CJ,A,B,C,T) T = B; T = CJ.pmul(A,T); C = padd(C,T);
    208 tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp);
    284 tmp = b; tmp = pmul(a.v,tmp); c.v = padd(c.v,tmp);
    381 c.first = padd(pmul(a,b.first), c.first);
    382 c.second = padd(pmul(a,b.second),c.second);
    399 tmp = padd(ResPacket(c.first),tmp);
    404 tmp = padd(ResPacket(c.first),tmp);
    409 tmp = padd(pconj(ResPacket(c.first)),tmp);
    489 tmp = b; tmp.v = pmul(a,tmp.v); c = padd(c,tmp)
    [all...]
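The GEBP kernel hits are all the same multiply-accumulate idiom, spelled out as copy b, multiply by a, then padd into the accumulator (the commented-out MADD macro at hit 132 is the same thing). A generic restatement of that fallback, as a sketch rather than the kernel itself:

    #include <Eigen/Core>

    // c += a*b on packets, written as the kernel's pmul/padd fallback.
    template<typename Packet>
    inline void madd_fallback(const Packet& a, const Packet& b, Packet& c)
    {
      using namespace Eigen::internal;
      Packet tmp = b;
      tmp = pmul(a, tmp);
      c = padd(c, tmp);
    }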
/external/eigen/Eigen/src/Core/ |
Functors.h |
    29 { return internal::padd(a,b); }
    589 // in order to avoid the padd() in operator() ?
    598 m_base(padd(pset1<Packet>(low), pmul(pset1<Packet>(step),plset<Scalar>(-packet_traits<Scalar>::size)))) {}
    603 m_base = padd(m_base, pset1<Packet>(m_step));
    608 EIGEN_STRONG_INLINE const Packet packetOp(Index) const { return m_base = padd(m_base,m_packetStep); }
    633 { return internal::padd(m_lowPacket, pmul(m_stepPacket, padd(pset1<Packet>(Scalar(i)),m_interPacket))); }
    715 { return internal::padd(a, pset1<Packet>(m_other)); }
    [all...]
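The Functors.h hits show the usual functor shape: a scalar operator() paired with a packetOp that forwards to internal::padd. A minimal sketch of that shape, with our own names rather than Eigen's scalar_sum_op:

    #include <Eigen/Core>

    // Scalar path adds with operator+, vector path with internal::padd.
    struct sum_op_sketch {
      float operator()(float a, float b) const { return a + b; }
      template<typename Packet>
      Packet packetOp(const Packet& a, const Packet& b) const
      { return Eigen::internal::padd(a, b); }
    };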
/external/eigen/Eigen/src/Core/util/ |
BlasUtil.h | 97 { return padd(c, pmul(x,y)); } 106 { return padd(c, pmul(x,y)); }
|