/external/eigen/Eigen/src/SparseLU/ |
SparseLU_kernel_bmod.h | 33 template <typename BlockScalarVector, typename ScalarVector, typename IndexVector, typename Index> 34 static EIGEN_DONT_INLINE void run(const int segsize, BlockScalarVector& dense, ScalarVector& tempv, ScalarVector& lusup, Index& luptr, const Index lda, 35 const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros); 39 template <typename BlockScalarVector, typename ScalarVector, typename IndexVector, typename Index> 40 EIGEN_DONT_INLINE void LU_kernel_bmod<SegSizeAtCompileTime>::run(const int segsize, BlockScalarVector& dense, ScalarVector& tempv, ScalarVector& lusup, Index& luptr, const Index lda, 41 const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros [all...] |
SparseLU_pivotL.h | 59 template <typename Scalar, typename Index> 60 Index SparseLUImpl<Scalar,Index>::pivotL(const Index jcol, const RealScalar& diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, Index& pivrow, GlobalLU_t& glu) 63 Index fsupc = (glu.xsup)((glu.supno)(jcol)); // First column in the supernode containing the column jcol 64 Index nsupc = jcol - fsupc; // Number of columns in the supernode portion, excluding jcol; nsupc >=0 65 Index lptr = glu.xlsub(fsupc); // pointer to the starting location of the row subscripts for this supernode portion 66 Index nsupr = glu.xlsub(fsupc+1) - lptr; // Number of rows in the supernode 67 Index lda = glu.xlusup(fsupc+1) - glu.xlusup(fsupc); // leading dimensio [all...] |
SparseLU_heap_relax_snode.h | 45 template <typename Scalar, typename Index> 46 void SparseLUImpl<Scalar,Index>::heap_relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end) 53 Index i; 68 Index j, parent; 77 Index snode_start; // beginning of a snode 78 Index k; 79 Index nsuper_et_post = 0; // Number of relaxed snodes in postordered etree 80 Index nsuper_et = 0; // Number of relaxed snodes in the original etree [all...] |
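The three SparseLU_*.h hits above are internals of Eigen's supernodal sparse LU; user code reaches them through the public Eigen::SparseLU class. A minimal solve sketch against that public interface (the 3x3 matrix and right-hand side below are made up for illustration):

```cpp
#include <Eigen/Sparse>
#include <iostream>

int main() {
  typedef Eigen::SparseMatrix<double> SpMat;

  // Small made-up invertible system; any square, invertible sparse matrix works.
  SpMat A(3, 3);
  A.insert(0, 0) = 4.0;  A.insert(0, 1) = 1.0;
  A.insert(1, 1) = 3.0;  A.insert(1, 2) = 1.0;
  A.insert(2, 2) = 2.0;
  A.makeCompressed();                 // column-major compressed storage is the expected input form

  Eigen::VectorXd b(3);
  b << 1.0, 2.0, 3.0;

  Eigen::SparseLU<SpMat> solver;      // default COLAMD fill-reducing ordering
  solver.analyzePattern(A);           // symbolic step (relaxed supernodes, etree, ...)
  solver.factorize(A);                // numeric step (pivotL, kernel_bmod, ...)
  if (solver.info() != Eigen::Success) return 1;

  Eigen::VectorXd x = solver.solve(b);
  std::cout << x.transpose() << "\n";
  return 0;
}
```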
/external/eigen/Eigen/src/Core/ |
Block.h | 26 * type of DenseBase::block(Index,Index,Index,Index) and DenseBase::block<int,int>(Index,Index) and 45 * \sa DenseBase::block(Index,Index,Index,Index), DenseBase::block(Index,Index), class VectorBloc [all...] |
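Block.h's comment names the two DenseBase::block overloads it is the return type of. A short sketch of both forms, with arbitrary sizes and offsets:

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd m = Eigen::MatrixXd::Random(6, 6);

  // Runtime-sized block: a 2x3 block whose top-left corner is at (1, 2).
  Eigen::MatrixXd b1 = m.block(1, 2, 2, 3);

  // Fixed-size block: same region, but the 2x3 size is a compile-time constant,
  // which lets Eigen avoid dynamic allocation for the temporary.
  Eigen::Matrix<double, 2, 3> b2 = m.block<2, 3>(1, 2);

  std::cout << (b1 - b2).norm() << "\n";   // 0: both refer to the same coefficients
  return 0;
}
```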
ReturnByValue.h | 62 inline Index rows() const { return static_cast<const Derived*>(this)->rows(); } 63 inline Index cols() const { return static_cast<const Derived*>(this)->cols(); } 71 const Unusable& coeff(Index) const { return *reinterpret_cast<const Unusable*>(this); } 72 const Unusable& coeff(Index,Index) const { return *reinterpret_cast<const Unusable*>(this); } 73 Unusable& coeffRef(Index) { return *reinterpret_cast<Unusable*>(this); } 74 Unusable& coeffRef(Index,Index) { return *reinterpret_cast<Unusable*>(this); } 75 template<int LoadMode> Unusable& packet(Index) const; 76 template<int LoadMode> Unusable& packet(Index, Index) const [all...] |
MapBase.h | 40 typedef typename internal::traits<Derived>::Index Index; 79 inline Index rows() const { return m_rows.value(); } 80 inline Index cols() const { return m_cols.value(); } 90 inline const Scalar& coeff(Index rowId, Index colId) const 95 inline const Scalar& coeff(Index index) const 98 return m_data[index * innerStride()]; 101 inline const Scalar& coeffRef(Index rowId, Index colId) cons [all...] |
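MapBase.h is the common base for expressions with direct, strided access to an external buffer; the coeff(index) hit above is plain pointer arithmetic (index * innerStride()). The user-facing class built on it is Eigen::Map; a small sketch:

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  // Six doubles owned by plain C storage, viewed as a 2x3 column-major matrix
  // without copying: MapBase::coeff(row, col) indexes straight into this array.
  double data[6] = {1, 2, 3, 4, 5, 6};
  Eigen::Map<Eigen::Matrix<double, 2, 3>> view(data);

  view(0, 1) = 30.0;              // writes through to data[2] (column-major layout)
  std::cout << view << "\n";
  std::cout << data[2] << "\n";   // 30
  return 0;
}
```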
BandMatrix.h | 35 typedef typename DenseMatrixType::Index Index; 54 inline Index supers() const { return derived().supers(); } 57 inline Index subs() const { return derived().subs(); } 68 inline Block<CoefficientsType,Dynamic,1> col(Index i) 71 Index start = 0; 72 Index len = coeffs().rows(); 76 len = (std::min)(rows(),std::max<Index>(0,coeffs().rows() - (supers()-i))); 79 len = std::max<Index>(0,coeffs().rows() - (i + 1 - rows() + subs())); 91 template<int Index> struct DiagonalIntReturnType [all...] |
Diagonal.h | 22 * \param DiagIndex the index of the sub/super diagonal. The default is 0 and it means the main diagonal. 24 * You can also use Dynamic so the index can be set at runtime. 29 * of a square matrix. It is the return type of MatrixBase::diagonal() and MatrixBase::diagonal(Index) and most of the 32 * \sa MatrixBase::diagonal(), MatrixBase::diagonal(Index) 73 inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex) : m_matrix(matrix), m_index(a_index) {} 77 inline Index rows() const 78 { return m_index.value()<0 ? (std::min<Index>)(m_matrix.cols(),m_matrix.rows()+m_index.value()) : (std::min<Index>)(m_matrix.rows(),m_matrix.cols()-m_index.value()); } 80 inline Index cols() const { return 1; } 82 inline Index innerStride() cons 139 int index() const [all...]
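Diagonal.h's comments name MatrixBase::diagonal() and MatrixBase::diagonal(Index), and the rows() hit above is the formula that makes an off-diagonal shorter than the main one. A small sketch of those accessors:

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix4d m = Eigen::Matrix4d::Random();

  Eigen::VectorXd d0  = m.diagonal();     // main diagonal,  length min(4, 4)     = 4
  Eigen::VectorXd d1  = m.diagonal(1);    // super-diagonal, length min(4, 4 - 1) = 3
  Eigen::VectorXd dm1 = m.diagonal(-1);   // sub-diagonal,   length min(4 - 1, 4) = 3

  m.diagonal().setZero();                 // diagonal() is writable, like any block expression
  std::cout << d0.size() << " " << d1.size() << " " << dm1.size() << "\n";  // 4 3 3
  return 0;
}
```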
/external/eigen/unsupported/Eigen/src/SparseExtra/ |
BlockOfDynamicSparseMatrix.h | 38 inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer) 41 inline Index row() const { return IsRowMajor ? m_outer : this->index(); } 42 inline Index col() const { return IsRowMajor ? this->index() : m_outer; } 44 Index m_outer; 47 inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize) 53 inline SparseInnerVectorSet(const MatrixType& matrix, Index outer) 72 for (Index j=0; j<m_outerSize.value(); ++j [all...] |
/external/clang/bindings/python/tests/cindex/ |
test_file.py | 1 from clang.cindex import Index, File 4 index = Index.create() 5 tu = index.parse('t.c', unsaved_files = [('t.c', "")])
|
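test_file.py exercises the Python cindex bindings, which wrap libclang's C interface. For comparison, a sketch of the same parse of an in-memory 't.c' done directly through the C API (compiled as C++; error handling kept minimal, and linking against libclang is assumed):

```cpp
#include <clang-c/Index.h>
#include <cstdio>

int main() {
  CXIndex index = clang_createIndex(/*excludeDeclarationsFromPCH=*/0,
                                    /*displayDiagnostics=*/0);

  // Equivalent of unsaved_files=[('t.c', "")]: parse a file that exists only in memory.
  CXUnsavedFile unsaved = {"t.c", "", 0};
  CXTranslationUnit tu = clang_parseTranslationUnit(
      index, "t.c", /*command_line_args=*/nullptr, 0,
      &unsaved, 1, CXTranslationUnit_None);
  if (!tu) return 1;

  CXFile file = clang_getFile(tu, "t.c");        // what cindex.File wraps
  CXString name = clang_getFileName(file);
  std::printf("%s\n", clang_getCString(name));   // prints: t.c

  clang_disposeString(name);
  clang_disposeTranslationUnit(tu);
  clang_disposeIndex(index);
  return 0;
}
```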
/external/eigen/Eigen/src/misc/ |
Kernel.h | 42 typedef typename Base::Index Index; 50 inline Index rows() const { return m_dec.cols(); } 51 inline Index cols() const { return m_cols; } 52 inline Index rank() const { return m_rank; } 62 Index m_rank, m_cols; 71 typedef typename MatrixType::Index Index; \
|
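misc/Kernel.h is the return-value plumbing behind the kernel() (null-space) accessor of the rank-revealing decompositions; note in the hits above that rows() is the decomposition's cols() and cols() is the kernel dimension. A sketch using FullPivLU::kernel(), with a made-up rank-deficient matrix:

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  // Rank-deficient 3x3 matrix: the third row is the sum of the first two.
  Eigen::Matrix3d A;
  A << 1, 2, 3,
       4, 5, 6,
       5, 7, 9;

  Eigen::FullPivLU<Eigen::Matrix3d> lu(A);
  Eigen::MatrixXd K = lu.kernel();   // columns span the null space; here 3x1

  std::cout << "rank = " << lu.rank() << "\n";   // 2
  std::cout << (A * K).norm() << "\n";           // ~0
  return 0;
}
```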
/external/eigen/test/ |
miscmatrices.cpp | 17 typedef typename MatrixType::Index Index; 20 Index rows = m.rows(); 21 Index cols = m.cols(); 23 Index r = internal::random<Index>(0, rows-1), r2 = internal::random<Index>(0, rows-1), c = internal::random<Index>(0, cols-1);
|
sizeoverflow.cpp | 21 typedef DenseIndex Index; 24 void triggerMatrixBadAlloc(Index rows, Index cols) 32 void triggerVectorBadAlloc(Index size) 46 size_t times_itself_gives_0 = size_t(1) << (8 * sizeof(Index) / 2); 49 size_t times_4_gives_0 = size_t(1) << (8 * sizeof(Index) - 2); 52 size_t times_8_gives_0 = size_t(1) << (8 * sizeof(Index) - 3);
|
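sizeoverflow.cpp picks dimensions whose byte-count product wraps to zero in unsigned arithmetic, and expects Eigen's allocation path to detect this and throw std::bad_alloc. The wrap-around itself, reproduced standalone:

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  // On a 64-bit size type, 2^32 * 2^32 == 2^64 == 0 modulo 2^64, and 2^62 * 4
  // wraps the same way: a naive rows*cols*sizeof(T) can silently compute
  // 0 bytes, which is why allocators must check for overflow before multiplying.
  const std::size_t bits = 8 * sizeof(std::size_t);                       // typically 64
  const std::size_t times_itself_gives_0 = std::size_t(1) << (bits / 2);  // 2^32
  const std::size_t times_4_gives_0      = std::size_t(1) << (bits - 2);  // 2^62

  std::printf("%zu\n", times_itself_gives_0 * times_itself_gives_0);      // 0
  std::printf("%zu\n", times_4_gives_0 * 4);                              // 0
  return 0;
}
```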
jacobi.cpp | 17 typedef typename MatrixType::Index Index; 18 Index rows = m.rows(); 19 Index cols = m.cols(); 35 Index p = internal::random<Index>(0, rows-1); 36 Index q; 38 q = internal::random<Index>(0, rows-1); 48 Index p = internal::random<Index>(0, cols-1) [all...] |
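jacobi.cpp applies JacobiRotation objects to random row/column pairs (p, q). A sketch of the core pattern on the 2-vector case, following the makeGivens convention that applying the rotation's adjoint on the left zeroes the second entry:

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Vector2f v = Eigen::Vector2f::Random();

  // Build the Givens rotation G with G^* * (v.x, v.y)^T = (r, 0)^T ...
  Eigen::JacobiRotation<float> G;
  G.makeGivens(v.x(), v.y());

  // ... and apply it to rows 0 and 1 of the (column) vector.
  v.applyOnTheLeft(0, 1, G.adjoint());
  std::cout << v.transpose() << "\n";   // second entry is now (numerically) zero

  // The same rotation object can act on any row pair (p, q) of a matrix,
  // which is what jacobi.cpp does with random p and q.
  Eigen::MatrixXf m = Eigen::MatrixXf::Random(4, 4);
  m.applyOnTheLeft(1, 3, G);
  return 0;
}
```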
/external/llvm/lib/Analysis/ |
LoopPass.cpp | 95 for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) { 96 LoopPass *LP = getContainedPass(Index); 110 for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) { 111 LoopPass *LP = getContainedPass(Index); 118 for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) [all...] |
/external/eigen/Eigen/src/Core/products/ |
SelfadjointMatrixVector_MKL.h | 46 template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs> 48 selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,BuiltIn> {}; 51 template<typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs> \ 52 struct selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,Specialized> { \ 54 Index size, const Scalar* lhs, Index lhsStride, \ 55 const Scalar* _rhs, Index rhsIncr, Scalar* res, Scalar alpha) { \ 60 selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,BuiltIn>::run( \ 63 selfadjoint_matrix_vector_product_symv<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs>::run( \ 75 template<typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs> [all...] |
TriangularMatrixVector_MKL.h | 46 template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int StorageOrder> 48 triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,StorageOrder,BuiltIn> {}; 51 template<typename Index, int Mode, bool ConjLhs, bool ConjRhs> \ 52 struct triangular_matrix_vector_product<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,ColMajor,Specialized> { \ 53 static void run(Index _rows, Index _cols, const Scalar* _lhs, Index lhsStride, \ 54 const Scalar* _rhs, Index rhsIncr, Scalar* _res, Index resIncr, Scalar alpha) { \ 55 triangular_matrix_vector_product_trmv<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,ColMajor>::run( [all...] |
/system/update_engine/payload_generator/ |
topological_sort_unittest.cc | 34 // Returns true if the value is found in vect. If found, the index is stored 54 const Vertex::Index n_a = counter++; 55 const Vertex::Index n_b = counter++; 56 const Vertex::Index n_c = counter++; 57 const Vertex::Index n_d = counter++; 58 const Vertex::Index n_e = counter++; 59 const Vertex::Index n_f = counter++; 60 const Vertex::Index n_g = counter++; 61 const Vertex::Index n_h = counter++; 62 const Vertex::Index n_i = counter++ [all...] |
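topological_sort_unittest.cc builds a small graph over Vertex::Index handles and checks the ordering produced by update_engine's TopologicalSort. Not that implementation, just a generic DFS post-order sketch of the same idea (container choices and names are mine):

```cpp
#include <cstddef>
#include <vector>

// Generic DFS topological sort: a vertex is emitted only after everything it
// points to, so the output lists dependencies before their dependents.
void visit(std::size_t v, const std::vector<std::vector<std::size_t>>& edges,
           std::vector<bool>& seen, std::vector<std::size_t>& out) {
  if (seen[v]) return;
  seen[v] = true;
  for (std::size_t w : edges[v]) visit(w, edges, seen, out);
  out.push_back(v);   // post-order: v comes after all vertices reachable from it
}

std::vector<std::size_t> topologicalSort(
    const std::vector<std::vector<std::size_t>>& edges) {
  std::vector<bool> seen(edges.size(), false);
  std::vector<std::size_t> order;
  for (std::size_t v = 0; v < edges.size(); ++v) visit(v, edges, seen, order);
  return order;   // assumes the graph is acyclic; no cycle detection here
}
```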
/external/eigen/Eigen/src/SparseCholesky/ |
SimplicialCholesky_impl.h | 53 const Index size = ap.rows(); 58 ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0); 60 for(Index k = 0; k < size; ++k) 68 Index i = it.index(); 84 /* construct Lp index array from m_nonZerosPerCol column counts */ 85 Index* Lp = m_matrix.outerIndexPtr(); 87 for(Index k = 0; k < size; ++k) 107 const Index size = ap.rows(); 111 const Index* Lp = m_matrix.outerIndexPtr() [all...] |
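SimplicialCholesky_impl.h is the elimination-tree and column-count machinery behind Eigen's simplicial Cholesky solvers; the public classes it serves are SimplicialLLT and SimplicialLDLT. A minimal solve sketch (the SPD matrix below is made up):

```cpp
#include <Eigen/Sparse>
#include <iostream>

int main() {
  typedef Eigen::SparseMatrix<double> SpMat;

  // Small symmetric positive definite matrix; only one triangle needs to be
  // stored, selected by the Lower/Upper template argument of the solver.
  SpMat A(3, 3);
  A.insert(0, 0) = 4.0;
  A.insert(1, 0) = 1.0;  A.insert(1, 1) = 3.0;
  A.insert(2, 1) = 1.0;  A.insert(2, 2) = 2.0;
  A.makeCompressed();

  Eigen::VectorXd b(3);
  b << 1.0, 2.0, 3.0;

  Eigen::SimplicialLLT<SpMat, Eigen::Lower> llt(A);   // analyzePattern + factorize
  if (llt.info() != Eigen::Success) return 1;

  Eigen::VectorXd x = llt.solve(b);
  std::cout << x.transpose() << "\n";
  return 0;
}
```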
/external/clang/lib/AST/ |
SelectorLocationsKind.cpp | 20 static SourceLocation getStandardSelLoc(unsigned Index, 27 assert(Index == 0); 35 assert(Index < NumSelArgs); 38 IdentifierInfo *II = Sel.getIdentifierInfoForSlot(Index); 65 SourceLocation getArgLoc(unsigned Index, ArrayRef<T*> Args) { 66 return Index < Args.size() ? getArgLoc(Args[Index]) : SourceLocation(); 104 SourceLocation clang::getStandardSelectorLoc(unsigned Index, 109 return getStandardSelLoc(Index, Sel, WithArgSpace, 110 getArgLoc(Index, Args), EndLoc) [all...] |
/external/eigen/Eigen/src/Eigen2Support/ |
Minor.h | 62 Index row, Index col) 71 inline Index rows() const { return m_matrix.rows() - 1; } 72 inline Index cols() const { return m_matrix.cols() - 1; } 74 inline Scalar& coeffRef(Index row, Index col) 79 inline const Scalar coeff(Index row, Index col) const 86 const Index m_row, m_col; 101 MatrixBase<Derived>::minor(Index row, Index col [all...] |
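Minor.h is Eigen2-compatibility code: minor(row, col) returned the matrix with that row and column erased and has no direct Eigen3 equivalent. The same thing can be assembled from the four corner blocks around the removed entry; a sketch (the helper name is mine):

```cpp
#include <Eigen/Dense>

// Hypothetical helper: the (row, col) minor of m, i.e. m with one row and one
// column erased, built from the four corner blocks around the removed entry.
Eigen::MatrixXd minorOf(const Eigen::MatrixXd& m,
                        Eigen::DenseIndex row, Eigen::DenseIndex col) {
  const Eigen::DenseIndex rows = m.rows() - 1, cols = m.cols() - 1;
  Eigen::MatrixXd out(rows, cols);
  out.topLeftCorner(row, col)                   = m.topLeftCorner(row, col);
  out.topRightCorner(row, cols - col)           = m.topRightCorner(row, cols - col);
  out.bottomLeftCorner(rows - row, col)         = m.bottomLeftCorner(rows - row, col);
  out.bottomRightCorner(rows - row, cols - col) = m.bottomRightCorner(rows - row, cols - col);
  return out;
}
```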
/external/vboot_reference/utility/ |
efidecompress.c | 212 UINT16 Index; 220 for (Index = 1; Index <= 16; Index++) { 221 Count[Index] = 0; 224 for (Index = 0; Index < NumOfChar; Index++) { 225 Count[BitLen[Index]]++; 230 for (Index = 1; Index <= 16; Index++) [all...] |
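The loops above, from efidecompress.c's table construction, count how many symbols use each code length, which is the first step of rebuilding a canonical Huffman code. For orientation, here is the textbook continuation of that step (the generic RFC 1951-style construction, not the EFI decoder's own table builder):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Given per-symbol code lengths (0 = unused, otherwise 1..16), assign canonical
// Huffman codes: codes of the same length are consecutive integers, and each
// length's range starts just past the previous length's range, shifted left.
std::vector<uint16_t> canonicalCodes(const std::vector<uint8_t>& bitLen) {
  uint32_t count[17] = {0};
  for (uint8_t len : bitLen)
    if (len) ++count[len];                 // same role as Count[BitLen[Index]]++

  uint32_t nextCode[17] = {0};
  uint32_t code = 0;
  for (int len = 1; len <= 16; ++len) {
    code = (code + count[len - 1]) << 1;   // first code of this length
    nextCode[len] = code;
  }

  std::vector<uint16_t> codes(bitLen.size(), 0);
  for (std::size_t i = 0; i < bitLen.size(); ++i)
    if (bitLen[i]) codes[i] = static_cast<uint16_t>(nextCode[bitLen[i]]++);
  return codes;
}
```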
/external/lzma/CS/7zip/Compress/LZMA/ |
LzmaBase.cs | 17 public uint Index;
18 public void Init() { Index = 0; }
21 if (Index < 4) Index = 0;
22 else if (Index < 10) Index -= 3;
23 else Index -= 6;
25 public void UpdateMatch() { Index = (uint)(Index < 7 ? 7 : 10); }
26 public void UpdateRep() { Index = (uint)(Index < 7 ? 8 : 11); } [all...] |
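LzmaBase.cs's Index field is LZMA's coder state: roughly, low values mean the recent output ended in literals and high values mean it ended in matches or repeats, and that choice selects the probability context used next. A C++ transcription of the transitions shown above (the 0..6 vs 7..11 reading is my gloss, not spelled out in the snippet):

```cpp
// LZMA coder state as in LzmaBase.cs: values 0..6 follow literals,
// 7..11 follow matches or repeated-distance matches.
struct LzmaState {
  unsigned index = 0;                       // Init(): Index = 0

  void updateChar() {                       // after emitting a literal
    if (index < 4)       index = 0;
    else if (index < 10) index -= 3;
    else                 index -= 6;
  }
  void updateMatch() { index = (index < 7) ? 7 : 10; }   // after a normal match
  void updateRep()   { index = (index < 7) ? 8 : 11; }   // after a repeated-distance match
  bool isLiteralState() const { return index < 7; }      // my helper, not in the snippet
};
```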
/external/eigen/Eigen/src/SparseCore/ |
SparseView.h | 21 typedef typename MatrixType::Index Index; 44 inline Index rows() const { return m_matrix.rows(); } 45 inline Index cols() const { return m_matrix.cols(); } 47 inline Index innerSize() const { return m_matrix.innerSize(); } 48 inline Index outerSize() const { return m_matrix.outerSize(); } 59 typedef typename SparseView::Index Index; 62 InnerIterator(const SparseView& view, Index outer) :
|
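SparseView.h backs the sparseView() expression: its InnerIterator walks a matrix but skips entries that are "much smaller than" a reference value under a tolerance, which is how a dense matrix gets pruned into sparse storage. A sketch under that reading of the pruning rule (the exact predicate, |value| <= |reference| * epsilon, is my interpretation of isMuchSmallerThan):

```cpp
#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <iostream>

int main() {
  Eigen::MatrixXd d(3, 3);
  d << 1.0,   0.0, 0.0,
       0.0,   0.0, 2.0,
       1e-12, 3.0, 0.0;

  // Default view: only exact zeros are skipped by the SparseView iterator.
  Eigen::SparseMatrix<double> s1 = d.sparseView();

  // With reference 1.0 and epsilon 1e-9, entries with |x| <= 1e-9 are also
  // skipped, so the 1e-12 coefficient is pruned as well (per the rule above).
  Eigen::SparseMatrix<double> s2 = d.sparseView(1.0, 1e-9);

  std::cout << s1.nonZeros() << " " << s2.nonZeros() << "\n";   // expected: 4 3
  return 0;
}
```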
/external/eigen/blas/ |
PackedSelfadjointProduct.h | 18 template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjLhs, bool ConjRhs> 21 template<typename Scalar, typename Index, int UpLo, bool ConjLhs, bool ConjRhs> 22 struct selfadjoint_packed_rank1_update<Scalar,Index,ColMajor,UpLo,ConjLhs,ConjRhs> 25 static void run(Index size, Scalar* mat, const Scalar* vec, RealScalar alpha) 31 for (Index i=0; i<size; ++i) 41 template<typename Scalar, typename Index, int UpLo, bool ConjLhs, bool ConjRhs> 42 struct selfadjoint_packed_rank1_update<Scalar,Index,RowMajor,UpLo,ConjLhs,ConjRhs> 45 static void run(Index size, Scalar* mat, const Scalar* vec, RealScalar alpha) 47 selfadjoint_packed_rank1_update<Scalar,Index,ColMajor,UpLo==Lower?Upper:Lower,ConjRhs,ConjLhs>::run(size,mat,vec,alpha);
|
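PackedSelfadjointProduct.h implements the rank-1 update behind BLAS's xSPR/xHPR routines: A := A + alpha*x*x^H with A stored as a packed triangle. A plain sketch of the real, lower-triangular, column-major case (names are mine; the RowMajor specialization above shows how Eigen remaps the other cases onto this one by flipping UpLo and the conjugation flags):

```cpp
#include <cstddef>

// Packed lower triangle, column-major: column j stores rows j..n-1 contiguously,
// so it occupies n-j entries and the columns are laid out back to back.
void packed_rank1_update_lower(std::size_t n, double* ap,
                               const double* x, double alpha) {
  double* col = ap;
  for (std::size_t j = 0; j < n; ++j) {
    const double t = alpha * x[j];
    for (std::size_t i = j; i < n; ++i)
      col[i - j] += x[i] * t;        // A(i, j) += alpha * x(i) * x(j)
    col += n - j;                    // jump to the start of the next packed column
  }
}
```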