    Searched refs: m_cols (Results 1 - 15 of 15)

  /external/eigen/Eigen/src/Core/
DenseStorage.h 252 Index m_cols; member in class:Eigen::DenseStorage
254 EIGEN_DEVICE_FUNC DenseStorage() : m_rows(0), m_cols(0) {}
256 : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {}
257 EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_rows(other.m_rows), m_cols(other.m_cols) {}
264 m_cols = other.m_cols;
268 EIGEN_DEVICE_FUNC DenseStorage(Index, Index rows, Index cols) : m_rows(rows), m_cols(cols) {}
270 { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
312 Index m_cols; member in class:Eigen::DenseStorage
342 Index m_cols; member in class:Eigen::DenseStorage
424 Index m_cols; member in class:Eigen::DenseStorage
    [all...]
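These DenseStorage hits are the specializations for dynamically sized matrices, where the runtime dimensions live next to the data pointer. A minimal sketch of the pattern, with the caveat that real Eigen adds aligned allocation, an Index typedef, and many partial specializations (copy semantics are omitted here to keep the sketch simple):

    #include <cstddef>
    #include <utility>

    template <typename T>
    class DynDenseStorage {
      T* m_data;
      std::ptrdiff_t m_rows, m_cols;  // runtime dimensions, as in the hits above
     public:
      DynDenseStorage() : m_data(nullptr), m_rows(0), m_cols(0) {}
      DynDenseStorage(std::ptrdiff_t size, std::ptrdiff_t rows, std::ptrdiff_t cols)
          : m_data(new T[size]), m_rows(rows), m_cols(cols) {}
      ~DynDenseStorage() { delete[] m_data; }
      DynDenseStorage(const DynDenseStorage&) = delete;             // sketch only
      DynDenseStorage& operator=(const DynDenseStorage&) = delete;  // sketch only
      // swap exchanges all three members, mirroring the swap() hit above
      void swap(DynDenseStorage& other) {
        std::swap(m_data, other.m_data);
        std::swap(m_rows, other.m_rows);
        std::swap(m_cols, other.m_cols);
      }
      std::ptrdiff_t rows() const { return m_rows; }
      std::ptrdiff_t cols() const { return m_cols; }
    };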
MapBase.h 91 EIGEN_DEVICE_FUNC inline Index cols() const { return m_cols.value(); }
149 explicit inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime)
160 m_cols(ColsAtCompileTime == Dynamic ? vecSize : Index(ColsAtCompileTime))
171 : m_data(dataPtr), m_rows(rows), m_cols(cols)
202 const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_cols; member in class:Eigen::MapBase
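MapBase wraps m_cols in internal::variable_if_dynamic so that a fixed-size Map stores nothing at runtime. This is observable through the public Eigen::Map API:

    #include <Eigen/Core>
    #include <iostream>

    int main() {
      float buffer[12] = {};
      // Dynamic-size map: m_cols holds 4 at runtime.
      Eigen::Map<Eigen::MatrixXf> dyn(buffer, 3, 4);
      // Fixed-size map: cols() is the compile-time constant 4, no member storage.
      Eigen::Map<Eigen::Matrix<float, 3, 4>> fixed(buffer);
      std::cout << dyn.cols() << " " << fixed.cols() << "\n";  // prints: 4 4
    }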
CoreEvaluators.h 1249 const variable_if_dynamic<Index, ArgType::ColsAtCompileTime> m_cols; member in struct:Eigen::internal::unary_evaluator
1533 const variable_if_dynamic<Index, ReverseCol ? ArgType::ColsAtCompileTime : 1> m_cols; member in struct:Eigen::internal::unary_evaluator
    [all...]
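The evaluator hits use the same internal::variable_if_dynamic trick. A hypothetical reconstruction of the pattern (simplified; the real template lives in Eigen's internal headers):

    enum { Dynamic = -1 };

    template <typename T, int Value>
    struct variable_if_dynamic {              // compile-time case: stateless
      explicit variable_if_dynamic(T) {}
      static T value() { return T(Value); }
    };

    template <typename T>
    struct variable_if_dynamic<T, Dynamic> {  // runtime case: stores the value
      T m_value;
      explicit variable_if_dynamic(T v) : m_value(v) {}
      T value() const { return m_value; }
    };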
CwiseNullaryOp.h 69 : m_rows(rows), m_cols(cols), m_functor(func)
80 EIGEN_STRONG_INLINE Index cols() const { return m_cols.value(); }
88 const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_cols; member in class:Eigen::CwiseNullaryOp
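CwiseNullaryOp is the expression behind NullaryExpr, Constant, Zero, and friends: it stores only m_rows, m_cols, and the functor, generating coefficients on demand. For example:

    #include <Eigen/Core>
    #include <iostream>

    int main() {
      // No 2x3 array is materialized until the assignment to m.
      Eigen::MatrixXd m = Eigen::MatrixXd::NullaryExpr(
          2, 3, [](Eigen::Index r, Eigen::Index c) { return double(10 * r + c); });
      std::cout << m << "\n";  // prints:  0  1  2
                               //         10 11 12
    }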
  /external/tensorflow/tensorflow/lite/kernels/internal/optimized/
neon_tensor_utils.h 28 int m_cols, const float* vector,
31 NEON_OR_PORTABLE(MatrixBatchVectorMultiplyAccumulate, matrix, m_rows, m_cols,
36 const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
39 NEON_OR_PORTABLE(MatrixBatchVectorMultiplyAccumulate, matrix, m_rows, m_cols,
45 const int m_cols, const float* vector, int n_batch, float* result,
48 matrix, ledger, m_rows, m_cols, vector, n_batch, result, result_stride);
53 const int m_cols, const int8_t* __restrict__ vectors,
56 NeonSparseMatrixBatchVectorMultiplyAccumulate(matrix, ledger, m_rows, m_cols,
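NEON_OR_PORTABLE picks the Neon- or Portable-prefixed implementation of the named kernel. A rough sketch of the dispatch idea, assuming simple token pasting (the actual TFLite macro may also involve runtime CPU feature detection):

    // Sketch of the dispatch idea, not the actual TFLite macro:
    #ifdef USE_NEON
    #define NEON_OR_PORTABLE(funcname, ...) Neon##funcname(__VA_ARGS__)
    #else
    #define NEON_OR_PORTABLE(funcname, ...) Portable##funcname(__VA_ARGS__)
    #endif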
tensor_utils_impl.h 38 int m_rows, int m_cols,
43 int m_cols, const float* vector,
49 const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
53 const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
58 const float* matrix, const uint8_t* ledger, int m_rows, int m_cols,
61 const float* matrix, const uint8_t* ledger, int m_rows, int m_cols,
67 const int m_cols, const int8_t* __restrict__ vectors,
72 const int m_cols, const int8_t* __restrict__ vectors,
neon_tensor_utils.cc 98 int m_cols, const float* vector,
105 m_cols - (m_cols & (kFloatWeightsPerNeonLane - 1));
109 const float* vector_in_batch = vector + b * m_cols;
127 for (int c = postamble_start; c < m_cols; c++) {
130 matrix_row += m_cols;
151 const int m_cols, void** shuffled_vectors_free) {
155 kWeightsPerUint32, n_batch * m_cols, shuffled_vectors_free));
158 int8* shuffled_vectors_ptr = shuffled_vectors + (i * m_cols);
160 reinterpret_cast<const int8*>(vectors) + (i * m_cols);
    [all...]
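The expression at line 105 rounds m_cols down to a multiple of the NEON lane width, which works because the lane count is a power of two. A worked example, assuming kFloatWeightsPerNeonLane is 4 (four floats per 128-bit register):

    // m_cols = 10
    // m_cols & (4 - 1)  == 10 & 3 == 2   -> leftover (postamble) columns
    // postamble_start   == 10 - 2 == 8   -> columns [0, 8) run vectorized
    // Columns 8 and 9 fall to the scalar loop shown at line 127.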
  /external/tensorflow/tensorflow/lite/kernels/internal/
tensor_utils.h 54 int m_cols, const float* vector,
67 // 1. m_cols is a multiple of 16 so that all blocks are full blocks.
68 // 2. m_cols < 254 * 16 so that block index can be represented by uint8.
70 const float* matrix, const uint8_t* ledger, int m_rows, int m_cols,
81 const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
94 // 1. m_cols is a multiple of 16 so that all blocks are full blocks.
95 // 2. m_cols < 254 * 16 so that block index can be represented by uint8.
98 const int m_cols, const int8_t* __restrict__ vectors,
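The two constraints are what let each ledger entry be a single byte. A sketch under those stated assumptions, with the block size taken as 16 from the comments above:

    const int kBlockSize = 16;
    // 1. m_cols % kBlockSize == 0   -> every block is a full 16-column block.
    // 2. m_cols < 254 * 16 == 4064  -> block index stays below 254, so it
    //    fits a uint8_t with values to spare for a sentinel.
    int NumBlocks(int m_cols) { return m_cols / kBlockSize; }  // at most 253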
  /external/eigen/Eigen/src/misc/
Image.h 44 m_cols(m_rank == 0 ? 1 : m_rank),
49 inline Index cols() const { return m_cols; }
61 Index m_rank, m_cols; member in struct:Eigen::internal::image_retval_base
Kernel.h 46 m_cols(m_rank==dec.cols() ? 1 : dec.cols() - m_rank)
50 inline Index cols() const { return m_cols; }
61 Index m_rank, m_cols; member in struct:Eigen::internal::kernel_retval_base
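The two m_cols initializers encode rank-nullity: image() carries rank columns and kernel() carries cols() - rank columns, each falling back to a single placeholder column when the space is trivial. This shows up through the public decomposition API:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::MatrixXd A(3, 4);
      A << 1, 2, 3, 4,
           2, 4, 6, 8,   // twice the first row, so rank(A) == 2
           0, 1, 0, 1;
      Eigen::FullPivLU<Eigen::MatrixXd> lu(A);
      std::cout << lu.rank() << "\n";           // 2
      std::cout << lu.kernel().cols() << "\n";  // cols - rank == 2
      std::cout << lu.image(A).cols() << "\n";  // rank == 2
    }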
  /external/tensorflow/tensorflow/lite/kernels/internal/reference/
portable_tensor_utils.h 41 int m_rows, int m_cols,
47 const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
52 const float* matrix, const uint8_t* ledger, int m_rows, int m_cols,
57 const int m_cols, const int8_t* __restrict__ vectors,
162 int m_cols, const float* vector,
165 PortableMatrixBatchVectorMultiplyAccumulate(matrix, m_rows, m_cols, vector,
170 const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
173 PortableMatrixBatchVectorMultiplyAccumulate(matrix, m_rows, m_cols, vector,
179 const float* matrix, const uint8_t* ledger, int m_rows, int m_cols,
182 matrix, ledger, m_rows, m_cols, vector, n_batch, result, result_stride)
    [all...]
portable_tensor_utils.cc 69 int m_rows, int m_cols,
78 const float* vector_in_batch = vector + b * m_cols;
79 for (int c = 0; c < m_cols; c++) {
89 const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
93 for (batch = 0; batch < n_batch; ++batch, vectors += m_cols) {
105 for (col = 0; col < m_cols; ++col, ++row_ptr) {
114 const float* matrix, const uint8_t* ledger, int m_rows, int m_cols,
118 m_cols % kBlockSize, 0);
127 const float* vector_in_batch = vector + b * m_cols;
145 const int m_cols, const int8_t* __restrict__ vectors
    [all...]
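Pieced together from the excerpt, the portable float kernel is a plain triple loop over batches, rows, and columns. A self-contained sketch with result_stride fixed to 1 for brevity (the real signature threads a stride through the result pointer):

    // result[b * m_rows + r] += dot(row r of matrix, vector for batch b)
    void MatrixBatchVectorMultiplyAccumulateSketch(
        const float* matrix, int m_rows, int m_cols,
        const float* vector, int n_batch, float* result) {
      for (int b = 0; b < n_batch; ++b) {
        const float* vector_in_batch = vector + b * m_cols;
        const float* matrix_row = matrix;
        for (int r = 0; r < m_rows; ++r) {
          float acc = 0.0f;
          for (int c = 0; c < m_cols; ++c) {
            acc += matrix_row[c] * vector_in_batch[c];
          }
          result[b * m_rows + r] += acc;
          matrix_row += m_cols;
        }
      }
    }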
  /external/eigen/Eigen/src/SVD/
SVDBase.h 193 inline Index cols() const { return m_cols; }
236 Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize; member in class:Eigen::SVDBase
248 m_rows(-1), m_cols(-1), m_diagSize(0)
281 cols == m_cols &&
288 m_cols = cols;
301 m_diagSize = (std::min)(m_rows, m_cols);
306 m_matrixV.resize(m_cols, m_computeFullV ? m_cols : m_computeThinV ? m_diagSize : 0);
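allocate() sizes m_matrixV as m_cols x (m_cols, m_diagSize, or 0) depending on the full/thin flags, with m_diagSize = min(m_rows, m_cols). The effect is visible from the public API:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::MatrixXf m = Eigen::MatrixXf::Random(3, 5);  // diagSize == 3
      Eigen::JacobiSVD<Eigen::MatrixXf> thin(m, Eigen::ComputeThinU | Eigen::ComputeThinV);
      Eigen::JacobiSVD<Eigen::MatrixXf> full(m, Eigen::ComputeFullU | Eigen::ComputeFullV);
      // thin V: cols x diagSize == 5x3; full V: cols x cols == 5x5
      std::cout << thin.matrixV().rows() << "x" << thin.matrixV().cols() << "\n";
      std::cout << full.matrixV().rows() << "x" << full.matrixV().cols() << "\n";
    }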
JacobiSVD_LAPACKE.h 64 ldvt = (m_computeFullV) ? internal::convert_index<lapack_int>(m_cols) : (m_computeThinV) ? internal::convert_index<lapack_int>(m_diagSize) : 1; \
66 localV.resize(ldvt, m_cols); \
71 LAPACKE_##LAPACKE_PREFIX##gesvd( matrix_order, jobu, jobvt, internal::convert_index<lapack_int>(m_rows), internal::convert_index<lapack_int>(m_cols), (LAPACKE_TYPE*)m_temp.data(), lda, (LAPACKE_RTYPE*)m_singularValues.data(), u, ldu, vt, ldvt, superb.data()); \
JacobiSVD.h 597 using Base::m_cols;
619 cols == m_cols &&
626 m_cols = cols;
644 m_diagSize = (std::min)(m_rows, m_cols);
651 m_matrixV.resize(m_cols, m_computeFullV ? m_cols
656 if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this);
657 if(m_rows>m_cols) m_qr_precond_morerows.allocate(*this);
658 if(m_rows!=m_cols) m_scaledMatrix.resize(rows,cols);
681 if(m_rows!=m_cols)
    [all...]
