HomeSort by relevance Sort by last modified time
    Searched refs:RowMajor (Results 101 - 125 of 260) sorted by null

1 2 3 4 5 6 7 8 9 10 11

  /external/tensorflow/tensorflow/core/kernels/
sparse_softmax_op.cc 106 Eigen::Tensor<T, 1, Eigen::RowMajor> tmp(group_size);
113 Eigen::TensorMap<Eigen::Tensor<T, 1, Eigen::RowMajor>> output_part(
eigen_pooling_test.cc 86 Tensor<float, 4, RowMajor> input(num_batches, input_cols, input_rows, depth);
87 Tensor<float, 4, RowMajor> result(num_batches, output_cols, output_rows,
198 Tensor<float, 5, RowMajor> input(num_batches, input_cols, input_rows,
200 Tensor<float, 5, RowMajor> result(num_batches, output_cols, output_rows,
320 Tensor<float, 5, RowMajor> input(num_batches, input_cols, input_rows,
322 Tensor<float, 5, RowMajor> result(num_batches, output_cols, output_rows,
460 Tensor<float, 5, RowMajor> input(num_batches, input_cols, input_rows,
462 Tensor<float, 5, RowMajor> result(num_batches, output_cols, output_rows,
583 Tensor<float, 4, RowMajor> input(num_batches, input_cols, input_rows, depth);
584 Tensor<float, 4, RowMajor> result(num_batches, output_cols, output_rows
    [all...]
lrn_op_test.cc 65 Eigen::Tensor<float, 4, Eigen::RowMajor> expected(batch_size, rows, cols,
71 Eigen::Tensor<float, 1, Eigen::RowMajor> out_col(depth);
84 Eigen::Tensor<float, 0, Eigen::RowMajor> sum =
quantized_matmul_op.cc 42 !TransposeC ? gemmlowp::MapOrder::RowMajor : gemmlowp::MapOrder::ColMajor;
44 !TransposeA ? gemmlowp::MapOrder::RowMajor : gemmlowp::MapOrder::ColMajor;
46 !TransposeB ? gemmlowp::MapOrder::RowMajor : gemmlowp::MapOrder::ColMajor;
  /external/eigen/Eigen/src/Core/
SolveTriangular.h 72 (int(Lhs::Flags) & RowMajorBit) ? RowMajor : ColMajor>
95 typedef internal::gemm_blocking_space<(Rhs::Flags&RowMajorBit) ? RowMajor : ColMajor,Scalar,Scalar,
100 triangular_solve_matrix<Scalar,Index,Side,Mode,LhsProductTraits::NeedToConjugate,(int(Lhs::Flags) & RowMajorBit) ? RowMajor : ColMajor,
101 (Rhs::Flags&RowMajorBit) ? RowMajor : ColMajor>
BandMatrix.h 70 EIGEN_STATIC_ASSERT((Options&RowMajor)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
169 * \tparam _Options A combination of either \b #RowMajor or \b #ColMajor, and of \b #SelfAdjoint
195 typedef Matrix<Scalar,DataRowsAtCompileTime,ColsAtCompileTime,Options&RowMajor?RowMajor:ColMajor> CoefficientsType;
312 class TridiagonalMatrix : public BandMatrix<Scalar,Size,Size,Options&SelfAdjoint?0:1,1,Options|RowMajor>
314 typedef BandMatrix<Scalar,Size,Size,Options&SelfAdjoint?0:1,1,Options|RowMajor> Base;
GeneralProduct.h 132 * 2 - the matrix is row-major, BLAS compatible and N is large => call fast BLAS-like rowmajor routine
190 enum { OtherStorageOrder = StorageOrder == RowMajor ? ColMajor : RowMajor };
231 typedef const_blas_data_mapper<RhsScalar,Index,RowMajor> RhsMapper;
290 template<> struct gemv_dense_selector<OnTheRight,RowMajor,true>
331 typedef const_blas_data_mapper<LhsScalar,Index,RowMajor> LhsMapper;
334 <Index,LhsScalar,LhsMapper,RowMajor,LhsBlasTraits::NeedToConjugate,RhsScalar,RhsMapper,RhsBlasTraits::NeedToConjugate>::run(
357 template<> struct gemv_dense_selector<OnTheRight,RowMajor,false>
  /external/eigen/Eigen/src/Core/products/
TriangularMatrixVector_BLAS.h 60 struct triangular_matrix_vector_product<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,RowMajor,Specialized> { \
63 triangular_matrix_vector_product_trmv<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,RowMajor>::run( \
158 struct triangular_matrix_vector_product_trmv<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,RowMajor> { \
170 triangular_matrix_vector_product<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,RowMajor,BuiltIn>::run( \
GeneralMatrixMatrixTriangular_BLAS.h 89 char uplo=((IsLower) ? 'L' : 'U'), trans=((AStorageOrder==RowMajor) ? 'T':'N'); \
102 conjA = (((AStorageOrder==ColMajor) && ConjugateA) || ((AStorageOrder==RowMajor) && !ConjugateA)) ? 1 : 0 \
110 char uplo=((IsLower) ? 'L' : 'U'), trans=((AStorageOrder==RowMajor) ? 'C':'N'); \
SelfadjointMatrixVector_BLAS.h 85 IsRowMajor = StorageOrder==RowMajor ? 1 : 0, \
TriangularMatrixMatrix_BLAS.h 144 transa = (LhsStorageOrder==RowMajor) ? ((ConjugateLhs) ? 'C' : 'T') : 'N'; \
156 if (LhsStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \
254 transa = (RhsStorageOrder==RowMajor) ? ((ConjugateRhs) ? 'C' : 'T') : 'N'; \
266 if (RhsStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \
  /external/eigen/blas/
PackedSelfadjointProduct.h 42 struct selfadjoint_packed_rank1_update<Scalar,Index,RowMajor,UpLo,ConjLhs,ConjRhs>
  /external/eigen/test/
selfadjoint.cpp 66 CALL_SUBTEST_5( selfadjoint(Matrix<float,Dynamic,Dynamic,RowMajor>(s, s)) );
product_trsolve.cpp 35 Matrix<Scalar,Size,Size,RowMajor> rmLhs(size,size);
37 enum { colmajor = Size==1 ? RowMajor : ColMajor,
38 rowmajor = Cols==1 ? ColMajor : RowMajor }; enumerator in enum:__anon19850
40 Matrix<Scalar,Size,Cols,rowmajor> rmRhs(size,cols);
diagonalmatrices.cpp 120 CALL_SUBTEST_3( diagonalmatrices(Matrix<double,3,3,RowMajor>()) );
122 CALL_SUBTEST_5( diagonalmatrices(Matrix<float,4,4,RowMajor>()) );
125 CALL_SUBTEST_8( diagonalmatrices(Matrix<double,Dynamic,Dynamic,RowMajor>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
  /external/eigen/unsupported/Eigen/src/IterativeSolvers/
IncompleteLU.h 25 typedef SparseMatrix<Scalar,RowMajor> FactorType;
  /external/eigen/unsupported/test/
cxx11_tensor_sugar.cpp 6 using Eigen::RowMajor;
cxx11_tensor_reverse.cpp 185 CALL_SUBTEST(test_simple_reverse<RowMajor>());
187 CALL_SUBTEST(test_expr_reverse<RowMajor>(true));
189 CALL_SUBTEST(test_expr_reverse<RowMajor>(false));
cxx11_tensor_image_patch.cpp 20 Tensor<float, 4, RowMajor> tensor_row_major = tensor.swap_layout();
35 // Single pixel patch: RowMajor
36 Tensor<float, 5, RowMajor> single_pixel_patch_row_major;
52 // RowMajor
74 // Entire image patch: RowMajor
75 Tensor<float, 5, RowMajor> entire_image_patch_row_major;
101 // RowMajor
110 // Check that ColMajor and RowMajor agree.
128 // 2D patch: RowMajor
129 Tensor<float, 5, RowMajor> twod_patch_row_major
    [all...]
cxx11_tensor_morphing.cpp 469 CALL_SUBTEST_1(test_simple_slice<RowMajor>());
472 CALL_SUBTEST_3(test_slice_in_expr<RowMajor>());
474 CALL_SUBTEST_4(test_slice_as_lvalue<RowMajor>());
476 CALL_SUBTEST_5(test_slice_raw_data<RowMajor>());
480 CALL_SUBTEST_6(test_strided_slice_write<RowMajor>());
481 CALL_SUBTEST_6(test_strided_slice<RowMajor>());
484 CALL_SUBTEST_7(test_composition<RowMajor>());
cxx11_tensor_thread_pool.cpp 354 CALL_SUBTEST_2(test_multithread_contraction<RowMajor>());
357 CALL_SUBTEST_3(test_multithread_contraction_agrees_with_singlethread<RowMajor>());
361 CALL_SUBTEST_4(test_contraction_corner_cases<RowMajor>());
364 CALL_SUBTEST_4(test_full_contraction<RowMajor>());
367 CALL_SUBTEST_5(test_multithreaded_reductions<RowMajor>());
372 CALL_SUBTEST_6(test_multithread_shuffle<RowMajor>());
kronecker_product.cpp 108 SparseMatrix<double,RowMajor> SM_row_a(SM_a), SM_row_b(SM_b);
133 SparseMatrix<double,RowMajor> SM_ab2 = kroneckerProduct(SM_a,DM_b);
195 SparseMatrix<float,RowMajor> sC2;
  /external/eigen/Eigen/src/SparseCore/
SparseUtil.h 86 typedef SparseVector<_Scalar, RowMajor, _StorageIndex> type;
100 enum { _Options = ((Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
115 enum { _Options = ((evaluator<T>::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
  /external/eigen/unsupported/Eigen/CXX11/src/Tensor/
TensorLayoutSwap.h 27 * Tensor<float, 2, RowMajor> output = input.swap_layout();
48 static const int Layout = (traits<XprType>::Layout == ColMajor) ? RowMajor : ColMajor;
121 Layout = (static_cast<int>(TensorEvaluator<ArgType, Device>::Layout) == static_cast<int>(ColMajor)) ? RowMajor : ColMajor,
183 Layout = (static_cast<int>(TensorEvaluator<ArgType, Device>::Layout) == static_cast<int>(ColMajor)) ? RowMajor : ColMajor,
  /external/tensorflow/tensorflow/contrib/tensor_forest/kernels/
tree_utils.h 53 Eigen::Tensor<T, 0, Eigen::RowMajor> count_sum =
84 Eigen::Tensor<float, 0, Eigen::RowMajor> ret = sum - (sum2 / sum);
101 Eigen::Tensor<float, 0, Eigen::RowMajor> ret = (e_x2 - e_x.square()).sum();

Completed in 376 milliseconds

1 2 3 4 5 6 7 8 9 10 11