    Searched defs:ndims (Results 1 - 25 of 38)


  /external/tensorflow/tensorflow/core/kernels/
ops_util.h 91 const int ndims = shape.dims(); local
92 gtl::InlinedVector<T, 8> strides(ndims);
94 for (int i = ndims - 1; i >= 0; --i) {
104 const int ndims = shape.rank(); local
105 gtl::InlinedVector<T, 8> strides(ndims);
107 for (int i = ndims - 1; i >= 0; --i) {
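
    The ops_util.h matches compute row-major strides with a reverse scan over the
    dimensions. A minimal standalone sketch of the same pattern (std::vector standing
    in for gtl::InlinedVector; the function name is hypothetical):

        #include <cstdint>
        #include <vector>

        // Row-major strides: strides[i] is how far one step along dimension i
        // advances in flat storage.
        std::vector<int64_t> ComputeStrides(const std::vector<int64_t>& dims) {
          std::vector<int64_t> strides(dims.size());
          int64_t stride = 1;
          for (int i = static_cast<int>(dims.size()) - 1; i >= 0; --i) {
            strides[i] = stride;
            stride *= dims[i];
          }
          return strides;
        }
        // e.g. dims {2, 3, 4} -> strides {12, 4, 1}
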
tile_functor_cpu.cc 29 const int ndims = in.dims(); local
39 for (int i = 0; i < ndims; ++i) {
cholesky_op.cc 102 const int ndims = input.dims(); variable
103 const int64 n = input.dim_size(ndims - 1);
106 context, ndims >= 2,
107 errors::InvalidArgument("Input must have rank >= 2, got ", ndims),
110 context, input.dim_size(ndims - 2) == n,
112 input.dim_size(ndims - 2), " != ", n),
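
    The cholesky_op.cc excerpt is the standard batched-linear-algebra shape check:
    rank at least 2, with the two innermost dimensions equal (a batch of square
    matrices). A sketch of the same validation outside the OP_REQUIRES machinery
    (names hypothetical):

        #include <cstdint>
        #include <stdexcept>
        #include <string>
        #include <vector>

        void CheckBatchOfSquareMatrices(const std::vector<int64_t>& dims) {
          const int ndims = static_cast<int>(dims.size());
          if (ndims < 2)
            throw std::invalid_argument("Input must have rank >= 2, got " +
                                        std::to_string(ndims));
          if (dims[ndims - 2] != dims[ndims - 1])
            throw std::invalid_argument("Input matrices must be square");
        }
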
sparse_dense_binary_op_shared.cc 121 // "b.y_reshape()" and "b.y_bcast()" are guaranteed to have rank "ndims".
123 const int ndims = lhs_dims.size(); variable
124 switch (ndims) {
158 ndims));
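
    The switch on ndims here is the usual bridge from a runtime rank to
    compile-time-templated kernels: each case instantiates a fixed-rank
    specialization, and unsupported ranks fall through to an error. A skeletal
    sketch of the pattern (functor name hypothetical):

        template <int NDIMS>
        void EvalWithRank() { /* fixed-rank Eigen broadcasting code */ }

        void Eval(int ndims) {
          switch (ndims) {
            case 1: EvalWithRank<1>(); break;
            case 2: EvalWithRank<2>(); break;
            case 3: EvalWithRank<3>(); break;
            default: /* report unsupported broadcasting rank */ break;
          }
        }
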
transpose_functor.h 146 template <typename Device, typename T, int NDIMS>
150 Eigen::array<int, NDIMS> p;
151 for (int i = 0; i < NDIMS; ++i) p[i] = perm[i];
152 auto x = typename TTypes<T, NDIMS>::ConstTensor(
154 in.shape().AsEigenDSizes<NDIMS>());
155 auto y = typename TTypes<T, NDIMS>::Tensor(
157 out->shape().AsEigenDSizes<NDIMS>());
240 const int ndims = in.dims(); local
241 if (ndims == 0) return Status::OK();
242 TransposePermsVec perm(ndims);
    [all...]
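
    The NDIMS-templated Transpose above maps the permutation onto Eigen's shuffle
    operation, which needs the rank at compile time. A self-contained sketch of
    that mechanism:

        #include <unsupported/Eigen/CXX11/Tensor>

        int main() {
          Eigen::Tensor<float, 3> x(2, 3, 4);
          x.setRandom();
          Eigen::array<int, 3> perm{{2, 0, 1}};
          // y.dimension(i) == x.dimension(perm[i]): y is 4 x 2 x 3,
          // and y(i, j, k) == x(j, k, i).
          Eigen::Tensor<float, 3> y = x.shuffle(perm);
        }
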
transpose_functor_cpu.cc 37 const int ndims = in.dims(); local
47 for (int i = 0; i < ndims; ++i) {
60 (conjugate ? 1 : 0) + ndims * (Eigen::TensorOpCost::DivCost<int64>() +
betainc_op.cc 88 int ndims = static_cast<int>(a_shaper.x_reshape().size()); variable
90 switch (ndims) {
109 "Broadcasting rank not supported: ", ndims));
matrix_inverse_op.cc 100 const int ndims = input.dims(); variable
101 const int64 n = input.dim_size(ndims - 1);
104 context, ndims >= 2,
105 errors::InvalidArgument("Input must have rank >= 2, got ", ndims),
108 context, input.dim_size(ndims - 2) == n,
110 input.dim_size(ndims - 2), " != ", n),
mkl_tfconv_op.h 153 size_t ndims = input_shape.GetDimension(); local
154 size_t* in_sizes = new size_t[ndims];
155 for (size_t i = 0; i < ndims; i++) {
self_adjoint_eig_v2_op_gpu.cc 50 const int ndims = input.dims(); variable
52 context, ndims >= 2,
53 errors::InvalidArgument("Input must have rank >= 2, got ", ndims),
55 const int64 n = input.dim_size(ndims - 1);
57 context, input.dim_size(ndims - 2) == n,
59 input.dim_size(ndims - 2), " != ", n),
sparse_tensor_dense_add_op.cc 86 const int ndims = static_cast<int>(a_indices_t->dim_size(1)); variable
90 switch (ndims) {
117 ndims));
124 template <typename T, typename Index, int NDIMS>
125 struct ScatterNdFunctor<CPUDevice, T, Index, NDIMS, scatter_op::UpdateOp::ADD> {
129 typename TTypes<T, NDIMS>::Tensor out) {
130 Eigen::array<Eigen::DenseIndex, NDIMS> idx;
133 for (int d = 0; d < NDIMS; ++d) {
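
    ScatterNdFunctor walks the rows of the sparse index matrix, assembles a
    fixed-rank Eigen coordinate from each row, and accumulates the paired value
    into the dense output. A rank-2 sketch of that loop (names hypothetical):

        #include <unsupported/Eigen/CXX11/Tensor>

        void ScatterAdd2D(const Eigen::Tensor<Eigen::DenseIndex, 2>& indices,
                          const Eigen::Tensor<float, 1>& values,
                          Eigen::Tensor<float, 2>& out) {
          for (Eigen::DenseIndex i = 0; i < indices.dimension(0); ++i) {
            Eigen::array<Eigen::DenseIndex, 2> idx;
            for (int d = 0; d < 2; ++d) idx[d] = indices(i, d);
            out(idx) += values(i);  // UpdateOp::ADD
          }
        }
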
substr_op.cc 102 int ndims = output_shape.dims(); variable
106 switch (ndims) {
211 "Substr broadcast not implemented for ", ndims, " dimensions"));
tile_functor_gpu.cu.cc 32 const int32 ndims, T* dst) {
34 const int32* out_strides = buf + ndims;
35 const int32* in_dim_sizes = buf + ndims * 2;
39 for (int i = 0; i < ndims; ++i) {
55 const int32 ndims = in.dims(); local
56 gtl::InlinedVector<int32, 24> host_buf(ndims * 3);
59 for (int i = 0; i < ndims; ++i) {
61 host_buf[ndims + i] = out_strides[i];
62 host_buf[ndims * 2 + i] = in.dim_size(i);
77 ndims, q)
    [all...]
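
    The GPU tile functor packs its three per-dimension arrays into one ndims * 3
    buffer so a single host-to-device copy suffices; the kernel then recovers
    in_strides, out_strides, and in_dim_sizes at offsets 0, ndims, and ndims * 2,
    as the reads at the top of the excerpt show. A host-side sketch of the packing
    (function name hypothetical):

        #include <cstdint>
        #include <vector>

        std::vector<int32_t> PackTileArgs(const std::vector<int32_t>& in_strides,
                                          const std::vector<int32_t>& out_strides,
                                          const std::vector<int32_t>& in_dim_sizes) {
          const size_t ndims = in_strides.size();
          std::vector<int32_t> buf(ndims * 3);
          for (size_t i = 0; i < ndims; ++i) {
            buf[i] = in_strides[i];
            buf[ndims + i] = out_strides[i];
            buf[ndims * 2 + i] = in_dim_sizes[i];
          }
          return buf;  // one host-to-device copy moves all three arrays
        }
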
determinant_op.cc 135 const int ndims = input.dims(); variable
136 const int64 n = input.dim_size(ndims - 1);
139 context, ndims >= 2,
140 errors::InvalidArgument("Input must have rank >= 2, got ", ndims),
143 context, input.dim_size(ndims - 2) == n,
145 input.dim_size(ndims - 2), " != ", n),
150 for (int dim = 0; dim < ndims - 2; ++dim) {
275 const int ndims = input.dims(); variable
276 const int64 n = input.dim_size(ndims - 1);
279 context, ndims >= 2
    [all...]
matrix_solve_op.cc 131 const int ndims = input.dims(); variable
132 const int64 n = input.dim_size(ndims - 1);
133 const int64 nrhs = rhs.dim_size(ndims - 1);
136 context, ndims >= 2,
137 errors::InvalidArgument("Input must have rank >= 2, got ", ndims),
139 OP_REQUIRES_ASYNC(context, rhs.dims() == ndims,
142 ndims, " != ", rhs.dims()),
145 context, input.dim_size(ndims - 2) == n,
147 input.dim_size(ndims - 2), " != ", n),
149 OP_REQUIRES_ASYNC(context, rhs.dim_size(ndims - 2) == n
    [all...]
mkl_batch_matmul_op.cc 61 errors::InvalidArgument("lhs and rhs has different ndims: ",
64 const int ndims = lhs.dims(); variable
66 ctx, ndims >= 2,
67 errors::InvalidArgument("lhs and rhs ndims must be >= 2: ", ndims));
69 for (int i = 0; i < ndims - 2; ++i) {
77 auto batch_size = (ndims == 2) ? 1 : out_shape.num_elements();
78 auto lhs_rows = lhs.dim_size(ndims - 2);
79 auto lhs_cols = lhs.dim_size(ndims - 1);
80 auto rhs_rows = rhs.dim_size(ndims - 2)
    [all...]
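
    The MKL batch matmul treats every dimension left of the last two as batch:
    their product is the batch count, and the trailing pair gives each matrix's
    rows and columns. A sketch of that bookkeeping (struct and function names
    hypothetical):

        #include <cstdint>
        #include <vector>

        struct MatmulDims { int64_t batch, rows, cols; };

        // Caller ensures dims.size() >= 2, as the OP_REQUIRES above does.
        MatmulDims GetMatmulDims(const std::vector<int64_t>& dims) {
          const int ndims = static_cast<int>(dims.size());
          int64_t batch = 1;
          for (int i = 0; i < ndims - 2; ++i) batch *= dims[i];
          return {batch, dims[ndims - 2], dims[ndims - 1]};
        }
        // e.g. {5, 7, 2, 3} -> batch 35, rows 2, cols 3
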
qr_op_impl.h 138 const int ndims = input.dims(); variable
139 const int64 m = input.dim_size(ndims - 2);
140 const int64 n = input.dim_size(ndims - 1);
147 context, ndims >= 2,
148 errors::InvalidArgument("Input must have rank >= 2, got ", ndims),
156 q_shape.set_dim(ndims - 1, full_matrices_ ? m : min_size);
161 r_shape.set_dim(ndims - 2, full_matrices_ ? m : min_size);
176 transposed_shape.set_dim(ndims - 2, input.dim_size(ndims - 1));
177 transposed_shape.set_dim(ndims - 1, input.dim_size(ndims - 2))
    [all...]
transpose_functor_gpu.cu.cc 36 const int32 ndims, T* dst) {
38 const int32* out_strides = buf + ndims;
39 const int32* perm = buf + ndims * 2;
43 for (int32 i = 0; i < ndims; ++i) {
63 const int32 ndims = in.dims(); local
64 gtl::InlinedVector<int32, 24> host_buf(ndims * 3);
68 for (int i = 0; i < ndims; ++i) {
70 host_buf[ndims + i] = out_strides[i];
71 host_buf[ndims * 2 + i] = perm[i];
86 ndims, q)
    [all...]
reduction_ops_common.h 98 int ndims() const { return data_reshape_.size(); } function in class:tensorflow::ReductionHelper
156 CHECK_GE(helper.ndims(), 0);
158 if (helper.ndims() == 0 ||
159 (helper.ndims() == 1 && !helper.reduce_first_axis())) {
195 } else if ((helper.ndims() == 1) && helper.reduce_first_axis()) {
199 } else if ((helper.ndims() == 2) && helper.reduce_first_axis()) {
203 } else if ((helper.ndims() == 2) && !helper.reduce_first_axis()) {
207 } else if ((helper.ndims() == 3) && helper.reduce_first_axis()) {
212 } else if ((helper.ndims() == 3) && !helper.reduce_first_axis()) {
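
    ReductionHelper reshapes the input so every reduction becomes one of a few
    fixed-rank Eigen cases, keyed on ndims() and whether the first axis is
    reduced. A sketch of the ndims == 2, reduce-first-axis case:

        #include <unsupported/Eigen/CXX11/Tensor>

        int main() {
          Eigen::Tensor<float, 2> m(3, 4);
          m.setConstant(1.0f);
          Eigen::array<int, 1> axes{{0}};                // reduce the first axis
          Eigen::Tensor<float, 1> colsum = m.sum(axes);  // 4 entries, each 3.0f
        }
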
serialize_sparse_op.cc 302 const int ndims = serialized_sparse.shape().dims(); variable
305 context, ndims > 0,
309 OP_REQUIRES(context, serialized_sparse.shape().dim_size(ndims - 1) == 3,
315 for (int i = 0; i < ndims - 1; ++i) {
444 Tensor target_shape(DT_INT64, TensorShape({ndims + output.dims() - 2}));
445 for (int i = 0; i < ndims - 1; ++i) {
449 target_shape.vec<int64>()(i + ndims - 1) = output.shape().data()[i + 1];
sparse_reduce_op.cc 60 int ndims = sp.dims(); local
62 reduction_axes[i] = (reduction_axes[i] + ndims) % ndims;
67 // group_by_dims == {0, .., NDIMS-1} \ reduction_axes.
68 std::vector<int64> perm(ndims);
88 out_dim_sizes.reserve(ndims);
91 for (int d = 0; d < ndims; ++d) {
192 // coordinates returned by .group() have the same ndims as group_by_dims.
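
    Line 62 of sparse_reduce_op.cc is the usual normalization of Python-style
    negative axes: adding ndims and taking the remainder maps an axis in
    [-ndims, ndims) onto [0, ndims). As a one-liner:

        // WrapAxis(-1, 3) == 2, WrapAxis(1, 3) == 1; assumes -ndims <= axis < ndims.
        int WrapAxis(int axis, int ndims) { return (axis + ndims) % ndims; }
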
  /external/tensorflow/tensorflow/compiler/tf2xla/lib/
batch_dot.cc 45 const int ndims = xla::ShapeUtil::Rank(*x_shape); local
46 if (ndims < 2) {
48 "Arguments to BatchedDot must have rank >= 2: ", ndims);
54 for (int i = 0; i < ndims - 2; ++i) {
64 int x_inner_dim = transpose_x ? (ndims - 2) : (ndims - 1);
65 int y_inner_dim = transpose_y ? (ndims - 1) : (ndims - 2);
82 int x_outer_dim = transpose_x ? (ndims - 1) : (ndims - 2)
    [all...]
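
    batch_dot.cc picks each operand's contracted dimension from its transpose
    flag: normally the last dimension, but the second-to-last when the operand is
    transposed. A sketch of the selection:

        // For an operand of rank ndims (>= 2):
        int InnerDim(bool transposed, int ndims) {  // dimension that is contracted
          return transposed ? ndims - 2 : ndims - 1;
        }
        int OuterDim(bool transposed, int ndims) {  // dimension kept in the result
          return transposed ? ndims - 1 : ndims - 2;
        }
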
cholesky.cc 105 const int ndims = xla::ShapeUtil::Rank(*a_shape); local
106 if (ndims < 2) {
108 "Arguments to Cholesky must have rank >= 2: ", ndims);
triangular_solve.cc 46 const int ndims = xla::ShapeUtil::Rank(*a_shape); local
47 if (ndims < 2) {
49 "Arguments to TriangularSolve must have rank >= 2: ", ndims);
53 for (int i = 0; i < ndims - 2; ++i) {
88 std::vector<int64> output(ndims);
375 const int64 ndims = xla::ShapeUtil::Rank(*a_shape); local
378 for (int i = 0; i < ndims - 2; ++i) {
384 std::vector<int64> output(ndims);
485 std::vector<xla::ComputationDataHandle> padded_starts(ndims, zero);
486 padded_starts[ndims - 2] = bodyb->Reshape(starts[0], {1})
    [all...]
  /external/tensorflow/tensorflow/core/distributed_runtime/rpc/
grpc_tensor_coding.cc 81 const int ndims = val.shape().dims(); local
83 (ndims * (4 * kVarintMax64)); // Shape: 4 varints per dim
94 const int ndims = val.shape().dims(); local
96 for (int d = 0; d < ndims; d++) {
108 for (int d = 0; d < ndims; d++) {
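
    grpc_tensor_coding.cc sizes its buffer with an upper bound rather than an
    exact count: each shape dimension is budgeted four maximum-length 64-bit
    varints. A sketch of that estimate (the constant is the standard varint
    bound; the function name is hypothetical):

        #include <cstddef>

        constexpr int kVarintMax64 = 10;  // a 64-bit varint needs at most 10 bytes

        size_t ShapeEncodingUpperBound(int ndims) {
          return static_cast<size_t>(ndims) * (4 * kVarintMax64);
        }
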
