    Searched refs: NDIMS (Results 1 - 25 of 28)


  /external/tensorflow/tensorflow/core/kernels/
slice_op.h 27 template <typename Device, typename T, int NDIMS>
29 void operator()(const Device& d, typename TTypes<T, NDIMS>::Tensor output,
30 typename TTypes<T, NDIMS>::ConstTensor input,
31 const Eigen::DSizes<Eigen::DenseIndex, NDIMS>& slice_indices,
32 const Eigen::DSizes<Eigen::DenseIndex, NDIMS>& slice_sizes) {
36 Eigen::DSizes<int, NDIMS> indices;
37 for (int i = 0; i < NDIMS; ++i) {
40 Eigen::DSizes<int, NDIMS> sizes;
41 for (int i = 0; i < NDIMS; ++i) {
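
The slice_op.h functor above takes the runtime slice description as Eigen::DSizes<Eigen::DenseIndex, NDIMS>, copies it into int-sized index arrays, and evaluates an Eigen slice expression. A minimal standalone sketch of that pattern in plain Eigen, without the TTypes/Device plumbing of the real kernel (SliceSketch is an illustrative name, not TensorFlow's):

// Minimal sketch of an NDIMS-templated slice: fixed-rank DSizes -> slice().
#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

template <typename T, int NDIMS>
void SliceSketch(const Eigen::Tensor<T, NDIMS, Eigen::RowMajor>& input,
                 const Eigen::DSizes<Eigen::DenseIndex, NDIMS>& slice_indices,
                 const Eigen::DSizes<Eigen::DenseIndex, NDIMS>& slice_sizes,
                 Eigen::Tensor<T, NDIMS, Eigen::RowMajor>* output) {
  // The kernel copies the 64-bit DSizes into int arrays first; here we
  // hand the DSizes to Eigen's slice expression directly.
  *output = input.slice(slice_indices, slice_sizes);
}

int main() {
  Eigen::Tensor<float, 2, Eigen::RowMajor> in(4, 4);
  in.setConstant(1.0f);
  Eigen::DSizes<Eigen::DenseIndex, 2> starts(1, 1);
  Eigen::DSizes<Eigen::DenseIndex, 2> sizes(2, 3);
  Eigen::Tensor<float, 2, Eigen::RowMajor> out;
  SliceSketch<float, 2>(in, starts, sizes, &out);
  std::cout << "sliced to " << out.dimension(0) << "x" << out.dimension(1) << "\n";
  return 0;
}
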
strided_slice_op.h 30 template <typename Device, typename T, int NDIMS>
32 void operator()(const Device& d, typename TTypes<T, NDIMS>::Tensor output,
33 typename TTypes<T, NDIMS>::ConstTensor input,
34 const Eigen::DSizes<Eigen::DenseIndex, NDIMS>& start_indices,
35 const Eigen::DSizes<Eigen::DenseIndex, NDIMS>& stop_indices,
36 const Eigen::DSizes<Eigen::DenseIndex, NDIMS>& strides) {
40 Eigen::DSizes<int, NDIMS> start_i, stop_i, strides_i;
41 for (int i = 0; i < NDIMS; ++i) {
55 template <typename T, int NDIMS, typename Device>
57 static void run(const Device& d, typename TTypes<T, NDIMS>::Tensor output)
    [all...]
sparse_tensor_dense_add_op.h 30 template <typename Device, typename T, typename Index, int NDIMS,
36 typename TTypes<T, NDIMS>::Tensor out);
conv_2d.h 144 template <typename Device, typename T, typename IndexType, int NDIMS>
147 typename TTypes<T, NDIMS, IndexType>::ConstTensor in,
148 typename TTypes<T, NDIMS, IndexType>::Tensor out) {
153 for (int i = 1; i < NDIMS - 2; ++i) {
156 merged_dims[1] = in.dimension(NDIMS - 2); // input filters
157 merged_dims[2] = in.dimension(NDIMS - 1); // output filters
159 Eigen::DSizes<IndexType, NDIMS> expanded_dims;
160 expanded_dims[0] = in.dimension(NDIMS - 1); // output filters
161 expanded_dims[1] = in.dimension(NDIMS - 2); // input filters
162 for (int i = 0; i < NDIMS; ++i) { // spatial dimension
    [all...]
conv_ops_gpu_3.cu.cc 373 template <typename T, int NDIMS>
375 Dimension<NDIMS> input_dims, T* output,
376 Dimension<NDIMS> output_dims,
377 Dimension<NDIMS - 2> padding_left) {
380 Index<NDIMS> output_tensor_index =
383 Index<NDIMS> input_tensor_index;
386 for (int i = 1; i < NDIMS - 1; i++) {
391 input_tensor_index[NDIMS - 1] = output_tensor_index[NDIMS - 1]; // channels
402 template <typename T, int NDIMS>
    [all...]
batch_util.cc 150 template <typename T, int NDIMS>
157 auto element_t = element.tensor<T, NDIMS>();
158 auto parent_t = parent->tensor<T, NDIMS + 1>();
159 Eigen::DSizes<Eigen::DenseIndex, NDIMS + 1> slice_indices;
161 Eigen::DSizes<Eigen::DenseIndex, NDIMS + 1> slice_size;
170 template <int NDIMS>
175 return HandleElementToLargerSlice<T, NDIMS>(element, parent, index); \
197 #define HANDLE_DIMS(NDIMS) \
198 case NDIMS: { \
200 HandleElementToLargerSliceWithRank<NDIMS>(element, parent, index));
    [all...]
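
batch_util.cc copies one element tensor into a slice of a rank-(NDIMS + 1) parent and bridges from the runtime rank to the compile-time NDIMS parameter with a switch over HANDLE_DIMS cases. A hedged sketch of that dispatch idiom only (DispatchByRank and DoWork are illustrative names; the real cases call HandleElementToLargerSliceWithRank):

// Sketch of the switch-on-runtime-rank idiom behind HANDLE_DIMS: each case
// instantiates the templated worker with a compile-time NDIMS.
#include <iostream>
#include <stdexcept>

template <int NDIMS>
void DoWork() {
  // A real kernel would operate on tensor<T, NDIMS>() views here.
  std::cout << "running the rank-" << NDIMS << " instantiation\n";
}

void DispatchByRank(int ndims) {
#define HANDLE_DIMS(NDIMS) \
  case NDIMS:              \
    DoWork<NDIMS>();       \
    break;

  switch (ndims) {
    HANDLE_DIMS(1)
    HANDLE_DIMS(2)
    HANDLE_DIMS(3)
    HANDLE_DIMS(4)
    default:
      throw std::runtime_error("unsupported rank");
  }
#undef HANDLE_DIMS
}

int main() {
  DispatchByRank(3);  // picks the DoWork<3> instantiation at runtime
  return 0;
}
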
reverse_op.cc 129 template <typename Device, typename T, int NDIMS>
136 if (NDIMS == 3 && std::is_same<Device, CPUDevice>::value &&
145 typename Eigen::array<bool, NDIMS> axes_di;
146 for (int i = 0; i < NDIMS; i++) {
149 functor::Reverse<Device, T, NDIMS>()(context->eigen_device<Device>(),
150 input.tensor<T, NDIMS>(), axes_di,
151 result->tensor<T, NDIMS>());
185 #define HANDLE_REVERSE(NDIMS) \
186 case NDIMS: \
187 HandleReverseCase<Device, T, NDIMS>(context, dims.vec<bool>(), output);
    [all...]
sparse_tensor_dense_add_op.cc 86 const int ndims = static_cast<int>(a_indices_t->dim_size(1)); variable
90 switch (ndims) {
117 ndims));
124 template <typename T, typename Index, int NDIMS>
125 struct ScatterNdFunctor<CPUDevice, T, Index, NDIMS, scatter_op::UpdateOp::ADD> {
129 typename TTypes<T, NDIMS>::Tensor out) {
130 Eigen::array<Eigen::DenseIndex, NDIMS> idx;
133 for (int d = 0; d < NDIMS; ++d) {
cwise_ops_gpu_common.cu.h 49 template <typename Functor, int NDIMS, bool has_errors>
50 struct BinaryFunctor<GPUDevice, Functor, NDIMS, has_errors> {
79 typename TTypes<typename Functor::out_type, NDIMS>::Tensor out,
80 typename TTypes<typename Functor::in_type, NDIMS>::ConstTensor in0,
81 typename Eigen::array<Eigen::DenseIndex, NDIMS> bcast0,
82 typename TTypes<typename Functor::in_type, NDIMS>::ConstTensor in1,
83 typename Eigen::array<Eigen::DenseIndex, NDIMS> bcast1,
87 if ((NDIMS == 2) && Functor::use_bcast_optimization &&
89 const bool bcast0_all_one = AllOne<NDIMS>(bcast0);
90 const bool bcast1_all_one = AllOne<NDIMS>(bcast1)
    [all...]
cwise_ops_sycl_common.h 49 template <typename Functor, int NDIMS, bool has_errors>
50 struct BinaryFunctor<SYCLDevice, Functor, NDIMS, has_errors> {
82 typename TTypes<typename Functor::out_type, NDIMS>::Tensor out,
83 typename TTypes<typename Functor::in_type, NDIMS>::ConstTensor in0,
84 typename Eigen::array<Eigen::DenseIndex, NDIMS> bcast0,
85 typename TTypes<typename Functor::in_type, NDIMS>::ConstTensor in1,
86 typename Eigen::array<Eigen::DenseIndex, NDIMS> bcast1,
90 if ((NDIMS == 2) && Functor::use_bcast_optimization &&
92 const bool bcast0_all_one = AllOne<NDIMS>(bcast0);
93 const bool bcast1_all_one = AllOne<NDIMS>(bcast1)
    [all...]
transpose_functor.h 146 template <typename Device, typename T, int NDIMS>
150 Eigen::array<int, NDIMS> p;
151 for (int i = 0; i < NDIMS; ++i) p[i] = perm[i];
152 auto x = typename TTypes<T, NDIMS>::ConstTensor(
154 in.shape().AsEigenDSizes<NDIMS>());
155 auto y = typename TTypes<T, NDIMS>::Tensor(
157 out->shape().AsEigenDSizes<NDIMS>());
240 const int ndims = in.dims(); local
241 if (ndims == 0) return Status::OK();
242 TransposePermsVec perm(ndims);
    [all...]
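
transpose_functor.h maps the flat input and output buffers to rank-NDIMS Eigen tensors (via AsEigenDSizes<NDIMS>()) and copies the permutation into an Eigen::array<int, NDIMS>; the assignment itself, not visible in the snippet, is an Eigen shuffle. A standalone sketch of that permutation with plain Eigen tensors (assumed equivalent, not the functor's exact code):

// Sketch: permute a rank-NDIMS tensor with shuffle(), mirroring the
// perm -> Eigen::array<int, NDIMS> copy shown in transpose_functor.h.
#include <unsupported/Eigen/CXX11/Tensor>
#include <vector>
#include <iostream>

template <typename T, int NDIMS>
Eigen::Tensor<T, NDIMS, Eigen::RowMajor> TransposeSketch(
    const Eigen::Tensor<T, NDIMS, Eigen::RowMajor>& in,
    const std::vector<int>& perm) {
  Eigen::array<int, NDIMS> p;
  for (int i = 0; i < NDIMS; ++i) p[i] = perm[i];
  return in.shuffle(p);  // output dimension i is input dimension perm[i]
}

int main() {
  Eigen::Tensor<float, 3, Eigen::RowMajor> t(2, 3, 4);
  t.setZero();
  auto tt = TransposeSketch<float, 3>(t, {2, 0, 1});
  std::cout << tt.dimension(0) << " " << tt.dimension(1) << " "
            << tt.dimension(2) << "\n";  // prints 4 2 3
  return 0;
}
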
cwise_ops_common.h 69 int ndims; member in struct:tensorflow::BinaryOpShared::BinaryOpState
100 const int ndims = state.ndims; variable
104 if (ndims <= 1) {
121 } else if (ndims == 2) {
128 } else if (ndims == 3) {
135 } else if (ndims == 4) {
142 } else if (ndims == 5) {
278 // Partial specialization of BinaryFunctor<Device=CPUDevice, Functor, NDIMS>
280 template <typename Functor, int NDIMS>
    [all...]
padding_fifo_queue.cc 311 template <typename T, int NDIMS>
321 auto element_t = element.tensor<T, NDIMS>();
322 auto parent_t = parent->tensor<T, NDIMS + 1>();
323 Eigen::DSizes<Eigen::DenseIndex, NDIMS + 1> slice_indices;
325 Eigen::DSizes<Eigen::DenseIndex, NDIMS + 1> slice_size;
336 template <int NDIMS>
341 return HandleElementToLargerSlice<T, NDIMS>(element, parent, index); \
365 #define HANDLE_DIMS(NDIMS) \
366 case NDIMS: { \
368 HandleElementToLargerSliceWithRank<NDIMS>(element, parent, index));
    [all...]
relu_op.h 46 // than once for every NDIMS * NumTypes * Num_different_relu_variants
75 template <int NDIMS>
117 template <int NDIMS>
159 template <int NDIMS>
201 template <int NDIMS>
cwise_ops.h 951 template <typename Device, typename Functor, int NDIMS,
972 // TODO(zhifengc): makes BCast a template member function on NDIMS
973 // instead making BinaryFunctor templates on NDIMS.
975 typename TTypes<typename Functor::out_type, NDIMS>::Tensor out,
976 typename TTypes<typename Functor::in_type, NDIMS>::ConstTensor in0,
977 typename Eigen::array<Eigen::DenseIndex, NDIMS> bcast0,
978 typename TTypes<typename Functor::in_type, NDIMS>::ConstTensor in1,
979 typename Eigen::array<Eigen::DenseIndex, NDIMS> bcast1,
990 template <int NDIMS>
991 bool AllOne(const typename Eigen::array<Eigen::DenseIndex, NDIMS>& a)
    [all...]
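
The broadcast fast path in the GPU and SYCL BinaryFunctor specializations above hinges on AllOne<NDIMS>, declared here in cwise_ops.h: it reports whether a broadcast shape is all ones, i.e. whether an operand needs no broadcasting at all. A minimal sketch of such a predicate (the loop body is inferred from its use, not copied from the file):

// Sketch of an AllOne<NDIMS>-style predicate: true iff every broadcast
// factor is 1, meaning the operand needs no broadcasting.
#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

template <int NDIMS>
bool AllOneSketch(const Eigen::array<Eigen::DenseIndex, NDIMS>& a) {
  for (int i = 0; i < NDIMS; ++i) {
    if (a[i] != 1) return false;
  }
  return true;
}

int main() {
  Eigen::array<Eigen::DenseIndex, 2> no_bcast = {{1, 1}};
  Eigen::array<Eigen::DenseIndex, 2> bcast_cols = {{1, 8}};
  std::cout << AllOneSketch<2>(no_bcast) << " "      // 1: fast path applies
            << AllOneSketch<2>(bcast_cols) << "\n";  // 0: must broadcast
  return 0;
}
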
  /external/tensorflow/tensorflow/core/framework/
tensor.h 251 template <typename T, size_t NDIMS>
252 typename TTypes<T, NDIMS>::Tensor tensor();
259 template <typename T, size_t NDIMS>
260 typename TTypes<T, NDIMS>::Tensor bit_casted_tensor();
268 /// NDIMS should be 1 less than the original number of dimensions.
269 template <typename T, size_t NDIMS>
270 typename TTypes<T, NDIMS>::Tensor reinterpret_last_dimension();
311 /// Returns the data as an Eigen::Tensor with NDIMS dimensions, collapsing all
312 /// Tensor dimensions but the last NDIMS-1 into the first dimension of the
313 /// result. If NDIMS > dims() then leading dimensions of size 1 will b
    [all...]
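
The tensor.h matches are the NDIMS-templated accessors that expose a tensorflow::Tensor as a rank-NDIMS Eigen map. A short usage sketch, assuming a TensorFlow build: tensor<T, NDIMS>() is the accessor declared around line 252 in the snippet above, and flat_inner_dims<T, NDIMS>() is assumed to be the collapsing accessor described at lines 311-313.

// Usage sketch for the NDIMS-templated accessors declared above (requires
// building against TensorFlow).
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"

void AccessorSketch() {
  tensorflow::Tensor t(tensorflow::DT_FLOAT, tensorflow::TensorShape({2, 3, 4}));

  // The rank must match NDIMS exactly for tensor<T, NDIMS>().
  auto t3 = t.tensor<float, 3>();  // TTypes<float, 3>::Tensor, an Eigen map
  t3(0, 1, 2) = 1.0f;

  // Collapses all dimensions but the last NDIMS - 1 into the first
  // dimension of the result: the 2x3x4 tensor is viewed as 6x4.
  auto m = t.flat_inner_dims<float, 2>();
  float v = m(5, 3);
  (void)v;
}
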
tensor_slice.h 118 // We allow NDIMS to be greater than dims(), in which case we will pad the
120 template <int NDIMS>
123 Eigen::DSizes<Eigen::DenseIndex, NDIMS>* indices,
124 Eigen::DSizes<Eigen::DenseIndex, NDIMS>* sizes) const;
198 template <int NDIMS>
200 const TensorShape& shape, Eigen::DSizes<Eigen::DenseIndex, NDIMS>* indices,
201 Eigen::DSizes<Eigen::DenseIndex, NDIMS>* sizes) const {
205 CHECK_GE(NDIMS, dims()) << "Asking for a " << NDIMS << "-dim slice from "
216 for (int d = dims(); d < NDIMS; ++d)
    [all...]
tensor_types.h 24 template <typename T, int NDIMS = 1, typename IndexType = Eigen::DenseIndex>
26 // Rank-<NDIMS> tensor of scalar type T.
27 typedef Eigen::TensorMap<Eigen::Tensor<T, NDIMS, Eigen::RowMajor, IndexType>,
31 Eigen::Tensor<const T, NDIMS, Eigen::RowMajor, IndexType>, Eigen::Aligned>
34 // Unaligned Rank-<NDIMS> tensor of scalar type T.
35 typedef Eigen::TensorMap<Eigen::Tensor<T, NDIMS, Eigen::RowMajor, IndexType> >
38 Eigen::Tensor<const T, NDIMS, Eigen::RowMajor, IndexType> >
41 typedef Eigen::TensorMap<Eigen::Tensor<T, NDIMS, Eigen::RowMajor, int>,
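
tensor_types.h is where TTypes<T, NDIMS> is defined: every TTypes<T, NDIMS>::Tensor in the kernel matches above is an aligned, row-major Eigen::TensorMap over an existing buffer. A self-contained illustration with plain Eigen (buffer and shape are made up for the example):

// A TTypes<float, 2>::Tensor is, per tensor_types.h, an aligned row-major
// Eigen::TensorMap. This builds the equivalent map over a plain buffer.
#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  alignas(64) float buffer[6] = {0, 1, 2, 3, 4, 5};

  using MappedTensor =
      Eigen::TensorMap<Eigen::Tensor<float, 2, Eigen::RowMajor, Eigen::DenseIndex>,
                       Eigen::Aligned>;  // mirrors TTypes<float, 2>::Tensor

  MappedTensor t(buffer, 2, 3);   // view the 6 floats as a 2x3 tensor
  std::cout << t(1, 2) << "\n";   // row-major: prints 5
  return 0;
}
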
numeric_op.h 86 #define NDIM_CASE(NDIMS) \
87 case NDIMS: { \
88 static_cast<CHILD*>(this)->template Operate<NDIMS>(context, a, b, output); \
tensor_shape.h 297 template <int NDIMS>
298 Eigen::DSizes<Eigen::DenseIndex, NDIMS> AsEigenDSizes() const;
300 /// Same as `AsEigenDSizes()` but allows for `NDIMS > dims()` -- in
302 template <int NDIMS>
303 Eigen::DSizes<Eigen::DenseIndex, NDIMS> AsEigenDSizesWithPadding() const;
307 // REQUIRES: dims() == NDIMS
308 void CheckDimsEqual(int NDIMS) const;
309 // REQUIRES: dims() >= NDIMS
310 void CheckDimsAtLeast(int NDIMS) const;
455 template <int NDIMS>
    [all...]
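
tensor_shape.h converts a runtime TensorShape into a compile-time-rank Eigen::DSizes: AsEigenDSizes<NDIMS>() requires dims() == NDIMS (enforced by CheckDimsEqual), while AsEigenDSizesWithPadding<NDIMS>() allows NDIMS > dims(). A brief usage sketch, again assuming a TensorFlow build; that the extra entries are padded with 1 is assumed from the (truncated) header comment above.

// Usage sketch for the AsEigenDSizes family declared above.
#include "tensorflow/core/framework/tensor_shape.h"
#include <iostream>

void ShapeSketch() {
  tensorflow::TensorShape shape({2, 3});

  // Exact-rank conversion: a rank-2 DSizes holding {2, 3}.
  Eigen::DSizes<Eigen::DenseIndex, 2> exact = shape.AsEigenDSizes<2>();

  // Padded conversion to a higher rank; extra entries assumed to be 1.
  Eigen::DSizes<Eigen::DenseIndex, 4> padded =
      shape.AsEigenDSizesWithPadding<4>();

  std::cout << exact[0] << "x" << exact[1] << ", padded extras: "
            << padded[2] << ", " << padded[3] << "\n";
}
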
tensor_shape.cc 43 void TensorShape::CheckDimsEqual(int NDIMS) const {
44 CHECK_EQ(NDIMS, dims()) << "Asking for tensor of " << NDIMS << " dimensions"
48 void TensorShape::CheckDimsAtLeast(int NDIMS) const {
49 CHECK_GE(NDIMS, dims()) << "Asking for tensor of at least " << NDIMS
398 fprintf(stderr, "REP16 NDIMS: %d\n", ndims_byte());
403 fprintf(stderr, "REP32 NDIMS: %d\n", ndims_);
408 fprintf(stderr, "REP_OUT_OF_LINE NDIMS: %d %p\n", ndims_, as16()->dims_);
tensor_shape_test.cc 286 template <int NDIMS>
287 Eigen::DSizes<Eigen::DenseIndex, NDIMS> AsEigenDSizes() const;
289 /// Same as `AsEigenDSizes()` but allows for `NDIMS > dims()` -- in
291 template <int NDIMS>
292 Eigen::DSizes<Eigen::DenseIndex, NDIMS> AsEigenDSizesWithPadding() const;
  /external/libopus/celt/tests/
test_unit_cwrs32.c 56 #define NDIMS (44)
57 static const int pn[NDIMS]={
64 static const int pkmax[NDIMS]={
74 #define NDIMS (22)
75 static const int pn[NDIMS]={
80 static const int pkmax[NDIMS]={
92 for(t=0;t<NDIMS;t++){
  /external/tensorflow/tensorflow/core/util/
bcast.h 108 template <int NDIMS>
109 static Eigen::array<Eigen::DenseIndex, NDIMS> ToIndexArray(
111 CHECK_EQ(vec.size(), NDIMS);
112 Eigen::array<Eigen::DenseIndex, NDIMS> ret;
113 for (int i = 0; i < NDIMS; ++i) ret[i] = vec[i];
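
bcast.h's ToIndexArray is the glue between BCast's runtime shape vectors and the Eigen::array<Eigen::DenseIndex, NDIMS> arguments expected by the broadcast-aware kernels above; the snippet shows essentially the whole body. A standalone version of the same copy, substituting std::vector and assert for BCast::Vec and CHECK_EQ:

// Standalone version of the ToIndexArray pattern shown in bcast.h:
// copy a runtime-sized shape vector into a fixed-rank Eigen index array.
#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>
#include <vector>

template <int NDIMS>
Eigen::array<Eigen::DenseIndex, NDIMS> ToIndexArraySketch(
    const std::vector<Eigen::DenseIndex>& vec) {
  assert(static_cast<int>(vec.size()) == NDIMS);  // CHECK_EQ in the original
  Eigen::array<Eigen::DenseIndex, NDIMS> ret;
  for (int i = 0; i < NDIMS; ++i) ret[i] = vec[i];
  return ret;
}

int main() {
  // e.g. a broadcast shape produced by BCast for a rank-3 kernel.
  auto idx = ToIndexArraySketch<3>({1, 4, 1});
  return idx[1] == 4 ? 0 : 1;
}
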
  /external/tensorflow/tensorflow/contrib/fused_conv/kernels/
fused_conv2d_bias_activation_op.cc 262 template <typename T, size_t NDIMS>
271 functor::NHWCToNCHW<GPUDevice, T, NDIMS>()(
272 ctx->eigen_device<GPUDevice>(), nhwc_tensor.tensor<T, NDIMS>(),
273 transformed_tensor->tensor<T, NDIMS>());
