    Searched refs: DimIndex (Results 1 - 9 of 9)

  /external/tensorflow/tensorflow/core/kernels/
conv_ops_3d.cc 39 using stream_executor::dnn::DimIndex;
354 .set_spatial_dim(DimIndex::X, in_cols)
355 .set_spatial_dim(DimIndex::Y, in_rows)
356 .set_spatial_dim(DimIndex::Z, in_planes)
360 .set_spatial_dim(DimIndex::X, out_cols)
361 .set_spatial_dim(DimIndex::Y, out_rows)
362 .set_spatial_dim(DimIndex::Z, out_planes)
366 filter_desc.set_spatial_dim(DimIndex::X, filter_cols)
367 .set_spatial_dim(DimIndex::Y, filter_rows)
368 .set_spatial_dim(DimIndex::Z, filter_planes
    [all...]
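
conv_ops_3d.cc builds the cuDNN input, output, and filter descriptors by chaining set_spatial_dim calls, with DimIndex::X, ::Y, and ::Z standing for columns, rows, and planes. Below is a minimal standalone sketch of that chaining pattern; Descriptor3D is a hypothetical mock standing in for the real stream_executor descriptor classes.

#include <array>
#include <cstdint>
#include <iostream>

// Hypothetical stand-in for se::dnn::DimIndex (X = cols, Y = rows, Z = planes).
enum class DimIndex : int { X = 0, Y = 1, Z = 2 };

// Hypothetical mock of a stream_executor-style descriptor: the setter returns
// *this so calls can be chained the way conv_ops_3d.cc chains them.
class Descriptor3D {
 public:
  Descriptor3D& set_spatial_dim(DimIndex dim, int64_t value) {
    dims_[static_cast<int>(dim)] = value;
    return *this;
  }
  int64_t spatial_dim(DimIndex dim) const { return dims_[static_cast<int>(dim)]; }

 private:
  std::array<int64_t, 3> dims_{};
};

int main() {
  const int64_t in_planes = 8, in_rows = 32, in_cols = 32;  // illustrative sizes
  Descriptor3D input_desc;
  input_desc.set_spatial_dim(DimIndex::X, in_cols)
      .set_spatial_dim(DimIndex::Y, in_rows)
      .set_spatial_dim(DimIndex::Z, in_planes);
  std::cout << "planes=" << input_desc.spatial_dim(DimIndex::Z)
            << " rows=" << input_desc.spatial_dim(DimIndex::Y)
            << " cols=" << input_desc.spatial_dim(DimIndex::X) << "\n";
}

Returning *this from the setter is what allows the fluent chaining seen in the indexed lines above.
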
conv_grad_ops_3d.cc 44 using stream_executor::dnn::DimIndex;
    [all...]
cudnn_pooling_gpu.cc 82 const auto dim_i = static_cast<se::dnn::DimIndex>(i);
203 const auto dim_i = static_cast<se::dnn::DimIndex>(i);
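
cudnn_pooling_gpu.cc turns a loop counter directly into a DimIndex with static_cast, which is valid because the enum's underlying type is int. A small sketch of that pattern, with illustrative window sizes and the same hypothetical enum as above:

#include <cstdint>
#include <iostream>

// Hypothetical stand-in for se::dnn::DimIndex; the int underlying type is what
// makes the static_cast from a loop index legal.
enum class DimIndex : int { X = 0, Y = 1, Z = 2 };

int main() {
  const int64_t window[3] = {2, 2, 2};  // illustrative pooling window sizes
  for (int i = 0; i < 3; ++i) {
    // Same pattern as the indexed lines: turn the loop index into a DimIndex.
    const auto dim_i = static_cast<DimIndex>(i);
    std::cout << "dim " << static_cast<int>(dim_i)
              << " window=" << window[i] << "\n";
  }
}
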
  /external/tensorflow/tensorflow/stream_executor/
dnn.h 54 enum class DimIndex : int {
61 inline int64 GetDim(absl::Span<const int64> data, DimIndex dim) {
65 inline void SetDim(absl::Span<int64> data, DimIndex dim, int64 value) {
69 inline void SetDim(std::vector<int64>* data, DimIndex dim, int64 value) {
260 int64 height() const { return GetDim(spatial_size(), DimIndex::Y); }
261 int64 width() const { return GetDim(spatial_size(), DimIndex::X); }
262 int64 spatial_dim(DimIndex dim) const { return GetDim(spatial_size(), dim); }
288 SetDim(spatial_size(), DimIndex::Y, value);
292 SetDim(spatial_size(), DimIndex::X, value);
295 BatchDescriptor& set_spatial_dim(DimIndex dim, int64 value)
    [all...]
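
dnn.h is where DimIndex is defined, together with the GetDim/SetDim helpers used by the BatchDescriptor accessors above. The following is a rough standalone reconstruction, using std::vector in place of absl::Span and int64_t in place of int64; it assumes the helpers index the spatial-size array from the back, which is consistent with height() reading DimIndex::Y and width() reading DimIndex::X in the indexed lines.

#include <cstdint>
#include <iostream>
#include <vector>

enum class DimIndex : int { X = 0, Y = 1, Z = 2 };

// The helpers index the dimension array from the back, so DimIndex::X always
// names the innermost (fastest-varying) spatial dimension regardless of rank.
inline int64_t GetDim(const std::vector<int64_t>& data, DimIndex dim) {
  return data.rbegin()[static_cast<int64_t>(dim)];
}

inline void SetDim(std::vector<int64_t>* data, DimIndex dim, int64_t value) {
  data->rbegin()[static_cast<int64_t>(dim)] = value;
}

int main() {
  std::vector<int64_t> spatial_size(3, 0);  // stored as {planes, rows, cols}
  SetDim(&spatial_size, DimIndex::Z, 8);    // planes
  SetDim(&spatial_size, DimIndex::Y, 32);   // rows (height() in dnn.h)
  SetDim(&spatial_size, DimIndex::X, 64);   // cols (width() in dnn.h)
  std::cout << spatial_size[0] << " " << spatial_size[1] << " "
            << spatial_size[2] << "\n";     // prints: 8 32 64
}
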
  /external/tensorflow/tensorflow/compiler/xla/service/gpu/
cudnn_conv_runner.cc 37 using se::dnn::DimIndex;
189 static_cast<DimIndex>(effective_num_dimensions - dim - 1),
201 static_cast<DimIndex>(effective_num_dimensions - dim - 1),
211 static_cast<DimIndex>(effective_num_dimensions - dim - 1),
214 static_cast<DimIndex>(effective_num_dimensions - dim - 1),
225 static_cast<DimIndex>(effective_num_dimensions - dim - 1),
231 input_descriptor.set_spatial_dim(static_cast<DimIndex>(0), 1);
232 output_descriptor.set_spatial_dim(static_cast<DimIndex>(0), 1);
233 filter_descriptor.set_spatial_dim(static_cast<DimIndex>(0), 1);
234 convolution_descriptor.set_zero_padding(static_cast<DimIndex>(0), 0
    [all...]
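
cudnn_conv_runner.cc walks XLA's spatial dimensions outermost-first and reverses the index with static_cast<DimIndex>(effective_num_dimensions - dim - 1), so the innermost spatial dimension lands on DimIndex::X; the snippet's lines 231-234 additionally fill any extra (degenerate) spatial dimension with size 1 and zero padding. A short sketch of the reversal alone, with illustrative sizes:

#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical stand-in for se::dnn::DimIndex.
enum class DimIndex : int { X = 0, Y = 1, Z = 2 };

int main() {
  // Spatial sizes listed outermost-first, the order XLA walks them in
  // cudnn_conv_runner.cc (values here are illustrative only).
  const std::vector<int64_t> spatial = {8, 32, 64};
  const int64_t effective_num_dimensions = static_cast<int64_t>(spatial.size());

  for (int64_t dim = 0; dim < effective_num_dimensions; ++dim) {
    // Same reversal as the indexed lines: outermost dim -> highest DimIndex,
    // innermost dim -> DimIndex::X.
    const auto idx = static_cast<DimIndex>(effective_num_dimensions - dim - 1);
    std::cout << "spatial[" << dim << "]=" << spatial[dim]
              << " -> DimIndex " << static_cast<int>(idx) << "\n";
  }
}
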
  /external/eigen/unsupported/Eigen/CXX11/src/Tensor/
TensorReduction.h 129 template <int DimIndex, typename Self, typename Op>
132 EIGEN_STATIC_ASSERT((DimIndex > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
133 for (int j = 0; j < self.m_reducedDims[DimIndex]; ++j) {
134 const typename Self::Index input = firstIndex + j * self.m_reducedStrides[DimIndex];
135 GenericDimReducer<DimIndex-1, Self, Op>::reduce(self, input, reducer, accum);
183 template <int DimIndex, typename Self, typename Op, bool vectorizable = (Self::InputPacketAccess & Op::PacketAccess)>
190 template <int DimIndex, typename Self, typename Op>
191 struct InnerMostDimPreserver<DimIndex, Self, Op, true> {
193 EIGEN_STATIC_ASSERT((DimIndex > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
194 for (typename Self::Index j = 0; j < self.m_reducedDims[DimIndex]; ++j)
    [all...]
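
In Eigen's TensorReduction.h, DimIndex is unrelated to the stream_executor enum: it is a compile-time template parameter, and GenericDimReducer recurses from the outermost reduced dimension down to 0, looping over m_reducedDims[DimIndex] at each level. A self-contained sketch of the same recursion over a flat buffer, with hypothetical names (ReduceState, DimReducer) standing in for the Eigen machinery:

#include <array>
#include <iostream>

// Minimal stand-in for the evaluator state ("Self") that GenericDimReducer
// walks: per-dimension extents and strides of the reduced dimensions.
struct ReduceState {
  std::array<int, 2> reduced_dims;     // extent of each reduced dimension
  std::array<int, 2> reduced_strides;  // stride of each reduced dimension
  const float* data;                   // flat input buffer
};

// Compile-time recursion over DimIndex, mirroring GenericDimReducer: each
// level loops over one reduced dimension and recurses into the next inner one.
template <int DimIndex>
struct DimReducer {
  static void Reduce(const ReduceState& self, int first_index, float* accum) {
    static_assert(DimIndex > 0, "recursion must terminate at DimIndex == 0");
    for (int j = 0; j < self.reduced_dims[DimIndex]; ++j) {
      const int input = first_index + j * self.reduced_strides[DimIndex];
      DimReducer<DimIndex - 1>::Reduce(self, input, accum);
    }
  }
};

// Base case: accumulate along the innermost reduced dimension.
template <>
struct DimReducer<0> {
  static void Reduce(const ReduceState& self, int first_index, float* accum) {
    for (int j = 0; j < self.reduced_dims[0]; ++j) {
      *accum += self.data[first_index + j * self.reduced_strides[0]];
    }
  }
};

int main() {
  // Sum a 2x3 row-major buffer by treating both dimensions as reduced.
  const float buf[] = {1, 2, 3, 4, 5, 6};
  ReduceState self{{3, 2}, {1, 3}, buf};  // dim 0: extent 3, stride 1; dim 1: extent 2, stride 3
  float accum = 0.f;
  DimReducer<1>::Reduce(self, /*first_index=*/0, &accum);
  std::cout << accum << "\n";  // prints 21
}

Summing here stands in for the generic reducer; the real code threads an arbitrary Op through the recursion and has a vectorized InnerMostDimPreserver specialization alongside it.
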
TensorConvolution.h 490 EIGEN_DEVICE_FUNC void convolve(Index firstIndex, Index firstKernel, int DimIndex, CoeffReturnType& accum) const {
491 for (int j = 0; j < m_kernelImpl.dimensions()[DimIndex]; ++j) {
492 const Index input = firstIndex + j * m_indexStride[DimIndex];
493 const Index kernel = firstKernel + j * m_kernelStride[DimIndex];
494 if (DimIndex > 0) {
495 convolve(input, kernel, DimIndex-1, accum);
503 EIGEN_DEVICE_FUNC void convolvePacket(Index firstIndex, Index firstKernel, int DimIndex, Packet& accum) const {
504 for (int j = 0; j < m_kernelImpl.dimensions()[DimIndex]; ++j) {
505 const Index input = firstIndex + j * m_indexStride[DimIndex];
506 const Index kernel = firstKernel + j * m_kernelStride[DimIndex];
    [all...]
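
TensorConvolution.h uses the same countdown idea with DimIndex as a runtime argument: convolve() loops over the kernel extent of the current dimension and recurses with DimIndex-1 until the innermost dimension, where it accumulates input-times-kernel products (convolvePacket is the vectorized variant). A self-contained sketch of that runtime recursion, with hypothetical names in place of the Eigen evaluator:

#include <iostream>
#include <vector>

// Minimal runtime-recursive convolution accumulator in the style of
// TensorEvaluator::convolve(): DimIndex is a plain int argument that counts
// down to the innermost dimension.
struct ConvState {
  std::vector<int> kernel_dims;    // kernel extent per dimension
  std::vector<int> index_stride;   // input stride per dimension
  std::vector<int> kernel_stride;  // kernel stride per dimension
  const float* input;
  const float* kernel;

  void convolve(int first_index, int first_kernel, int DimIndex,
                float& accum) const {
    for (int j = 0; j < kernel_dims[DimIndex]; ++j) {
      const int input_idx = first_index + j * index_stride[DimIndex];
      const int kernel_idx = first_kernel + j * kernel_stride[DimIndex];
      if (DimIndex > 0) {
        convolve(input_idx, kernel_idx, DimIndex - 1, accum);
      } else {
        accum += input[input_idx] * kernel[kernel_idx];
      }
    }
  }
};

int main() {
  // 1-D example: input of length 5, kernel of length 3, one output position.
  const float input[] = {1, 2, 3, 4, 5};
  const float kernel[] = {1, 0, -1};
  ConvState state{{3}, {1}, {1}, input, kernel};
  float accum = 0.f;
  state.convolve(/*firstIndex=*/0, /*firstKernel=*/0, /*DimIndex=*/0, accum);
  std::cout << accum << "\n";  // 1*1 + 2*0 + 3*(-1) = -2
}
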
  /external/tensorflow/tensorflow/stream_executor/cuda/
cuda_dnn.cc     [all...]
  /external/tensorflow/tensorflow/stream_executor/rocm/
rocm_dnn.cc     [all...]

Completed in 182 milliseconds