/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// See docs in ../ops/nn_ops.cc.

#define USE_EIGEN_TENSOR
#define EIGEN_USE_THREADS

#include "tensorflow/core/kernels/conv_grad_ops.h"

#include <algorithm>
#include <array>
#include <vector>

#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_slice.h"
#include "tensorflow/core/kernels/conv_2d.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/padding.h"
#include "tensorflow/core/util/tensor_format.h"
#include "tensorflow/core/util/use_cudnn.h"

namespace tensorflow {

// The V2 version computes the windowed output size with an arbitrary
// dilation rate, while the original version only handles the case where all
// dilation rates are equal to 1.
Status ConvBackpropExtractAndVerifyDimensionV2(
    StringPiece label, const TensorShape& input_shape,
    const TensorShape& filter_shape, const TensorShape& output_shape,
    const gtl::ArraySlice<int32>& dilations, const std::vector<int32>& strides,
    Padding padding, int spatial_dim, int filter_spatial_dim,
    ConvBackpropSpatialDimension* dim) {
  dim->input_size = input_shape.dim_size(spatial_dim);
  dim->filter_size = filter_shape.dim_size(filter_spatial_dim);
  dim->output_size = output_shape.dim_size(spatial_dim);
  dim->stride = strides[spatial_dim];
  dim->dilation = dilations[spatial_dim];
  int64 out_size = 0, pad_size = 0;
  TF_RETURN_IF_ERROR(GetWindowedOutputSizeV2(dim->input_size, dim->filter_size,
                                             dim->dilation, dim->stride,
                                             padding, &out_size, &pad_size));
  if (dim->output_size != out_size) {
    return errors::InvalidArgument(
        label, ": Size of out_backprop doesn't match computed: ", "actual = ",
        dim->output_size, ", computed = ", out_size,
        " spatial_dim: ", spatial_dim, " input: ", dim->input_size,
        " filter: ", dim->filter_size, " output: ", dim->output_size,
        " stride: ", dim->stride, " dilation: ", dim->dilation);
  }

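  // Worked example of the padding arithmetic below (illustrative values, not
  // taken from any particular caller): input_size = 5, filter_size = 3,
  // dilation = 2, stride = 1 with VALID padding gives out_size = 1 and
  // pad_size = 0. Then effective_filter_size = 5, expanded_output_size = 1,
  // padded_out_size = 9, pad_before = 4 and pad_after = 4; i.e. the backprop
  // is a VALID convolution of the padded, stride-expanded output (width 9)
  // with the effective filter (width 5), which recovers the input_size of 5.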
  int64 effective_filter_size = (dim->filter_size - 1) * dim->dilation + 1;
  dim->expanded_output_size = (dim->output_size - 1) * dim->stride + 1;
  const auto padded_out_size = dim->input_size + effective_filter_size - 1;
  dim->pad_before = effective_filter_size - 1 - pad_size;
  dim->pad_after =
      padded_out_size - dim->expanded_output_size - dim->pad_before;
  VLOG(2) << label << ": expanded_out = " << dim->expanded_output_size
          << ", effective_filter_size = " << effective_filter_size
          << ", padded_out = " << padded_out_size
          << ", pad_before = " << dim->pad_before
          << ", pad_after = " << dim->pad_after
          << ", dilation = " << dim->dilation << ", strides = " << dim->stride;
  return Status::OK();
}

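// Non-V2 variant of ConvBackpropExtractAndVerifyDimensionV2: forwards to the
// V2 version with every dilation rate fixed to 1, so it only supports
// undilated convolutions.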
Status ConvBackpropExtractAndVerifyDimension(
    StringPiece label, const TensorShape& input_shape,
    const TensorShape& filter_shape, const TensorShape& output_shape,
    const std::vector<int32>& strides, Padding padding, int spatial_dim,
    int filter_spatial_dim, ConvBackpropSpatialDimension* dim) {
  static constexpr std::array<int32, 5> one_dilations = {{1, 1, 1, 1, 1}};
  return ConvBackpropExtractAndVerifyDimensionV2(
      label, input_shape, filter_shape, output_shape, one_dilations, strides,
      padding, spatial_dim, filter_spatial_dim, dim);
}

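// Extracts the batch, feature and per-spatial-dimension sizes from the input,
// filter and out_backprop shapes, validates them against each other, and
// fills in `dims`. Supports arbitrary dilation rates.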
Status ConvBackpropComputeDimensionsV2(
    StringPiece label, int num_spatial_dims, const TensorShape& input_shape,
    const TensorShape& filter_shape, const TensorShape& out_backprop_shape,
    const gtl::ArraySlice<int32>& dilations, const std::vector<int32>& strides,
    Padding padding, TensorFormat data_format, ConvBackpropDimensions* dims) {
  // The + 2 in the following line is for the batch and feature dimensions.
  const int num_dims = num_spatial_dims + 2;
  if (input_shape.dims() != num_dims) {
    return errors::InvalidArgument(label, ": input must be ", num_dims,
                                   "-dimensional");
  }
  if (filter_shape.dims() != num_dims) {
    return errors::InvalidArgument(label, ": filter must be ", num_dims,
                                   "-dimensional");
  }
  if (out_backprop_shape.dims() != num_dims) {
    return errors::InvalidArgument(label, ": out_backprop must be ", num_dims,
                                   "-dimensional");
  }
  int batch_dim = GetTensorBatchDimIndex(num_dims, data_format);
  dims->batch_size = input_shape.dim_size(batch_dim);
  if (dims->batch_size != out_backprop_shape.dim_size(batch_dim)) {
    return errors::InvalidArgument(
        label, ": input and out_backprop must have the same batch size",
        ", input batch: ", dims->batch_size,
        ", out_backprop batch: ", out_backprop_shape.dim_size(batch_dim),
        ", batch_dim: ", batch_dim);
  }

  int feature_dim = GetTensorFeatureDimIndex(num_dims, data_format);
  dims->in_depth = input_shape.dim_size(feature_dim);
  // The input and output feature dimensions are, respectively, the
  // second-to-last and last dimensions of the filter Tensor.
  if (dims->in_depth != filter_shape.dim_size(num_dims - 2)) {
    return errors::InvalidArgument(
        label, ": input and filter must have the same depth");
  }
  dims->out_depth = filter_shape.dim_size(num_dims - 1);
  if (dims->out_depth != out_backprop_shape.dim_size(feature_dim)) {
    return errors::InvalidArgument(
        label, ": filter and out_backprop must have the same out_depth");
  }

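  // Extract and verify each spatial dimension, using the stride and dilation
  // that correspond to that dimension under the given data_format.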
  dims->spatial_dims.resize(num_spatial_dims);
  for (int i = 0; i < num_spatial_dims; ++i) {
    int image_dim = GetTensorSpatialDimIndex(num_dims, data_format, i);
    TF_RETURN_IF_ERROR(ConvBackpropExtractAndVerifyDimensionV2(
        label, input_shape, filter_shape, out_backprop_shape, dilations,
        strides, padding, image_dim, i, &dims->spatial_dims[i]));
  }
  return Status::OK();
}

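// Non-V2 variant of ConvBackpropComputeDimensionsV2: forwards to the V2
// version with every dilation rate fixed to 1.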
Status ConvBackpropComputeDimensions(StringPiece label, int num_spatial_dims,
                                     const TensorShape& input_shape,
                                     const TensorShape& filter_shape,
                                     const TensorShape& out_backprop_shape,
                                     const std::vector<int32>& strides,
                                     Padding padding, TensorFormat data_format,
                                     ConvBackpropDimensions* dims) {
  static constexpr std::array<int32, 5> one_dilations = {{1, 1, 1, 1, 1}};
  return ConvBackpropComputeDimensionsV2(
      label, num_spatial_dims, input_shape, filter_shape, out_backprop_shape,
      one_dilations, strides, padding, data_format, dims);
}

}  // namespace tensorflow