Home | History | Annotate | Download | only in kernel_tests
      1 # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
      2 #
      3 # Licensed under the Apache License, Version 2.0 (the "License");
      4 # you may not use this file except in compliance with the License.
      5 # You may obtain a copy of the License at
      6 #
      7 #     http://www.apache.org/licenses/LICENSE-2.0
      8 #
      9 # Unless required by applicable law or agreed to in writing, software
     10 # distributed under the License is distributed on an "AS IS" BASIS,
     11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     12 # See the License for the specific language governing permissions and
     13 # limitations under the License.
     14 # ==============================================================================
     15 """Tests for convolution related functionality in tensorflow.ops.nn."""
     16 
     17 from __future__ import absolute_import
     18 from __future__ import division
     19 from __future__ import print_function
     20 
     21 import numpy as np
     22 
     23 from tensorflow.python.framework import constant_op
     24 from tensorflow.python.framework import dtypes
     25 from tensorflow.python.ops import array_ops
     26 from tensorflow.python.ops import gradient_checker
     27 from tensorflow.python.ops import nn_ops
     28 import tensorflow.python.ops.nn_grad  # pylint: disable=unused-import
     29 from tensorflow.python.platform import test
     30 
     31 
     32 class Conv2DBackpropFilterGradTest(test.TestCase):
     33 
     34   def testGradient(self):
     35     with self.test_session():
     36       for padding in ["SAME", "VALID"]:
     37         for stride in [1, 2]:
     38           np.random.seed(1)
     39           in_shape = [5, 8, 6, 4]
     40           in_val = constant_op.constant(
     41               2 * np.random.random_sample(in_shape) - 1, dtype=dtypes.float32)
     42           filter_shape = [3, 3, 4, 6]
     43           # Make a convolution op with the current settings, just to easily get
     44           # the shape of the output.
     45           conv_out = nn_ops.conv2d(
     46               in_val,
     47               array_ops.zeros(filter_shape),
     48               strides=[1, stride, stride, 1],
     49               padding=padding)
     50           out_backprop_shape = conv_out.get_shape().as_list()
     51           out_backprop_val = constant_op.constant(
     52               2 * np.random.random_sample(out_backprop_shape) - 1,
     53               dtype=dtypes.float32)
     54           output = nn_ops.conv2d_backprop_filter(
     55               in_val,
     56               filter_shape,
     57               out_backprop_val,
     58               strides=[1, stride, stride, 1],
     59               padding=padding)
     60           err = gradient_checker.compute_gradient_error(
     61               [in_val, out_backprop_val], [in_shape, out_backprop_shape],
     62               output, filter_shape)
     63           print("conv2d_backprop_filter gradient err = %g " % err)
     64           err_tolerance = 2e-3
     65           self.assertLess(err, err_tolerance)
     66 
     67   def testGradientDilatedConv(self):
     68     if test.is_gpu_available(cuda_only=True):
     69       with self.test_session(use_gpu=True):
     70         for padding in ["SAME", "VALID"]:
     71           for stride in [1, 2]:
     72             np.random.seed(1)
     73             in_shape = [5, 8, 6, 4]
     74             in_val = constant_op.constant(
     75                 2 * np.random.random_sample(in_shape) - 1, dtype=dtypes.float32)
     76             filter_shape = [3, 3, 4, 6]
     77             # Make a convolution op with the current settings,
     78             # just to easily get the shape of the output.
     79             conv_out = nn_ops.conv2d(
     80                 in_val,
     81                 array_ops.zeros(filter_shape),
     82                 dilations=[1, 2, 2, 1],
     83                 strides=[1, stride, stride, 1],
     84                 padding=padding)
     85             out_backprop_shape = conv_out.get_shape().as_list()
     86             out_backprop_val = constant_op.constant(
     87                 2 * np.random.random_sample(out_backprop_shape) - 1,
     88                 dtype=dtypes.float32)
     89             output = nn_ops.conv2d_backprop_filter(
     90                 in_val,
     91                 filter_shape,
     92                 out_backprop_val,
     93                 dilations=[1, 2, 2, 1],
     94                 strides=[1, stride, stride, 1],
     95                 padding=padding)
     96             err = gradient_checker.compute_gradient_error(
     97                 [in_val, out_backprop_val], [in_shape, out_backprop_shape],
     98                 output, filter_shape)
     99             print("conv2d_backprop_filter gradient err = %g " % err)
    100             err_tolerance = 2e-3
    101             self.assertLess(err, err_tolerance)
    102 
    103 
if __name__ == "__main__":
  # Discover and run all test cases in this module via the TF test runner.
  test.main()
    106