# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolution-related functionality in tensorflow.ops.nn."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad  # pylint: disable=unused-import
from tensorflow.python.platform import test


def _upsample_filters(filters, rate):
  """Upsamples the filters by a factor of rate along the spatial dimensions.

  Args:
    filters: [h, w, in_depth, out_depth]. Original filters.
    rate: An int, specifying the upsampling rate.

  Returns:
    filters_up: [h_up, w_up, in_depth, out_depth]. Upsampled filters with
      h_up = h + (h - 1) * (rate - 1)
      w_up = w + (w - 1) * (rate - 1)
      containing (rate - 1) zeros between consecutive filter values along
      the filters' spatial dimensions.
  """
  if rate == 1:
    return filters
  # [h, w, in_depth, out_depth] -> [in_depth, out_depth, h, w]
  filters_up = np.transpose(filters, [2, 3, 0, 1])
  ker = np.zeros([rate, rate], dtype=np.float32)
  ker[0, 0] = 1
  filters_up = np.kron(filters_up, ker)[:, :, :-(rate - 1), :-(rate - 1)]
  # [in_depth, out_depth, h_up, w_up] -> [h_up, w_up, in_depth, out_depth]
  filters_up = np.transpose(filters_up, [2, 3, 0, 1])
  return filters_up
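
# For example, a 2x2 single-channel filter upsampled with rate=2 becomes 3x3,
# with one zero inserted between consecutive values along each spatial axis:
#
#   f = np.arange(4, dtype=np.float32).reshape([2, 2, 1, 1])
#   _upsample_filters(f, 2)[:, :, 0, 0]
#   # => [[0., 0., 1.],
#   #     [0., 0., 0.],
#   #     [2., 0., 3.]]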


class AtrousConv2DTest(test.TestCase):

  def testAtrousConv2DForward(self):
    with self.test_session(use_gpu=True):
      # Input: [batch, height, width, input_depth]
      height = 9
      for width in [9, 10]:  # Test both odd and even width.
        x_shape = [2, height, width, 2]
        x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape)

        # Filter: [kernel_height, kernel_width, input_depth, output_depth]
        for kernel_height in range(1, 4):
          for kernel_width in range(1, 4):
            f_shape = [kernel_height, kernel_width, 2, 2]
            f = np.arange(np.prod(f_shape), dtype=np.float32).reshape(f_shape)

            for rate in range(1, 4):
              f_up = _upsample_filters(f, rate)

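              # atrous_conv2d with rate r should match a plain stride-1 conv2d
              # applied with the zero-upsampled filters.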
              for padding in ["SAME", "VALID"]:
                y1 = nn_ops.atrous_conv2d(x, f, rate, padding=padding)
                y2 = nn_ops.conv2d(
                    x, f_up, strides=[1, 1, 1, 1], padding=padding)
                self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-3, atol=1e-3)

  def testAtrousSequence(self):
    """Tests optimization of a sequence of atrous convolutions.

    Verifies that a sequence of `atrous_conv2d` operations with identical `rate`
    parameters, 'SAME' `padding`, and `filters` with odd heights/widths:

        net = atrous_conv2d(net, filters1, rate, padding="SAME")
        net = atrous_conv2d(net, filters2, rate, padding="SAME")
        ...
        net = atrous_conv2d(net, filtersK, rate, padding="SAME")

    is equivalent to:

        pad = ...  # padding so that the input dims are multiples of rate
        net = space_to_batch(net, paddings=pad, block_size=rate)
        net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME")
        net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME")
        ...
        net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding="SAME")
        net = batch_to_space(net, crops=pad, block_size=rate)
    """
    padding = "SAME"  # The padding needs to be "SAME"
    np.random.seed(1)  # Make it reproducible.

    with self.test_session(use_gpu=True):
      # Input: [batch, height, width, input_depth]
      for height in range(15, 17):
        for width in range(15, 17):
          x_shape = [3, height, width, 2]
          x = np.random.random_sample(x_shape).astype(np.float32)

          for kernel in [1, 3, 5]:  # The kernel size needs to be odd.
            # Filter: [kernel_height, kernel_width, input_depth, output_depth]
            f_shape = [kernel, kernel, 2, 2]
            f = 1e-2 * np.random.random_sample(f_shape).astype(np.float32)

            for rate in range(2, 4):
              # y1: three atrous_conv2d in a row.
              y1 = nn_ops.atrous_conv2d(x, f, rate, padding=padding)
              y1 = nn_ops.atrous_conv2d(y1, f, rate, padding=padding)
              y1 = nn_ops.atrous_conv2d(y1, f, rate, padding=padding)
              # y2: space_to_batch, three conv2d in a row, batch_to_space
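              # Pad so the spatial dims become multiples of rate; e.g. for
              # height=15 and rate=2, pad_bottom=1 so the padded height is 16.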
              pad_bottom = 0 if height % rate == 0 else rate - height % rate
              pad_right = 0 if width % rate == 0 else rate - width % rate
              pad = [[0, pad_bottom], [0, pad_right]]
              y2 = array_ops.space_to_batch(x, paddings=pad, block_size=rate)
              y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
              y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
              y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
              y2 = array_ops.batch_to_space(y2, crops=pad, block_size=rate)
              self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-2, atol=1e-2)

  def testGradient(self):
    with self.test_session(use_gpu=True):
      # Input: [batch, height, width, input_depth]
      x_shape = [2, 5, 6, 2]
      # Filter: [kernel_height, kernel_width, input_depth, output_depth]
      f_shape = [3, 3, 2, 2]
      # Output: [batch, height, width, output_depth]
      y_shape = [2, 5, 6, 2]

      np.random.seed(1)  # Make it reproducible.
      x_val = np.random.random_sample(x_shape).astype(np.float32)
      f_val = np.random.random_sample(f_shape).astype(np.float32)
      x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
      f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)

      for rate in range(1, 4):
        output = nn_ops.atrous_conv2d(x, f, rate=rate, padding="SAME")
        err = gradient_checker.compute_gradient_error([x, f],
                                                      [x_shape, f_shape],
                                                      output, y_shape)
        print("atrous_conv2d gradient err = %g " % err)
        err_tolerance = 1e-3
        self.assertLess(err, err_tolerance)


class AtrousConv2DTransposeTest(test.TestCase):

  def testAtrousConv2DTransposeForward(self):
    with self.test_session(use_gpu=True):
      # Input: [batch, height, width, input_depth]
      height = 9
      for width in [9, 10]:  # Test both odd and even width.
        x_shape = [2, height, width, 2]
        x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape)

        # Filter: [kernel_height, kernel_width, input_depth, output_depth]
        for kernel_height in range(1, 4):
          for kernel_width in range(1, 4):
            f_shape = [kernel_height, kernel_width, 2, 2]
            f = np.arange(np.prod(f_shape), dtype=np.float32).reshape(f_shape)

            for rate in range(1, 4):
              f_up = _upsample_filters(f, rate)
              kernel_height_up = (kernel_height + (kernel_height - 1) *
                                  (rate - 1))
              kernel_width_up = kernel_width + (kernel_width - 1) * (rate - 1)

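              # With stride 1, conv2d_transpose keeps the input size under
              # "SAME" padding and grows it by (effective kernel size - 1)
              # along each spatial dimension under "VALID" padding.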
              for padding in ["SAME", "VALID"]:
                if padding == "SAME":
                  y_shape = [2, height, width, 2]
                else:
                  y_shape = [
                      2, height + kernel_height_up - 1,
                      width + kernel_width_up - 1, 2
                  ]

                y1 = nn_ops.atrous_conv2d_transpose(x, f, y_shape, rate,
                                                    padding)
                y2 = nn_ops.conv2d_transpose(
                    x, f_up, y_shape, strides=[1, 1, 1, 1], padding=padding)
                self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-3, atol=1e-3)


class AtrousDepthwiseConv2DTest(test.TestCase):

  def testAtrousDepthwiseConv2DForward(self):
    strides = [1, 1, 1, 1]
    with self.test_session(use_gpu=True):
      # Input: [batch, height, width, input_depth]
      height = 9
      for width in [9, 10]:  # Test both odd and even width.
        x_shape = [2, height, width, 2]
        x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape)

        # Filter: [kernel_height, kernel_width, in_channels, channel_multiplier]
        for kernel_height in range(1, 4):
          for kernel_width in range(1, 4):
            f_shape = [kernel_height, kernel_width, 2, 2]
            f = np.arange(np.prod(f_shape), dtype=np.float32).reshape(f_shape)

            for rate in range(1, 4):
              f_up = _upsample_filters(f, rate)

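              # depthwise_conv2d with a spatial rate should match a plain
              # depthwise_conv2d applied with the zero-upsampled filters.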
              for padding in ["SAME", "VALID"]:
                y1 = nn_impl.depthwise_conv2d(
                    x, f, strides, padding, rate=[rate, rate])
                y2 = nn_impl.depthwise_conv2d(x, f_up, strides, padding)
                self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-3, atol=1e-3)


if __name__ == "__main__":
  test.main()