# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for BiasAdd."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad  # pylint: disable=unused-import
from tensorflow.python.platform import test


def GetTestConfigs():
  """Get all the valid test configs to run.

  Returns:
    All the valid test configs as tuples of data_format and use_gpu.
  """
  test_configs = [("NHWC", False), ("NHWC", True)]
  if test.is_gpu_available(cuda_only=True):
    # "NCHW" format is currently only supported on CUDA.
    test_configs += [("NCHW", True)]
  return test_configs
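
# Layout reference (a sketch for readers, not used by the tests): "NHWC"
# stores a 4-D tensor as [batch, height, width, channels] and "NCHW" as
# [batch, channels, height, width], so the same element satisfies
#   nhwc[b, h, w, c] == nchw[b, c, h, w]
# BiasAdd always adds the bias along the channel dimension, wherever the
# chosen data_format places it.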


class BiasAddTest(test.TestCase):

  def _npBias(self, inputs, bias):
    """Reference implementation: broadcast-add bias along the last axis."""
    assert len(bias.shape) == 1
    assert inputs.shape[-1] == bias.shape[0]
    return inputs + bias.reshape(([1] * (len(inputs.shape) - 1)) +
                                 [bias.shape[0]])
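
  # _npBias example (a sketch): with inputs of shape (2, 3, 4) and bias of
  # shape (4,), the bias is reshaped to (1, 1, 4) and NumPy broadcasting then
  # adds it along the last (channel) dimension, mirroring bias_add with NHWC.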

  def testNpBias(self):
    self.assertAllClose(
        np.array([[11, 22, 33], [41, 52, 63]]),
        self._npBias(
            np.array([[10, 20, 30], [40, 50, 60]]), np.array([1, 2, 3])))

  def _testBias(self, np_inputs, np_bias, use_gpu=False):
    np_val = self._npBias(np_inputs, np_bias)
    with self.test_session(use_gpu=use_gpu):
      tf_val = nn_ops.bias_add(np_inputs, np_bias).eval()
    self.assertAllCloseAccordingToType(np_val, tf_val)

  def _AtLeast3d(self, np_value):
    # Pad the shape with leading 1s so the value has at least 3 dimensions.
    if np_value.ndim < 3:
      return np.reshape(np_value, (1,) * (3 - np_value.ndim) + np_value.shape)
    return np_value

  def _NHWCToNCHW(self, np_value):
    # Pad the value to at least 3 dimensions, then move the last (channel)
    # dimension to third-to-last.
    np_value = self._AtLeast3d(np_value)
    np_dim = list(range(np_value.ndim))
    np_dim_new = list(np_dim[0:-3]) + list(np_dim[-1:]) + list(np_dim[-3:-1])
    return np.transpose(np_value, np_dim_new)

  def _NCHWToNHWC(self, np_value):
    assert len(np_value.shape) >= 3
    np_dim = list(range(np_value.ndim))
    # Move the third-to-last (channel) dimension to the last.
    np_dim_new = list(np_dim[0:-3]) + list(np_dim[-2:]) + list(np_dim[-3:-2])
    return np.transpose(np_value, np_dim_new)
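
  # For a 4-D value (a sketch): _NHWCToNCHW transposes axes (0, 1, 2, 3) ->
  # (0, 3, 1, 2) and _NCHWToNHWC applies the inverse permutation (0, 2, 3, 1),
  # so _NCHWToNHWC(_NHWCToNCHW(x)) recovers x.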

  def _testBiasNCHW(self, np_inputs, np_bias, use_gpu):
    np_val = self._npBias(np_inputs, np_bias)
    np_inputs = self._NHWCToNCHW(np_inputs)
    with self.test_session(use_gpu=use_gpu):
      tf_val = nn_ops.bias_add(np_inputs, np_bias, data_format="NCHW").eval()
    tf_val = self._NCHWToNHWC(tf_val)
    self.assertAllCloseAccordingToType(self._AtLeast3d(np_val), tf_val)

  def _testAll(self, np_inputs, np_bias):
    self._testBias(np_inputs, np_bias, use_gpu=False)
    if np_inputs.dtype in [np.float16, np.float32, np.float64]:
      self._testBias(np_inputs, np_bias, use_gpu=True)
      if test.is_gpu_available(cuda_only=True):
        self._testBiasNCHW(np_inputs, np_bias, use_gpu=True)

  def testInputDims(self):
    with self.assertRaises(ValueError):
      nn_ops.bias_add([1, 2], [1])

  def testBiasVec(self):
    with self.assertRaises(ValueError):
      nn_ops.bias_add(
          array_ops.reshape([1, 2], shape=[1, 2]),
          array_ops.reshape([1, 2], shape=[1, 2]))

  def testBiasInputsMatch(self):
    with self.assertRaises(ValueError):
      nn_ops.bias_add(
          array_ops.reshape([1, 2], shape=[1, 2]),
          array_ops.reshape([1], shape=[1]))

  def testIntTypes(self):
    for t in [np.int8, np.int16, np.int32, np.int64]:
      self._testAll(
          np.array([[10, 20, 30], [40, 50, 60]]).astype(t),
          np.array([1, 2, 3]).astype(t))

  def testFloatTypes(self):
    for t in [np.float16, np.float32, np.float64]:
      self._testAll(
          np.random.rand(4, 3, 3).astype(t), np.random.rand(3).astype(t))

  def _testGradient(self, np_input, bias, dtype, data_format, use_gpu):
    with self.test_session(use_gpu=use_gpu):
      if data_format == "NCHW":
        np_input = self._NHWCToNCHW(np_input)
      input_tensor = constant_op.constant(
          np_input, shape=np_input.shape, dtype=dtype)
      bias_tensor = constant_op.constant(bias, shape=bias.shape, dtype=dtype)
      output_tensor = nn_ops.bias_add(
          input_tensor, bias_tensor, data_format=data_format)
      tensor_jacob_t, tensor_jacob_n = gradient_checker.compute_gradient(
          input_tensor, np_input.shape, output_tensor, np_input.shape)
      bias_jacob_t, bias_jacob_n = gradient_checker.compute_gradient(
          bias_tensor, bias.shape, output_tensor, np_input.shape)
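      # Each compute_gradient(x, x_shape, y, y_shape) call returns a pair of
      # Jacobians of dy/dx -- one from the registered gradient (theoretical)
      # and one from finite differences (numerical) -- each of shape
      # [prod(x_shape), prod(y_shape)]; the asserts below compare them.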

      # Test the gradient of BiasAddGrad: bias_add_grad (the gradient of
      # l2_loss(output) w.r.t. the bias) is itself differentiated w.r.t. the
      # output, which exercises the BiasAddGrad op's gradient.
      bias_add_grad = gradients_impl.gradients(
          nn_ops.l2_loss(output_tensor), bias_tensor)[0]
      grad_jacob_t, grad_jacob_n = gradient_checker.compute_gradient(
          output_tensor, np_input.shape, bias_add_grad, bias.shape)

      if dtype == np.float16:
        # Compare fp16 theoretical gradients to fp32 numerical gradients,
        # since fp16 numerical gradients are too imprecise unless great
        # care is taken with choosing the inputs and the delta. This is
        # a weaker check (in particular, it does not test the op itself,
        # only its gradient), but it's much better than nothing.
        input_tensor = constant_op.constant(
            np_input, shape=np_input.shape, dtype=np.float32)
        bias_tensor = constant_op.constant(
            bias, shape=bias.shape, dtype=np.float32)
        output_tensor = nn_ops.bias_add(
            input_tensor, bias_tensor, data_format=data_format)
        _, tensor_jacob_n = gradient_checker.compute_gradient(
            input_tensor, np_input.shape, output_tensor, np_input.shape)
        _, bias_jacob_n = gradient_checker.compute_gradient(
            bias_tensor, bias.shape, output_tensor, np_input.shape)

        bias_add_grad = gradients_impl.gradients(
            nn_ops.l2_loss(output_tensor), bias_tensor)[0]
        _, grad_jacob_n = gradient_checker.compute_gradient(
            output_tensor, np_input.shape, bias_add_grad, bias.shape)

      threshold = 2e-3
      if dtype == dtypes.float64:
        threshold = 1e-10
      self.assertAllClose(tensor_jacob_t, tensor_jacob_n, threshold, threshold)
      self.assertAllClose(bias_jacob_t, bias_jacob_n, threshold, threshold)
      self.assertAllClose(grad_jacob_t, grad_jacob_n, threshold, threshold)

  def testGradientTensor(self):
    for (data_format, use_gpu) in GetTestConfigs():
      for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
        np_input = np.array(
            [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
            dtype=dtype.as_numpy_dtype).reshape(3, 2)
        bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
        self._testGradient(np_input, bias, dtype, data_format, use_gpu)

  def testGradientTensor4D(self):
    for (data_format, use_gpu) in GetTestConfigs():
      for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
        np_input = np.arange(
            1.0, 49.0, dtype=dtype.as_numpy_dtype).reshape([2, 3, 4, 2])
        bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
        self._testGradient(np_input, bias, dtype, data_format, use_gpu)

  def testEmpty(self):
    np.random.seed(7)
    for shape in (0, 0), (2, 0), (0, 2), (4, 3, 0), (4, 0, 3), (0, 4, 3):
      self._testAll(np.random.randn(*shape), np.random.randn(shape[-1]))

  def testEmptyGradient(self):
    for data_format, use_gpu in GetTestConfigs():
      for shape in (0, 0), (2, 0), (0, 2), (4, 3, 0), (4, 0, 3), (0, 4, 3):
        self._testGradient(
            np.random.randn(*shape),
            np.random.randn(shape[-1]), dtypes.float64, data_format, use_gpu)


if __name__ == "__main__":
  test.main()