# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Python ops defined in math_grad.py."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test


class SquaredDifferenceOpTest(test.TestCase):

  def _testGrad(self, left_shape, right_shape):
    # squared_difference broadcasts its inputs, so the expected output shape
    # is the broadcast of the two input shapes; for the shapes used in these
    # tests that is simply the higher-rank one.
    if len(left_shape) > len(right_shape):
      output_shape = left_shape
    else:
      output_shape = right_shape
    l = np.random.randn(*left_shape)
    r = np.random.randn(*right_shape)

    with self.test_session(use_gpu=True):
      left_tensor = constant_op.constant(l, shape=left_shape)
      right_tensor = constant_op.constant(r, shape=right_shape)
      output = math_ops.squared_difference(left_tensor, right_tensor)
      left_err = gradient_checker.compute_gradient_error(
          left_tensor, left_shape, output, output_shape, x_init_value=l)
      right_err = gradient_checker.compute_gradient_error(
          right_tensor, right_shape, output, output_shape, x_init_value=r)
    self.assertLess(left_err, 1e-10)
    self.assertLess(right_err, 1e-10)

  def testGrad(self):
    self._testGrad([1, 2, 3, 2], [3, 2])
    self._testGrad([2, 4], [3, 2, 4])


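# For reference, the gradient that SquaredDifferenceOpTest checks numerically
# has a simple closed form: d/dx (x - y)^2 = 2 * (x - y), and the gradient
# w.r.t. y is its negation, with any broadcast dimensions summed out. A
# minimal NumPy sketch for same-shaped inputs (illustrative only; the helper
# name is ours, not TensorFlow API):
def _reference_squared_difference_grad_x(x, y, grad_output):
  """Analytic x-gradient of squared_difference for same-shaped inputs."""
  return 2.0 * (x - y) * grad_output

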
class AbsOpTest(test.TestCase):

  def _biasedRandN(self, shape, bias=0.1, sigma=1.0):
    """Returns samples from a normal distribution shifted `bias` away from 0."""
    value = np.random.randn(*shape) * sigma
    return value + np.sign(value) * bias

  def _testGrad(self, shape, dtype=None, max_error=None, bias=None, sigma=None):
    np.random.seed(7)
    if dtype in (dtypes.complex64, dtypes.complex128):
      value = math_ops.complex(
          self._biasedRandN(shape, bias=bias, sigma=sigma),
          self._biasedRandN(shape, bias=bias, sigma=sigma))
    else:
      value = ops.convert_to_tensor(
          self._biasedRandN(shape, bias=bias, sigma=sigma), dtype=dtype)

    with self.test_session(use_gpu=True):
      output = math_ops.abs(value)
      error = gradient_checker.compute_gradient_error(
          value, shape, output, output.get_shape().as_list())
    self.assertLess(error, max_error)

  def testComplexAbs(self):
    # Bias random test values away from zero to avoid numeric instabilities.
    self._testGrad(
        [3, 3], dtype=dtypes.float32, max_error=2e-5, bias=0.1, sigma=1.0)
    self._testGrad(
        [3, 3], dtype=dtypes.complex64, max_error=2e-5, bias=0.1, sigma=1.0)

    # Ensure stability near the non-differentiable point at zero.
    self._testGrad(
        [3, 3], dtype=dtypes.float32, max_error=100.0, bias=0.0, sigma=0.1)
    self._testGrad(
        [3, 3], dtype=dtypes.complex64, max_error=100.0, bias=0.0, sigma=0.1)


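# The checks above probe the kink of abs at zero: for real x the derivative
# is sign(x), and for complex z the gradient of |z| pulls back through
# z / |z|, neither of which is defined at exactly zero. Hence the biased
# samples for the tight-tolerance checks and the very loose max_error for the
# near-zero ones. A NumPy sketch of the real case (illustrative only):
def _reference_abs_grad(x, grad_output):
  """Analytic gradient of abs for real x, away from zero."""
  return np.sign(x) * grad_output

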
class MinOrMaxGradientTest(test.TestCase):

  def testMinGradient(self):
    inputs = constant_op.constant([1.0], dtype=dtypes.float32)
    outputs = math_ops.reduce_min(array_ops.concat([inputs, inputs], 0))
    with self.test_session():
      error = gradient_checker.compute_gradient_error(inputs, [1], outputs, [])
      self.assertLess(error, 1e-4)

  def testMaxGradient(self):
    inputs = constant_op.constant([1.0], dtype=dtypes.float32)
    outputs = math_ops.reduce_max(array_ops.concat([inputs, inputs], 0))
    with self.test_session():
      error = gradient_checker.compute_gradient_error(inputs, [1], outputs, [])
      self.assertLess(error, 1e-4)


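# reduce_min/reduce_max route the incoming gradient to the extremal input
# element(s); concatenating `inputs` with itself above deliberately creates a
# tie, which TensorFlow resolves by splitting the gradient evenly among the
# tied elements. A NumPy sketch for a 1-D reduce_max (illustrative only):
def _reference_reduce_max_grad(x, grad_output):
  """Gradient of reduce_max over a 1-D array, split evenly across ties."""
  selected = (x == np.max(x)).astype(x.dtype)
  return selected / selected.sum() * grad_output

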
class MaximumOrMinimumGradientTest(test.TestCase):

  def testMaximumGradient(self):
    inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0], dtype=dtypes.float32)
    outputs = math_ops.maximum(inputs, 3.0)
    with self.test_session():
      error = gradient_checker.compute_gradient_error(inputs, [4], outputs, [4])
      self.assertLess(error, 1e-4)

  def testMinimumGradient(self):
    inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0], dtype=dtypes.float32)
    outputs = math_ops.minimum(inputs, 2.0)
    with self.test_session():
      error = gradient_checker.compute_gradient_error(inputs, [4], outputs, [4])
      self.assertLess(error, 1e-4)


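# Elementwise maximum/minimum are piecewise linear, so the gradient w.r.t.
# the variable input is the incoming gradient where that input is the
# selected argument and zero where the constant wins. A NumPy sketch for
# maximum (illustrative only; this ignores the tie-breaking convention at
# x == c):
def _reference_maximum_grad_x(x, c, grad_output):
  """Gradient of maximum(x, c) w.r.t. x, away from ties."""
  return np.where(x > c, grad_output, np.zeros_like(grad_output))

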
class ProdGradientTest(test.TestCase):

  def testProdGradient(self):
    inputs = constant_op.constant([[1., 2.], [3., 4.]],
                                  dtype=dtypes.float32)
    outputs = math_ops.reduce_prod(inputs)
    with self.test_session():
      error = gradient_checker.compute_gradient_error(
          inputs, inputs.get_shape().as_list(),
          outputs, outputs.get_shape().as_list())
      self.assertLess(error, 1e-4)

  def testProdGradientForNegativeAxis(self):
    inputs = constant_op.constant([[1., 2.], [3., 4.]],
                                  dtype=dtypes.float32)
    outputs = math_ops.reduce_prod(inputs, -1)
    with self.test_session():
      error = gradient_checker.compute_gradient_error(
          inputs, inputs.get_shape().as_list(),
          outputs, outputs.get_shape().as_list())
      self.assertLess(error, 1e-4)


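# The gradient of reduce_prod w.r.t. each element is the product of all the
# other elements, times the incoming gradient; TensorFlow computes it via
# cumulative products so that zeros in the input are handled correctly. A
# direct NumPy sketch for the zero-free, reduce-over-everything case
# (illustrative only):
def _reference_reduce_prod_grad(x, grad_output):
  """Gradient of reduce_prod over all elements, assuming no zeros in x."""
  return np.prod(x) / x * grad_output

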
class SegmentMinOrMaxGradientTest(test.TestCase):

  def testSegmentMinGradient(self):
    data = constant_op.constant([1.0, 2.0, 3.0], dtype=dtypes.float32)
    segment_ids = constant_op.constant([0, 0, 1], dtype=dtypes.int64)
    segment_min = math_ops.segment_min(data, segment_ids)
    with self.test_session():
      error = gradient_checker.compute_gradient_error(data, [3], segment_min,
                                                      [2])
      self.assertLess(error, 1e-4)

  def testSegmentMaxGradient(self):
    data = constant_op.constant([1.0, 2.0, 3.0], dtype=dtypes.float32)
    segment_ids = constant_op.constant([0, 0, 1], dtype=dtypes.int64)
    segment_max = math_ops.segment_max(data, segment_ids)
    with self.test_session():
      error = gradient_checker.compute_gradient_error(data, [3], segment_max,
                                                      [2])
      self.assertLess(error, 1e-4)

  def testSegmentMinGradientWithTies(self):
    inputs = constant_op.constant([1.0], dtype=dtypes.float32)
    data = array_ops.concat([inputs, inputs], 0)
    segment_ids = constant_op.constant([0, 0], dtype=dtypes.int64)
    segment_min = math_ops.segment_min(data, segment_ids)
    with self.test_session():
      error = gradient_checker.compute_gradient_error(inputs, [1], segment_min,
                                                      [1])
      self.assertLess(error, 1e-4)

  def testSegmentMaxGradientWithTies(self):
    inputs = constant_op.constant([1.0], dtype=dtypes.float32)
    data = array_ops.concat([inputs, inputs], 0)
    segment_ids = constant_op.constant([0, 0], dtype=dtypes.int64)
    segment_max = math_ops.segment_max(data, segment_ids)
    with self.test_session():
      error = gradient_checker.compute_gradient_error(inputs, [1], segment_max,
                                                      [1])
      self.assertLess(error, 1e-4)


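# segment_min/segment_max behave like per-segment reduce_min/reduce_max, and
# as with those ops the gradient of each segment flows to its extremal
# element(s), split evenly across ties. A NumPy sketch (illustrative only;
# assumes segment ids are sorted and run from 0 to num_segments - 1):
def _reference_segment_min_grad(data, segment_ids, grad_output):
  """Per-segment min gradient, split evenly across tied minima."""
  grad = np.zeros_like(data)
  for seg in np.unique(segment_ids):
    mask = segment_ids == seg
    selected = (data[mask] == data[mask].min()).astype(data.dtype)
    grad[mask] = selected / selected.sum() * grad_output[seg]
  return grad

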
class FloorModGradientTest(test.TestCase):

  def testFloorModGradient(self):
    # Make sure the input is not near a discontinuity, i.e. a point where
    # x / y == floor(x / y).
    ns = constant_op.constant([17.], dtype=dtypes.float32)
    inputs = constant_op.constant([131.], dtype=dtypes.float32)
    floor_mod = math_ops.floormod(inputs, ns)
    with self.test_session():
      error = gradient_checker.compute_gradient_error(inputs, [1],
                                                      floor_mod, [1])
      self.assertLess(error, 1e-4)


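# floormod(x, y) equals x - floor(x / y) * y. Away from the discontinuities,
# floor(x / y) is locally constant, so the gradient w.r.t. x is simply the
# incoming gradient and the gradient w.r.t. y is -floor(x / y) times it,
# which is what the test above verifies for x. A NumPy sketch (illustrative
# only; the helper name is ours):
def _reference_floormod_grads(x, y, grad_output):
  """Gradients of floormod w.r.t. x and y, away from discontinuities."""
  return grad_output, -np.floor(x / y) * grad_output

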
if __name__ == "__main__":
  test.main()