# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest


exp = np.exp
log = np.log


@test_util.with_c_api
class ReduceTest(test_util.TensorFlowTestCase):

  @test_util.run_in_graph_and_eager_modes()
  def testReduceAllDims(self):
    x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
    with test_util.device(use_gpu=True):
      y_tf = self.evaluate(math_ops.reduce_sum(x))
      self.assertEqual(y_tf, 21)

  @test_util.run_in_graph_and_eager_modes()
  def testReduceExplicitAxes(self):
    x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
    with test_util.device(use_gpu=True):
      for axis in (0, -2, (0, 0), (0, -2)):
        self.assertAllEqual(self.evaluate(math_ops.reduce_sum(x, axis=axis)),
                            [5, 7, 9])
      for axis in (1, -1, (1, 1), (1, -1)):
        self.assertAllEqual(self.evaluate(math_ops.reduce_sum(x, axis=axis)),
                            [6, 15])
      for axis in (None, (0, 1), (-1, -2), (-2, -1, 0, 1)):
        self.assertEqual(self.evaluate(math_ops.reduce_sum(x, axis=axis)), 21)

  @test_util.run_in_graph_and_eager_modes()
  def testReduceInvalidAxis(self):
    if context.in_eager_mode():
      # The shape check happens at graph construction time, so eager mode
      # skips it and silently returns a result for an invalid axis shape.
      return
    x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
    axis = np.array([[0], [1]])
    with self.assertRaisesRegexp(ValueError, "must be at most rank 1"):
      math_ops.reduce_sum(x, axis)


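# NOTE(editor): testReduceExplicitAxes relies on negative axes wrapping modulo
# the input rank, matching NumPy: for the rank-2 `x` above, axis=-2 aliases
# axis 0 and axis=-1 aliases axis 1. A tiny NumPy sketch (illustration only,
# not part of the test suite):
def _negative_axis_sketch():
  x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
  assert np.array_equal(np.sum(x, axis=-2), [5, 7, 9])  # same as axis=0
  assert np.array_equal(np.sum(x, axis=-1), [6, 15])    # same as axis=1

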
@test_util.with_c_api
class LogSumExpTest(test_util.TensorFlowTestCase):

  def testReduceLogSumExp(self):
    for dtype in [np.float16, np.float32, np.double]:
      x_np = np.random.rand(5, 5).astype(dtype)
      with self.test_session(use_gpu=True):
        y_tf_np = math_ops.reduce_logsumexp(x_np).eval()
        y_np = log(np.sum(exp(x_np)))
        self.assertAllClose(y_tf_np, y_np)

  def testReductionIndices(self):
    for dtype in [np.float16, np.float32, np.double]:
      x_np = np.random.rand(5, 5).astype(dtype)
      with self.test_session(use_gpu=True):
        y_tf = math_ops.reduce_logsumexp(x_np, reduction_indices=[0])
        y_np = log(np.sum(exp(x_np), axis=0))
        self.assertShapeEqual(y_np, y_tf)
        y_tf_np = y_tf.eval()
        self.assertAllClose(y_tf_np, y_np)

  def testReductionIndices2(self):
    for dtype in [np.float16, np.float32, np.double]:
      x_np = np.random.rand(5, 5).astype(dtype)
      with self.test_session(use_gpu=True):
        y_tf = math_ops.reduce_logsumexp(x_np, reduction_indices=0)
        y_np = log(np.sum(exp(x_np), axis=0))
        self.assertShapeEqual(y_np, y_tf)
        y_tf_np = y_tf.eval()
        self.assertAllClose(y_tf_np, y_np)

  def testKeepDims(self):
    for dtype in [np.float16, np.float32, np.double]:
      x_np = np.random.rand(5, 5).astype(dtype)
      with self.test_session(use_gpu=True):
        y_tf_np = math_ops.reduce_logsumexp(x_np, keepdims=True).eval()
        self.assertEqual(y_tf_np.ndim, x_np.ndim)
        y_np = log(np.sum(exp(x_np), keepdims=True))
        self.assertAllClose(y_tf_np, y_np)

  def testOverflow(self):
    x = [1000, 1001, 1002, 1003]
    for dtype in [np.float16, np.float32, np.double]:
      x_np = np.array(x, dtype=dtype)
      max_np = np.max(x_np)
      with self.assertRaisesRegexp(RuntimeWarning,
                                   "overflow encountered in exp"):
        out = log(np.sum(exp(x_np)))
        if out == np.inf:
          raise RuntimeWarning("overflow encountered in exp")

      with self.test_session(use_gpu=True):
        x_tf = constant_op.constant(x_np, shape=x_np.shape)
        y_tf_np = math_ops.reduce_logsumexp(x_tf).eval()
        y_np = log(np.sum(exp(x_np - max_np))) + max_np
        self.assertAllClose(y_tf_np, y_np)

  def testUnderflow(self):
    x = [-1000, -1001, -1002, -1003]
    for dtype in [np.float16, np.float32, np.double]:
      x_np = np.array(x, dtype=dtype)
      max_np = np.max(x_np)
      with self.assertRaisesRegexp(RuntimeWarning,
                                   "divide by zero encountered in log"):
        out = log(np.sum(exp(x_np)))
        if out == -np.inf:
          raise RuntimeWarning("divide by zero encountered in log")

      with self.test_session(use_gpu=True):
        x_tf = constant_op.constant(x_np, shape=x_np.shape)
        y_tf_np = math_ops.reduce_logsumexp(x_tf).eval()
        y_np = log(np.sum(exp(x_np - max_np))) + max_np
        self.assertAllClose(y_tf_np, y_np)

  def testInfinity(self):
    with self.test_session(use_gpu=True):
      res = math_ops.reduce_logsumexp(-np.inf).eval()
      self.assertEqual(-np.inf, res)


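# NOTE(editor): testOverflow and testUnderflow above check the standard
# log-sum-exp stabilization, logsumexp(x) = log(sum(exp(x - max(x)))) + max(x).
# Shifting by max(x) keeps the largest exponent at exp(0) = 1, so the sum
# neither overflows nor underflows. A minimal NumPy reference (an illustrative
# sketch, not TensorFlow's implementation):
def _stable_logsumexp_sketch(x_np):
  """Numerically stable log(sum(exp(x))) via the max-shift trick."""
  max_np = np.max(x_np)
  return np.log(np.sum(np.exp(x_np - max_np))) + max_np

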
@test_util.with_c_api
class RoundTest(test_util.TensorFlowTestCase):

  @test_util.run_in_graph_and_eager_modes()
  def testRounding(self):
    x = [0.49, 0.7, -0.3, -0.8]
    # TODO(nolivia): Remove this when RoundOp is forwards compatible
    # x = np.arange(-5.0, 5.0, .25)
    for dtype in [np.float32, np.double, np.int32]:
      x_np = np.array(x, dtype=dtype)
      with test_util.device(use_gpu=True):
        x_tf = constant_op.constant(x_np, shape=x_np.shape)
        y_tf = math_ops.round(x_tf)
        y_tf_np = self.evaluate(y_tf)
        y_np = np.round(x_np)
        self.assertAllClose(y_tf_np, y_np, atol=1e-2)


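# NOTE(editor): math_ops.round, like np.round, rounds half-way values to the
# nearest even integer ("banker's rounding") rather than away from zero, which
# is why the test compares against np.round. A tiny sketch of the convention
# (illustration only):
def _round_half_to_even_sketch():
  assert np.round(0.5) == 0.0   # ties go to the even neighbor
  assert np.round(1.5) == 2.0
  assert np.round(-0.5) == -0.0

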
@test_util.with_c_api
class ModTest(test_util.TensorFlowTestCase):

  def testFloat(self):
    x = [0.5, 0.7, 0.3]
    for dtype in [np.float32, np.double]:
      # Test scalar and vector versions.
      for denom in [x[0], [x[0]] * 3]:
        x_np = np.array(x, dtype=dtype)
        with self.test_session(use_gpu=True):
          x_tf = constant_op.constant(x_np, shape=x_np.shape)
          y_tf = math_ops.mod(x_tf, denom)
          y_tf_np = y_tf.eval()
          y_np = np.fmod(x_np, denom)
        self.assertAllClose(y_tf_np, y_np, atol=1e-2)

  def testFixed(self):
    x = [5, 10, 23]
    for dtype in [np.int32, np.int64]:
      # Test scalar and vector versions.
      for denom in [x[0], x]:
        x_np = np.array(x, dtype=dtype)
        with self.test_session(use_gpu=True):
          x_tf = constant_op.constant(x_np, shape=x_np.shape)
          y_tf = math_ops.mod(x_tf, denom)
          y_tf_np = y_tf.eval()
          y_np = np.mod(x_np, denom)
        self.assertAllClose(y_tf_np, y_np)


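# NOTE(editor): the two tests above compare against np.fmod (truncated
# division; the result takes the sign of the dividend) and np.mod (floored
# division; the result takes the sign of the divisor). For the nonnegative
# operands used here the two conventions coincide; they differ once signs mix:
def _fmod_vs_mod_sketch():
  assert np.fmod(-7, 3) == -1   # truncated: sign follows the dividend
  assert np.mod(-7, 3) == 2     # floored: sign follows the divisor

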
@test_util.with_c_api
class SquaredDifferenceTest(test_util.TensorFlowTestCase):

  @test_util.run_in_graph_and_eager_modes()
  def testSquaredDifference(self):
    for dtype in [np.int32, np.float16]:
      x = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
      y = np.array([-3, -2, -1], dtype=dtype)
      z = (x - y) * (x - y)
      with test_util.device(use_gpu=True):
        z_tf = self.evaluate(math_ops.squared_difference(x, y))
        self.assertAllClose(z, z_tf)


@test_util.with_c_api
class ApproximateEqualTest(test_util.TensorFlowTestCase):

  @test_util.run_in_graph_and_eager_modes()
  def testApproximateEqual(self):
    for dtype in [np.float32, np.double]:
      x = dtype(1)
      y = dtype(1.00009)
      z = False
      with test_util.device(use_gpu=True):
        # Default tolerance is 0.00001
        z_tf = self.evaluate(math_ops.approximate_equal(x, y))
        self.assertAllEqual(z, z_tf)

    for dtype in [np.float32, np.double]:
      x = dtype(1)
      y = dtype(1.000009)
      z = True
      with test_util.device(use_gpu=True):
        # Default tolerance is 0.00001
        z_tf = self.evaluate(math_ops.approximate_equal(x, y))
        self.assertAllEqual(z, z_tf)

    for dtype in [np.float32, np.double]:
      x = np.array([[[[-1, 2.00009999], [-3, 4.01]]]], dtype=dtype)
      y = np.array([[[[-1.001, 2], [-3.00009, 4]]]], dtype=dtype)
      z = np.array([[[[False, True], [True, False]]]], dtype=np.bool_)
      with test_util.device(use_gpu=True):
        z_tf = self.evaluate(math_ops.approximate_equal(x, y, tolerance=0.0001))
        self.assertAllEqual(z, z_tf)


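# NOTE(editor): approximate_equal is the elementwise predicate
# |x - y| < tolerance, which is exactly what the expected booleans above
# encode. A NumPy sketch of the same comparison (illustration only):
def _approximate_equal_sketch(x, y, tolerance=1e-5):
  return np.abs(x - y) < tolerance

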
@test_util.with_c_api
class ScalarMulTest(test_util.TensorFlowTestCase):

  @test_util.run_in_graph_and_eager_modes()
  def testAcceptsRefs(self):
    if context.in_eager_mode():
      var = resource_variable_ops.ResourceVariable(10, name="var")
    else:
      var = variables.Variable(10)
    result = math_ops.scalar_mul(3, var)
    init = variables.global_variables_initializer()
    with test_util.device(use_gpu=True):
      self.evaluate(init)
      self.assertEqual(30, self.evaluate(result))

  @test_util.run_in_graph_and_eager_modes()
  def testAcceptsConstant(self):
    const = constant_op.constant(10)
    result = math_ops.scalar_mul(3, const)
    with test_util.device(use_gpu=True):
      self.assertEqual(30, self.evaluate(result))

  @test_util.run_in_graph_and_eager_modes()
  def testAcceptsTensor(self):
    tensor = array_ops.ones([10, 10])
    result = math_ops.scalar_mul(3, tensor)
    expected = array_ops.ones([10, 10]) * 3

    with test_util.device(use_gpu=True):
      self.assertAllEqual(self.evaluate(expected), self.evaluate(result))

  @test_util.run_in_graph_and_eager_modes()
  def testAcceptsIndexedSlices(self):
    values = constant_op.constant([2, 3, 5, 7, 0, -1], shape=[3, 2])
    indices = constant_op.constant([0, 2, 5])
    x = math_ops.scalar_mul(-3, ops.IndexedSlices(values, indices))
    with test_util.device(use_gpu=True):
      self.assertAllEqual(self.evaluate(x.values),
                          [[-6, -9], [-15, -21], [0, 3]])
      self.assertAllEqual(self.evaluate(x.indices), [0, 2, 5])


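# NOTE(editor): an IndexedSlices is a sparse row subset (`values` plus the row
# `indices` they occupy). scalar_mul scales only the values and passes the
# indices through untouched, as testAcceptsIndexedSlices checks. The dense
# arithmetic it mirrors (illustration only):
def _indexed_slices_scalar_mul_sketch():
  values = np.array([[2, 3], [5, 7], [0, -1]])
  assert np.array_equal(-3 * values, [[-6, -9], [-15, -21], [0, 3]])

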
@test_util.with_c_api
class AccumulateNTest(test_util.TensorFlowTestCase):

  def testFloat(self):
    np.random.seed(12345)
    x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(5)]
    tf_x = ops.convert_n_to_tensor(x)
    with self.test_session(use_gpu=True):
      self.assertAllClose(sum(x), math_ops.accumulate_n(tf_x).eval())
      self.assertAllClose(x[0] * 5, math_ops.accumulate_n([tf_x[0]] * 5).eval())

  def testInt(self):
    np.random.seed(54321)
    x = [np.random.randint(-128, 128, (5, 4, 3, 2, 1)) for _ in range(6)]
    tf_x = ops.convert_n_to_tensor(x)
    with self.test_session(use_gpu=True):
      self.assertAllEqual(sum(x), math_ops.accumulate_n(tf_x).eval())
      self.assertAllEqual(x[0] * 6, math_ops.accumulate_n([tf_x[0]] * 6).eval())


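# NOTE(editor): accumulate_n computes the same result as add_n; per the
# TensorFlow docs it may start summing before all inputs are ready, which can
# save memory when large intermediate tensors become available at different
# times. Both are therefore checked against plain Python sum() over the
# NumPy inputs.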
@test_util.with_c_api
class AddNTest(test_util.TensorFlowTestCase):

  def testPartials(self):
    """Test that previously revealed a bug in buffer forwarding for AddN."""
    partials = []
    for _ in range(98):
      partials.append(math_ops.add_n([constant_op.constant(1)]))
    partials.append(
        math_ops.add_n([constant_op.constant(1),
                        constant_op.constant(1)]))

    res = math_ops.add_n(partials) + constant_op.constant(0)
    with self.test_session(use_gpu=True):
      self.assertAllEqual(res.eval(), 100)

  def testFloat(self):
    np.random.seed(12345)
    for num_inputs in range(1, 10):
      x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(num_inputs)]
      tf_x = ops.convert_n_to_tensor(x)
      with self.test_session(use_gpu=True):
        self.assertAllClose(sum(x), math_ops.add_n(tf_x).eval())
        self.assertAllClose(x[0] * num_inputs,
                            math_ops.add_n([tf_x[0]] * num_inputs).eval())

  def testInt(self):
    np.random.seed(54321)
    for num_inputs in range(1, 10):
      x = [
          np.random.randint(-128, 128, (5, 4, 3, 2, 1))
          for _ in range(num_inputs)
      ]
      tf_x = ops.convert_n_to_tensor(x)
      with self.test_session(use_gpu=True):
        self.assertAllEqual(sum(x), math_ops.add_n(tf_x).eval())
        self.assertAllEqual(x[0] * num_inputs,
                            math_ops.add_n([tf_x[0]] * num_inputs).eval())

  def testGrad(self):
    np.random.seed(42)
    for num_inputs in range(1, 10):
      with self.test_session(use_gpu=True) as sess:
        input_vars = [
            variables.Variable(10.0 * np.random.random())
            for _ in range(num_inputs)
        ]
        addn = math_ops.add_n(input_vars)
        sess.run(variables.global_variables_initializer())
        add_n_grad = gradients.gradients(addn, input_vars)
        # d/dx_i (x_1 + ... + x_n) = 1, so every gradient should be 1.
        self.assertAllEqual(np.repeat(1.0, num_inputs),
                            [g.eval() for g in add_n_grad])


@test_util.with_c_api
class DivAndModTest(test_util.TensorFlowTestCase):
  # TODO(aselle): Test more types before exposing new division operators.

  def intTestData(self):
    nums = np.arange(-10, 10, 1).reshape(20, 1)
    divs = np.arange(-3, 4, 2).reshape(1, 4)
    return nums, divs

  def floatTestData(self):
    nums = np.arange(-10, 10, .25).reshape(80, 1)
    divs = np.arange(-3, 0, .25).reshape(1, 12)
    return nums, divs

  def testFloorModInt(self):
    nums, divs = self.intTestData()
    with self.test_session():
      # TODO(aselle): Change test to use % after switch
      # tf_result = math_ops.floor_mod(nums, divs).eval()
      tf_result = math_ops.floormod(nums, divs).eval()
      np_result = nums % divs
      self.assertAllEqual(tf_result, np_result)

  def testFloorModFloat(self):
    nums, divs = self.floatTestData()
    with self.test_session():
      tf_result = math_ops.floormod(nums, divs).eval()
      np_result = nums % divs
      self.assertAllEqual(tf_result, np_result)
      # TODO(aselle): put this test in once % switched to floormod
      # tf2_result = (array_ops.constant(nums)
      #               % array_ops.constant(divs)).eval()
      # self.assertAllEqual(tf2_result, tf_result)

  def testTruncateModInt(self):
    nums, divs = self.intTestData()
    with self.test_session():
      tf_result = math_ops.truncatemod(nums, divs).eval()
      np_result = np.fmod(nums, divs)
      self.assertAllEqual(tf_result, np_result)

  def testTruncateModFloat(self):
    nums, divs = self.floatTestData()
    with self.test_session():
      tf_result = math_ops.truncatemod(nums, divs).eval()
      np_result = np.fmod(nums, divs)
      self.assertAllEqual(tf_result, np_result)

  def testDivideInt(self):
    nums, divs = self.intTestData()
    with self.test_session():
      tf_result = math_ops.floor_div(nums, divs).eval()
      np_result = nums // divs
      self.assertAllEqual(tf_result, np_result)
      # TODO(aselle): Put this test in once // is switched to floordiv
      # tf2_result = (array_ops.constant(nums)
      #               // array_ops.constant(divs)).eval()
      # self.assertAllEqual(tf2_result, tf_result)

  def testDivideName(self):
    with self.test_session():
      op = math_ops.divide(
          array_ops.constant(3), array_ops.constant(4), name="my_cool_divide")
      self.assertEqual(op.name, "my_cool_divide:0")

  def testRealDiv(self):
    nums, divs = self.floatTestData()
    with self.test_session():
      tf_result = math_ops.realdiv(nums, divs).eval()
      np_result = np.divide(nums, divs)
      self.assertAllEqual(tf_result, np_result)

  def testComplexDiv(self):
    foo = array_ops.constant([1. + 3.j])
    with self.test_session():
      _ = math_ops.divide(foo, 1.).eval()
      _ = math_ops.div(foo, 2.).eval()

  def testFloorDivGrad(self):
    with self.test_session() as sess:
      a = variables.Variable(2.)
      b = variables.Variable(4.)
      sess.run(variables.global_variables_initializer())
      c_grad = gradients.gradients(math_ops.divide(a, b), [a, b])
      self.assertAllEqual([x.eval() for x in c_grad], [.25, -.125])
      c_grad = gradients.gradients(math_ops.div(a, b), [a, b])
      self.assertAllEqual([x.eval() for x in c_grad], [.25, -.125])
      # floordiv is piecewise constant, so no gradient is registered (None).
      c_grad = gradients.gradients(math_ops.floordiv(a, b), [a, b])
      self.assertAllEqual([None if x is None else x.eval()
                           for x in c_grad], [None, None])

  def testConsistent(self):
    nums, divs = self.intTestData()
    with self.test_session():
      tf_result = (math_ops.floor_div(nums, divs) * divs + math_ops.floormod(
          nums, divs)).eval()
      tf_nums = array_ops.constant(nums)
      tf_divs = array_ops.constant(divs)
      tf2_result = (tf_nums // tf_divs * tf_divs + tf_nums % tf_divs).eval()
      np_result = (nums // divs) * divs + (nums % divs)
      # Consistent with numpy
      self.assertAllEqual(tf_result, np_result)
      # Consistent with the two forms of divide
      self.assertAllEqual(tf_result, tf2_result)
      # Consistency for the truncation form
      tf3_result = (math_ops.truncatediv(nums, divs) * divs +
                    math_ops.truncatemod(nums, divs)).eval()
      expanded_nums = np.reshape(
          np.tile(nums, divs.shape[1]), (nums.shape[0], divs.shape[1]))
      # Both identities should reconstruct the numerator
      self.assertAllEqual(tf3_result, expanded_nums)
      self.assertAllEqual(tf_result, expanded_nums)


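# NOTE(editor): testConsistent exercises the two division identities, both of
# which reconstruct the numerator:
#   floor_div(x, y) * y + floormod(x, y)      == x   (floored division)
#   truncatediv(x, y) * y + truncatemod(x, y) == x   (truncated division)
# A NumPy sketch of the floored form (illustration only):
def _division_identity_sketch():
  nums = np.arange(-10, 10).reshape(20, 1)
  divs = np.array([[-3, -1, 1, 3]])
  assert np.array_equal(nums // divs * divs + nums % divs,
                        np.broadcast_to(nums, (20, 4)))

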
if __name__ == "__main__":
  googletest.main()