# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SoftmaxOp and LogSoftmaxOp."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test


@test_util.with_c_api
class SoftmaxTest(test.TestCase):

  def _npSoftmax(self, features, dim=-1, log=False):
    if dim == -1:
      dim = len(features.shape) - 1
    one_only_on_dim = list(features.shape)
    one_only_on_dim[dim] = 1
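    # Subtract the per-dim max before exponentiating so np.exp cannot
    # overflow; softmax is invariant to adding a constant along `dim`, so
    # the result is unchanged.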
    e = np.exp(features - np.reshape(
        np.amax(
            features, axis=dim), one_only_on_dim))
    softmax = e / np.reshape(np.sum(e, axis=dim), one_only_on_dim)
    if log:
      return np.log(softmax)
    else:
      return softmax

  def _testSoftmax(self, np_features, dim=-1, log=False, use_gpu=False):
    # A previous version of the code checked the op name rather than the op
    # type to distinguish between log and non-log. Use an arbitrary name to
    # catch this bug in the future.
    name = "arbitrary"
    np_softmax = self._npSoftmax(np_features, dim=dim, log=log)
    with self.test_session(use_gpu=use_gpu):
      if log:
        tf_softmax = nn_ops.log_softmax(np_features, dim=dim, name=name)
      else:
        tf_softmax = nn_ops.softmax(np_features, dim=dim, name=name)
      out = tf_softmax.eval()
    self.assertAllCloseAccordingToType(np_softmax, out)
    self.assertShapeEqual(np_softmax, tf_softmax)
    if not log:
      # Bonus check: the softmaxes should add to one in dimension dim.
      sum_along_dim = np.sum(out, axis=dim)
      self.assertAllCloseAccordingToType(
          np.ones(sum_along_dim.shape), sum_along_dim)

  def _testAll(self, features):
    self._testSoftmax(features, use_gpu=True)
    self._testSoftmax(features, log=True, use_gpu=True)
    self._testOverflow(use_gpu=True)

  def testNpSoftmax(self):
    features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
    # Batch 0: All exps are 1.  The expected result is
    # Softmaxes = [0.25, 0.25, 0.25, 0.25]
    # LogSoftmaxes = [-1.386294, -1.386294, -1.386294, -1.386294]
    #
    # Batch 1:
    # exps = [1., 2.718, 7.389, 20.085]
    # sum = 31.192
    # Softmaxes = exps / sum = [0.0320586, 0.08714432, 0.23688282, 0.64391426]
    # LogSoftmaxes = [-3.44019, -2.44019, -1.44019, -0.44019]
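    #
    # Note: the exps above are exp(x - 1) rather than exp(x - max(x)); softmax
    # is invariant to such constant shifts, so the ratios are the same.
    # Equivalently, LogSoftmaxes = x - log(sum(exp(x))) =
    # [1., 2., 3., 4.] - 4.44019.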
    np_sm = self._npSoftmax(np.array(features))
    self.assertAllClose(
        np.array([[0.25, 0.25, 0.25, 0.25],
                  [0.0320586, 0.08714432, 0.23688282, 0.64391426]]),
        np_sm,
        rtol=1.e-5,
        atol=1.e-5)
    np_lsm = self._npSoftmax(np.array(features), log=True)
    self.assertAllClose(
        np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
                  [-3.4401897, -2.4401897, -1.4401897, -0.4401897]]),
        np_lsm,
        rtol=1.e-5,
        atol=1.e-5)

  def _testOverflow(self, use_gpu=False):
    if use_gpu:
      type = np.float32  # pylint: disable=redefined-builtin
    else:
      type = np.float64  # pylint: disable=redefined-builtin
    max = np.finfo(type).max  # pylint: disable=redefined-builtin
    features = np.array([[1., 1., 1., 1.], [max, 1., 2., 3.]]).astype(type)
    with self.test_session(use_gpu=use_gpu):
      tf_log_softmax = nn_ops.log_softmax(features)
      out = tf_log_softmax.eval()
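    # For the second row the max-float entry dominates: after the max shift
    # the features are roughly [0, -max, -max, -max], so a numerically stable
    # log_softmax returns finite values rather than NaN/Inf from an
    # overflowing exp.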
    self.assertAllClose(
        np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
                  [0, -max, -max, -max]]),
        out,
        rtol=1.e-5,
        atol=1.e-5)

  def testFloat(self):
    self._testAll(
        np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32))

  def testHalf(self):
    self._testAll(
        np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16))

  def testDouble(self):
    self._testSoftmax(
        np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64))
    self._testOverflow()

  def test1DTensorAsInput(self):
    self._testSoftmax(
        np.array([3., 2., 3., 9.]).astype(np.float64), use_gpu=False)
    self._testOverflow(use_gpu=False)

  def test3DTensorAsInput(self):
    self._testSoftmax(
        np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
                  [[2., 3., 4., 5.], [6., 7., 8., 9.]],
                  [[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
        use_gpu=False)
    self._testOverflow(use_gpu=False)

  def testAlongFirstDimension(self):
    self._testSoftmax(
        np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
                  [[2., 3., 4., 5.], [6., 7., 8., 9.]],
                  [[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
        dim=0,
        use_gpu=False)
    self._testOverflow(use_gpu=False)

  def testAlongSecondDimension(self):
    self._testSoftmax(
        np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
                  [[2., 3., 4., 5.], [6., 7., 8., 9.]],
                  [[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
        dim=1,
        use_gpu=False)
    self._testOverflow(use_gpu=False)

  def testShapeInference(self):
    op = nn_ops.softmax([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
                         [[2., 3., 4., 5.], [6., 7., 8., 9.]],
                         [[5., 4., 3., 2.], [1., 2., 3., 4.]]])
    self.assertEqual([3, 2, 4], op.get_shape())

  def testEmptyInput(self):
    with self.test_session():
      x = constant_op.constant([[]], shape=[0, 3])
      self.assertEqual(0, array_ops.size(x).eval())
      # reshape would raise if logits is empty
      with self.assertRaises(errors_impl.InvalidArgumentError):
        nn_ops.softmax(x, dim=0).eval()

  def testDimTooLarge(self):
    with self.test_session():
      # Use a placeholder to make sure we get a runtime error instead of a
      # shape inference error.
      dim = array_ops.placeholder_with_default(100, shape=[])
      with self.assertRaises(errors_impl.InvalidArgumentError):
        nn_ops.softmax([1., 2., 3., 4.], dim=dim).eval()

  def testLargeDims(self):
    # Make sure that we properly handle large inputs. See
    # https://github.com/tensorflow/tensorflow/issues/4425 for details
    for dims in [129, 256]:
      ones = np.random.rand(dims, dims).astype(np.float32)
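      # Note: despite the name, `ones` holds uniform random values in [0, 1),
      # not an array of ones.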
      np_softmax = self._npSoftmax(ones)

      for use_gpu in [True, False]:
        with self.test_session(use_gpu=use_gpu) as sess:
          x = array_ops.placeholder(dtypes.float32)
          y = nn_ops.softmax(x)
          tf_softmax = sess.run(y, feed_dict={x: ones})
        self.assertAllClose(tf_softmax, np_softmax)


if __name__ == "__main__":
  test.main()