# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Softsign and SoftsignGrad."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad  # pylint: disable=unused-import
from tensorflow.python.platform import test


class SoftsignTest(test.TestCase):

  def _npSoftsign(self, np_features):
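    # NumPy reference implementation of softsign: f(x) = x / (1 + |x|).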
    return np_features / (1 + np.abs(np_features))

  def _testSoftsign(self, np_features, use_gpu=False):
    # Compare the TensorFlow op against the NumPy reference on the same input.
    np_softsign = self._npSoftsign(np_features)
    with self.test_session(use_gpu=use_gpu):
      softsign = nn_ops.softsign(np_features)
      tf_softsign = softsign.eval()
    self.assertAllClose(np_softsign, tf_softsign)
    self.assertShapeEqual(np_softsign, softsign)

  def testNumbers(self):
    # Check forward values on both CPU and GPU for float32 and float64.
    # (np.float is deprecated and aliases float64, so it is not used here.)
    for t in [np.float32, np.float64]:
      self._testSoftsign(
          np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
          use_gpu=False)
      self._testSoftsign(
          np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
          use_gpu=True)

  def testGradient(self):
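    # The analytic gradient of softsign is 1 / (1 + |x|)**2; the gradient
    # checker compares it against a numerical estimate and returns the
    # maximum elementwise error.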
    with self.test_session():
      x = constant_op.constant(
          [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
          shape=[2, 5],
          name="x")
      y = nn_ops.softsign(x, name="softsign")
      x_init = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32,
          order="F")
      err = gradient_checker.compute_gradient_error(
          x, [2, 5], y, [2, 5], x_init_value=x_init)
    print("softsign (float) gradient err = ", err)
    self.assertLess(err, 1e-4)

  def testWarnInts(self):
    # NOTE(irving): Actually I don't know how to intercept the warning, but
    # let's make sure it runs.  I promise I've looked, and there was a warning.
    with self.test_session():
      nn_ops.softsign(constant_op.constant(7)).eval()


if __name__ == "__main__":
  test.main()