# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.cast."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import platform
import sys

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test


class CastOpTest(test.TestCase):

  def _toDataType(self, dtype):
    """Returns TensorFlow data type for numpy type."""
    if dtype == np.float32:
      return dtypes.float32
    elif dtype == np.float64:
      return dtypes.float64
    elif dtype == np.int32:
      return dtypes.int32
    elif dtype == np.int64:
      return dtypes.int64
    elif dtype == np.bool:
      return dtypes.bool
    elif dtype == np.complex64:
      return dtypes.complex64
    elif dtype == np.complex128:
      return dtypes.complex128
    else:
      return None

  def _cast(self, x, dtype, use_gpu=False):
    with self.test_session(use_gpu=use_gpu):
      val = constant_op.constant(x, self._toDataType(np.array([x]).dtype))
      return math_ops.cast(val, self._toDataType(dtype), name="cast").eval()

  def _test(self, x, dtype, use_gpu=False):
    """Tests that cast(x) to dtype behaves the same as numpy.astype."""
    np_ans = x.astype(dtype)
    tf_ans = self._cast(x, dtype, use_gpu)
    self.assertAllEqual(np_ans, tf_ans)

  def _testTypes(self, x, use_gpu=False):
    """Tests cast(x) to different tf data types."""
    if use_gpu:
      type_list = [
          np.float32, np.float64, np.int64, np.complex64, np.complex128
      ]
    else:
      type_list = [
          np.float32, np.float64, np.int32, np.int64, np.complex64,
          np.complex128
      ]
    for from_type in type_list:
      for to_type in type_list:
        self._test(x.astype(from_type), to_type, use_gpu)

    self._test(x.astype(np.bool), np.float32, use_gpu)
    self._test(x.astype(np.uint8), np.float32, use_gpu)
    if not use_gpu:
      self._test(x.astype(np.bool), np.int32, use_gpu)
      self._test(x.astype(np.int32), np.int32, use_gpu)

  def _testAll(self, x):
    self._testTypes(x, use_gpu=False)
    if x.dtype == np.float32 or x.dtype == np.float64:
      self._testTypes(x, use_gpu=True)

  def testBasic(self):
    self._testAll(np.arange(-10, 10).reshape(2, 10))
    self._testAll(np.linspace(-10, 10, 17))

  def testSmallValues(self):
    f4 = np.finfo(np.float32)
    f8 = np.finfo(np.float64)
    self._testAll(
        np.array([
            0, -1, 1, -f4.resolution, f4.resolution, f8.resolution,
            -f8.resolution
        ]))

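  # bfloat16 has only 7 explicit mantissa bits, so a float32 -> bfloat16 ->
  # float32 round trip loses on the order of 1/256 to 1/128 of relative
  # precision (depending on the rounding mode), hence the loose rtol below.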
  def testBfloat16(self):
    a = np.random.uniform(-100, 100, 100).astype(np.float32)
    with self.test_session(use_gpu=False):
      b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
      self.assertAllClose(a, b.eval(), rtol=1 / 128.)
    with self.test_session(use_gpu=True):
      b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
      self.assertAllClose(a, b.eval(), rtol=1 / 128.)

  def testRandom(self):
    self._testAll(np.random.normal(0, 10, 210).reshape([2, 3, 5, 7]))
    self._testAll(np.random.normal(0, 1e6, 210).reshape([2, 3, 5, 7]))

  # Special values like int32max, int64min, inf, -inf, and nan are cast to
  # integer values in somewhat unexpected ways, and they behave differently
  # on CPU and GPU.
  def _compare(self, x, dst_dtype, expected, use_gpu=False):
    np.testing.assert_equal(
        self._cast(
            x, dst_dtype, use_gpu=use_gpu), dst_dtype(expected))

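  # Integer extremes such as int64.max have no exact float32/float64
  # representation; _compare rounds the expected value through dst_dtype, so
  # both sides of each comparison below agree.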
  def testIntToFloatBoundary(self):
    i4 = np.iinfo(np.int32)
    i8 = np.iinfo(np.int64)

    self._compare(i4.min, np.float32, i4.min, False)
    self._compare(i4.max, np.float32, i4.max, False)
    self._compare(i8.min, np.float32, i8.min, False)
    self._compare(i8.max, np.float32, i8.max, False)
    self._compare(i4.min, np.float64, i4.min, False)
    self._compare(i4.max, np.float64, i4.max, False)
    self._compare(i8.min, np.float64, i8.min, False)
    self._compare(i8.max, np.float64, i8.max, False)
    # NOTE: GPU does not support int32/int64 for casting.

  def testInfNan(self):
    i4 = np.iinfo(np.int32)
    i8 = np.iinfo(np.int64)

    self._compare(np.inf, np.float32, np.inf, False)
    self._compare(np.inf, np.float64, np.inf, False)
    if sys.byteorder == "big":
      self._compare(np.inf, np.int32, i4.max, False)
      self._compare(np.inf, np.int64, i8.max, False)
    else:
      # np.float64(np.inf).astype(np.int32) is negative on x86 but positive
      # on ppc64le. See https://github.com/numpy/numpy/issues/9040 and
      # https://github.com/tensorflow/tensorflow/issues/9360 for discussion.
      if platform.machine() == "ppc64le":
        self._compare(-np.inf, np.int32, i4.min, False)
        self._compare(-np.inf, np.int64, i8.min, False)
      else:
        self._compare(np.inf, np.int32, i4.min, False)
        self._compare(np.inf, np.int64, i8.min, False)
    self._compare(-np.inf, np.float32, -np.inf, False)
    self._compare(-np.inf, np.float64, -np.inf, False)
    self._compare(-np.inf, np.int32, i4.min, False)
    self._compare(-np.inf, np.int64, i8.min, False)
    self.assertAllEqual(np.isnan(self._cast(np.nan, np.float32, False)), True)
    self.assertAllEqual(np.isnan(self._cast(np.nan, np.float64, False)), True)
    self._compare(np.nan, np.int32, i4.min, False)
    self._compare(np.nan, np.int64, i8.min, False)

    self._compare(np.inf, np.float32, np.inf, True)
    self._compare(np.inf, np.float64, np.inf, True)
    self._compare(-np.inf, np.float32, -np.inf, True)
    self._compare(-np.inf, np.float64, -np.inf, True)
    self.assertAllEqual(np.isnan(self._cast(np.nan, np.float32, True)), True)
    self.assertAllEqual(np.isnan(self._cast(np.nan, np.float64, True)), True)

  def _OpError(self, x, dtype, err):
    with self.test_session():
      with self.assertRaisesOpError(err):
        math_ops.cast(x, dtype).eval()

  def testNotImplemented(self):
    self._OpError(np.arange(0, 10), dtypes.string, "Cast.*int64.*string.*")

  def testCastToTypeOfVariable(self):
    with self.test_session() as sess:
      x = variables.Variable(5, dtype=dtypes.float32)
      y = variables.Variable(True, dtype=dtypes.bool)
      cast = math_ops.cast(y, x.dtype)
      variables.global_variables_initializer().run()
      self.assertEqual(1.0, sess.run(cast))

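  # The gradient of Cast is itself a cast of the incoming gradient back to
  # the source dtype, so the numeric and analytic gradients should agree
  # closely for every float/complex combination.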
  def testGradients(self):
    t = [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
    for src_t in t:
      for dst_t in t:
        with self.test_session():
          x = constant_op.constant(1.0, src_t)
          z = array_ops.identity(x)
          y = math_ops.cast(z, dst_t)
          err = gradient_checker.compute_gradient_error(x, [], y, [])
          self.assertLess(err, 1e-3)


class SparseTensorCastTest(test.TestCase):

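  # Casting a SparseTensor converts only its values; the indices and
  # dense_shape stay int64.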
  def testCast(self):
    indices = constant_op.constant([[0], [1], [2]], dtypes.int64)
    values = constant_op.constant(np.array([1, 2, 3], np.int64))
    shape = constant_op.constant([3], dtypes.int64)
    st = sparse_tensor.SparseTensor(indices, values, shape)
    st_cast = math_ops.cast(st, dtypes.float32)
    with self.test_session():
      self.assertAllEqual(st_cast.indices.eval(), [[0], [1], [2]])
      self.assertAllEqual(st_cast.values.eval(),
                          np.array([1, 2, 3], np.float32))
      self.assertAllEqual(st_cast.dense_shape.eval(), [3])


class SaturateCastTest(test.TestCase):

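  # saturate_cast clamps its input to the representable range of the target
  # dtype before casting, so out-of-range values saturate at out_type.min or
  # out_type.max instead of wrapping around.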
  def testSaturate(self):
    in_types = dtypes.float32,
    out_types = dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.float32
    with self.test_session() as sess:
      for in_type in in_types:
        for out_type in out_types:
          lo, hi = in_type.min, in_type.max
          x = constant_op.constant(
              [lo, lo + 1, lo // 2, hi // 2, hi - 1, hi], dtype=in_type)
          y = math_ops.saturate_cast(x, dtype=out_type)
          self.assertEqual(y.dtype, out_type)
          x, y = sess.run([x, y])
          correct = np.maximum(out_type.min, np.minimum(out_type.max, x))
          self.assertAllEqual(correct, y)


if __name__ == "__main__":
  test.main()