# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matmul."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import operator
import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test as test_lib

# TODO(yangzihao): Currently matmul autotuning is disabled by default. Use
# os.environ["TF_MATMUL_AUTOTUNE_ENABLE"] = "1" to enable it.


def _AddTest(test, op_name, testcase_name, fn):
  test_name = "_".join(["test", op_name, testcase_name])
  if hasattr(test, test_name):
    raise RuntimeError("Test %s defined more than once" % test_name)
  setattr(test, test_name, fn)


def _GetTransposedMatrices(x, x_name, kwargs):
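  # Pre-apply the transpose (or conjugate transpose) that tf.matmul will later
  # undo via its transpose_*/adjoint_* attributes, so that
  # matmul(effective_a, effective_b, **kwargs) reproduces the plain a * b.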
  if kwargs["transpose_" + x_name] is True:
    return x.T
  elif kwargs["adjoint_" + x_name] is True:
    return np.conj(x.T)
  else:
    return x


class MatMulTest(test_lib.TestCase):
  pass  # Filled in below


def _GetMatMulTest(a_np_, b_np_, use_static_shape_, **kwargs_):
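  # Returns a test method that compares math_ops.matmul against the NumPy
  # matrix product for one combination of operands, shape mode, and
  # transpose/adjoint flags.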

  def Test(self):
    np_val = np.matrix(a_np_) * np.matrix(b_np_)

    use_gpu = True
    if a_np_.dtype == np.float16 and (
        not test_util.CudaSupportsHalfMatMulAndConv()):
      use_gpu = False
      print("Built without fp16 matmul support for Cuda, running test on CPU.")

    # Transpose and possibly conjugate a_np_ and b_np_ according to the
    # attributes such that tf.matmul(effective_a_np, effective_b_np, **kwargs)
    # results in a valid matrix multiplication and produces the same result as
    # np.matrix(a_np_) * np.matrix(b_np_)
    effective_a_np = _GetTransposedMatrices(a_np_, "a", kwargs_)
    effective_b_np = _GetTransposedMatrices(b_np_, "b", kwargs_)
    with self.test_session(use_gpu=use_gpu) as sess:
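      # Static shapes: feed the operands as constants so their shapes are known
      # at graph construction time. Dynamic shapes: feed placeholders carrying
      # only a dtype, exercising the unknown-shape code path.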
      if use_static_shape_:
        a = constant_op.constant(effective_a_np)
        b = constant_op.constant(effective_b_np)
        res = math_ops.matmul(a, b, **kwargs_)
        tf_val = res.eval()
      else:
        a = array_ops.placeholder(a_np_.dtype)
        b = array_ops.placeholder(b_np_.dtype)
        res = math_ops.matmul(a, b, **kwargs_)
        tf_val = sess.run(res, feed_dict={a: effective_a_np, b: effective_b_np})

    self.assertAllCloseAccordingToType(
        tf_val,
        np_val,
        float_rtol=2e-5,
        float_atol=2e-5,
        half_rtol=0.2,
        half_atol=0.2)

  return Test


class MatMulGradientTest(test_lib.TestCase):
  pass  # Will be filled in below.


def _GetMatMulGradientTest(a_np_, b_np_, use_static_shape_, **kwargs_):

  def Test(self):
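    # The gradient check needs statically known shapes; int32 has no useful
    # gradient and float16 is generally too imprecise for finite-difference
    # comparisons.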
    if not use_static_shape_ or a_np_.dtype in (np.int32, np.float16):
      self.skipTest("Skipping infeasible gradient test.")

    # Transpose and possibly conjugate a_np_ and b_np_ according to the
    # attributes such that tf.matmul(effective_a_np, effective_b_np, **kwargs)
    # results in a valid matrix multiplication and produces the same result as
    # np.matrix(a_np_) * np.matrix(b_np_)
    effective_a_np = _GetTransposedMatrices(a_np_, "a", kwargs_)
    effective_b_np = _GetTransposedMatrices(b_np_, "b", kwargs_)

    epsilon = np.finfo(a_np_.dtype).eps
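    # eps**(1/3) is a common step size for finite-difference gradient checks:
    # it roughly balances truncation error against floating-point round-off.
    # The comparison tolerance is scaled from the same step.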
    delta = epsilon**(1.0 / 3.0)
    tol = 20 * delta
    with self.test_session(use_gpu=True):
      a = constant_op.constant(effective_a_np)
      b = constant_op.constant(effective_b_np)
      res = math_ops.matmul(a, b, **kwargs_)
      for x, x_init in [a, effective_a_np], [b, effective_b_np]:
        theoretical, numerical = gradient_checker.compute_gradient(
            x,
            x_init.shape,
            res, [a_np_.shape[0], b_np_.shape[1]],
            x_init_value=x_init,
            delta=delta)
        self.assertAllClose(theoretical, numerical, rtol=tol, atol=tol)

  return Test


class MatMulStatsTest(test_lib.TestCase):

  def testSimpleStatistics(self):
    g = ops.Graph()
    with g.as_default():
      a = variables.Variable(random_ops.random_normal([25, 16]))
      b = variables.Variable(random_ops.random_normal([16, 9]))
      math_ops.matmul(a, b)
      for op in g.get_operations():
        flops = ops.get_stats_for_node_def(g, op.node_def, "flops").value
        if op.name == "MatMul":
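          # A [25, 16] x [16, 9] matmul does 25 * 16 * 9 multiply-adds,
          # i.e. 2 * 25 * 16 * 9 = 7200 flops.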
          self.assertEqual(7200, flops)

  def testTransposedStatistics(self):
    g = ops.Graph()
    with g.as_default():
      a = variables.Variable(random_ops.random_normal([16, 25]))
      b = variables.Variable(random_ops.random_normal([16, 9]))
      math_ops.matmul(a, b, transpose_a=True)
      for op in g.get_operations():
        flops = ops.get_stats_for_node_def(g, op.node_def, "flops").value
        if op.name == "MatMul":
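          # With transpose_a=True the effective product is still
          # [25, 16] x [16, 9], so the count is again 2 * 25 * 16 * 9 = 7200.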
          self.assertEqual(7200, flops)


try:
  # @ operator supported since python 3.5.
  infix_matmul = operator.matmul
except AttributeError:

  # For earlier versions of python, emulate regular behavior.
  # Useful to build and test for 3.5+ on earlier versions.
  def infix_matmul(x, y):  # pylint: disable=invalid-name
    try:
      r = type(x).__matmul__(x, y)
    except AttributeError:
      r = NotImplemented
    if r is NotImplemented and type(x) is not type(y):
      try:
        r = type(y).__rmatmul__(y, x)
      except AttributeError:
        r = NotImplemented
    if r is NotImplemented:
      raise TypeError("unsupported operand type(s) for @: '{}' and '{}'"
                      .format(type(x).__name__, type(y).__name__))
    return r


class MatMulInfixOperatorTest(test_lib.TestCase):

  def testMismatchedShape(self):
    with self.assertRaisesWithPredicateMatch(ValueError,
                                             lambda e: "Shape must" in str(e)):
      infix_matmul(
          ops.convert_to_tensor([10.0, 20.0, 30.0]),
          ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))

  def testMismatchedDimensions(self):
    with self.assertRaisesWithPredicateMatch(
        ValueError, lambda e: "Dimensions must" in str(e)):
      infix_matmul(
          ops.convert_to_tensor([[10.0, 20.0, 30.0]]),
          ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))

  def testInfixMatmulIsTfMatmul(self):
    a = ops.convert_to_tensor([[10.0, 20.0, 30.0]])
    b = ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0], [80.0, 90.0]])
    c = infix_matmul(a, b)
    self.assertEqual(c.op.type, "MatMul")

  def testInfixMatmulDoesDotProduct(self):
    a = ops.convert_to_tensor([[10.0, 20.0, 30.0]])
    b = ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0], [80.0, 90.0]])
    c = infix_matmul(a, b)
    d = math_ops.matmul(a, b)
    with self.test_session():
      self.assertAllEqual(c.eval(), d.eval())


if __name__ == "__main__":
  sizes = [1, 3, 5]
  trans_options = [[False, False], [True, False], [False, True]]
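  # Each pair is [adjoint, transpose] for one operand; [True, True] is omitted
  # because tf.matmul rejects setting both flags for the same operand.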
  for use_static_shape in [False, True]:
    for dtype in (np.int32, np.float16, np.float32, np.float64, np.complex64,
                  np.complex128):
      if not use_static_shape and dtype == np.int32:
        # TODO(rmlarsen): Re-enable this test when we have fixed the underlying
        # bug in Windows (b/35935459).
        continue
      for m in sizes:
        for n in sizes:
          for k in sizes:
            # Construct compatible random matrices a_np of size [m, k] and b_np
            # of size [k, n].
            a_np = np.random.normal(-5, 5, m * k).astype(dtype).reshape([m, k])
            if dtype in (np.complex64, np.complex128):
              a_np.imag = np.random.normal(-5, 5,
                                           m * k).astype(dtype).reshape([m, k])
            b_np = np.random.normal(-5, 5, k * n).astype(dtype).reshape([k, n])
            if dtype in (np.complex64, np.complex128):
              b_np.imag = np.random.normal(-5, 5,
                                           k * n).astype(dtype).reshape([k, n])
            for adjoint_a, transpose_a in trans_options:
              for adjoint_b, transpose_b in trans_options:
                name = "%s_%s_%s_%s_%s_%s_%s_%s_%s" % (
                    use_static_shape, dtype.__name__, m, n, k, adjoint_a,
                    transpose_a, adjoint_b, transpose_b)
                _AddTest(MatMulTest, "MatMulTest", name,
                         _GetMatMulTest(
                             a_np,
                             b_np,
                             use_static_shape,
                             adjoint_a=adjoint_a,
                             transpose_a=transpose_a,
                             adjoint_b=adjoint_b,
                             transpose_b=transpose_b))
                _AddTest(MatMulGradientTest, "MatMulGradientTest", name,
                         _GetMatMulGradientTest(
                             a_np,
                             b_np,
                             use_static_shape,
                             adjoint_a=adjoint_a,
                             transpose_a=transpose_a,
                             adjoint_b=adjoint_b,
                             transpose_b=transpose_b))

  test_lib.main()