Home | History | Annotate | Download | only in ops
      1 /* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
      2 
      3 Licensed under the Apache License, Version 2.0 (the "License");
      4 you may not use this file except in compliance with the License.
      5 You may obtain a copy of the License at
      6 
      7     http://www.apache.org/licenses/LICENSE-2.0
      8 
      9 Unless required by applicable law or agreed to in writing, software
     10 distributed under the License is distributed on an "AS IS" BASIS,
     11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     12 See the License for the specific language governing permissions and
     13 limitations under the License.
     14 ==============================================================================*/
     15 
     16 #include <memory>
     17 #include <vector>
     18 
     19 #include "tensorflow/core/framework/function_testlib.h"
     20 #include "tensorflow/core/framework/op_kernel.h"
     21 #include "tensorflow/core/framework/tensor_testutil.h"
     22 #include "tensorflow/core/platform/test.h"
     23 #include "tensorflow/core/public/session.h"
     24 
     25 namespace tensorflow {
     26 namespace {
     27 
     28 namespace f = test::function;
     29 using FDH = FunctionDefHelper;
     30 
     31 std::unique_ptr<Session> NewSession() {
     32   SessionOptions opts;
     33   (*opts.config.mutable_device_count())["CPU"] = 1;
     34   return std::unique_ptr<Session>(NewSession(opts));
     35 }
     36 
// Fixture that checks registered symbolic gradient functions end-to-end:
// each helper builds a FunctionDef "Test" computing a scalar loss
// l = Sum(op(...)), a "TestGrad" function that differentiates it via the
// SymbolicGradient op with a seed gradient of 1, and runs the resulting
// graph in a single-CPU session.
class MathGradTest : public ::testing::Test {
 protected:
  // Unary
  //
  // Computes d(Sum(op(x)))/dx and stores it in `*y`. Returns the session
  // Run status; `*y` is only written when the run succeeds.
  Status Unary(const string& op, const Tensor& x, Tensor* y) {
    const DataType T = x.dtype();
    // Renders a function arg spec such as "x:float" for T = DT_FLOAT.
    auto adef = [T](const string& name) {  // E.g., x:float, dy:double
      return strings::StrCat(name, ":", DataTypeString(T));
    };
    // Sum(op(x)), sum all output of op(x).
    // NOTE(review): the reduction indices come from Rank("x"), which assumes
    // op(x) has the same rank as x (true for the elementwise ops tested).
    auto test = FDH::Define("Test", {adef("x")}, {adef("l")}, {},
                            {
                                {{"y"}, op, {"x"}, {{"T", T}}},
                                FDH::Const("zero", 0),
                                FDH::Const("one", 1),
                                {{"r"}, "Rank", {"x"}, {{"T", T}}},
                                {{"indices"}, "Range", {"zero", "r", "one"}},
                                {{"l"}, "Sum", {"y", "indices"}, {{"T", T}}},
                            });

    // TestGrad = Test'(x)
    auto grad = FDH::Define(
        "TestGrad", {adef("x")}, {adef("dx")}, {},
        {
            FDH::Const("one", 1),
            // Seed gradient dl = 1 for the scalar loss, cast to T.
            {{"dy"}, "Cast", {"one"}, {{"DstT", T}, {"SrcT", DT_INT32}}},
            {{"grad"},
             "SymbolicGradient",
             {"x", "dy"},
             {
                 {"f", FDH::FunctionRef("Test")},
                 {"Tin", DataTypeSlice{T, T}},
                 {"Tout", DataTypeSlice{T}},
             }},
            {{"dx"}, "Identity", {"grad"}, {{"T", T}}},
        });
    // Each test case will feed in "x:0" and expects to get "dx:0".
    auto gdef = test::function::GDef(
        {
            f::NDef("x", "Placeholder", {}, {{"dtype", T}}),
            f::NDef("dx", "TestGrad", {"x"}, {}),
        },
        {test, grad});

    auto sess = NewSession();
    TF_CHECK_OK(sess->Create(gdef));
    std::vector<Tensor> outputs;
    auto s = sess->Run({{"x:0", x}}, {"dx:0"}, {}, &outputs);
    // Only propagate the output on success so callers can also use this
    // helper to test error reporting (see Error_Reporting).
    if (s.ok()) {
      CHECK_EQ(outputs.size(), 1);
      *y = outputs[0];
    }
    TF_CHECK_OK(sess->Close());
    return s;
  }

  // Unary op expecting OK.
  // Convenience wrapper: returns the gradient tensor, CHECK-failing on error.
  Tensor SymGrad(const string& op, const Tensor& x) {
    Tensor ret;
    TF_CHECK_OK(Unary(op, x, &ret));
    return ret;
  }

  // Binary
  //
  // Computes the gradients of Sum(op(x, y)) w.r.t. both inputs, storing
  // them in `*dx` and `*dy`. Both inputs must share the dtype of `x`.
  void SymGrad(const string& op, const Tensor& x, const Tensor& y, Tensor* dx,
               Tensor* dy) {
    const DataType T = x.dtype();
    auto adef = [T](const string& name) {  // E.g., x:float, dy:double
      return strings::StrCat(name, ":", DataTypeString(T));
    };
    // Sum(op(x)), sum all output of op(x).
    auto test = FDH::Define("Test", {adef("x"), adef("y")}, {adef("l")}, {},
                            {
                                {{"z"}, op, {"x", "y"}, {{"T", T}}},
                                FDH::Const("zero", 0),
                                FDH::Const("one", 1),
                                {{"r"}, "Rank", {"z"}, {{"T", T}}},
                                {{"indices"}, "Range", {"zero", "r", "one"}},
                                {{"l"}, "Sum", {"z", "indices"}, {{"T", T}}},
                            });

    // TestGrad = Test'(x, y)
    auto grad = FDH::Define(
        "TestGrad", {adef("x"), adef("y")}, {adef("dx"), adef("dy")}, {},
        {
            FDH::Const("one", 1),
            // Seed gradient dl = 1 for the scalar loss, cast to T.
            {{"dz"}, "Cast", {"one"}, {{"DstT", T}, {"SrcT", DT_INT32}}},
            {{"grad0", "grad1"},
             "SymbolicGradient",
             {"x", "y", "dz"},
             {
                 {"f", FDH::FunctionRef("Test")},
                 {"Tin", DataTypeSlice{T, T, T}},
                 {"Tout", DataTypeSlice{T, T}},
             }},
            {{"dx"}, "Identity", {"grad0"}, {{"T", T}}},
            {{"dy"}, "Identity", {"grad1"}, {{"T", T}}},
        });
    // Each test case will feed in "x:0" and "y:0" and expects to get "d:0"
    // and "d:1".
    auto gdef = test::function::GDef(
        {
            f::NDef("x", "Placeholder", {}, {{"dtype", T}}),
            f::NDef("y", "Placeholder", {}, {{"dtype", T}}),
            f::NDef("d", "TestGrad", {"x", "y"}, {}),
        },
        {test, grad});

    auto sess = NewSession();
    TF_CHECK_OK(sess->Create(gdef));
    std::vector<Tensor> outputs;
    TF_CHECK_OK(
        sess->Run({{"x:0", x}, {"y:0", y}}, {"d:0", "d:1"}, {}, &outputs));
    CHECK_EQ(outputs.size(), 2);
    TF_CHECK_OK(sess->Close());
    *dx = outputs[0];
    *dy = outputs[1];
  }

  // Reduction grad
  //
  // Like the binary SymGrad, but the second input is an int32 index tensor
  // (e.g. reduction axes). `*di` receives the (conventionally zero)
  // gradient reported for the indices.
  void ReductionGrad(const string& op, const Tensor& x, const Tensor& idx,
                     Tensor* dx, Tensor* di) {
    const DataType T = x.dtype();
    auto adef = [T](const string& name) {  // E.g., x:float, dy:double
      return strings::StrCat(name, ":", DataTypeString(T));
    };
    // Sum(op(x, idx)), sum all output of op(x, idx).
    auto test = FDH::Define("Test", {adef("x"), "i:int32"}, {adef("l")}, {},
                            {
                                {{"y"}, op, {"x", "i"}, {{"T", T}}},
                                FDH::Const("zero", 0),
                                FDH::Const("one", 1),
                                {{"r"}, "Rank", {"y"}, {{"T", T}}},
                                {{"indices"}, "Range", {"zero", "r", "one"}},
                                {{"l"}, "Sum", {"y", "indices"}, {{"T", T}}},
                            });

    // TestGrad = Test'(x)
    auto grad = FDH::Define(
        "TestGrad", {adef("x"), "i:int32"}, {adef("dx"), "di:int32"}, {},
        {
            FDH::Const("one", 1),
            {{"dy"}, "Cast", {"one"}, {{"DstT", T}, {"SrcT", DT_INT32}}},
            {{"grad0", "grad1"},
             "SymbolicGradient",
             {"x", "i", "dy"},
             {
                 {"f", FDH::FunctionRef("Test")},
                 {"Tin", DataTypeSlice{T, DT_INT32, T}},
                 {"Tout", DataTypeSlice{T, DT_INT32}},
             }},
            {{"dx"}, "Identity", {"grad0"}, {{"T", T}}},
            {{"di"}, "Identity", {"grad1"}, {{"T", DT_INT32}}},
        });
    // Each test case will feed in "x:0" and "i:0" and expects to get "d:0"
    // (dx) and "d:1" (di).
    auto gdef = test::function::GDef(
        {
            f::NDef("x", "Placeholder", {}, {{"dtype", T}}),
            f::NDef("i", "Placeholder", {}, {{"dtype", DT_INT32}}),
            f::NDef("d", "TestGrad", {"x", "i"}, {}),
        },
        {test, grad});

    auto sess = NewSession();
    TF_CHECK_OK(sess->Create(gdef));
    std::vector<Tensor> outputs;
    TF_CHECK_OK(
        sess->Run({{"x:0", x}, {"i:0", idx}}, {"d:0", "d:1"}, {}, &outputs));
    CHECK_EQ(outputs.size(), 2);
    TF_CHECK_OK(sess->Close());
    *dx = outputs[0];
    *di = outputs[1];
  }

  // Runs a single (Batch)MatMul node forward and returns its output.
  // `attr_adj_x`/`attr_adj_y` name the op's transpose/adjoint attributes
  // ("transpose_a"/"transpose_b" for MatMul, "adj_x"/"adj_y" for
  // BatchMatMul); `ax`/`ay` are their values.
  Tensor MatMulCommon(const string& opname, const string& attr_adj_x,
                      const string& attr_adj_y, const Tensor& x, bool ax,
                      const Tensor& y, bool ay) {
    auto T = x.dtype();
    auto gdef = test::function::GDef(
        {
            f::NDef("x", "Placeholder", {}, {{"dtype", T}}),
            f::NDef("y", "Placeholder", {}, {{"dtype", T}}),
            f::NDef("z", opname, {"x", "y"},
                    {{"T", T}, {attr_adj_x, ax}, {attr_adj_y, ay}}),
        },
        {});
    auto sess = NewSession();
    TF_CHECK_OK(sess->Create(gdef));
    std::vector<Tensor> outputs;
    TF_CHECK_OK(sess->Run({{"x:0", x}, {"y:0", y}}, {"z:0"}, {}, &outputs));
    CHECK_EQ(outputs.size(), 1);
    TF_CHECK_OK(sess->Close());
    return outputs[0];
  }

  // Forward MatMul with optional transposes.
  Tensor MatMul(const Tensor& x, bool ax, const Tensor& y, bool ay) {
    return MatMulCommon("MatMul", "transpose_a", "transpose_b", x, ax, y, ay);
  }

  // Forward BatchMatMul with optional adjoints.
  Tensor BatchMatMul(const Tensor& x, bool ax, const Tensor& y, bool ay) {
    return MatMulCommon("BatchMatMul", "adj_x", "adj_y", x, ax, y, ay);
  }

  // Computes the gradients of Sum((Batch)MatMul(x, y)) w.r.t. x and y,
  // with the transpose/adjoint attributes set to `ax`/`ay`.
  void MatMulGradCommon(const string& opname, const string& attr_adj_x,
                        const string& attr_adj_y, const Tensor& x, bool ax,
                        const Tensor& y, bool ay, Tensor* dx, Tensor* dy) {
    const DataType T = x.dtype();
    auto adef = [T](const string& name) {  // E.g., x:float, dy:double
      return strings::StrCat(name, ":", DataTypeString(T));
    };
    // Sum(op(x)), sum all output of op(x).
    auto test =
        FDH::Define("Test", {adef("x"), adef("y")}, {adef("l")}, {},
                    {
                        {{"z"},
                         opname,
                         {"x", "y"},
                         {{"T", T}, {attr_adj_x, ax}, {attr_adj_y, ay}}},
                        FDH::Const("zero", 0),
                        FDH::Const("one", 1),
                        {{"r"}, "Rank", {"z"}, {{"T", T}}},
                        {{"indices"}, "Range", {"zero", "r", "one"}},
                        {{"l"}, "Sum", {"z", "indices"}, {{"T", T}}},
                    });

    // TestGrad = Test'(x, y)
    auto grad = FDH::Define(
        "TestGrad", {adef("x"), adef("y")}, {adef("dx"), adef("dy")}, {},
        {
            FDH::Const("one", 1),
            {{"dz"}, "Cast", {"one"}, {{"DstT", T}, {"SrcT", DT_INT32}}},
            {{"grad0", "grad1"},
             "SymbolicGradient",
             {"x", "y", "dz"},
             {
                 {"f", FDH::FunctionRef("Test")},
                 {"Tin", DataTypeSlice{T, T, T}},
                 {"Tout", DataTypeSlice{T, T}},
             }},
            {{"dx"}, "Identity", {"grad0"}, {{"T", T}}},
            {{"dy"}, "Identity", {"grad1"}, {{"T", T}}},
        });
    // Each test case will feed in "x:0" and "y:0" and expects to get "d:0"
    // and "d:1".
    auto gdef = test::function::GDef(
        {
            f::NDef("x", "Placeholder", {}, {{"dtype", T}}),
            f::NDef("y", "Placeholder", {}, {{"dtype", T}}),
            f::NDef("d", "TestGrad", {"x", "y"}, {}),
        },
        {test, grad});

    auto sess = NewSession();
    TF_CHECK_OK(sess->Create(gdef));
    std::vector<Tensor> outputs;
    TF_CHECK_OK(
        sess->Run({{"x:0", x}, {"y:0", y}}, {"d:0", "d:1"}, {}, &outputs));
    CHECK_EQ(outputs.size(), 2);
    TF_CHECK_OK(sess->Close());
    *dx = outputs[0];
    *dy = outputs[1];
  }

  // Gradient of Sum(MatMul(x, y)).
  void MatMulGrad(const Tensor& x, bool ax, const Tensor& y, bool ay,
                  Tensor* dx, Tensor* dy) {
    return MatMulGradCommon("MatMul", "transpose_a", "transpose_b", x, ax, y,
                            ay, dx, dy);
  }

  // Gradient of Sum(BatchMatMul(x, y)).
  void BatchMatMulGrad(const Tensor& x, bool ax, const Tensor& y, bool ay,
                       Tensor* dx, Tensor* dy) {
    return MatMulGradCommon("BatchMatMul", "adj_x", "adj_y", x, ax, y, ay, dx,
                            dy);
  }

  // Computes the gradients of Sum(Select(c, x, y)) w.r.t. all three inputs
  // (float only). `*dc` receives the gradient reported for the bool
  // predicate.
  void SelectGrad(const Tensor& c, const Tensor& x, const Tensor& y, Tensor* dc,
                  Tensor* dx, Tensor* dy) {
    auto T = DT_FLOAT;
    // Sum(Select(c, x, y))
    auto test =
        FDH::Define("Test", {"c:bool", "x:float", "y:float"}, {"l:float"}, {},
                    {
                        {{"z"}, "Select", {"c", "x", "y"}, {{"T", T}}},
                        FDH::Const("zero", 0),
                        FDH::Const("one", 1),
                        {{"r"}, "Rank", {"z"}, {{"T", T}}},
                        {{"indices"}, "Range", {"zero", "r", "one"}},
                        {{"l"}, "Sum", {"z", "indices"}, {{"T", T}}},
                    });

    // TestGrad(x, y) = Test'(c, x, y)
    auto grad = FDH::Define("TestGrad", {"c:bool", "x:float", "y:float"},
                            {"dc:bool", "dx:float", "dy:float"}, {},
                            {FDH::Const("dz", 1.f),
                             {{"grad0", "grad1", "grad2"},
                              "SymbolicGradient",
                              {"c", "x", "y", "dz"},
                              {
                                  {"f", FDH::FunctionRef("Test")},
                                  {"Tin", DataTypeSlice{DT_BOOL, T, T, T}},
                                  {"Tout", DataTypeSlice{DT_BOOL, T, T}},
                              }},
                             {{"dc"}, "Identity", {"grad0"}, {{"T", DT_BOOL}}},
                             {{"dx"}, "Identity", {"grad1"}, {{"T", T}}},
                             {{"dy"}, "Identity", {"grad2"}, {{"T", T}}}});
    // Each test case will feed in "c:0", "x:0" and "y:0" and expects to get
    // "d:0" (dc), "d:1" (dx) and "d:2" (dy).
    auto gdef = test::function::GDef(
        {
            f::NDef("c", "Placeholder", {}, {{"dtype", DT_BOOL}}),
            f::NDef("x", "Placeholder", {}, {{"dtype", T}}),
            f::NDef("y", "Placeholder", {}, {{"dtype", T}}),
            f::NDef("d", "TestGrad", {"c", "x", "y"}, {}),
        },
        {test, grad});

    auto sess = NewSession();
    TF_CHECK_OK(sess->Create(gdef));
    std::vector<Tensor> outputs;
    TF_CHECK_OK(sess->Run({{"c:0", c}, {"x:0", x}, {"y:0", y}},
                          {"d:0", "d:1", "d:2"}, {}, &outputs));
    CHECK_EQ(outputs.size(), 3);
    TF_CHECK_OK(sess->Close());
    *dc = outputs[0];
    *dx = outputs[1];
    *dy = outputs[2];
  }
};
    363 
    364 void HasError(const Status& s, const string& substr) {
    365   EXPECT_TRUE(StringPiece(s.ToString()).contains(substr))
    366       << s << ", expected substring " << substr;
    367 }
    368 
// An op that has kernels (below) but deliberately no gradient function
// registered; used by Error_Reporting to verify SymbolicGradient's error
// message.
REGISTER_OP("TestOpWithNoGrad")
    .Input("x: T")
    .Output("y: T")
    .Attr("T: {float, double}")
    .Doc(R"doc(
Test op with no grad registered.

x: input
y: output
)doc");
    379 
    380 class TestOp : public OpKernel {
    381  public:
    382   explicit TestOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
    383   void Compute(OpKernelContext* ctx) override { ctx->set_output(0, Tensor()); }
    384 };
// Register kernels so graphs containing the op can run; only the gradient
// registration is intentionally missing.
REGISTER_KERNEL_BUILDER(Name("TestOpWithNoGrad").Device(DEVICE_CPU), TestOp);
#ifdef TENSORFLOW_USE_SYCL
REGISTER_KERNEL_BUILDER(Name("TestOpWithNoGrad").Device(DEVICE_SYCL), TestOp);
#endif  // TENSORFLOW_USE_SYCL
    389 
    390 TEST_F(MathGradTest, Error_Reporting) {
    391   auto x = test::AsTensor<float>({-3.f});
    392   auto dx = test::AsTensor<float>({3.f});
    393   Tensor donotcare;
    394   HasError(Unary("TestOpWithNoGrad", x, &donotcare),
    395            "No gradient defined for op: TestOpWithNoGrad");
    396 }
    397 
    398 TEST_F(MathGradTest, Abs) {
    399   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
    400                                  TensorShape({2, 3}));
    401   auto g = [](float x) { return x < 0 ? -1.f : 1.f; };
    402   auto dx = test::AsTensor<float>(
    403       {g(-3.f), g(-2.f), g(-1.f), g(1.f), g(2.f), g(3.f)}, TensorShape({2, 3}));
    404   auto ans = SymGrad("Abs", x);
    405   test::ExpectClose(ans, dx);
    406 }
    407 
    408 TEST_F(MathGradTest, Neg) {
    409   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
    410                                  TensorShape({2, 3}));
    411   auto g = [](float x) { return -1.f; };
    412   auto dx = test::AsTensor<float>(
    413       {g(-3.f), g(-2.f), g(-1.f), g(1.f), g(2.f), g(3.f)}, TensorShape({2, 3}));
    414   auto ans = SymGrad("Neg", x);
    415   test::ExpectClose(ans, dx);
    416 }
    417 
    418 TEST_F(MathGradTest, Reciprocal) {
    419   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
    420                                  TensorShape({2, 3}));
    421   auto g = [](float x) { return -1.f / (x * x); };
    422   auto dx = test::AsTensor<float>(
    423       {g(-3.f), g(-2.f), g(-1.f), g(1.f), g(2.f), g(3.f)}, TensorShape({2, 3}));
    424   auto ans = SymGrad("Reciprocal", x);
    425   test::ExpectClose(ans, dx);
    426 }
    427 
    428 TEST_F(MathGradTest, Square) {
    429   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
    430                                  TensorShape({2, 3}));
    431   auto g = [](float x) { return 2 * x; };
    432   auto dx = test::AsTensor<float>(
    433       {g(-3.f), g(-2.f), g(-1.f), g(1.f), g(2.f), g(3.f)}, TensorShape({2, 3}));
    434   auto ans = SymGrad("Square", x);
    435   test::ExpectClose(ans, dx);
    436 }
    437 
    438 TEST_F(MathGradTest, Sqrt) {
    439   auto x = test::AsTensor<float>({1.f, 2.f, 3.f, 4.f, 5.f, 6.f},
    440                                  TensorShape({2, 3}));
    441   auto g = [](float x) { return 0.5f / std::sqrt(x); };
    442   auto dx = test::AsTensor<float>(
    443       {g(1.f), g(2.f), g(3.f), g(4.f), g(5.f), g(6.f)}, TensorShape({2, 3}));
    444   auto ans = SymGrad("Sqrt", x);
    445   test::ExpectClose(ans, dx);
    446 }
    447 
    448 TEST_F(MathGradTest, Rsqrt) {
    449   auto x = test::AsTensor<float>({1.f, 2.f, 3.f, 4.f, 5.f, 6.f},
    450                                  TensorShape({2, 3}));
    451   auto g = [](float x) { return -0.5f / (x * std::sqrt(x)); };
    452   auto dx = test::AsTensor<float>(
    453       {g(1.f), g(2.f), g(3.f), g(4.f), g(5.f), g(6.f)}, TensorShape({2, 3}));
    454   auto ans = SymGrad("Rsqrt", x);
    455   test::ExpectClose(ans, dx);
    456 }
    457 
    458 TEST_F(MathGradTest, Exp) {
    459   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
    460                                  TensorShape({2, 3}));
    461   auto g = [](float x) { return std::exp(x); };
    462   auto dx = test::AsTensor<float>(
    463       {g(-3.f), g(-2.f), g(-1.f), g(1.f), g(2.f), g(3.f)}, TensorShape({2, 3}));
    464   auto ans = SymGrad("Exp", x);
    465   test::ExpectClose(ans, dx);
    466 }
    467 
    468 TEST_F(MathGradTest, Expm1) {
    469   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
    470                                  TensorShape({2, 3}));
    471   auto g = [](float x) { return std::exp(x); };
    472   auto dx = test::AsTensor<float>(
    473       {g(-3.f), g(-2.f), g(-1.f), g(1.f), g(2.f), g(3.f)}, TensorShape({2, 3}));
    474   auto ans = SymGrad("Expm1", x);
    475   test::ExpectClose(ans, dx);
    476 }
    477 
    478 TEST_F(MathGradTest, Log) {
    479   auto x = test::AsTensor<float>({0.1f, 1.f, 2.f, 3.f, 4.f, 10.f},
    480                                  TensorShape({2, 3}));
    481   auto g = [](float x) { return 1 / x; };
    482   auto dx = test::AsTensor<float>(
    483       {g(.1f), g(1.f), g(2.f), g(3.f), g(4.f), g(10.f)}, TensorShape({2, 3}));
    484   auto ans = SymGrad("Log", x);
    485   test::ExpectClose(ans, dx);
    486 }
    487 
    488 TEST_F(MathGradTest, Log1p) {
    489   auto x = test::AsTensor<float>({0.1f, 1.f, 2.f, 3.f, 4.f, 10.f},
    490                                  TensorShape({2, 3}));
    491   auto g = [](float x) { return 1 / (1 + x); };
    492   auto dx = test::AsTensor<float>(
    493       {g(.1f), g(1.f), g(2.f), g(3.f), g(4.f), g(10.f)}, TensorShape({2, 3}));
    494   auto ans = SymGrad("Log1p", x);
    495   test::ExpectClose(ans, dx);
    496 }
    497 
    498 TEST_F(MathGradTest, Sinh) {
    499   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
    500                                  TensorShape({2, 3}));
    501   auto g = [](float x) { return std::cosh(x); };
    502   auto dx = test::AsTensor<float>(
    503       {g(-3.f), g(-2.f), g(-1.f), g(1.f), g(2.f), g(3.f)}, TensorShape({2, 3}));
    504   auto ans = SymGrad("Sinh", x);
    505   test::ExpectClose(ans, dx);
    506 }
    507 
    508 TEST_F(MathGradTest, Cosh) {
    509   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
    510                                  TensorShape({2, 3}));
    511   auto g = [](float x) { return std::sinh(x); };
    512   auto dx = test::AsTensor<float>(
    513       {g(-3.f), g(-2.f), g(-1.f), g(1.f), g(2.f), g(3.f)}, TensorShape({2, 3}));
    514   auto ans = SymGrad("Cosh", x);
    515   test::ExpectClose(ans, dx);
    516 }
    517 
    518 TEST_F(MathGradTest, Tanh) {
    519   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
    520                                  TensorShape({2, 3}));
    521   auto g = [](float x) {
    522     auto y = std::tanh(x);
    523     return 1 - y * y;
    524   };
    525   auto dx = test::AsTensor<float>(
    526       {g(-3.f), g(-2.f), g(-1.f), g(1.f), g(2.f), g(3.f)}, TensorShape({2, 3}));
    527   auto ans = SymGrad("Tanh", x);
    528   test::ExpectClose(ans, dx);
    529 }
    530 
    531 TEST_F(MathGradTest, Asinh) {
    532   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
    533                                  TensorShape({2, 3}));
    534   auto g = [](float x) {
    535     auto y = std::asinh(x);
    536     return std::cosh(y);
    537   };
    538   auto dx = test::AsTensor<float>(
    539       {g(-3.f), g(-2.f), g(-1.f), g(1.f), g(2.f), g(3.f)}, TensorShape({2, 3}));
    540   auto ans = SymGrad("Asinh", x);
    541   test::ExpectClose(ans, dx);
    542 }
    543 
    544 TEST_F(MathGradTest, Acosh) {
    545   auto x = test::AsTensor<float>({6.f, 5.f, 4.f, 1.f, 2.f, 3.f},
    546                                  TensorShape({2, 3}));
    547   auto g = [](float x) {
    548     auto y = std::acosh(x);
    549     return std::sinh(y);
    550   };
    551   auto dx = test::AsTensor<float>(
    552       {g(6.f), g(5.f), g(4.f), g(1.f), g(2.f), g(3.f)}, TensorShape({2, 3}));
    553   auto ans = SymGrad("Acosh", x);
    554   test::ExpectClose(ans, dx);
    555 }
    556 
    557 TEST_F(MathGradTest, Atanh) {
    558   auto x = test::AsTensor<float>({-0.3f, -0.2f, -0.1f, 0.1f, 0.2f, 0.3f},
    559                                  TensorShape({2, 3}));
    560   auto g = [](float x) { return 1.f / (1.f - x * x); };
    561   auto dx = test::AsTensor<float>(
    562       {g(-0.3f), g(-0.2f), g(-0.1f), g(0.1f), g(0.2f), g(0.3f)},
    563       TensorShape({2, 3}));
    564   auto ans = SymGrad("Atanh", x);
    565   test::ExpectClose(ans, dx);
    566 }
    567 
    568 TEST_F(MathGradTest, Sigmoid) {
    569   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
    570                                  TensorShape({2, 3}));
    571   auto g = [](float x) {
    572     auto y = 1.f / (1.f + std::exp(-x));
    573     return y * (1 - y);
    574   };
    575   auto dx = test::AsTensor<float>(
    576       {g(-3.f), g(-2.f), g(-1.f), g(1.f), g(2.f), g(3.f)}, TensorShape({2, 3}));
    577   auto ans = SymGrad("Sigmoid", x);
    578   test::ExpectClose(ans, dx);
    579 }
    580 
    581 TEST_F(MathGradTest, Sign) {
    582   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
    583                                  TensorShape({2, 3}));
    584   auto g = [](float x) { return 0.f; };
    585   auto dx = test::AsTensor<float>(
    586       {g(-3.f), g(-2.f), g(-1.f), g(1.f), g(2.f), g(3.f)}, TensorShape({2, 3}));
    587   auto ans = SymGrad("Sign", x);
    588   test::ExpectClose(ans, dx);
    589 }
    590 
    591 TEST_F(MathGradTest, Sin) {
    592   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
    593                                  TensorShape({2, 3}));
    594   auto g = [](float x) { return std::cos(x); };
    595   auto dx = test::AsTensor<float>(
    596       {g(-3.f), g(-2.f), g(-1.f), g(1.f), g(2.f), g(3.f)}, TensorShape({2, 3}));
    597   auto ans = SymGrad("Sin", x);
    598   test::ExpectClose(ans, dx);
    599 }
    600 
    601 TEST_F(MathGradTest, Cos) {
    602   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
    603                                  TensorShape({2, 3}));
    604   auto g = [](float x) { return -std::sin(x); };
    605   auto dx = test::AsTensor<float>(
    606       {g(-3.f), g(-2.f), g(-1.f), g(1.f), g(2.f), g(3.f)}, TensorShape({2, 3}));
    607   auto ans = SymGrad("Cos", x);
    608   test::ExpectClose(ans, dx);
    609 }
    610 
    611 // TODO(zhifengc)
    612 // TEST_F(MathGradSComplexTest, Real) {}
    613 // TEST_F(MathGradSComplexTest, Imag) {}
    614 // TEST_F(MathGradSComplexTest, Angle) {}
    615 // TEST_F(MathGradSComplexTest, Conj) {}
    616 // TEST_F(MathGradTernary, Select) {}
    617 
    618 TEST_F(MathGradTest, Add) {
    619   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
    620                                  TensorShape({2, 3}));
    621   auto y = test::AsTensor<float>({-10.f, 10.f}, TensorShape({2, 1}));
    622   auto ans_dx = test::AsTensor<float>({1.f, 1.f, 1.f, 1.f, 1.f, 1.f},
    623                                       TensorShape({2, 3}));
    624   auto ans_dy = test::AsTensor<float>({3.f, 3.f}, TensorShape({2, 1}));
    625   Tensor dx;
    626   Tensor dy;
    627   {
    628     SymGrad("Add", x, y, &dx, &dy);
    629     test::ExpectClose(ans_dx, dx);
    630     test::ExpectClose(ans_dy, dy);
    631   }
    632   {  // Swap x and y
    633     SymGrad("Add", y, x, &dy, &dx);
    634     test::ExpectClose(ans_dx, dx);
    635     test::ExpectClose(ans_dy, dy);
    636   }
    637 }
    638 
    639 TEST_F(MathGradTest, Sub) {
    640   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
    641                                  TensorShape({2, 3}));
    642   auto y = test::AsTensor<float>({-10.f, 10.f}, TensorShape({2, 1}));
    643   Tensor dx;
    644   Tensor dy;
    645   {
    646     SymGrad("Sub", x, y, &dx, &dy);
    647     auto ans_dx = test::AsTensor<float>({1.f, 1.f, 1.f, 1.f, 1.f, 1.f},
    648                                         TensorShape({2, 3}));
    649     auto ans_dy = test::AsTensor<float>({-3.f, -3.f}, TensorShape({2, 1}));
    650     test::ExpectClose(ans_dx, dx);
    651     test::ExpectClose(ans_dy, dy);
    652   }
    653   {  // Swap x and y
    654     SymGrad("Sub", y, x, &dy, &dx);
    655     auto ans_dx = test::AsTensor<float>({-1.f, -1.f, -1.f, -1.f, -1.f, -1.f},
    656                                         TensorShape({2, 3}));
    657     auto ans_dy = test::AsTensor<float>({3.f, 3.f}, TensorShape({2, 1}));
    658     test::ExpectClose(ans_dx, dx);
    659     test::ExpectClose(ans_dy, dy);
    660   }
    661 }
    662 
    663 TEST_F(MathGradTest, Mul) {
    664   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
    665                                  TensorShape({2, 3}));
    666   auto y = test::AsTensor<float>({-10.f, 10.f}, TensorShape({2, 1}));
    667   auto ans_dx = test::AsTensor<float>({-10.f, -10.f, -10.f, 10.f, 10.f, 10.f},
    668                                       TensorShape({2, 3}));
    669   auto ans_dy = test::AsTensor<float>({-3.f + (-2.f) + (-1.f), 1.f + 2.f + 3.f},
    670                                       TensorShape({2, 1}));
    671   Tensor dx;
    672   Tensor dy;
    673   {
    674     SymGrad("Mul", x, y, &dx, &dy);
    675     test::ExpectClose(ans_dx, dx);
    676     test::ExpectClose(ans_dy, dy);
    677   }
    678   {  // Swap x and y
    679     SymGrad("Mul", y, x, &dy, &dx);
    680     test::ExpectClose(ans_dx, dx);
    681     test::ExpectClose(ans_dy, dy);
    682   }
    683 }
    684 
    685 TEST_F(MathGradTest, Div) {
    686   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
    687                                  TensorShape({2, 3}));
    688   auto y = test::AsTensor<float>({-10.f, 10.f}, TensorShape({2, 1}));
    689   Tensor dx;
    690   Tensor dy;
    691   {
    692     SymGrad("Div", x, y, &dx, &dy);
    693     {
    694       auto g = [](float x, float y) { return 1.f / y; };
    695       test::ExpectClose(dx, test::AsTensor<float>(
    696                                 {g(-3.f, -10.f), g(-2.f, -10.f), g(-1.f, -10.f),
    697                                  g(1.f, 10.f), g(2.f, 10.f), g(3.f, 10.f)},
    698                                 TensorShape({2, 3})));
    699     }
    700     {
    701       auto g = [](float x, float y) { return -x / (y * y); };
    702       test::ExpectClose(dy,
    703                         test::AsTensor<float>(
    704                             {g(-3.f, -10.f) + g(-2.f, -10.f) + g(-1.f, -10.f),
    705                              g(1.f, 10.f) + g(2.f, 10.f) + g(3.f, 10.f)},
    706                             TensorShape({2, 1})));
    707     }
    708   }
    709   {  // Swap x and y
    710     SymGrad("Div", y, x, &dy, &dx);
    711     {
    712       auto g = [](float x, float y) { return 1.f / y; };
    713       test::ExpectClose(dy,
    714                         test::AsTensor<float>(
    715                             {g(-10.f, -3.f) + g(-10.f, -2.f) + g(-10.f, -1.f),
    716                              g(10.f, 1.f) + g(10.f, 2.f) + g(10.f, 3.f)},
    717                             TensorShape({2, 1})));
    718     }
    719     {
    720       auto g = [](float x, float y) { return -x / (y * y); };
    721       test::ExpectClose(dx, test::AsTensor<float>(
    722                                 {g(-10.f, -3.f), g(-10.f, -2.f), g(-10.f, -1.f),
    723                                  g(10.f, 1.f), g(10.f, 2.f), g(10.f, 3.f)},
    724                                 TensorShape({2, 3})));
    725     }
    726   }
    727 }
    728 
    729 TEST_F(MathGradTest, Pow) {
    730   auto x = test::AsTensor<float>({0.f, 1.f, 2.f, 3.f, 4.f, 5.f},
    731                                  TensorShape({2, 3}));
    732   auto y = test::AsTensor<float>({.5f, 2.f}, TensorShape({2, 1}));
    733   Tensor dx;
    734   Tensor dy;
    735   auto g = [](float x, float y) { return y * std::pow(x, y - 1); };
    736   auto h = [](float x, float y) {
    737     return std::pow(x, y) * (x ? std::log(x) : 0);
    738   };
    739   {
    740     SymGrad("Pow", x, y, &dx, &dy);
    741     test::ExpectClose(
    742         dx, test::AsTensor<float>({g(0.f, .5f), g(1.f, .5f), g(2.f, .5f),
    743                                    g(3.f, 2.f), g(4.f, 2.f), g(5.f, 2.f)},
    744                                   TensorShape({2, 3})));
    745     test::ExpectClose(
    746         dy, test::AsTensor<float>({h(0.f, .5f) + h(1.f, .5f) + h(2.f, .5f),
    747                                    h(3.f, 2.f) + h(4.f, 2.f) + h(5.f, 2.f)},
    748                                   TensorShape({2, 1})));
    749   }
    750   {  // Swap x and y
    751     SymGrad("Pow", y, x, &dy, &dx);
    752     test::ExpectClose(
    753         dy, test::AsTensor<float>({g(.5f, 0.f) + g(.5f, 1.f) + g(.5f, 2.f),
    754                                    g(2.f, 3.f) + g(2.f, 4.f) + g(2.f, 5.f)},
    755                                   TensorShape({2, 1})));
    756     test::ExpectClose(
    757         dx, test::AsTensor<float>({h(.5f, 0.f), h(.5f, 1.f), h(.5f, 2.f),
    758                                    h(2.f, 3.f), h(2.f, 4.f), h(2.f, 5.f)},
    759                                   TensorShape({2, 3})));
    760   }
    761 }
    762 
// TODO(lukeiwanski): Implement complex Pow for SYCL.
    764 #ifndef TENSORFLOW_USE_SYCL
    765 TEST_F(MathGradTest, ComplexPow) {
    766   auto x = test::AsTensor<complex64>({0.f, 2.f, -2.f}, TensorShape({3}));
    767   auto y = test::AsTensor<complex64>({2.f, 2.f, 2.f}, TensorShape({3}));
    768   Tensor dx;
    769   Tensor dy;
    770   auto g = [](complex64 x, complex64 y) { return y * std::pow(x, y - 1.f); };
    771   auto h = [](complex64 x, complex64 y) {
    772     return std::pow(x, y) * (x != complex64(0) ? std::log(x) : 0);
    773   };
    774   SymGrad("Pow", x, y, &dx, &dy);
    775 
    776   test::ExpectClose(
    777       dx, test::AsTensor<complex64>({g(0.f, 2.f), g(2.f, 2.f), g(-2.f, 2.f)},
    778                                     TensorShape({3})));
    779   test::ExpectClose(
    780       dy, test::AsTensor<complex64>({h(0.f, 2.f), h(2.f, 2.f), h(-2.f, 2.f)},
    781                                     TensorShape({3})));
    782 }
    783 #endif  // TENSORFLOW_USE_SYCL
    784 
    785 TEST_F(MathGradTest, Maximum) {
    786   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
    787                                  TensorShape({2, 3}));
    788   auto y = test::AsTensor<float>({-1.5f, 1.5f}, TensorShape({2, 1}));
    789   Tensor dx;
    790   Tensor dy;
    791   {
    792     SymGrad("Maximum", x, y, &dx, &dy);
    793     {
    794       auto g = [](float x, float y) { return x >= y ? 1.f : 0.f; };
    795       test::ExpectClose(dx, test::AsTensor<float>(
    796                                 {g(-3.f, -1.5f), g(-2.f, -1.5f), g(-1.f, -1.5f),
    797                                  g(1.f, 1.5f), g(2.f, 1.5f), g(3.f, 1.5f)},
    798                                 TensorShape({2, 3})));
    799     }
    800     {
    801       auto g = [](float x, float y) { return x < y ? 1.f : 0.f; };
    802       test::ExpectClose(dy,
    803                         test::AsTensor<float>(
    804                             {g(-3.f, -1.5f) + g(-2.f, -1.5f) + g(-1.f, -1.5f),
    805                              g(1.f, 1.5f) + g(2.f, 1.5f) + g(3.f, 1.5f)},
    806                             TensorShape({2, 1})));
    807     }
    808   }
    809   {  // Swap x and y
    810     SymGrad("Maximum", y, x, &dy, &dx);
    811     {
    812       auto g = [](float x, float y) { return x >= y ? 1.f : 0.f; };
    813       test::ExpectClose(dy,
    814                         test::AsTensor<float>(
    815                             {g(-1.5f, -3.f) + g(-1.5f, -2.f) + g(-1.5f, -1.f),
    816                              g(1.5f, 1.f) + g(1.5f, 2.f) + g(1.5f, 3.f)},
    817                             TensorShape({2, 1})));
    818     }
    819     {
    820       auto g = [](float x, float y) { return x < y ? 1.f : 0.f; };
    821       test::ExpectClose(dx, test::AsTensor<float>(
    822                                 {g(-1.5f, -3.f), g(-1.5f, -2.f), g(-1.5f, -1.f),
    823                                  g(1.5f, 1.f), g(1.5f, 2.f), g(1.5f, 3.f)},
    824                                 TensorShape({2, 3})));
    825     }
    826   }
    827 }
    828 
    829 TEST_F(MathGradTest, Minimum) {
    830   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
    831                                  TensorShape({2, 3}));
    832   auto y = test::AsTensor<float>({-1.5f, 1.5f}, TensorShape({2, 1}));
    833   Tensor dx;
    834   Tensor dy;
    835   {
    836     SymGrad("Minimum", x, y, &dx, &dy);
    837     {
    838       auto g = [](float x, float y) { return x <= y ? 1.f : 0.f; };
    839       test::ExpectClose(dx, test::AsTensor<float>(
    840                                 {g(-3.f, -1.5f), g(-2.f, -1.5f), g(-1.f, -1.5f),
    841                                  g(1.f, 1.5f), g(2.f, 1.5f), g(3.f, 1.5f)},
    842                                 TensorShape({2, 3})));
    843     }
    844     {
    845       auto g = [](float x, float y) { return x > y ? 1.f : 0.f; };
    846       test::ExpectClose(dy,
    847                         test::AsTensor<float>(
    848                             {g(-3.f, -1.5f) + g(-2.f, -1.5f) + g(-1.f, -1.5f),
    849                              g(1.f, 1.5f) + g(2.f, 1.5f) + g(3.f, 1.5f)},
    850                             TensorShape({2, 1})));
    851     }
    852   }
    853   {  // Swap x and y
    854     SymGrad("Minimum", y, x, &dy, &dx);
    855     {
    856       auto g = [](float x, float y) { return x <= y ? 1.f : 0.f; };
    857       test::ExpectClose(dy,
    858                         test::AsTensor<float>(
    859                             {g(-1.5f, -3.f) + g(-1.5f, -2.f) + g(-1.5f, -1.f),
    860                              g(1.5f, 1.f) + g(1.5f, 2.f) + g(1.5f, 3.f)},
    861                             TensorShape({2, 1})));
    862     }
    863     {
    864       auto g = [](float x, float y) { return x > y ? 1.f : 0.f; };
    865       test::ExpectClose(dx, test::AsTensor<float>(
    866                                 {g(-1.5f, -3.f), g(-1.5f, -2.f), g(-1.5f, -1.f),
    867                                  g(1.5f, 1.f), g(1.5f, 2.f), g(1.5f, 3.f)},
    868                                 TensorShape({2, 3})));
    869     }
    870   }
    871 }
    872 
    873 TEST_F(MathGradTest, Select) {
    874   auto c = test::AsTensor<bool>({true, false, false, true, true, false},
    875                                 TensorShape({2, 3}));
    876   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
    877                                  TensorShape({2, 3}));
    878   auto y = test::AsTensor<float>({3.f, 2.f, 1.f, 1.f, 2.f, 3.f},
    879                                  TensorShape({2, 3}));
    880   Tensor dc;
    881   Tensor dx;
    882   Tensor dy;
    883   {
    884     SelectGrad(c, x, y, &dc, &dx, &dy);
    885     test::ExpectTensorEqual<bool>(
    886         dc, test::AsTensor<bool>({false, false, false, false, false, false},
    887                                  TensorShape({2, 3})));
    888     test::ExpectTensorEqual<float>(
    889         dx, test::AsTensor<float>({1.f, 0.f, 0.f, 1.f, 1.f, 0.f},
    890                                   TensorShape({2, 3})));
    891     test::ExpectTensorEqual<float>(
    892         dy, test::AsTensor<float>({0.f, 1.f, 1.f, 0.f, 0.f, 1.f},
    893                                   TensorShape({2, 3})));
    894   }
    895 }
    896 
    897 TEST_F(MathGradTest, MatMul_00) {
    898   auto x = test::AsTensor<float>({1.f, 2.f, 3.f, 4.f, 5.f, 6.f},
    899                                  TensorShape({2, 3}));
    900   auto y = test::AsTensor<float>({-1.f, .5f, 2.f}, TensorShape({3, 1}));
    901   Tensor dx;
    902   Tensor dy;
    903   MatMulGrad(x, false, y, false, &dx, &dy);
    904   auto dz = test::AsTensor<float>({1.f, 1.f}, TensorShape({2, 1}));
    905   test::ExpectClose(dx, MatMul(dz, false, y, true));
    906   test::ExpectClose(dy, MatMul(x, true, dz, false));
    907 }
    908 
    909 TEST_F(MathGradTest, MatMul_01) {
    910   auto x = test::AsTensor<float>({1.f, 2.f, 3.f, 4.f, 5.f, 6.f},
    911                                  TensorShape({2, 3}));
    912   auto y = test::AsTensor<float>({-1.f, .5f, 2.f}, TensorShape({1, 3}));
    913   Tensor dx;
    914   Tensor dy;
    915   MatMulGrad(x, false, y, true, &dx, &dy);
    916   auto dz = test::AsTensor<float>({1.f, 1.f}, TensorShape({2, 1}));
    917   test::ExpectClose(dx, MatMul(dz, false, y, false));
    918   test::ExpectClose(dy, MatMul(dz, true, x, false));
    919 }
    920 
    921 TEST_F(MathGradTest, MatMul_10) {
    922   auto x = test::AsTensor<float>({1.f, 2.f, 3.f, 4.f, 5.f, 6.f},
    923                                  TensorShape({3, 2}));
    924   auto y = test::AsTensor<float>({-1.f, .5f, 2.f}, TensorShape({3, 1}));
    925   Tensor dx;
    926   Tensor dy;
    927   MatMulGrad(x, true, y, false, &dx, &dy);
    928   auto dz = test::AsTensor<float>({1.f, 1.f}, TensorShape({2, 1}));
    929   test::ExpectClose(dx, MatMul(y, false, dz, true));
    930   test::ExpectClose(dy, MatMul(x, false, dz, false));
    931 }
    932 
    933 TEST_F(MathGradTest, MatMul_11) {
    934   auto x = test::AsTensor<float>({1.f, 2.f, 3.f, 4.f, 5.f, 6.f},
    935                                  TensorShape({3, 2}));
    936   auto y = test::AsTensor<float>({-1.f, .5f, 2.f}, TensorShape({1, 3}));
    937   Tensor dx;
    938   Tensor dy;
    939   MatMulGrad(x, true, y, true, &dx, &dy);
    940   auto dz = test::AsTensor<float>({1.f, 1.f}, TensorShape({2, 1}));
    941   test::ExpectClose(dx, MatMul(y, true, dz, true));
    942   test::ExpectClose(dy, MatMul(dz, true, x, true));
    943 }
    944 
// TODO(lukeiwanski): Implement BatchMatMul for SYCL.
    946 #ifndef TENSORFLOW_USE_SYCL
    947 TEST_F(MathGradTest, BatchMatMul_00) {
    948   auto x = test::AsTensor<float>({1.f, 2.f, 3.f, 4.f, 5.f, 6.f},
    949                                  TensorShape({1, 2, 3}));
    950   auto y = test::AsTensor<float>({-1.f, .5f, 2.f}, TensorShape({1, 3, 1}));
    951   Tensor dx;
    952   Tensor dy;
    953   BatchMatMulGrad(x, false, y, false, &dx, &dy);
    954   auto dz = test::AsTensor<float>({1.f, 1.f}, TensorShape({1, 2, 1}));
    955   test::ExpectClose(dx, BatchMatMul(dz, false, y, true));
    956   test::ExpectClose(dy, BatchMatMul(x, true, dz, false));
    957 }
    958 
    959 TEST_F(MathGradTest, BatchMatMul_01) {
    960   auto x = test::AsTensor<float>({1.f, 2.f, 3.f, 4.f, 5.f, 6.f},
    961                                  TensorShape({1, 2, 3}));
    962   auto y = test::AsTensor<float>({-1.f, .5f, 2.f}, TensorShape({1, 1, 3}));
    963   Tensor dx;
    964   Tensor dy;
    965   BatchMatMulGrad(x, false, y, true, &dx, &dy);
    966   auto dz = test::AsTensor<float>({1.f, 1.f}, TensorShape({1, 2, 1}));
    967   test::ExpectClose(dx, BatchMatMul(dz, false, y, false));
    968   test::ExpectClose(dy, BatchMatMul(dz, true, x, false));
    969 }
    970 
    971 TEST_F(MathGradTest, BatchMatMul_10) {
    972   auto x = test::AsTensor<float>({1.f, 2.f, 3.f, 4.f, 5.f, 6.f},
    973                                  TensorShape({1, 3, 2}));
    974   auto y = test::AsTensor<float>({-1.f, .5f, 2.f}, TensorShape({1, 3, 1}));
    975   Tensor dx;
    976   Tensor dy;
    977   BatchMatMulGrad(x, true, y, false, &dx, &dy);
    978   auto dz = test::AsTensor<float>({1.f, 1.f}, TensorShape({1, 2, 1}));
    979   test::ExpectClose(dx, BatchMatMul(y, false, dz, true));
    980   test::ExpectClose(dy, BatchMatMul(x, false, dz, false));
    981 }
    982 
    983 TEST_F(MathGradTest, BatchMatMul_11) {
    984   auto x = test::AsTensor<float>({1.f, 2.f, 3.f, 4.f, 5.f, 6.f},
    985                                  TensorShape({1, 3, 2}));
    986   auto y = test::AsTensor<float>({-1.f, .5f, 2.f}, TensorShape({1, 1, 3}));
    987   Tensor dx;
    988   Tensor dy;
    989   BatchMatMulGrad(x, true, y, true, &dx, &dy);
    990   auto dz = test::AsTensor<float>({1.f, 1.f}, TensorShape({1, 2, 1}));
    991   test::ExpectClose(dx, BatchMatMul(y, true, dz, true));
    992   test::ExpectClose(dy, BatchMatMul(dz, true, x, true));
    993 }
    994 #endif  // TENSORFLOW_USE_SYCL
    995 
    996 TEST_F(MathGradTest, Sum_dim0) {
    997   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
    998                                  TensorShape({2, 3}));
    999   auto i = test::AsTensor<int32>({0}, TensorShape({}));
   1000   Tensor dx;
   1001   Tensor di;
   1002   ReductionGrad("Sum", x, i, &dx, &di);
   1003   test::ExpectTensorEqual<float>(
   1004       dx, test::AsTensor<float>({1.f, 1.f, 1.f, 1.f, 1.f, 1.f},
   1005                                 TensorShape({2, 3})));
   1006   test::ExpectTensorEqual<int32>(di,
   1007                                  test::AsTensor<int32>({0}, TensorShape({})));
   1008 }
   1009 
   1010 TEST_F(MathGradTest, Sum_dim1) {
   1011   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
   1012                                  TensorShape({2, 3}));
   1013   auto i = test::AsTensor<int32>({1}, TensorShape({}));
   1014   Tensor dx;
   1015   Tensor di;
   1016   ReductionGrad("Sum", x, i, &dx, &di);
   1017   test::ExpectTensorEqual<float>(
   1018       dx, test::AsTensor<float>({1.f, 1.f, 1.f, 1.f, 1.f, 1.f},
   1019                                 TensorShape({2, 3})));
   1020   test::ExpectTensorEqual<int32>(di,
   1021                                  test::AsTensor<int32>({0}, TensorShape({})));
   1022 }
   1023 
   1024 TEST_F(MathGradTest, Mean_dim0) {
   1025   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
   1026                                  TensorShape({2, 3}));
   1027   auto i = test::AsTensor<int32>({0}, TensorShape({}));
   1028   Tensor dx;
   1029   Tensor di;
   1030   ReductionGrad("Mean", x, i, &dx, &di);
   1031   test::ExpectTensorEqual<float>(
   1032       dx, test::AsTensor<float>(
   1033               {1.f / 2, 1.f / 2, 1.f / 2, 1.f / 2, 1.f / 2, 1.f / 2},
   1034               TensorShape({2, 3})));
   1035   test::ExpectTensorEqual<int32>(di,
   1036                                  test::AsTensor<int32>({0}, TensorShape({})));
   1037 }
   1038 
   1039 TEST_F(MathGradTest, Mean_dim1) {
   1040   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
   1041                                  TensorShape({2, 3}));
   1042   auto i = test::AsTensor<int32>({1}, TensorShape({}));
   1043   Tensor dx;
   1044   Tensor di;
   1045   ReductionGrad("Mean", x, i, &dx, &di);
   1046   test::ExpectTensorEqual<float>(
   1047       dx, test::AsTensor<float>(
   1048               {1.f / 3, 1.f / 3, 1.f / 3, 1.f / 3, 1.f / 3, 1.f / 3},
   1049               TensorShape({2, 3})));
   1050   test::ExpectTensorEqual<int32>(di,
   1051                                  test::AsTensor<int32>({0}, TensorShape({})));
   1052 }
   1053 
   1054 TEST_F(MathGradTest, Mean_dim0_dim1) {
   1055   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
   1056                                  TensorShape({2, 3}));
   1057   auto i = test::AsTensor<int32>({0, 1}, TensorShape({2}));
   1058   Tensor dx;
   1059   Tensor di;
   1060   ReductionGrad("Mean", x, i, &dx, &di);
   1061   test::ExpectTensorEqual<float>(
   1062       dx, test::AsTensor<float>(
   1063               {1.f / 6, 1.f / 6, 1.f / 6, 1.f / 6, 1.f / 6, 1.f / 6},
   1064               TensorShape({2, 3})));
   1065   test::ExpectTensorEqual<int32>(
   1066       di, test::AsTensor<int32>({0, 0}, TensorShape({2})));
   1067 }
   1068 
   1069 TEST_F(MathGradTest, Min_dim0) {
   1070   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
   1071                                  TensorShape({2, 3}));
   1072   auto i = test::AsTensor<int32>({0}, TensorShape({}));
   1073   Tensor dx;
   1074   Tensor di;
   1075   ReductionGrad("Min", x, i, &dx, &di);
   1076   test::ExpectTensorEqual<float>(
   1077       dx, test::AsTensor<float>({1.f, 1.f, 1.f, 0.f, 0.f, 0.f},
   1078                                 TensorShape({2, 3})));
   1079   test::ExpectTensorEqual<int32>(di,
   1080                                  test::AsTensor<int32>({0}, TensorShape({})));
   1081 }
   1082 
   1083 TEST_F(MathGradTest, Min_dim1) {
   1084   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
   1085                                  TensorShape({2, 3}));
   1086   auto i = test::AsTensor<int32>({1}, TensorShape({}));
   1087   Tensor dx;
   1088   Tensor di;
   1089   ReductionGrad("Min", x, i, &dx, &di);
   1090   test::ExpectTensorEqual<float>(
   1091       dx, test::AsTensor<float>({1.f, 0.f, 0.f, 1.f, 0.f, 0.f},
   1092                                 TensorShape({2, 3})));
   1093   test::ExpectTensorEqual<int32>(di,
   1094                                  test::AsTensor<int32>({0}, TensorShape({})));
   1095 }
   1096 
   1097 TEST_F(MathGradTest, Min_dim0_dim1) {
   1098   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
   1099                                  TensorShape({2, 3}));
   1100   auto i = test::AsTensor<int32>({0, 1}, TensorShape({2}));
   1101   Tensor dx;
   1102   Tensor di;
   1103   ReductionGrad("Min", x, i, &dx, &di);
   1104   test::ExpectTensorEqual<float>(
   1105       dx, test::AsTensor<float>({1.f, 0.f, 0.f, 0.f, 0.f, 0.f},
   1106                                 TensorShape({2, 3})));
   1107   test::ExpectTensorEqual<int32>(
   1108       di, test::AsTensor<int32>({0, 0}, TensorShape({2})));
   1109 }
   1110 
   1111 TEST_F(MathGradTest, Min_dim0_dim1_Dups) {
   1112   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, -3.f},
   1113                                  TensorShape({2, 3}));
   1114   auto i = test::AsTensor<int32>({0, 1}, TensorShape({2}));
   1115   Tensor dx;
   1116   Tensor di;
   1117   ReductionGrad("Min", x, i, &dx, &di);
   1118   test::ExpectTensorEqual<float>(
   1119       dx, test::AsTensor<float>({.5f, 0.f, 0.f, 0.f, 0.f, .5f},
   1120                                 TensorShape({2, 3})));
   1121   test::ExpectTensorEqual<int32>(
   1122       di, test::AsTensor<int32>({0, 0}, TensorShape({2})));
   1123 }
   1124 
   1125 TEST_F(MathGradTest, Max_dim0) {
   1126   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
   1127                                  TensorShape({2, 3}));
   1128   auto i = test::AsTensor<int32>({0}, TensorShape({}));
   1129   Tensor dx;
   1130   Tensor di;
   1131   ReductionGrad("Max", x, i, &dx, &di);
   1132   LOG(INFO) << dx.SummarizeValue(6);
   1133   test::ExpectTensorEqual<float>(
   1134       dx, test::AsTensor<float>({0.f, 0.f, 0.f, 1.f, 1.f, 1.f},
   1135                                 TensorShape({2, 3})));
   1136   test::ExpectTensorEqual<int32>(di,
   1137                                  test::AsTensor<int32>({0}, TensorShape({})));
   1138 }
   1139 
   1140 TEST_F(MathGradTest, Max_dim1) {
   1141   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
   1142                                  TensorShape({2, 3}));
   1143   auto i = test::AsTensor<int32>({1}, TensorShape({}));
   1144   Tensor dx;
   1145   Tensor di;
   1146   ReductionGrad("Max", x, i, &dx, &di);
   1147   test::ExpectTensorEqual<float>(
   1148       dx, test::AsTensor<float>({0.f, 0.f, 1.f, 0.f, 0.f, 1.f},
   1149                                 TensorShape({2, 3})));
   1150   test::ExpectTensorEqual<int32>(di,
   1151                                  test::AsTensor<int32>({0}, TensorShape({})));
   1152 }
   1153 
   1154 TEST_F(MathGradTest, Max_dim0_dim1) {
   1155   auto x = test::AsTensor<float>({-3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
   1156                                  TensorShape({2, 3}));
   1157   auto i = test::AsTensor<int32>({0, 1}, TensorShape({2}));
   1158   Tensor dx;
   1159   Tensor di;
   1160   ReductionGrad("Max", x, i, &dx, &di);
   1161   test::ExpectTensorEqual<float>(
   1162       dx, test::AsTensor<float>({0.f, 0.f, 0.f, 0.f, 0.f, 1.f},
   1163                                 TensorShape({2, 3})));
   1164   test::ExpectTensorEqual<int32>(
   1165       di, test::AsTensor<int32>({0, 0}, TensorShape({2})));
   1166 }
   1167 
   1168 TEST_F(MathGradTest, Max_dim0_dim1_Dups) {
   1169   auto x = test::AsTensor<float>({3.f, -2.f, -1.f, 1.f, 2.f, 3.f},
   1170                                  TensorShape({2, 3}));
   1171   auto i = test::AsTensor<int32>({0, 1}, TensorShape({2}));
   1172   Tensor dx;
   1173   Tensor di;
   1174   ReductionGrad("Max", x, i, &dx, &di);
   1175   test::ExpectTensorEqual<float>(
   1176       dx, test::AsTensor<float>({.5f, 0.f, 0.f, 0.f, 0.f, .5f},
   1177                                 TensorShape({2, 3})));
   1178   test::ExpectTensorEqual<int32>(
   1179       di, test::AsTensor<int32>({0, 0}, TensorShape({2})));
   1180 }
   1181 
   1182 }  // namespace
   1183 }  // namespace tensorflow
   1184