/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include <algorithm>
#include <cmath>
#include <functional>
#include <memory>

#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/platform/test.h"

namespace tensorflow {

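// Absolute tolerance used by the EXPECT_NEAR checks and by the element-wise
// comparison in LRNFloatTest::Compare() below.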
static const float tol_ = 1e-4;

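// Test fixture for the float LRN (Local Response Normalization) kernel. It
// provides typed accessors for node attributes and a reference implementation
// (Compare) that the tests use to validate the kernel's output.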
class LRNFloatTest : public OpsTestBase {
 protected:
  LRNFloatTest() : philox_(123, 17), rand_(&philox_) {}

  int GetIntAttr(const string& name) {
    int value;
    TF_CHECK_OK(GetNodeAttr(*node_def(), name, &value));
    return value;
  }

  float GetFloatAttr(const string& name) {
    float value;
    TF_CHECK_OK(GetNodeAttr(*node_def(), name, &value));
    return value;
  }

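  // Element-wise reference check for the kernel output. For every position i
  // and channel d, the expected LRN value is
  //
  //   out(i, d) = in(i, d) / (bias + alpha * sum_{r} in(i, r)^2)^beta
  //
  // where r ranges over [d - depth_radius, d + depth_radius], clamped to
  // [0, depth). Returns true iff every output element is within tol_ of this
  // reference.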
  bool Compare() {
    const auto& input = GetInput(0);
    const int64 batch_size = input.dim_size(0);
    const int64 rows = input.dim_size(1);
    const int64 cols = input.dim_size(2);
    const int64 depth = input.dim_size(3);
    const int64 rest = cols * rows * batch_size;

    const int64 depth_radius = GetIntAttr("depth_radius");
    const float bias = GetFloatAttr("bias");
    const float alpha = GetFloatAttr("alpha");
    const float beta = GetFloatAttr("beta");

    Eigen::Tensor<float, 4, Eigen::RowMajor> expected(batch_size, rows, cols,
                                                      depth);
    auto out = expected.reshape(Eigen::DSizes<int64, 2>{rest, depth});
    auto in = input.shaped<float, 2>({rest, depth});

    for (int64 i = 0; i < rest; ++i) {
      Eigen::Tensor<float, 1, Eigen::RowMajor> out_col(depth);
      for (int64 d = 0; d < depth; ++d) {
        float denom = 0.0f;
        for (int64 r = std::max(0ll, d - depth_radius);
             r < std::min(depth, d + depth_radius + 1); ++r) {
          denom += in(i, r) * in(i, r);
        }
        denom = std::pow(denom * alpha + bias, beta);
        out_col(d) = in(i, d) / denom;
      }
      out.chip<0>(i) = out_col;
    }
    auto actual = GetOutput(0)->tensor<float, 4>();
    Eigen::Tensor<float, 0, Eigen::RowMajor> sum =
        ((expected - actual).abs() > actual.constant(tol_))
            .select(actual.constant(1), actual.constant(0))
            .sum();
    return sum() == 0;
  }

  random::PhiloxRandom philox_;
  random::SimplePhilox rand_;
};

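// Runs LRN on a single 1x1x1x96 input whose values are 1..96, spot-checks a
// few channels against the hand-computed values in the comments below, and
// then cross-checks the full output with Compare().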
TEST_F(LRNFloatTest, Depth96) {
  TF_ASSERT_OK(NodeDefBuilder("lrn_op", "LRN")
                   .Input(FakeInput())
                   .Attr("depth_radius", 5)
                   .Attr("bias", 1.0f)
                   .Attr("alpha", 0.1f)
                   .Attr("beta", 2.0f)
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  AddInput<float>(TensorShape({1, 1, 1, 96}),
                  [this](int i) -> float { return i + 1; });
  TF_ASSERT_OK(RunOpKernel());
  auto actual = GetOutput(0)->tensor<float, 4>();

  // Output for Node 0 with Value 1:
  // 1 / (1 + 0.1*(1^2 + 2^2 + 3^2 + 4^2 + 5^2 + 6^2))^2
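  //   = 1 / (1 + 0.1*91)^2 = 1 / 10.1^2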
  EXPECT_NEAR(1. / (10.1 * 10.1), actual(0, 0, 0, 0), tol_);

  // Output for Node 5 with Value 6:
  // 6 / (1 + 0.1*(1^2 + 2^2 + 3^2 + 4^2 + 5^2 + 6^2 ... + 11^2))^2
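  //   = 6 / (1 + 0.1*506)^2 = 6 / 51.6^2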
  EXPECT_NEAR(6. / (51.6 * 51.6), actual(0, 0, 0, 5), tol_);

  // Output for Node 63 with value 64:
  // 64 / (1 + 0.1*(59^2 + 60^2 + 61^2 + 62^2 + 63^2 + 64^2))^2
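  //   = 64 / (1 + 0.1*22711)^2 = 64 / 2272.1^2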
  EXPECT_NEAR(64. / (2272.1 * 2272.1), actual(0, 0, 0, 63), tol_);

  // Output for Node 64 with value 65:
  // 65 / (1 + 0.1*(65^2 + 66^2 + 67^2 + 68^2 + 69^2 + 70^2))^2
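  //   = 65 / (1 + 0.1*27355)^2 = 65 / 2736.5^2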
  EXPECT_NEAR(65. / (2736.5 * 2736.5), actual(0, 0, 0, 64), tol_);

  // Output for Node 95 with value 96:
  // 96 / (1 + 0.1*(91^2 + 92^2 + 93^2 + 94^2 + 95^2 + 96^2))^2
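  //   = 96 / (1 + 0.1*52471)^2 = 96 / 5248.1^2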
  EXPECT_NEAR(96. / (5248.1 * 5248.1), actual(0, 0, 0, 95), tol_);
  EXPECT_TRUE(Compare());
}

TEST_F(LRNFloatTest, Depth16) {
  TF_ASSERT_OK(NodeDefBuilder("lrn_op", "LRN")
                   .Input(FakeInput())
                   .Attr("depth_radius", 5)
                   .Attr("bias", 1.0f)
                   .Attr("alpha", 0.1f)
                   .Attr("beta", 2.0f)
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  AddInput<float>(TensorShape({1, 1, 1, 16}),
                  [this](int i) -> float { return i + 1; });
  TF_ASSERT_OK(RunOpKernel());
  auto actual = GetOutput(0)->tensor<float, 4>();

  // Output for Node 0 with Value 1:
  // 1 / (1 + 0.1*(1^2 + 2^2 + 3^2 + 4^2 + 5^2 + 6^2))^2
  EXPECT_NEAR(1. / (10.1 * 10.1), actual(0, 0, 0, 0), tol_);

  // Output for Node 5 with Value 6:
  // 6 / (1 + 0.1*(1^2 + 2^2 + 3^2 + 4^2 + 5^2 + 6^2 ... + 11^2))^2
  EXPECT_NEAR(6. / (51.6 * 51.6), actual(0, 0, 0, 5), tol_);

  // Output for Node 15 with value 16:
  // 16 / (1 + 0.1*(11^2 + 12^2 + 13^2 + 14^2 + 15^2 + 16^2))^2
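  //   = 16 / (1 + 0.1*1111)^2 = 16 / 112.1^2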
  EXPECT_NEAR(16. / (112.1 * 112.1), actual(0, 0, 0, 15), tol_);
  EXPECT_TRUE(Compare());
}

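// Returns one sample from a standard normal distribution, used to generate
// random test inputs for the TCASE tests below.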
static double RndGaussian(random::SimplePhilox* rnd) {
  // Box-Muller transformation.
  // See, for example, http://www.taygeta.com/random/gaussian.html
  double x1, x2;
  double r;
  do {
    x1 = 2 * rnd->RandDouble() - 1;
    x2 = 2 * rnd->RandDouble() - 1;
    r = x1 * x1 + x2 * x2;
  } while (r == 0 || r >= 1.0);
  double w = sqrt(-2.0 * log(r) / r);
  return x1 * w;
}

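// Defines a randomized test case: builds an LRN node with the given
// attributes (note that ALPHA is divided by 10), feeds a {BATCH, 1, 1, DEPTH}
// tensor of Gaussian noise, runs the kernel, and validates the output against
// the Compare() reference.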
#define TCASE(NAME, DEPTH, BATCH, DEPTH_RADIUS, BIAS, ALPHA, BETA)           \
  TEST_F(LRNFloatTest, NAME) {                                               \
    TF_ASSERT_OK(NodeDefBuilder("lrn_op", "LRN")                             \
                     .Input(FakeInput())                                     \
                     .Attr("depth_radius", (DEPTH_RADIUS))                   \
                     .Attr("bias", (BIAS))                                   \
                     .Attr("alpha", ((ALPHA) / 10))                          \
                     .Attr("beta", (BETA))                                   \
                     .Finalize(node_def()));                                 \
    TF_ASSERT_OK(InitOp());                                                  \
    AddInput<float>(TensorShape({BATCH, 1, 1, DEPTH}),                       \
                    [this](int i) -> float { return RndGaussian(&rand_); }); \
    TF_ASSERT_OK(RunOpKernel());                                             \
    EXPECT_TRUE(Compare());                                                  \
  }

// clang-format off
//        DEPTH  BATCH  DEPTH_RADIUS  BIAS  ALPHA  BETA
TCASE(T0, 4,     2,     2,            1.0f, 1.0f,  2.0f)
TCASE(T1, 16,    1,     5,            1.0f, 1.0f,  2.0f)
TCASE(T2, 16,    32,    2,            1.0f, 2.0f,  1.0f)
TCASE(T3, 128,   4,     3,            2.0f, 1.0f,  1.0f)
// clang-format on

#undef TCASE
}  // namespace tensorflow