/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#if defined(INTEL_MKL) && defined(ENABLE_MKL)

#define EIGEN_USE_THREADS

#include <functional>
#include <memory>
#include <vector>

#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"

namespace tensorflow {

using test::graph::Constant;

// Dummy tensor and shape fed to the MKL layout-metadata inputs of MKL ops.
static const uint8 dummy_tensor[] = {0, 0, 0, 0, 0, 0, 0, 0};
static const TensorShape dummy_shape({8});

// Helper class for converting MKL tensors to TF tensors and comparing the
// results against expected values.
class ConvMklToTF : public OpsTestBase {
 public:
  template <typename T>
  void ConvertMKL2TF(DataType dtype, const Tensor& first, const Tensor& second,
                     Tensor& output) {
    // Create an MKL-to-TF conversion node and execute it.
    TF_EXPECT_OK(NodeDefBuilder("mkl_to_tf_op", "_MklToTf")
                     .Input(FakeInput(dtype))     // Input
                     .Input(FakeInput(DT_UINT8))  // MKL second tensor
                     .Attr("T", dtype)
                     .Attr("_kernel", "MklOp")
                     .Finalize(node_def()));
    TF_EXPECT_OK(InitOp());
    AddInputFromArray<T>(first.shape(), first.flat<T>());
    AddInputFromArray<uint8>(second.shape(), second.flat<uint8>());
    TF_ASSERT_OK(RunOpKernel());

    output = *GetOutput(0);
  }
  // OpsTestBase derives from ::testing::Test, whose pure-virtual TestBody()
  // must be overridden so this helper can be instantiated directly.
  void TestBody() {}
};
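
// Hypothetical usage sketch of the helper above (not exercised by the tests
// in this file): convert an op's MKL-laid-out output back to TF layout before
// comparing it against an expected tensor. The mkl_output and mkl_metadata
// names are illustrative.
//
//   ConvMklToTF conv_util;
//   Tensor tf_output;
//   conv_util.ConvertMKL2TF<quint8>(DT_QUINT8, mkl_output, mkl_metadata,
//                                   tf_output);
//   test::ExpectTensorEqual<quint8>(expected_output, tf_output);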

class QuantizedConcatTest : public OpsTestBase {
 protected:
  QuantizedConcatTest() {}

  void TestSmall8Bit(float first_min, float first_max, float second_min,
                     float second_max);
  void TestSecondDim8Bit(float first_min, float first_max, float second_min,
                         float second_max);
};

TEST_F(QuantizedConcatTest, Small8BitSameRange) {
  // Both inputs use the same range, so the implementation can concatenate
  // with a plain memcpy instead of requantizing.
  TestSmall8Bit(0.0f, 255.0f, 0.0f, 255.0f);
}
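
// A sketch of the different-range case, mirroring the Eigen QuantizedConcat
// tests; the 0-25 range for the second input is illustrative and forces the
// kernel to requantize into the merged output range instead of copying.
TEST_F(QuantizedConcatTest, Small8BitDifferentRange) {
  TestSmall8Bit(0.0f, 255.0f, 0.0f, 25.0f);
}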

void QuantizedConcatTest::TestSmall8Bit(float first_min, float first_max,
                                        float second_min, float second_max) {
  TF_ASSERT_OK(NodeDefBuilder("quantized_concat_op", "_MklQuantizedConcatV2")
                   .Input(FakeInput(2, DT_QUINT8))
                   .Input(FakeInput(DT_INT32))
                   .Input(FakeInput(2, DT_FLOAT))
                   .Input(FakeInput(2, DT_FLOAT))
                   .Input(FakeInput(2, DT_UINT8))  // MKL second tensor: values
                   .Input(FakeInput(DT_UINT8))     // MKL second tensor: axis
                   .Input(FakeInput(2, DT_UINT8))  // MKL second tensor: mins
                   .Input(FakeInput(2, DT_UINT8))  // MKL second tensor: maxes
                   .Attr("N", 2)
                   .Attr("T", DataTypeToEnum<quint8>::v())
                   .Attr("Tidx", DT_INT32)
                   .Attr("_kernel", "QuantizedMklOp")
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  const int first_batch = 2;
  const int first_height = 2;
  const int first_width = 3;
  const int first_depth = 1;
  Tensor first_float(DT_FLOAT,
                     {first_batch, first_height, first_width, first_depth});
  test::FillValues<float>(&first_float,
                          {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
  Tensor first_quantized =
      FloatTensorToQuantized<quint8>(first_float, first_min, first_max);

  const int second_batch = 2;
  const int second_height = 2;
  const int second_width = 3;
  const int second_depth = 1;
  Tensor second_float(
      DT_FLOAT, {second_batch, second_height, second_width, second_depth});
  test::FillValues<float>(&second_float,
                          {13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
  Tensor second_quantized =
      FloatTensorToQuantized<quint8>(second_float, second_min, second_max);

  const int expected_batch = first_batch + second_batch;
  Tensor expected_float(
      DT_FLOAT, {expected_batch, first_height, first_width, first_depth});
  test::FillValues<float>(&expected_float,
                          {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
                           13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});

  AddInputFromArray<quint8>(first_quantized.shape(),
                            first_quantized.flat<quint8>());
  AddInputFromArray<quint8>(second_quantized.shape(),
                            second_quantized.flat<quint8>());
  AddInputFromArray<int32>(TensorShape({}), {0});  // Concatenate along batch.
  AddInputFromArray<float>(TensorShape({}), {first_min});
  AddInputFromArray<float>(TensorShape({}), {second_min});
  AddInputFromArray<float>(TensorShape({}), {first_max});
  AddInputFromArray<float>(TensorShape({}), {second_max});
  // Dummy MKL layout-metadata inputs.
  AddInputFromArray<uint8>(dummy_shape, dummy_tensor);
  AddInputFromArray<uint8>(dummy_shape, dummy_tensor);
  AddInputFromArray<uint8>(dummy_shape, dummy_tensor);
  AddInputFromArray<uint8>(dummy_shape, dummy_tensor);
  TF_ASSERT_OK(RunOpKernel());
  const Tensor& output_quantized = *GetOutput(0);
  const float output_min = GetOutput(1)->flat<float>()(0);
  const float output_max = GetOutput(2)->flat<float>()(0);
  Tensor output_float =
      QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);
  test::ExpectTensorNear<float>(expected_float, output_float, 0.2);
}

TEST_F(QuantizedConcatTest, SecondDim8BitSameRange) {
  TestSecondDim8Bit(-10.0f, 150.0f, -10.0f, 150.0f);
}
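
// A hypothetical companion case with mismatched input ranges; the ranges
// below are illustrative, chosen so the second input must be requantized
// before the concat along dimension 1.
TEST_F(QuantizedConcatTest, SecondDim8BitDifferentRange) {
  TestSecondDim8Bit(-10.0f, 150.0f, 0.0f, 200.0f);
}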
    160 
    161 void QuantizedConcatTest::TestSecondDim8Bit(float first_min, float first_max,
    162                                             float second_min,
    163                                             float second_max) {
    164   TF_ASSERT_OK(NodeDefBuilder("quantized_concat_op", "_MklQuantizedConcatV2")
    165                    .Input(FakeInput(2, DT_QUINT8))
    166                    .Input(FakeInput(DT_INT32))
    167                    .Input(FakeInput(2, DT_FLOAT))
    168                    .Input(FakeInput(2, DT_FLOAT))
    169                    .Input(FakeInput(2, DT_UINT8))  // MKL second tensor
    170                    .Input(FakeInput(DT_UINT8))     // MKL second tensor
    171                    .Input(FakeInput(2, DT_UINT8))  // MKL second tensor
    172                    .Input(FakeInput(2, DT_UINT8))  // MKL second tensor
    173                    .Attr("N", 2)
    174                    .Attr("T", DataTypeToEnum<quint8>::v())
    175                    .Attr("Tidx", DT_INT32)
    176                    .Attr("_kernel", "QuantizedMklOp")
    177                    .Finalize(node_def()));
    178   TF_ASSERT_OK(InitOp());
    179   const int first_batch = 2;
    180   const int first_height = 2;
    181   const int first_width = 3;
    182   const int first_depth = 1;
    183   Tensor first_float(DT_FLOAT,
    184                      {first_batch, first_height, first_width, first_depth});
    185   test::FillValues<float>(&first_float,
    186                           {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
    187   Tensor first_quantized =
    188       FloatTensorToQuantized<quint8>(first_float, first_min, first_max);
    189 
    190   const int second_batch = 2;
    191   const int second_height = 2;
    192   const int second_width = 3;
    193   const int second_depth = 1;
    194 
    195   Tensor second_float(
    196       DT_FLOAT, {second_batch, second_height, second_width, second_depth});
    197   test::FillValues<float>(&second_float,
    198                           {13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
    199   Tensor second_quantized =
    200       FloatTensorToQuantized<quint8>(second_float, second_min, second_max);
    201 
    202   const int expected_height = first_height + second_height;
    203   Tensor expected_float(
    204       DT_FLOAT, {first_batch, expected_height, first_width, first_depth});
    205   test::FillValues<float>(&expected_float,
    206                           {1, 2, 3, 4,  5,  6,  13, 14, 15, 16, 17, 18,
    207                            7, 8, 9, 10, 11, 12, 19, 20, 21, 22, 23, 24});
    208 
    209   AddInputFromArray<quint8>(first_quantized.shape(),
    210                             first_quantized.flat<quint8>());
    211   AddInputFromArray<quint8>(second_quantized.shape(),
    212                             second_quantized.flat<quint8>());
    213   AddInputFromArray<int32>(TensorShape({}), {1});
    214   AddInputFromArray<float>(TensorShape({}), {first_min});
    215   AddInputFromArray<float>(TensorShape({}), {second_min});
    216   AddInputFromArray<float>(TensorShape({}), {first_max});
    217   AddInputFromArray<float>(TensorShape({}), {second_max});
    218   AddInputFromArray<uint8>(dummy_shape, dummy_tensor);
    219   AddInputFromArray<uint8>(dummy_shape, dummy_tensor);
    220   AddInputFromArray<uint8>(dummy_shape, dummy_tensor);
    221   AddInputFromArray<uint8>(dummy_shape, dummy_tensor);
    222   TF_ASSERT_OK(RunOpKernel());
    223   const Tensor& output_quantized = *GetOutput(0);
    224   const float output_min = GetOutput(1)->flat<float>()(0);
    225   const float output_max = GetOutput(2)->flat<float>()(0);
    226   Tensor output_float =
    227       QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);
    228   // Using the same error tolerance as in Eigen QuantizedConcat test
    229   test::ExpectTensorNear<float>(expected_float, output_float, 1.0);
    230 }

}  // namespace tensorflow

#endif  // INTEL_MKL && ENABLE_MKL