// tensorflow/core/kernels/quantized_bias_add_op_test.cc
      1 /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
      2 
      3 Licensed under the Apache License, Version 2.0 (the "License");
      4 you may not use this file except in compliance with the License.
      5 You may obtain a copy of the License at
      6 
      7     http://www.apache.org/licenses/LICENSE-2.0
      8 
      9 Unless required by applicable law or agreed to in writing, software
     10 distributed under the License is distributed on an "AS IS" BASIS,
     11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     12 See the License for the specific language governing permissions and
     13 limitations under the License.
     14 ==============================================================================*/
     15 
     16 #define EIGEN_USE_THREADS
     17 
     18 #include <functional>
     19 
     20 #include "tensorflow/core/framework/allocator.h"
     21 #include "tensorflow/core/framework/fake_input.h"
     22 #include "tensorflow/core/framework/node_def_builder.h"
     23 #include "tensorflow/core/framework/op_kernel.h"
     24 #include "tensorflow/core/framework/tensor.h"
     25 #include "tensorflow/core/framework/tensor_testutil.h"
     26 #include "tensorflow/core/framework/types.h"
     27 #include "tensorflow/core/framework/types.pb.h"
     28 #include "tensorflow/core/kernels/ops_testutil.h"
     29 #include "tensorflow/core/kernels/ops_util.h"
     30 #include "tensorflow/core/kernels/quantization_utils.h"
     31 #include "tensorflow/core/lib/core/status_test_util.h"
     32 #include "tensorflow/core/platform/test.h"
     33 
     34 namespace tensorflow {
     35 
// Test fixture for the QuantizedBiasAdd op. All node-construction and
// input/output plumbing comes from OpsTestBase; no extra state is needed.
class QuantizedBiasAddTest : public OpsTestBase {
 protected:
};
     39 
     40 TEST_F(QuantizedBiasAddTest, Small) {
     41   TF_ASSERT_OK(NodeDefBuilder("quantized_bias_add_op", "QuantizedBiasAdd")
     42                    .Input(FakeInput(DT_QUINT8))
     43                    .Input(FakeInput(DT_QUINT8))
     44                    .Input(FakeInput(DT_FLOAT))
     45                    .Input(FakeInput(DT_FLOAT))
     46                    .Input(FakeInput(DT_FLOAT))
     47                    .Input(FakeInput(DT_FLOAT))
     48                    .Attr("out_type", DataTypeToEnum<qint32>::v())
     49                    .Finalize(node_def()));
     50   TF_ASSERT_OK(InitOp());
     51   const float input_min = 0.0f;
     52   const float input_max = 60.0f;
     53   const int input_height = 2;
     54   const int input_width = 3;
     55   Tensor input_float(DT_FLOAT, {input_height, input_width});
     56   test::FillValues<float>(&input_float,
     57                           {10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f});
     58   Tensor input_quantized =
     59       FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
     60 
     61   const float bias_min = 0.0f;
     62   const float bias_max = 3.0f;
     63   const int bias_width = 3;
     64   Tensor bias_float(DT_FLOAT, {bias_width});
     65   test::FillValues<float>(&bias_float, {1.0f, 2.0f, 3.0f});
     66   Tensor bias_quantized =
     67       FloatTensorToQuantized<quint8>(bias_float, bias_min, bias_max);
     68 
     69   Tensor expected_float(DT_FLOAT, {input_height, input_width});
     70   test::FillValues<float>(&expected_float,
     71                           {11.0f, 22.0f, 33.0f, 41.0f, 52.0f, 63.0f});
     72 
     73   AddInputFromArray<quint8>(input_quantized.shape(),
     74                             input_quantized.flat<quint8>());
     75   AddInputFromArray<quint8>(bias_quantized.shape(),
     76                             bias_quantized.flat<quint8>());
     77   AddInputFromArray<float>(TensorShape({1}), {input_min});
     78   AddInputFromArray<float>(TensorShape({1}), {input_max});
     79   AddInputFromArray<float>(TensorShape({1}), {bias_min});
     80   AddInputFromArray<float>(TensorShape({1}), {bias_max});
     81   TF_ASSERT_OK(RunOpKernel());
     82   const Tensor& output_quantized = *GetOutput(0);
     83   const float output_min = GetOutput(1)->flat<float>()(0);
     84   const float output_max = GetOutput(2)->flat<float>()(0);
     85   Tensor output_float =
     86       QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);
     87   test::ExpectTensorNear<float>(expected_float, output_float, 0.2);
     88 }
     89 
// Exercises QuantizedBiasAdd with values captured from a real model run:
// a wide-range 1x64 input plus a narrow-range 64-element bias. Verifies the
// dequantized result tracks the float reference within the (large) error
// introduced by quantizing such a wide range down to eight bits.
TEST_F(QuantizedBiasAddTest, RealData) {
  TF_ASSERT_OK(NodeDefBuilder("quantized_bias_add_op", "QuantizedBiasAdd")
                   .Input(FakeInput(DT_QUINT8))  // quantized input
                   .Input(FakeInput(DT_QUINT8))  // quantized bias
                   .Input(FakeInput(DT_FLOAT))   // input min
                   .Input(FakeInput(DT_FLOAT))   // input max
                   .Input(FakeInput(DT_FLOAT))   // bias min
                   .Input(FakeInput(DT_FLOAT))   // bias max
                   .Attr("out_type", DataTypeToEnum<qint32>::v())
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  // 1x64 input spanning roughly [-2164, 2006], so one quint8 step is ~16
  // units — hence the loose tolerance at the end of the test.
  const float input_min = -2164.25f;
  const float input_max = 2006.27f;
  const int input_height = 1;
  const int input_width = 64;
  Tensor input_float(DT_FLOAT, {input_height, input_width});
  test::FillValues<float>(
      &input_float,
      {-1014.12, -157.382, -810.17,  1435.28,  1016.37,  219.684,  -316.054,
       -2164.25, 2006.27,  -547.444, 857.376,  404.376,  9.72115,  332.588,
       194.385,  -286.57,  26.062,   23.1125,  110.436,  247.055,  -127.683,
       -376.275, -124.81,  -846.826, -77.1507, 305.581,  -202.747, 12.9528,
       9.64886,  872.686,  40.9069,  197.816,  44.16,    -306.768, -1457.52,
       -368.939, -1049.42, -486.353, 1745.87,  95.7695,  395.773,  -254.333,
       -404.27,  787.16,   -2.44114, 199.37,   -1024.08, 784.901,  235.055,
       -42.7295, 241.498,  -245.365, 470.763,  186.159,  186.579,  -220.163,
       1304.58,  386.272,  -358.853, -755.996, 360.109,  -866.007, 55.2828,
       -508.801});
  Tensor input_quantized =
      FloatTensorToQuantized<quint8>(input_float, input_min, input_max);

  // 64-element bias with a much narrower range ([-0.74, 0.64]) than the
  // input, mimicking typical trained-model bias magnitudes.
  const float bias_min = -0.739539f;
  const float bias_max = 0.641057f;
  const int bias_width = 64;
  Tensor bias_float(DT_FLOAT, {bias_width});
  test::FillValues<float>(
      &bias_float,
      {-0.294619, -0.0670519, 0.261507,   -0.126274, 0.127229,   -0.176945,
       -0.251223, 0.231086,   0.453694,   0.415666,  -0.288733,  0.508717,
       0.211551,  0.0435907,  -0.582383,  -0.308779, 0.0696883,  -0.438122,
       0.114,     0.433964,   0.109883,   0.284931,  -0.149661,  0.108657,
       0.458333,  -0.130231,  -0.35805,   -0.123206, -0.437968,  0.0282411,
       0.628818,  -0.0522173, -0.0233403, 0.124863,  0.217165,   0.262294,
       -0.171005, -0.254693,  -0.200433,  -0.287354, 0.488166,   -0.0354688,
       -0.118091, -0.590444,  0.491537,   -0.739539, 0.083117,   0.282482,
       0.275269,  -0.36574,   0.107476,   0.0511428, -0.136887,  -0.0149852,
       -0.259694, 0.641057,   0.264054,   -0.295126, -0.0218791, 0.361211,
       0.012448,  0.0709718,  -0.392394,  -0.434215});
  Tensor bias_quantized =
      FloatTensorToQuantized<quint8>(bias_float, bias_min, bias_max);

  // Float reference: element-wise input + bias (precomputed values).
  Tensor expected_float(DT_FLOAT, {input_height, input_width});
  test::FillValues<float>(
      &expected_float,
      {-1014.42, -157.449, -809.908, 1435.16,  1016.5,  219.507,  -316.305,
       -2164.02, 2006.73,  -547.028, 857.088,  404.885, 9.9327,   332.632,
       193.803,  -286.878, 26.1317,  22.6744,  110.55,  247.489,  -127.573,
       -375.99,  -124.959, -846.717, -76.6923, 305.451, -203.105, 12.8296,
       9.21089,  872.714,  41.5357,  197.764,  44.1367, -306.643, -1457.3,
       -368.677, -1049.6,  -486.608, 1745.67,  95.4821, 396.261,  -254.368,
       -404.388, 786.57,   -1.94961, 198.63,   -1024.0, 785.183,  235.33,
       -43.0953, 241.605,  -245.314, 470.627,  186.144, 186.319,  -219.522,
       1304.84,  385.977,  -358.874, -755.635, 360.122, -865.936, 54.8904,
       -509.235});

  AddInputFromArray<quint8>(input_quantized.shape(),
                            input_quantized.flat<quint8>());
  AddInputFromArray<quint8>(bias_quantized.shape(),
                            bias_quantized.flat<quint8>());
  AddInputFromArray<float>(TensorShape({1}), {input_min});
  AddInputFromArray<float>(TensorShape({1}), {input_max});
  AddInputFromArray<float>(TensorShape({1}), {bias_min});
  AddInputFromArray<float>(TensorShape({1}), {bias_max});
  TF_ASSERT_OK(RunOpKernel());
  // Dequantize the qint32 output with the op's reported min/max and compare.
  // The tolerance of 20 reflects the ~16-unit quantization step of the input.
  const Tensor& output_quantized = *GetOutput(0);
  const float output_min = GetOutput(1)->flat<float>()(0);
  const float output_max = GetOutput(2)->flat<float>()(0);
  Tensor output_float =
      QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);
  test::ExpectTensorNear<float>(expected_float, output_float, 20.0);
}
    171 
    172 }  // namespace tensorflow
    173