/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#define EIGEN_USE_THREADS

#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"

namespace tensorflow {

class QuantizedPoolingTest : public OpsTestBase {
 protected:
};

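// Runs QuantizedAvgPool on a small quint8 NHWC tensor and checks that the
// dequantized result matches a hand-computed 2x2 average pool.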
TEST_F(QuantizedPoolingTest, SmallAveragePooling) {
  const int ksize = 2;
  const int stride = 2;
  TF_ASSERT_OK(NodeDefBuilder("quantized_avg_pool_op", "QuantizedAvgPool")
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Attr("T", DataTypeToEnum<quint8>::v())
                   .Attr("ksize", {1, ksize, ksize, 1})
                   .Attr("strides", {1, stride, stride, 1})
                   .Attr("padding", "SAME")
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
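  // With a range of [0.0f, 255.0f], each quint8 value corresponds directly to
  // the same float value, so the pooled results below are easy to verify by
  // hand.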
  const float input_min = 0.0f;
  const float input_max = 255.0f;
  const int input_height = 4;
  const int input_width = 4;
  const int input_channels = 2;
  Tensor input_float(DT_FLOAT, {1, input_height, input_width, input_channels});
  test::FillValues<float>(
      &input_float,
      {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16,
       17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32});
  Tensor input_quantized =
      FloatTensorToQuantized<quint8>(input_float, input_min, input_max);

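  // Each expected value is the mean of one 2x2 spatial window per channel of
  // the NHWC input above; for example, the top-left window of channel 0 holds
  // {1, 3, 9, 11}, which averages to 6.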
  const int expected_width = input_width / stride;
  const int expected_height = input_height / stride;
  Tensor expected_float(DT_FLOAT,
                        {1, expected_height, expected_width, input_channels});
  test::FillValues<float>(&expected_float, {6, 7, 10, 11, 22, 23, 26, 27});

  AddInputFromArray<quint8>(input_quantized.shape(),
                            input_quantized.flat<quint8>());
  AddInputFromArray<float>(TensorShape({1}), {input_min});
  AddInputFromArray<float>(TensorShape({1}), {input_max});
  TF_ASSERT_OK(RunOpKernel());
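  // Output 0 is the pooled quint8 tensor; outputs 1 and 2 carry the float
  // range needed to convert it back to floats for comparison.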
  const Tensor& output_quantized = *GetOutput(0);
  const float output_min = GetOutput(1)->flat<float>()(0);
  const float output_max = GetOutput(2)->flat<float>()(0);
  Tensor output_float =
      QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);
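  // The 0.2 tolerance absorbs rounding error introduced by the round trip
  // through quint8.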
  test::ExpectTensorNear<float>(expected_float, output_float, 0.2);
}

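// Same setup as above, but for QuantizedMaxPool: the expected output is the
// per-channel maximum of each 2x2 window rather than the mean.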
TEST_F(QuantizedPoolingTest, SmallMaxPooling) {
  const int ksize = 2;
  const int stride = 2;
  TF_ASSERT_OK(NodeDefBuilder("quantized_max_pool_op", "QuantizedMaxPool")
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Attr("T", DataTypeToEnum<quint8>::v())
                   .Attr("ksize", {1, ksize, ksize, 1})
                   .Attr("strides", {1, stride, stride, 1})
                   .Attr("padding", "SAME")
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  const float input_min = 0.0f;
  const float input_max = 255.0f;
  const int input_height = 4;
  const int input_width = 4;
  const int input_channels = 2;
  Tensor input_float(DT_FLOAT, {1, input_height, input_width, input_channels});
  test::FillValues<float>(
      &input_float,
      {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16,
       17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32});
  Tensor input_quantized =
      FloatTensorToQuantized<quint8>(input_float, input_min, input_max);

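  // Each expected value is the per-channel maximum of one 2x2 window; the
  // top-left window of channel 0 holds {1, 3, 9, 11}, so its output is 11.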
  const int expected_width = input_width / stride;
  const int expected_height = input_height / stride;
  Tensor expected_float(DT_FLOAT,
                        {1, expected_height, expected_width, input_channels});
  test::FillValues<float>(&expected_float, {11, 12, 15, 16, 27, 28, 31, 32});

  AddInputFromArray<quint8>(input_quantized.shape(),
                            input_quantized.flat<quint8>());
  AddInputFromArray<float>(TensorShape({1}), {input_min});
  AddInputFromArray<float>(TensorShape({1}), {input_max});
  TF_ASSERT_OK(RunOpKernel());
  const Tensor& output_quantized = *GetOutput(0);
  const float output_min = GetOutput(1)->flat<float>()(0);
  const float output_max = GetOutput(2)->flat<float>()(0);
  Tensor output_float =
      QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);
  test::ExpectTensorNear<float>(expected_float, output_float, 0.2);
}

}  // namespace tensorflow