/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#define EIGEN_USE_THREADS

#include <functional>
#include <memory>
#include <vector>

#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"

namespace tensorflow {

// Test fixture for the QuantizedConv2D op. OpsTestBase supplies the helpers
// used below (node_def(), InitOp(), AddInputFromArray, RunOpKernel,
// GetOutput); no extra per-test state is needed here.
class QuantizedConv2DTest : public OpsTestBase {
 protected:
};

// Convolves a small quantized 4x3 single-channel image with a 3x3 filter
// (stride 1, SAME padding) and checks the dequantized result against
// hand-computed values.
TEST_F(QuantizedConv2DTest, Small) {
  const int stride = 1;
  // Inputs are: image, filter, then the four float min/max range scalars
  // (image_min, image_max, filter_min, filter_max).
  TF_ASSERT_OK(NodeDefBuilder("quantized_conv_op", "QuantizedConv2D")
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Attr("out_type", DataTypeToEnum<qint32>::v())
                   .Attr("strides", {1, stride, stride, 1})
                   .Attr("padding", "SAME")
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());

  const int depth = 1;
  const int image_width = 4;
  const int image_height = 3;
  const int image_batch_count = 1;
  // The image data should always be able to represent zero, to allow a fast
  // implementation of border padding, so we set the min value to 0.
  const float image_min = 0.0f;
  const float image_max = 12.0f;
  // The image matrix is:
  // |  1 |  2 |  3 |  4 |
  // |  5 |  6 |  7 |  8 |
  // |  9 | 10 | 11 | 12 |
  Tensor image_float(DT_FLOAT,
                     {image_batch_count, image_height, image_width, depth});
  test::FillValues<float>(&image_float,
                          {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
  Tensor image_quantized =
      FloatTensorToQuantized<quint8>(image_float, image_min, image_max);

  // The filter matrix is:
  // | 1 | 4 | 7 |
  // | 2 | 5 | 8 |
  // | 3 | 6 | 9 |
  const int filter_size = 3;
  const int filter_count = 1;
  const float filter_min = 1.0f;
  const float filter_max = 9.0f;
  Tensor filter_float(DT_FLOAT,
                      {filter_size, filter_size, depth, filter_count});
  test::FillValues<float>(&filter_float, {1, 4, 7, 2, 5, 8, 3, 6, 9});
  Tensor filter_quantized =
      FloatTensorToQuantized<quint8>(filter_float, filter_min, filter_max);

  AddInputFromArray<quint8>(image_quantized.shape(),
                            image_quantized.flat<quint8>());
  AddInputFromArray<quint8>(filter_quantized.shape(),
                            filter_quantized.flat<quint8>());
  AddInputFromArray<float>(TensorShape({1}), {image_min});
  AddInputFromArray<float>(TensorShape({1}), {image_max});
  AddInputFromArray<float>(TensorShape({1}), {filter_min});
  AddInputFromArray<float>(TensorShape({1}), {filter_max});
  TF_ASSERT_OK(RunOpKernel());

  // We're sliding the 3x3 filter across the 3x4 image, with accesses outside
  // the input set to zero because we're using the 'SAME' padding mode.
  // The calculations behind the expected output are:
  // (1*0)+(4*0)+(7*0)+(2*0)+(5*1)+(8*2)+(3*0)+(6*5)+(9*6)=105
  // (1*0)+(4*0)+(7*0)+(2*1)+(5*2)+(8*3)+(3*5)+(6*6)+(9*7)=150
  // (1*0)+(4*0)+(7*0)+(2*2)+(5*3)+(8*4)+(3*6)+(6*7)+(9*8)=183
  // (1*0)+(4*0)+(7*0)+(2*3)+(5*4)+(8*0)+(3*7)+(6*8)+(9*0)=95
  // (1*0)+(4*1)+(7*2)+(2*0)+(5*5)+(8*6)+(3*0)+(6*9)+(9*10)=235
  // (1*1)+(4*2)+(7*3)+(2*5)+(5*6)+(8*7)+(3*9)+(6*10)+(9*11)=312
  // (1*2)+(4*3)+(7*4)+(2*6)+(5*7)+(8*8)+(3*10)+(6*11)+(9*12)=357
  // (1*3)+(4*4)+(7*0)+(2*7)+(5*8)+(8*0)+(3*11)+(6*12)+(9*0)=178
  // (1*0)+(4*5)+(7*6)+(2*0)+(5*9)+(8*10)+(3*0)+(6*0)+(9*0)=187
  // (1*5)+(4*6)+(7*7)+(2*9)+(5*10)+(8*11)+(3*0)+(6*0)+(9*0)=234
  // (1*6)+(4*7)+(7*8)+(2*10)+(5*11)+(8*12)+(3*0)+(6*0)+(9*0)=261
  // (1*7)+(4*8)+(7*0)+(2*11)+(5*12)+(8*0)+(3*0)+(6*0)+(9*0)=121
  // This means we should end up with this matrix:
  // | 105 | 150 | 183 |  95 |
  // | 235 | 312 | 357 | 178 |
  // | 187 | 234 | 261 | 121 |
  const int expected_width = image_width;
  const int expected_height = image_height * filter_count;
  Tensor expected_float(
      DT_FLOAT, TensorShape({image_batch_count, expected_height, expected_width,
                             filter_count}));
  test::FillValues<float>(&expected_float, {105, 150, 183, 95, 235, 312, 357,
                                            178, 187, 234, 261, 121});
  // Dequantize the qint32 output using the min/max range the op reports on
  // outputs 1 and 2, then compare in float space with a 1.0 tolerance to
  // absorb quantization rounding error.
  const Tensor& output_quantized = *GetOutput(0);
  const float output_min = GetOutput(1)->flat<float>()(0);
  const float output_max = GetOutput(2)->flat<float>()(0);
  Tensor output_float =
      QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);
  test::ExpectTensorNear<float>(expected_float, output_float, 1.0);
}

// Same convolution as Small, but feeds raw quint8 values with [0, 255]
// ranges (inputs scaled by 10) and checks the raw qint32 accumulator output
// exactly, without dequantizing.
TEST_F(QuantizedConv2DTest, Small32Bit) {
  const int stride = 1;
  TF_ASSERT_OK(NodeDefBuilder("quantized_conv_op", "QuantizedConv2D")
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Attr("out_type", DataTypeToEnum<qint32>::v())
                   .Attr("strides", {1, stride, stride, 1})
                   .Attr("padding", "SAME")
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());

  const int depth = 1;
  const int image_width = 4;
  const int image_height = 3;
  const int image_batch_count = 1;
  AddInputFromArray<quint8>(
      TensorShape({image_batch_count, image_height, image_width, depth}),
      {10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120});
  const int filter_size = 3;
  const int filter_count = 1;
  AddInputFromArray<quint8>(
      TensorShape({filter_size, filter_size, depth, filter_count}),
      {10, 40, 70, 20, 50, 80, 30, 60, 90});
  AddInputFromArray<float>(TensorShape({1}), {0});
  AddInputFromArray<float>(TensorShape({1}), {255.0f});
  AddInputFromArray<float>(TensorShape({1}), {0});
  AddInputFromArray<float>(TensorShape({1}), {255.0f});

  TF_ASSERT_OK(RunOpKernel());
  // Expected values are the Small-test results scaled by 10*10=100, since
  // both image and filter values were multiplied by 10.
  const int expected_width = image_width;
  const int expected_height = image_height * filter_count;
  Tensor expected(DT_QINT32, TensorShape({image_batch_count, expected_height,
                                          expected_width, filter_count}));
  test::FillValues<qint32>(
      &expected, {10500, 15000, 18300, 9500, 23500, 31200, 35700, 17800, 18700,
                  23400, 26100, 12100});
  test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}

// Exercises stride-2 SAME padding on a 4x4 image with a 3x3 filter, where
// the total padding is odd and must be distributed asymmetrically.
TEST_F(QuantizedConv2DTest, OddPadding) {
  const int stride = 2;
  TF_ASSERT_OK(NodeDefBuilder("quantized_conv_op", "QuantizedConv2D")
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Attr("out_type", DataTypeToEnum<qint32>::v())
                   .Attr("strides", {1, stride, stride, 1})
                   .Attr("padding", "SAME")
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());

  const int depth = 1;
  const int image_width = 4;
  const int image_height = 4;
  const int image_batch_count = 1;
  AddInputFromArray<quint8>(
      TensorShape({image_batch_count, image_height, image_width, depth}),
      {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
  const int filter_size = 3;
  const int filter_count = 1;
  AddInputFromArray<quint8>(
      TensorShape({filter_size, filter_size, depth, filter_count}),
      {1, 2, 3, 4, 5, 6, 7, 8, 9});
  AddInputFromArray<float>(TensorShape({1}), {0});
  AddInputFromArray<float>(TensorShape({1}), {255.0f});
  AddInputFromArray<float>(TensorShape({1}), {0});
  AddInputFromArray<float>(TensorShape({1}), {255.0f});

  TF_ASSERT_OK(RunOpKernel());
  const int expected_width = image_width / stride;
  const int expected_height = (image_height * filter_count) / stride;
  Tensor expected(DT_QINT32, TensorShape({image_batch_count, expected_height,
                                          expected_width, filter_count}));
  test::FillValues<qint32>(&expected, {348, 252, 274, 175});
  test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}

// Same asymmetric-padding setup as OddPadding, but with a batch of three
// identical images; each batch element should produce the same result.
TEST_F(QuantizedConv2DTest, OddPaddingBatch) {
  const int stride = 2;
  TF_ASSERT_OK(NodeDefBuilder("quantized_conv_op", "QuantizedConv2D")
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Attr("out_type", DataTypeToEnum<qint32>::v())
                   .Attr("strides", {1, stride, stride, 1})
                   .Attr("padding", "SAME")
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());

  const int depth = 1;
  const int image_width = 4;
  const int image_height = 4;
  const int image_batch_count = 3;
  AddInputFromArray<quint8>(
      TensorShape({image_batch_count, image_height, image_width, depth}),
      {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
  const int filter_size = 3;
  const int filter_count = 1;
  AddInputFromArray<quint8>(
      TensorShape({filter_size, filter_size, depth, filter_count}),
      {1, 2, 3, 4, 5, 6, 7, 8, 9});
  AddInputFromArray<float>(TensorShape({1}), {0});
  AddInputFromArray<float>(TensorShape({1}), {255.0f});
  AddInputFromArray<float>(TensorShape({1}), {0});
  AddInputFromArray<float>(TensorShape({1}), {255.0f});

  TF_ASSERT_OK(RunOpKernel());
  const int expected_width = image_width / stride;
  const int expected_height = (image_height * filter_count) / stride;
  Tensor expected(DT_QINT32, TensorShape({image_batch_count, expected_height,
                                          expected_width, filter_count}));
  test::FillValues<qint32>(&expected, {348, 252, 274, 175,  //
                                       348, 252, 274, 175,  //
                                       348, 252, 274, 175});
  test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}

// Same computation as Small, but with an image range that cannot represent
// zero, which forces the op onto its slower padding path; the dequantized
// result must still match the same expected matrix.
TEST_F(QuantizedConv2DTest, SmallWithNoZero) {
  const int stride = 1;
  TF_ASSERT_OK(NodeDefBuilder("quantized_conv_op", "QuantizedConv2D")
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Attr("out_type", DataTypeToEnum<qint32>::v())
                   .Attr("strides", {1, stride, stride, 1})
                   .Attr("padding", "SAME")
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  const int depth = 1;
  const int image_width = 4;
  const int image_height = 3;
  const int image_batch_count = 1;
  // Here we're testing a slow implementation path, where zero is not
  // representable in the image data and so simple border padding is not
  // possible, so we have a min value greater than 0.
  const float image_min = 1.0f;
  const float image_max = 12.0f;
  Tensor image_float(DT_FLOAT,
                     {image_batch_count, image_height, image_width, depth});
  test::FillValues<float>(&image_float,
                          {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
  Tensor image_quantized =
      FloatTensorToQuantized<quint8>(image_float, image_min, image_max);
  const int filter_size = 3;
  const int filter_count = 1;
  const float filter_min = 1.0f;
  const float filter_max = 9.0f;
  Tensor filter_float(DT_FLOAT,
                      {filter_size, filter_size, depth, filter_count});
  test::FillValues<float>(&filter_float, {1, 4, 7, 2, 5, 8, 3, 6, 9});
  Tensor filter_quantized =
      FloatTensorToQuantized<quint8>(filter_float, filter_min, filter_max);
  AddInputFromArray<quint8>(image_quantized.shape(),
                            image_quantized.flat<quint8>());
  AddInputFromArray<quint8>(filter_quantized.shape(),
                            filter_quantized.flat<quint8>());
  AddInputFromArray<float>(TensorShape({1}), {image_min});
  AddInputFromArray<float>(TensorShape({1}), {image_max});
  AddInputFromArray<float>(TensorShape({1}), {filter_min});
  AddInputFromArray<float>(TensorShape({1}), {filter_max});
  TF_ASSERT_OK(RunOpKernel());
  const int expected_width = image_width;
  const int expected_height = image_height * filter_count;
  Tensor expected_float(
      DT_FLOAT, TensorShape({image_batch_count, expected_height, expected_width,
                             filter_count}));
  test::FillValues<float>(&expected_float, {105, 150, 183, 95, 235, 312, 357,
                                            178, 187, 234, 261, 121});
  const Tensor& output_quantized = *GetOutput(0);
  const float output_min = GetOutput(1)->flat<float>()(0);
  const float output_max = GetOutput(2)->flat<float>()(0);
  Tensor output_float =
      QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);
  test::ExpectTensorNear<float>(expected_float, output_float, 1.0);
}

}  // namespace tensorflow