/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// Unit tests and benchmarks for the QuantizeAndDequantizeV2/V3 op kernels.

#include <functional>
#include <memory>
#include <vector>

#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test_benchmark.h"

namespace tensorflow {
namespace {

// Test fixture.  OpsTestBase supplies node_def(), InitOp(), the
// AddInputFromArray() feeding helpers, RunOpKernel(), GetOutput(), and the
// inputs_ vector used below.
class QuantizeAndDequantizeTest : public OpsTestBase {};

// Convert a simple scalar tensor.
// With range_given=false the kernel derives the quantization range from the
// input tensor itself; the min/max inputs are fed as 0.0 and must be left
// untouched.
TEST_F(QuantizeAndDequantizeTest, Convert_scalar_tensor) {
  TF_ASSERT_OK(
      NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Attr("signed_input", true)
          .Attr("num_bits", 8)
          .Attr("range_given", false)
          .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  // Input order matters: tensor, then min, then max.
  AddInputFromArray<float>(TensorShape({1}), {-3.5});
  AddInputFromArray<float>(TensorShape({}), {0.0});  // Min
  AddInputFromArray<float>(TensorShape({}), {0.0});  // Max

  // A single value must round-trip through quantize/dequantize exactly.
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_FLOAT, TensorShape({1}));
  test::FillValues<float>(&expected, {-3.5});
  test::ExpectTensorEqual<float>(expected, *GetOutput(0));

  // Ensure that the inputs haven't been changed.
  EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0);
  EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0);
}

// Same conversion through QuantizeAndDequantizeV3, which takes num_bits as a
// fourth (int32) input instead of an attribute.
TEST_F(QuantizeAndDequantizeTest, Convert_scalar_tensor_V3) {
  TF_ASSERT_OK(
      NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_INT32))
          .Attr("signed_input", true)
          .Attr("range_given", false)
          .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  AddInputFromArray<float>(TensorShape({1}), {-3.5});
  AddInputFromArray<float>(TensorShape({}), {0.0});  // Min
  AddInputFromArray<float>(TensorShape({}), {0.0});  // Max
  AddInputFromArray<int32>(TensorShape({}), {8});    // num_bits

  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_FLOAT, TensorShape({1}));
  test::FillValues<float>(&expected, {-3.5});
  test::ExpectTensorEqual<float>(expected, *GetOutput(0));

  // Ensure that the inputs haven't been changed.
  EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0);
  EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0);
}

// Convert a 1D tensor with signed 8 bits.
// range_given=false: the range is derived from the input values, so the fed
// min/max (0.0) are ignored and must come back unmodified.
TEST_F(QuantizeAndDequantizeTest, Convert_1D_tensor_with_int8) {
  TF_ASSERT_OK(
      NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Attr("signed_input", true)
          .Attr("num_bits", 8)
          .Attr("range_given", false)
          .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  AddInputFromArray<float>(TensorShape({6}), {-1, -0.5, 0, 0.3, 0.8, 0.555});
  AddInputFromArray<float>(TensorShape({}), {0.0});  // Min
  AddInputFromArray<float>(TensorShape({}), {0.0});  // Max

  // With int8, the tensor is quantized to {-127, -63, 0, 38, 102, 70}.
  // Scale is: 1/127
  // Then it is dequantized to {-1, -63.0/127, 0, 38.0/127, 102.0/127, 70.0/127}
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_FLOAT, TensorShape({6}));
  test::FillValues<float>(
      &expected, {-1, -63.0 / 127, 0, 38.0 / 127, 102.0 / 127, 70.0 / 127});
  test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);

  // Ensure that the inputs haven't been changed.
  EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0);
  EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0);
}

// Convert a 1D tensor with signed 8 bits.
// V3 variant of the test above: num_bits arrives as a scalar int32 input
// rather than an attribute; expected values are identical.
TEST_F(QuantizeAndDequantizeTest, Convert_1D_tensor_with_int8_V3) {
  TF_ASSERT_OK(
      NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_INT32))
          .Attr("signed_input", true)
          .Attr("range_given", false)
          .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  AddInputFromArray<float>(TensorShape({6}), {-1, -0.5, 0, 0.3, 0.8, 0.555});
  AddInputFromArray<float>(TensorShape({}), {0.0});  // Min
  AddInputFromArray<float>(TensorShape({}), {0.0});  // Max
  AddInputFromArray<int32>(TensorShape({}), {8});    // num_bits

  // With int8, the tensor is quantized to {-127, -63, 0, 38, 102, 70}.
  // Scale is: 1/127
  // Then it is dequantized to {-1, -63.0/127, 0, 38.0/127, 102.0/127, 70.0/127}
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_FLOAT, TensorShape({6}));
  test::FillValues<float>(
      &expected, {-1, -63.0 / 127, 0, 38.0 / 127, 102.0 / 127, 70.0 / 127});
  test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);

  // Ensure that the inputs haven't been changed.
  EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0);
  EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0);
}

// Convert a 1D tensor with signed 4 bits.
// 4-bit signed quantization: only 2^4 levels, so the round-trip error is much
// larger than in the int8 tests above.
TEST_F(QuantizeAndDequantizeTest, Convert_1D_tensor_with_int4) {
  TF_ASSERT_OK(
      NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Attr("signed_input", true)
          .Attr("num_bits", 4)
          .Attr("range_given", false)
          .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  AddInputFromArray<float>(TensorShape({6}), {-1, -0.5, 0, 0.3, 0.8, 0.555});
  AddInputFromArray<float>(TensorShape({}), {0.0});  // Min
  AddInputFromArray<float>(TensorShape({}), {0.0});  // Max

  // With int4, the tensor is quantized to {-7, -3, 0, 2, 6, 4}.
  // Scale is: 1/7
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_FLOAT, TensorShape({6}));
  test::FillValues<float>(&expected,
                          {-1, -3.0 / 7, 0, 2.0 / 7, 6.0 / 7, 4.0 / 7});
  test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);

  // Ensure that the inputs haven't been changed.
  EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0);
  EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0);
}

// Convert a 1D tensor with signed 4 bits.
// V3 variant: num_bits (4) is supplied as an int32 input.
TEST_F(QuantizeAndDequantizeTest, Convert_1D_tensor_with_int4_V3) {
  TF_ASSERT_OK(
      NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_INT32))
          .Attr("signed_input", true)
          .Attr("range_given", false)
          .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  AddInputFromArray<float>(TensorShape({6}), {-1, -0.5, 0, 0.3, 0.8, 0.555});
  AddInputFromArray<float>(TensorShape({}), {0.0});  // Min
  AddInputFromArray<float>(TensorShape({}), {0.0});  // Max
  AddInputFromArray<int32>(TensorShape({}), {4});    // num_bits

  // With int4, the tensor is quantized to {-7, -3, 0, 2, 6, 4}.
  // Scale is: 1/7
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_FLOAT, TensorShape({6}));
  test::FillValues<float>(&expected,
                          {-1, -3.0 / 7, 0, 2.0 / 7, 6.0 / 7, 4.0 / 7});
  test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);

  // Ensure that the inputs haven't been changed.
  EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0);
  EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0);
}

// Convert a 2D tensor with signed 8 bits with given range.
// range_given=true: the fed min/max ([-1, 1]) define the range, and values
// outside it are clamped (saturated) to the range endpoints.
TEST_F(QuantizeAndDequantizeTest, Convert_2D_tensor_with_int8_range_given) {
  TF_ASSERT_OK(
      NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Attr("signed_input", true)
          .Attr("num_bits", 8)
          .Attr("range_given", true)
          .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  // Note that the last two values (-2 and 33) are saturated.
  AddInputFromArray<float>(TensorShape({2, 4}),
                           {-0.8, -0.5, 0, 0.3, 0.8, 0.555, -2, 33});
  AddInputFromArray<float>(TensorShape({}), {-1.0});  // Min
  AddInputFromArray<float>(TensorShape({}), {1.0});   // Max

  // Note that the range is given as [-1, 1].
  // With int8, the tensor is quantized to {-102, -63, 0, 38, 102, 70, -127,
  // 127}.
  // Scale is: 1/127
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 4}));
  test::FillValues<float>(&expected, {-102.0 / 127, -63.0 / 127, 0, 38.0 / 127,
                                      102.0 / 127, 70.0 / 127, -1, 1});
  test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}

// Convert a 2D tensor with signed 8 bits with given range.
// V3 variant of the saturating range-given test: num_bits (8) is an input.
TEST_F(QuantizeAndDequantizeTest, Convert_2D_tensor_with_int8_range_given_V3) {
  TF_ASSERT_OK(
      NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_INT32))
          .Attr("signed_input", true)
          .Attr("range_given", true)
          .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  // Note that the last two values (-2 and 33) are saturated.
  AddInputFromArray<float>(TensorShape({2, 4}),
                           {-0.8, -0.5, 0, 0.3, 0.8, 0.555, -2, 33});
  AddInputFromArray<float>(TensorShape({}), {-1.0});  // Min
  AddInputFromArray<float>(TensorShape({}), {1.0});   // Max
  AddInputFromArray<int32>(TensorShape({}), {8});     // num_bits

  // Note that the range is given as [-1, 1].
  // With int8, the tensor is quantized to {-102, -63, 0, 38, 102, 70, -127,
  // 127}.
  // Scale is: 1/127
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 4}));
  test::FillValues<float>(&expected, {-102.0 / 127, -63.0 / 127, 0, 38.0 / 127,
                                      102.0 / 127, 70.0 / 127, -1, 1});
  test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}

// Convert a 4D tensor with unsigned 8 bits with given range.
// signed_input=false: 8 unsigned bits give 255 levels over the given [0, 1]
// range; the negative input value clamps to 0.
TEST_F(QuantizeAndDequantizeTest, Convert_4D_tensor_with_uint8_range_given) {
  TF_ASSERT_OK(
      NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Attr("signed_input", false)
          .Attr("num_bits", 8)
          .Attr("range_given", true)
          .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {-0.5, 0, 0.3, 0.8});
  AddInputFromArray<float>(TensorShape({}), {0.0});  // Min
  AddInputFromArray<float>(TensorShape({}), {1.0});  // Max

  // Note that the range is given as [0, 1].
  // With uint8, the tensor is quantized to {0, 0, 77, 204}.
  // Scale is: 1/255
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 1, 1}));
  test::FillValues<float>(&expected, {0, 0, 77.0 / 255, 204.0 / 255});
  test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}

// Convert a 4D tensor with unsigned 8 bits with given range.
// V3 variant: num_bits (8) is supplied as an int32 input.
TEST_F(QuantizeAndDequantizeTest, Convert_4D_tensor_with_uint8_range_given_V3) {
  TF_ASSERT_OK(
      NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_INT32))
          .Attr("signed_input", false)
          .Attr("range_given", true)
          .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {-0.5, 0, 0.3, 0.8});
  AddInputFromArray<float>(TensorShape({}), {0.0});  // Min
  AddInputFromArray<float>(TensorShape({}), {1.0});  // Max
  AddInputFromArray<int32>(TensorShape({}), {8});    // num_bits

  // Note that the range is given as [0, 1].
  // With uint8, the tensor is quantized to {0, 0, 77, 204}.
  // Scale is: 1/255
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 1, 1}));
  test::FillValues<float>(&expected, {0, 0, 77.0 / 255, 204.0 / 255});
  test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}

// Convert a tensor with all 0.
// Degenerate case: an all-zero tensor with an inferred (range_given=false)
// range must come back as all zeros without dividing by a zero scale.
TEST_F(QuantizeAndDequantizeTest, Convert_tensor_with_all_0) {
  TF_ASSERT_OK(
      NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Attr("signed_input", false)
          .Attr("num_bits", 8)
          .Attr("range_given", false)
          .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {0, 0, 0, 0});
  AddInputFromArray<float>(TensorShape({}), {0.0});  // Min
  AddInputFromArray<float>(TensorShape({}), {0.0});  // Max

  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 1, 1}));
  test::FillValues<float>(&expected, {0, 0, 0, 0});
  test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}

// Convert a tensor with all 0.
// V3 variant: num_bits (8) is supplied as an int32 input.
TEST_F(QuantizeAndDequantizeTest, Convert_tensor_with_all_0_V3) {
  TF_ASSERT_OK(
      NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_INT32))
          .Attr("signed_input", false)
          .Attr("range_given", false)
          .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {0, 0, 0, 0});
  AddInputFromArray<float>(TensorShape({}), {0.0});  // Min
  AddInputFromArray<float>(TensorShape({}), {0.0});  // Max
  AddInputFromArray<int32>(TensorShape({}), {8});    // num_bits

  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 1, 1}));
  test::FillValues<float>(&expected, {0, 0, 0, 0});
  test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}

// Range is invalid: with range_given=true, feeding min > max must make the
// kernel fail with a descriptive error rather than crash.
TEST_F(QuantizeAndDequantizeTest, Invalid_range_given) {
  TF_ASSERT_OK(
      NodeDefBuilder("quantize_and_dequantize_Op", "QuantizeAndDequantizeV2")
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Attr("num_bits", 8)
          .Attr("range_given", true)
          .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {-0.5, 0, 0.3, 0.8});
  AddInputFromArray<float>(TensorShape({}), {1.0});  // Min
  AddInputFromArray<float>(TensorShape({}), {0.0});  // Max

  // The error status (not an OK TF_ASSERT) is checked by substring match.
  Status s = RunOpKernel();
  EXPECT_TRUE(StringPiece(s.ToString())
                  .contains("Invalid range: input_min 1 > input_max 0"))
      << s;
}

// Range is invalid (V3): same min > max check with num_bits as an input.
TEST_F(QuantizeAndDequantizeTest, Invalid_range_given_V3) {
  TF_ASSERT_OK(
      NodeDefBuilder("quantize_and_dequantize_Op", "QuantizeAndDequantizeV3")
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_FLOAT))
          .Input(FakeInput(DT_INT32))
          .Attr("range_given", true)
          .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {-0.5, 0, 0.3, 0.8});
  AddInputFromArray<float>(TensorShape({}), {1.0});  // Min
  AddInputFromArray<float>(TensorShape({}), {0.0});  // Max
  AddInputFromArray<int32>(TensorShape({}), {8});    // num_bits

  Status s = RunOpKernel();
  EXPECT_TRUE(StringPiece(s.ToString())
                  .contains("Invalid range: input_min 1 > input_max 0"))
      << s;
}

// Benchmarks a minimal scalar QuantizeAndDequantizeV2 graph on the given
// device (input, min, and max are all the constant -3.5).
#define BM_SIMPLE_QUAN_DEQUAN(DEVICE)                     \
  static void BM_SIMPLE_QUAN_DEQUAN_##DEVICE(int iters) { \
    auto root = Scope::NewRootScope().ExitOnError();      \
    ops::QuantizeAndDequantizeV2(root, -3.5, -3.5, -3.5); \
    TF_CHECK_OK(root.status());                           \
    Graph* g = new Graph(OpRegistry::Global());           \
    TF_CHECK_OK(root.ToGraph(g));                         \
    test::Benchmark(#DEVICE, g).Run(iters);               \
  }                                                       \
  BENCHMARK(BM_SIMPLE_QUAN_DEQUAN_##DEVICE);

BM_SIMPLE_QUAN_DEQUAN(cpu);
BM_SIMPLE_QUAN_DEQUAN(gpu);

}  // namespace
}  // namespace tensorflow