// tensorflow/tools/graph_transforms (fake_quantize_training transform)
      1 /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
      2 
      3 Licensed under the Apache License, Version 2.0 (the "License");
      4 you may not use this file except in compliance with the License.
      5 You may obtain a copy of the License at
      6 
      7     http://www.apache.org/licenses/LICENSE-2.0
      8 
      9 Unless required by applicable law or agreed to in writing, software
     10 distributed under the License is distributed on an "AS IS" BASIS,
     11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     12 See the License for the specific language governing permissions and
     13 limitations under the License.
     14 ==============================================================================*/
     15 
     16 #define EIGEN_USE_THREADS
     17 
     18 #include "tensorflow/core/graph/quantize_training.h"
     19 #include "tensorflow/tools/graph_transforms/transform_utils.h"
     20 
     21 namespace tensorflow {
     22 namespace graph_transforms {
     23 
     24 // EXPERIMENTAL: This can change without warning.
     25 // Rewrites the GraphDef for quantized training.
     26 // Rewrites the forward pass to include the precision loss with quantization so
     27 // the model can learn to deal with such loss and achieve better accuracy when
     28 // it is quantized later for inference.
     29 // Quantization range information is collected in FakeQuantizeWithMinMaxVars
     30 // ops.
     31 //
     32 // TODO(suharshs): Provide instructions on converting the resulting graph for
     33 // inference.
     34 // TODO(suharshs): Implement this using the GTT rather than calling the old
     35 // prototype function.
     36 Status FakeQuantizeTraining(const GraphDef& input_graph_def,
     37                             const TransformFuncContext& context,
     38                             GraphDef* output_graph_def) {
     39   // TODO(suharshs): Make num_bits a parameter.
     40   const int32 num_bits = 8;
     41   // TODO(suharshs): Make quantization op a parameter?
     42   const string quant_op_type = "FakeQuantWithMinMaxVars";
     43 
     44   return DoQuantizeTrainingOnGraphDef(input_graph_def, num_bits, quant_op_type,
     45                                       output_graph_def);
     46 }
     47 
// Registers this transform under the name "fake_quantize_training" so it can
// be selected by name (e.g. via the graph_transforms tool's transform list).
REGISTER_GRAPH_TRANSFORM("fake_quantize_training", FakeQuantizeTraining);
     49 
     50 }  // namespace graph_transforms
     51 }  // namespace tensorflow
     52