/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_CORE_GRAPH_MKL_GRAPH_UTIL_H_
#define TENSORFLOW_CORE_GRAPH_MKL_GRAPH_UTIL_H_
#ifdef INTEL_MKL

#include <string>
#include "tensorflow/core/framework/op_kernel.h"

namespace tensorflow {
// Since our ops produce and also consume N additional (Mkl) tensors for
// N Tensorflow tensors, these 2N tensors can be arranged in the following
// different orderings.
//
// E.g., for Tensorflow tensors A, B, and C, our ops will additionally
// produce and consume A_m, B_m, and C_m.
//
// INTERLEAVED: in this case the 2N tensors are interleaved. For the above
//              example, the ordering looks like: A, A_m, B, B_m, C, C_m.
//
// CONTIGUOUS: in this case the N Tensorflow tensors are contiguous, followed
//             by the N Mkl tensors. For the above example, the ordering looks
//             like: A, B, C, A_m, B_m, C_m.
//
// The following APIs map the index of an original Tensorflow tensor to its
// appropriate position based on the selected ordering. For contiguous
// ordering, we need to know the total number of tensors (parameter total).
//
typedef enum { TENSORS_INTERLEAVED, TENSORS_CONTIGUOUS } MklTfTensorOrdering;
// NOTE: Currently, we use contiguous ordering. If you change this, you
// would also need to change the Mkl op definitions in nn_ops.cc.
static MklTfTensorOrdering kTensorOrdering = TENSORS_CONTIGUOUS;
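//
// For illustration (a sketch, not part of the original header), assume N = 3
// Tensorflow tensors A, B, C, so total_tensors = 6. The index-mapping helpers
// below then yield:
//
//   ordering      data indices (A, B, C)   Mkl indices (A_m, B_m, C_m)
//   INTERLEAVED   0, 2, 4                  1, 3, 5
//   CONTIGUOUS    0, 1, 2                  3, 4, 5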

// Get the index of the MetaData tensor given index 'n' of the Data tensor.
inline int DataIndexToMetaDataIndex(int n, int total_tensors) {
  if (kTensorOrdering == MklTfTensorOrdering::TENSORS_INTERLEAVED) {
    // For interleaved ordering, the Mkl tensor follows immediately after
    // the Tensorflow tensor.
    return n + 1;
  } else {
    CHECK_EQ(kTensorOrdering, MklTfTensorOrdering::TENSORS_CONTIGUOUS);
    // For contiguous ordering, the Mkl tensor is at index n + total_tensors / 2.
    return n + total_tensors / 2;
  }
}
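
// For example (illustrative, not part of the original header), with the
// default contiguous ordering and total_tensors = 6:
//
//   DataIndexToMetaDataIndex(0, 6) == 3
//   DataIndexToMetaDataIndex(2, 6) == 5
//
// Under interleaved ordering, the same calls would return 1 and 3.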

inline int GetTensorDataIndex(int n, int total_tensors) {
  if (kTensorOrdering == MklTfTensorOrdering::TENSORS_INTERLEAVED) {
    return 2 * n;  // index corresponding to nth input/output tensor
  } else {
    CHECK_EQ(kTensorOrdering, MklTfTensorOrdering::TENSORS_CONTIGUOUS);
    return n;
  }
}

inline int GetTensorMetaDataIndex(int n, int total_tensors) {
  // Get the TensorData index first and then use the mapping function
  // to get the TensorMetaData index from the TensorData index.
  int tidx = GetTensorDataIndex(n, total_tensors);
  return DataIndexToMetaDataIndex(tidx, total_tensors);
}
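
// Usage sketch (illustrative, not part of the original header): inside an
// Mkl op kernel's Compute(), assuming 'ctx' is the kernel's OpKernelContext*
// and the kernel's inputs follow the ordering above, the nth Tensorflow input
// and its Mkl metadata input could be looked up as:
//
//   const Tensor& data = ctx->input(GetTensorDataIndex(n, ctx->num_inputs()));
//   const Tensor& meta = ctx->input(GetTensorMetaDataIndex(n, ctx->num_inputs()));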

namespace mkl_op_registry {
static const char* kMklOpLabel = "MklOp";
static const char* kMklOpLabelPattern = "label='MklOp'";
// Prefix that we add to the Tensorflow op name to construct the Mkl op name.
static const char* const kMklOpPrefix = "_Mkl";

// Get the name of the Mkl op from the original TensorFlow op name.
// We prefix the original name with '_Mkl' to get the Mkl op name.
inline string GetMklOpName(const string& name) {
  return string(kMklOpPrefix) + name;
}
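
// For example (illustrative, not part of the original header):
//
//   GetMklOpName("Conv2D")  // returns "_MklConv2D"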

// Check whether opname with type T is registered as an MKL-compliant op.
//
// @input: name of the op
// @input: T datatype to be used for checking the op
// @return: true if opname is registered as an Mkl op (and T is DT_FLOAT,
//          the only type currently supported); false otherwise
static inline bool IsMklOp(const std::string& op_name, DataType T) {
  string kernel = KernelsRegisteredForOp(op_name);
  bool result =
      kernel.find(kMklOpLabelPattern) != string::npos && (T == DT_FLOAT);
  return result;
}
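
// For example (a sketch, assuming "_MklConv2D" has a kernel registered with
// label 'MklOp'):
//
//   IsMklOp(GetMklOpName("Conv2D"), DT_FLOAT)   // true
//   IsMklOp(GetMklOpName("Conv2D"), DT_DOUBLE)  // false: only DT_FLOAT passes
//   IsMklOp("Conv2D", DT_FLOAT)                 // false: vanilla kernels do
//                                               // not carry the MklOp label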

// Check whether opname with type T is registered as an MKL-compliant,
// element-wise op.
//
// @input: name of the op
// @input: T datatype to be used for checking the op
// @return: true if opname is registered as an element-wise Mkl op;
//          false otherwise
static inline bool IsMklElementWiseOp(const std::string& op_name, DataType T) {
  if (!IsMklOp(op_name, T)) {
    return false;
  }
  bool result = (0 == op_name.compare(GetMklOpName("Add")) ||
                 0 == op_name.compare(GetMklOpName("Sub")) ||
                 0 == op_name.compare(GetMklOpName("Mul")) ||
                 0 == op_name.compare(GetMklOpName("Maximum")) ||
                 0 == op_name.compare(GetMklOpName("SquaredDifference")));

  return result;
}
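
// For example (a sketch, assuming the corresponding float Mkl kernels are
// registered with label 'MklOp'):
//
//   IsMklElementWiseOp(GetMklOpName("Add"), DT_FLOAT)     // true
//   IsMklElementWiseOp(GetMklOpName("Conv2D"), DT_FLOAT)  // false: not in the
//                                                         // element-wise list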
}  // namespace mkl_op_registry
}  // namespace tensorflow
#endif  // INTEL_MKL
#endif  // TENSORFLOW_CORE_GRAPH_MKL_GRAPH_UTIL_H_