/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_KERNELS_XENT_OP_H_
#define TENSORFLOW_KERNELS_XENT_OP_H_
// Functor definition for XentOp; must be compilable by nvcc.

#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor_types.h"

namespace tensorflow {
namespace functor {

// Functor used by XentOp to do the computations.
template <typename Device, typename T>
struct XentFunctor {
  // Computes Cross Entropy loss and backprop.
  //
  // logits: dims: batch_size, num_classes.
  // labels: dims: batch_size, num_classes.
  // scratch: temporary tensor, dims: batch_size, 1.
  // loss: output tensor for the loss, dims: batch_size.
  // backprop: output tensor for the backprop, dims: batch_size, num_classes.
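  //
  // For reference, the quantities computed below (restating the formulas in
  // the comments of XentEigenImpl) are:
  //   loss(i)        = -sum_j labels(i, j) *
  //                    ((logits(i, j) - max_logits(i)) -
  //                     log(sum_j exp(logits(i, j) - max_logits(i))))
  //   backprop(i, j) = exp(logits(i, j) - max_logits(i)) /
  //                      sum_j exp(logits(i, j) - max_logits(i))
  //                    - labels(i, j)
  // where max_logits(i) = max_j logits(i, j) is subtracted for numerical
  // stability.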
  void operator()(const Device& d, typename TTypes<T>::ConstMatrix logits,
                  typename TTypes<T>::ConstMatrix labels,
                  typename TTypes<T>::Matrix scratch,
                  typename TTypes<T>::Vec loss,
                  typename TTypes<T>::Matrix backprop);
};
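
// Illustrative only -- a minimal sketch of how an op kernel might invoke the
// functor, assuming the usual OpKernelContext accessors; the real call site
// lives in xent_op.cc:
//
//   functor::XentFunctor<Device, T> functor;
//   functor(context->eigen_device<Device>(), logits.matrix<T>(),
//           labels.matrix<T>(), scratch.matrix<T>(), loss.vec<T>(),
//           backprop.matrix<T>());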

// Eigen code implementing XentFunctor::operator().
// This code works for both CPU and GPU and is used by the functor
// specializations for both device types.
template <typename Device, typename T>
struct XentEigenImpl {
  static void Compute(const Device& d, typename TTypes<T>::ConstMatrix logits,
                      typename TTypes<T>::ConstMatrix labels,
                      typename TTypes<T>::Matrix scratch,
                      typename TTypes<T>::Vec loss,
                      typename TTypes<T>::Matrix backprop) {
    // NOTE(touts): This duplicates some of the computations in softmax_op
    // because we need the intermediate (logits - max(logits)) values to
    // avoid a log(exp()) in the computation of the loss.

    const int kBatchDim = 0;
    const int kClassDim = 1;

    const int batch_size = logits.dimension(kBatchDim);
    const int num_classes = logits.dimension(kClassDim);

// These arrays are used to reduce along the class dimension, and broadcast
// the resulting value to all classes.
#if !defined(EIGEN_HAS_INDEX_LIST)
    Eigen::array<int, 1> along_class;
    along_class[0] = kClassDim;
    Eigen::array<int, 1> batch_only;
    batch_only[0] = batch_size;
    Eigen::array<int, 2> batch_by_one;
    batch_by_one[0] = batch_size;
    batch_by_one[1] = 1;
    Eigen::array<int, 2> one_by_class;
    one_by_class[0] = 1;
    one_by_class[1] = num_classes;
#else
    Eigen::IndexList<Eigen::type2index<kClassDim> > along_class;
    Eigen::IndexList<int, Eigen::type2index<1> > batch_by_one;
    batch_by_one.set(0, batch_size);
    Eigen::IndexList<int> batch_only;
    batch_only.set(0, batch_size);
    Eigen::IndexList<Eigen::type2index<1>, int> one_by_class;
    one_by_class.set(1, num_classes);
#endif

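    // Shape roles in the expressions below:
    //   along_class  : reduction axis, (batch_size, num_classes) -> (batch_size)
    //   batch_only   : rank-1 (batch_size) shape used to reshape scratch
    //   batch_by_one : the (batch_size, 1) shape of scratch (not used below)
    //   one_by_class : broadcast (batch_size, 1) -> (batch_size, num_classes)
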
    // max_logits along classes.
    scratch.reshape(batch_only).device(d) = logits.maximum(along_class);

    // logits - max_logits.
    backprop.device(d) = logits - scratch.broadcast(one_by_class);

    // sum(exp(logits - max_logits)) along classes.
    scratch.reshape(batch_only).device(d) = backprop.exp().sum(along_class);

    // NOTE(keveman): Eigen on GPU dispatches to an optimized implementation
    // for an expression of the form lhs = rhs.sum().
    // lhs = -rhs.sum() doesn't match that pattern, so we fold the negation
    // into the expression before calling sum():
    //   sum(-labels *
    //       ((logits - max_logits) - log(sum(exp(logits - max_logits)))))
    //   along classes
    loss.device(d) =
        (labels * (scratch.log().eval().broadcast(one_by_class) - backprop))
            .eval()
            .sum(along_class);

    // backprop: prob - labels, where
    //   prob = exp(logits - max_logits) / sum(exp(logits - max_logits))
    backprop.device(d) =
        (backprop.exp() / scratch.broadcast(one_by_class)) - labels;
  }
};
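
// A sketch (an assumption, not part of this header) of how a per-device
// specialization of XentFunctor typically forwards to the shared Eigen
// implementation above, e.g. for the CPU device in xent_op.cc:
//
//   template <typename T>
//   struct XentFunctor<Eigen::ThreadPoolDevice, T> {
//     void operator()(const Eigen::ThreadPoolDevice& d,
//                     typename TTypes<T>::ConstMatrix logits,
//                     typename TTypes<T>::ConstMatrix labels,
//                     typename TTypes<T>::Matrix scratch,
//                     typename TTypes<T>::Vec loss,
//                     typename TTypes<T>::Matrix backprop) {
//       XentEigenImpl<Eigen::ThreadPoolDevice, T>::Compute(d, logits, labels,
//                                                          scratch, loss,
//                                                          backprop);
//     }
//   };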

}  // namespace functor
}  // namespace tensorflow

#endif  // TENSORFLOW_KERNELS_XENT_OP_H_