/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_KERNELS_SOFTMAX_OP_FUNCTOR_H_
#define TENSORFLOW_KERNELS_SOFTMAX_OP_FUNCTOR_H_
// Functor definition for SoftmaxOp, must be compilable by nvcc.

#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor_types.h"

namespace tensorflow {
namespace functor {
// Functor used by SoftmaxOp to do the computations.
template <typename Device, typename T>
struct SoftmaxFunctor {
  // Computes Softmax or LogSoftmax activation.
  //
  // logits: dims: batch_size, num_classes.
  // softmax: dims: batch_size, num_classes.
  // log: if true, compute LogSoftmax instead of Softmax.
  void operator()(const Device& d, typename TTypes<T>::ConstMatrix logits,
                  typename TTypes<T>::Matrix softmax, const bool log);
};
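// A minimal usage sketch (illustrative only): the device and the Eigen tensor
// maps below are placeholders, and the call assumes a matching device
// specialization of the functor is linked in from the kernel .cc/.cu.cc files.
//
//   Eigen::ThreadPoolDevice& device = ...;
//   TTypes<float>::ConstMatrix logits = ...;  // shape: [batch_size, num_classes]
//   TTypes<float>::Matrix softmax = ...;      // same shape as logits
//   SoftmaxFunctor<Eigen::ThreadPoolDevice, float>()(device, logits, softmax,
//                                                    /*log=*/false);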

// Eigen code implementing SoftmaxFunctor::operator() for both the softmax
// and log-softmax cases.
// This code works for both CPU and GPU and is used by the functor
// specializations for both device types (a sketch of such a specialization
// follows the struct below).
template <typename Device, typename T>
struct SoftmaxEigenImpl {
  static void Compute(const Device& d, typename TTypes<T>::ConstMatrix logits,
                      typename TTypes<T>::Matrix softmax, const bool log) {
    const int kBatchDim = 0;
    const int kClassDim = 1;

    const int batch_size = logits.dimension(kBatchDim);
    const int num_classes = logits.dimension(kClassDim);

// These arrays are used to reduce along the class dimension, and broadcast
// the resulting value to all classes.
#if !defined(EIGEN_HAS_INDEX_LIST)
    Eigen::DSizes<int, 1> along_class(kClassDim);
    Eigen::DSizes<int, 2> batch_by_one(batch_size, 1);
    Eigen::DSizes<int, 2> one_by_class(1, num_classes);
#else
    Eigen::IndexList<Eigen::type2index<kClassDim> > along_class;
    Eigen::IndexList<int, Eigen::type2index<1> > batch_by_one;
    batch_by_one.set(0, batch_size);
    Eigen::IndexList<Eigen::type2index<1>, int> one_by_class;
    one_by_class.set(1, num_classes);
#endif
    // shifted_logits = logits - max(logits along classes);
    // Subtracting the per-row maximum keeps exp() from overflowing without
    // changing the result.
    auto shifted_logits = (logits - logits.maximum(along_class)
                                        .eval()
                                        .reshape(batch_by_one)
                                        .broadcast(one_by_class));
    if (log) {
      // Calculate the log of the softmax
      // softmax = logits - max(logits along classes);
      softmax.device(d) = shifted_logits;
      // softmax = softmax - log(sum(exp(softmax along classes)));
      softmax.device(d) = (softmax - softmax.exp()
                                         .sum(along_class)
                                         .log()
                                         .eval()
                                         .reshape(batch_by_one)
                                         .broadcast(one_by_class));
    } else {
      // NOTE(touts): If you modify this implementation please run
      // the BM_ImageNetSoftmaxFwd benchmark in nn_ops_test.cc.
      //
      // softmax = exp(logits - max(logits along classes));
      softmax.device(d) = shifted_logits.exp();
      // softmax = softmax * (1 / sum(softmax along classes));
      softmax.device(d) = (softmax * softmax.sum(along_class)
                                         .inverse()
                                         .eval()
                                         .reshape(batch_by_one)
                                         .broadcast(one_by_class));
    }
  }
};
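// A sketch of how a per-device specialization might forward to the shared
// Eigen implementation above (illustrative only; the actual specializations
// live in the kernel .cc and .cu.cc files, and the CPUDevice alias is an
// assumption made here for the example):
//
//   typedef Eigen::ThreadPoolDevice CPUDevice;
//
//   template <typename T>
//   struct SoftmaxFunctor<CPUDevice, T> {
//     void operator()(const CPUDevice& d,
//                     typename TTypes<T>::ConstMatrix logits,
//                     typename TTypes<T>::Matrix softmax, const bool log) {
//       SoftmaxEigenImpl<CPUDevice, T>::Compute(d, logits, softmax, log);
//     }
//   };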

}  // namespace functor
}  // namespace tensorflow

#endif  // TENSORFLOW_KERNELS_SOFTMAX_OP_FUNCTOR_H_