// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_SHUFFLING_H
#define EIGEN_CXX11_TENSOR_TENSOR_SHUFFLING_H

namespace Eigen {

/** \class TensorShuffling
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor shuffling class.
  *
  * Expression that reorders (permutes) the dimensions of its input tensor:
  * dimension i of the result is dimension shuffle[i] of the input.
  *
  */
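//
// Usage sketch (illustrative only, not part of the original header). A
// TensorShufflingOp is normally built via the shuffle() method of a tensor
// expression:
//
//   Eigen::Tensor<float, 3> input(20, 30, 50);
//   input.setRandom();
//   // Shuffle all dimensions to the left by one.
//   Eigen::array<int, 3> shuffling{{1, 2, 0}};
//   Eigen::Tensor<float, 3> output = input.shuffle(shuffling);
//   // output has dimensions (30, 50, 20), and output(i, j, k) == input(k, i, j).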
namespace internal {
template<typename Shuffle, typename XprType>
struct traits<TensorShufflingOp<Shuffle, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions;
  static const int Layout = XprTraits::Layout;
};

template<typename Shuffle, typename XprType>
struct eval<TensorShufflingOp<Shuffle, XprType>, Eigen::Dense>
{
  typedef const TensorShufflingOp<Shuffle, XprType>& type;
};

template<typename Shuffle, typename XprType>
struct nested<TensorShufflingOp<Shuffle, XprType>, 1, typename eval<TensorShufflingOp<Shuffle, XprType> >::type>
{
  typedef TensorShufflingOp<Shuffle, XprType> type;
};

}  // end namespace internal


template<typename Shuffle, typename XprType>
class TensorShufflingOp : public TensorBase<TensorShufflingOp<Shuffle, XprType> >
{
  public:
    typedef typename Eigen::internal::traits<TensorShufflingOp>::Scalar Scalar;
    typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
    typedef typename XprType::CoeffReturnType CoeffReturnType;
    typedef typename Eigen::internal::nested<TensorShufflingOp>::type Nested;
    typedef typename Eigen::internal::traits<TensorShufflingOp>::StorageKind StorageKind;
    typedef typename Eigen::internal::traits<TensorShufflingOp>::Index Index;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorShufflingOp(const XprType& expr, const Shuffle& shuffle)
        : m_xpr(expr), m_shuffle(shuffle) {}

    EIGEN_DEVICE_FUNC
    const Shuffle& shufflePermutation() const { return m_shuffle; }

    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorShufflingOp& operator = (const TensorShufflingOp& other)
    {
      typedef TensorAssignOp<TensorShufflingOp, const TensorShufflingOp> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorShufflingOp& operator = (const OtherDerived& other)
    {
      typedef TensorAssignOp<TensorShufflingOp, const OtherDerived> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

  protected:
    typename XprType::Nested m_xpr;
    const Shuffle m_shuffle;
};


// Eval as rvalue
template<typename Shuffle, typename ArgType, typename Device>
struct TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device>
{
  typedef TensorShufflingOp<Shuffle, ArgType> XprType;
  typedef typename XprType::Index Index;
  static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;

  enum {
    IsAligned = false,
    PacketAccess = (internal::packet_traits<Scalar>::size > 1),
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false,  // to be implemented
    RawAccess = false
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device)
  {
    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    const Shuffle& shuffle = op.shufflePermutation();
    // Dimension i of the output is dimension shuffle[i] of the input.
    for (int i = 0; i < NumDims; ++i) {
      m_dimensions[i] = input_dims[shuffle[i]];
    }

    array<Index, NumDims> inputStrides;

    // Compute the strides of both the input and the output for the current layout.
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      inputStrides[0] = 1;
      m_outputStrides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        inputStrides[i] = inputStrides[i - 1] * input_dims[i - 1];
        m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1];
      }
    } else {
      inputStrides[NumDims - 1] = 1;
      m_outputStrides[NumDims - 1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        inputStrides[i] = inputStrides[i + 1] * input_dims[i + 1];
        m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1];
      }
    }

    // Permute the input strides so that m_inputStrides[i] is the stride of the
    // input dimension that output dimension i maps to.
    for (int i = 0; i < NumDims; ++i) {
      m_inputStrides[i] = inputStrides[shuffle[i]];
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    return m_impl.coeff(srcCoeff(index));
  }

  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());

    // The shuffled coefficients are generally not contiguous in memory, so the
    // packet is gathered one coefficient at a time into a local buffer.
    EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
    for (int i = 0; i < PacketSize; ++i) {
      values[i] = coeff(index+i);
    }
    PacketReturnType rslt = internal::pload<PacketReturnType>(values);
    return rslt;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    const double compute_cost = NumDims * (2 * TensorOpCost::AddCost<Index>() +
                                           2 * TensorOpCost::MulCost<Index>() +
                                           TensorOpCost::DivCost<Index>());
    return m_impl.costPerCoeff(vectorized) +
           TensorOpCost(0, 0, compute_cost, false /* vectorized */, PacketSize);
  }

  EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }

 protected:
  // Convert a linear index into the shuffled (output) tensor into the
  // corresponding linear index into the input tensor, using the permuted
  // strides computed in the constructor.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const {
    Index inputIndex = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx = index / m_outputStrides[i];
        inputIndex += idx * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      return inputIndex + index * m_inputStrides[0];
    } else {
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx = index / m_outputStrides[i];
        inputIndex += idx * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      return inputIndex + index * m_inputStrides[NumDims - 1];
    }
  }

  Dimensions m_dimensions;
  array<Index, NumDims> m_outputStrides;
  array<Index, NumDims> m_inputStrides;
  TensorEvaluator<ArgType, Device> m_impl;
};


// Eval as lvalue
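//
// This specialization makes a shuffled expression writable, so it can appear
// on the left-hand side of an assignment. A sketch (illustrative only, not
// part of the original header):
//
//   Eigen::Tensor<float, 2> a(4, 7);
//   a.setRandom();
//   Eigen::Tensor<float, 2> b(7, 4);
//   Eigen::array<int, 2> transpose{{1, 0}};
//   b.shuffle(transpose) = a;   // afterwards b(j, i) == a(i, j)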
template<typename Shuffle, typename ArgType, typename Device>
struct TensorEvaluator<TensorShufflingOp<Shuffle, ArgType>, Device>
    : public TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device>
{
  typedef TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device> Base;

  typedef TensorShufflingOp<Shuffle, ArgType> XprType;
  typedef typename XprType::Index Index;
  static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;

  enum {
    IsAligned = false,
    PacketAccess = (internal::packet_traits<Scalar>::size > 1),
    RawAccess = false
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : Base(op, device)
  { }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
  {
    return this->m_impl.coeffRef(this->srcCoeff(index));
  }

  template <int StoreMode> EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketReturnType& x)
  {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)

    // Store the packet into a local buffer, then scatter the coefficients one
    // by one since the destination locations are generally not contiguous.
    EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
    internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
    for (int i = 0; i < PacketSize; ++i) {
      this->coeffRef(index+i) = values[i];
    }
  }
};


} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_SHUFFLING_H