/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_CORE_KERNELS_EIGEN_ACTIVATIONS_H_
#define TENSORFLOW_CORE_KERNELS_EIGEN_ACTIVATIONS_H_

#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"

namespace Eigen {

/** scalar_sigmoid_fast_derivative_op
 * \ingroup CXX11_NeuralNetworks_Module
 * \brief Template functor to compute the fast derivative of a sigmoid
 *
 * Input should be the already-computed sigmoid value y = sigmoid(x); the
 * functor returns the local derivative y * (1 - y), which the caller then
 * multiplies with the backpropagated gradient.
 *
 * \sa class CwiseUnaryOp, Cwise::sigmoid_fast_derivative()
 */
template <typename T>
struct scalar_sigmoid_fast_derivative_op {
  EIGEN_EMPTY_STRUCT_CTOR(scalar_sigmoid_fast_derivative_op)
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& y) const {
    const T one = T(1);
    return (one - y) * y;
  }

  template <typename Packet>
  inline Packet packetOp(const Packet& y) const {
    const Packet one = internal::pset1<Packet>(1);
    return internal::pmul(internal::psub(one, y), y);
  }
};

namespace internal {
template <typename T>
struct functor_traits<scalar_sigmoid_fast_derivative_op<T> > {
  enum {
    Cost = NumTraits<T>::AddCost * 2 + NumTraits<T>::MulCost,
    PacketAccess = packet_traits<T>::HasAdd && packet_traits<T>::HasMul &&
                   packet_traits<T>::HasNegate
  };
};
}  // namespace internal

/** scalar_tanh_fast_derivative_op
 * \ingroup CXX11_NeuralNetworks_Module
 * \brief Template functor to compute the fast derivative of a tanh
 *
 * Input should be the already-computed tanh value y = tanh(x); the functor
 * returns the local derivative 1 - y * y, which the caller then multiplies
 * with the backpropagated gradient.
 *
 * \sa class CwiseUnaryOp, Cwise::tanh_fast_derivative()
 */
template <typename T>
struct scalar_tanh_fast_derivative_op {
  EIGEN_EMPTY_STRUCT_CTOR(scalar_tanh_fast_derivative_op)
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& y) const {
    const T one = T(1);
    return one - (y * y);
  }

  template <typename Packet>
  inline Packet packetOp(const Packet& y) const {
    const Packet one = internal::pset1<Packet>(1);
    return internal::psub(one, internal::pmul(y, y));
  }
};

namespace internal {
template <typename T>
struct functor_traits<scalar_tanh_fast_derivative_op<T> > {
  enum {
    Cost = NumTraits<T>::AddCost * 2 + NumTraits<T>::MulCost,
    PacketAccess = packet_traits<T>::HasAdd && packet_traits<T>::HasMul &&
                   packet_traits<T>::HasNegate
  };
};
}  // namespace internal
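// Usage sketch (illustrative only, not part of the original header): both
// derivative functors are applied coefficient-wise via unaryExpr() to the
// activation value computed in the forward pass. The tensor `x` and its
// shape below are assumptions made for the example.
//
//   Eigen::Tensor<float, 2> x(4, 8);
//   x.setRandom();
//   Eigen::Tensor<float, 2> y = x.sigmoid();    // forward activation
//   Eigen::Tensor<float, 2> dy = y.unaryExpr(   // y * (1 - y)
//       Eigen::scalar_sigmoid_fast_derivative_op<float>());
//   Eigen::Tensor<float, 2> t = x.tanh();
//   Eigen::Tensor<float, 2> dt = t.unaryExpr(   // 1 - t * t
//       Eigen::scalar_tanh_fast_derivative_op<float>());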
/**
 * \ingroup CXX11_NeuralNetworks_Module
 * \brief Template functor to clip the magnitude of the first scalar.
 *
 * \sa class CwiseBinaryOp, MatrixBase::Clip
 */
template <typename Scalar>
struct scalar_clip_op {
  EIGEN_EMPTY_STRUCT_CTOR(scalar_clip_op)
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar
  operator()(const Scalar& a, const Scalar& b) const {
    return numext::mini(numext::maxi(a, -b), b);
  }
  template <typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet
  packetOp(const Packet& a, const Packet& b) const {
    return internal::pmin(internal::pmax(a, internal::pnegate(b)), b);
  }
};

namespace internal {
template <typename Scalar>
struct functor_traits<scalar_clip_op<Scalar> > {
  enum {
    Cost = NumTraits<Scalar>::AddCost * 3,
    PacketAccess = packet_traits<Scalar>::HasMax &&
                   packet_traits<Scalar>::HasMin &&
                   packet_traits<Scalar>::HasNegate
  };
};
}  // namespace internal

}  // end namespace Eigen

#endif  // TENSORFLOW_CORE_KERNELS_EIGEN_ACTIVATIONS_H_
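// Usage sketch for scalar_clip_op (illustrative only, not part of the
// original header): the functor clips each coefficient of its first
// argument to the symmetric range [-b, b] defined by the second argument,
// applied coefficient-wise via binaryExpr(). The tensors `grad` and
// `threshold` below are assumptions made for the example.
//
//   Eigen::Tensor<float, 1> grad(16);
//   grad.setRandom();
//   Eigen::Tensor<float, 1> threshold(16);
//   threshold.setConstant(0.5f);
//   Eigen::Tensor<float, 1> clipped =
//       grad.binaryExpr(threshold, Eigen::scalar_clip_op<float>());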