/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_KERNELS_SNAPSHOT_OP_H_
#define TENSORFLOW_KERNELS_SNAPSHOT_OP_H_

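// EIGEN_USE_GPU (under CUDA builds) and EIGEN_USE_THREADS must be defined
// before the Eigen Tensor header included below so that the GPU and
// thread-pool device implementations are available to this kernel.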
#if GOOGLE_CUDA
#define EIGEN_USE_GPU
#endif

#define EIGEN_USE_THREADS

#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"

namespace tensorflow {

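// Forwards the input tensor to the output. When this kernel holds the only
// reference to the input buffer, the buffer is forwarded directly and no data
// is moved; otherwise the input is copied into a freshly allocated output.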
template <typename Device, typename Scalar>
class SnapshotOp : public OpKernel {
 public:
  explicit SnapshotOp(OpKernelConstruction* context) : OpKernel(context) {}

  void Compute(OpKernelContext* context) override {
    const Tensor& input = context->input(0);
    Tensor* output = nullptr;
    // Try to use buffer forwarding to avoid an explicit copy.
    OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
                                {0}, 0, input.shape(), &output));
    if (!output->SharesBufferWith(input)) {
      // We had to allocate a new buffer since the refcount on the input was
      // greater than 1. Copy the input to the new buffer.
      const Device& device = context->eigen_device<Device>();
      device.memcpy(output->template flat<Scalar>().data(),
                    input.template flat<Scalar>().data(),
                    input.NumElements() * sizeof(Scalar));
    }
  }
};
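
// Sketch (not part of this header): in the corresponding snapshot_op.cc the
// kernel template above is typically instantiated and registered per device
// and dtype via REGISTER_KERNEL_BUILDER; the arguments below are illustrative
// only, e.g.:
//
//   REGISTER_KERNEL_BUILDER(
//       Name("Snapshot").Device(DEVICE_CPU).TypeConstraint<float>("T"),
//       SnapshotOp<Eigen::ThreadPoolDevice, float>);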

}  // namespace tensorflow

#endif  // TENSORFLOW_KERNELS_SNAPSHOT_OP_H_