/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifdef GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "tensorflow/core/common_runtime/gpu/gpu_managed_allocator.h"
#endif

#include "tensorflow/core/kernels/ops_testutil.h"

namespace tensorflow {

void OpsTestBase::SetDevice(const DeviceType& device_type,
                            std::unique_ptr<Device> device) {
  // Validate the incoming device, not the member it is about to replace.
  CHECK(device.get()) << "No device provided";
  device_type_ = device_type;
  device_ = std::move(device);
#ifdef GOOGLE_CUDA
  if (device_type == DEVICE_GPU) {
    // Use CUDA unified (managed) memory so host-side test code can inspect
    // tensors produced on the GPU without explicit device-to-host copies.
    managed_allocator_.reset(new GpuManagedAllocator());
    allocator_ = managed_allocator_.get();
  } else {
    managed_allocator_.reset();
    allocator_ = device_->GetAllocator(AllocatorAttributes());
  }
#else
  CHECK_NE(device_type, DEVICE_GPU)
      << "Requesting GPU on a binary compiled without GOOGLE_CUDA.";
#endif
}
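
// A minimal usage sketch (illustrative, not part of this file's API): how a
// test might move the fixture onto a GPU device. Assumes a CUDA-enabled build;
// the device name string is hypothetical.
//
//   std::unique_ptr<Device> gpu_device(DeviceFactory::NewDevice(
//       "GPU", SessionOptions(), "/job:localhost/replica:0/task:0"));
//   SetDevice(DEVICE_GPU, std::move(gpu_device));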

Tensor* OpsTestBase::GetOutput(int output_index) {
  CHECK_GE(output_index, 0);
  CHECK_LT(output_index, context_->num_outputs());
  Tensor* output = context_->mutable_output(output_index);
#ifdef GOOGLE_CUDA
  if (device_type_ == DEVICE_GPU) {
    managed_outputs_.resize(context_->num_outputs());
    // Copy the output tensor to managed memory if we haven't done so yet;
    // subsequent calls return the cached copy.
    if (!managed_outputs_[output_index]) {
      Tensor* managed_output =
          new Tensor(allocator(), output->dtype(), output->shape());
      auto src = output->tensor_data();
      auto dst = managed_output->tensor_data();
      // Device-to-managed copy; synchronize so the data is visible to the
      // host before the caller dereferences it.
      context_->eigen_gpu_device().memcpy(const_cast<char*>(dst.data()),
                                          src.data(), src.size());
      context_->eigen_gpu_device().synchronize();
      managed_outputs_[output_index] = managed_output;
    }
    output = managed_outputs_[output_index];
  }
#endif
  return output;
}
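
// A hedged end-to-end sketch of how OpsTestBase is typically driven. The op
// name "MyOp" and its doubling behavior are hypothetical; NodeDefBuilder,
// FakeInput, AddInputFromArray, RunOpKernel, GetOutput, test::FillValues, and
// test::ExpectTensorEqual are the standard test-utility entry points.
//
//   class MyOpTest : public OpsTestBase {};
//
//   TEST_F(MyOpTest, DoublesInput) {
//     TF_ASSERT_OK(NodeDefBuilder("my_op", "MyOp")
//                      .Input(FakeInput(DT_FLOAT))
//                      .Finalize(node_def()));
//     TF_ASSERT_OK(InitOp());
//     AddInputFromArray<float>(TensorShape({2}), {1.0f, 2.0f});
//     TF_ASSERT_OK(RunOpKernel());
//     Tensor expected(allocator(), DT_FLOAT, TensorShape({2}));
//     test::FillValues<float>(&expected, {2.0f, 4.0f});
//     test::ExpectTensorEqual<float>(expected, *GetOutput(0));
//   }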

}  // namespace tensorflow