    Searched refs: gpu_device (Results 1 - 25 of 34)


  /external/tensorflow/tensorflow/core/common_runtime/gpu/
gpu_util.h 46 // 'gpu_tensor''s backing memory must be on 'gpu_device' and
49 static void CopyGPUTensorToCPU(Device* gpu_device,
55 // "gpu_device" at the time of the call have completed. Returns any
57 static Status Sync(Device* gpu_device);
62 static Status SyncAll(Device* gpu_device);
82 // on "gpu_device".
83 static uint64 Checksum(Device* gpu_device,
93 Device* gpu_device, Tensor* gpu_tensor,
106 // 'gpu_device' and 'dst_cpu_tensor' must be allocated to be of the same
108 static void CopyGPUTensorToSameGPU(Device* gpu_device,
    [all...]
gpu_util.cc 259 void GPUUtil::CopyGPUTensorToCPU(Device* gpu_device,
266 Status s = PrepareCopy(gpu_device, device_context, *gpu_tensor, cpu_tensor,
306 Device* gpu_device, Tensor* gpu_tensor,
311 Status s = PrepareCopy(gpu_device, device_context, *cpu_tensor, gpu_tensor,
349 Status GPUUtil::Sync(Device* gpu_device) {
351 auto* dev_info = gpu_device->tensorflow_gpu_device_info();
358 Status GPUUtil::SyncAll(Device* gpu_device) {
360 auto* dev_info = gpu_device->tensorflow_gpu_device_info();
397 uint64 GPUUtil::Checksum(Device* gpu_device,
403 CopyGPUTensorToCPU(gpu_device, device_context, &tensor, &copy
    [all...]
gpu_device.cc 22 #include "tensorflow/core/common_runtime/gpu/gpu_device.h"
1025 BaseGPUDevice* gpu_device = CreateGPUDevice( local
    [all...]
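
The gpu_util.h/gpu_util.cc hits above are TensorFlow's GPUUtil copy and sync helpers. Below is a minimal usage sketch; the parameters not visible in the truncated hits (device context, destination tensor, completion callback) are filled in from the TF 1.x API as I recall it and should be checked against the header, and the hypothetical helper CopyToHostAndWait only builds inside the TensorFlow source tree.

    #include "tensorflow/core/common_runtime/device.h"
    #include "tensorflow/core/common_runtime/gpu/gpu_util.h"
    #include "tensorflow/core/framework/device_base.h"
    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/lib/core/notification.h"

    namespace tensorflow {

    // Hypothetical helper: copy a GPU-resident tensor into a preallocated
    // host tensor, block until the copy's completion callback fires, then
    // wait for all work queued on the device so far.
    Status CopyToHostAndWait(Device* gpu_device, DeviceContext* device_context,
                             const Tensor* gpu_tensor, Tensor* cpu_tensor) {
      Notification done;
      Status copy_status;
      GPUUtil::CopyGPUTensorToCPU(gpu_device, device_context, gpu_tensor,
                                  cpu_tensor, [&](const Status& s) {
                                    copy_status = s;
                                    done.Notify();
                                  });
      done.WaitForNotification();
      if (!copy_status.ok()) return copy_status;
      return GPUUtil::Sync(gpu_device);
    }

    }  // namespace tensorflow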
  /external/tensorflow/tensorflow/core/kernels/
dynamic_stitch_op_gpu.cu.cc 52 void DynamicStitchGPUImpl(const Eigen::GpuDevice& gpu_device,
58 auto config = GetCudaLaunchConfig(output_size, gpu_device);
61 <<<config.block_count, config.thread_per_block, 0, gpu_device.stream()>>>(
67 const Eigen::GpuDevice& gpu_device, const int32 slice_size, \
concat_lib_gpu_impl.cu.cc 116 const Eigen::GpuDevice& gpu_device,
126 To32Bit(*output).slice(offset, size).device(gpu_device) =
129 output->slice(offset, size).device(gpu_device) = *inputs_flat[i];
137 void ConcatGPUImpl(const Eigen::GpuDevice& gpu_device,
143 output->dimension(0), gpu_device);
148 gpu_device.stream()>>>(input_ptrs, split_size, output->dimension(0),
151 IntType smem_max = gpu_device.sharedMemPerBlock();
161 gpu_device.stream()>>>(input_ptrs, output_scan,
167 gpu_device.stream()>>>(input_ptrs, output_scan,
175 const Eigen::GpuDevice& gpu_device, \
    [all...]
split_lib_gpu.cu.cc 208 void Run(const Eigen::GpuDevice& gpu_device, bool fixed_size,
214 GetCudaLaunchConfig(total_rows * total_cols, gpu_device);
217 gpu_device.stream()>>>(
220 auto config = GetCuda2DLaunchConfig(total_cols, total_rows, gpu_device);
221 IntType smem_max = gpu_device.sharedMemPerBlock();
230 gpu_device.stream()>>>(input_ptr, output_scan, total_rows,
235 gpu_device.stream()>>>(input_ptr, output_scan, total_rows,
concat_lib_gpu.cc 35 const Eigen::GpuDevice& gpu_device,
dynamic_stitch_op.cc 139 void DynamicStitchGPUImpl(const Eigen::GpuDevice& gpu_device,
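
The .cu.cc hits above all follow the same launch pattern: compute a launch configuration for the element count, then launch the kernel on the Eigen GpuDevice's stream, optionally sizing shared memory against sharedMemPerBlock(). The standalone CUDA C++ sketch below shows that pattern; LaunchConfig, GetLaunchConfig, and ScaleKernel are simplified stand-ins of my own, not TensorFlow's GetCudaLaunchConfig or its kernels.

    #include <algorithm>
    #include <cuda_runtime.h>

    // Simplified stand-in for TF's CudaLaunchConfig/GetCudaLaunchConfig:
    // pick a thread count and cap the grid, relying on a grid-stride loop.
    struct LaunchConfig {
      int block_count;
      int thread_per_block;
    };

    LaunchConfig GetLaunchConfig(int work_element_count) {
      LaunchConfig config;
      config.thread_per_block = 256;
      config.block_count =
          std::min(1024, (work_element_count + config.thread_per_block - 1) /
                             config.thread_per_block);
      return config;
    }

    __global__ void ScaleKernel(const float* in, float* out, int n, float alpha) {
      for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
           i += gridDim.x * blockDim.x) {
        out[i] = alpha * in[i];
      }
    }

    // Mirrors the launches in the hits:
    // kernel<<<config.block_count, config.thread_per_block, 0, stream>>>(...).
    void ScaleOnStream(const float* d_in, float* d_out, int n, float alpha,
                       cudaStream_t stream) {
      LaunchConfig config = GetLaunchConfig(n);
      ScaleKernel<<<config.block_count, config.thread_per_block, 0, stream>>>(
          d_in, d_out, n, alpha);
    }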
  /external/tensorflow/tensorflow/core/grappler/costs/
analytical_cost_estimator_test.cc 39 DeviceProperties gpu_device; variable
40 gpu_device.set_type("GPU");
41 gpu_device.set_num_cores(12);
42 gpu_device.set_frequency(1100);
43 gpu_device.set_bandwidth(180 * 1024 * 1024);
44 (*gpu_device.mutable_environment())["architecture"] = "6";
45 devices["/job:localhost/replica:0/task:0/device:GPU:0"] = gpu_device;
virtual_placer_test.cc 32 DeviceProperties gpu_device; local
33 gpu_device.set_type("GPU");
34 devices["/job:localhost/replica:0/task:0/device:GPU:0"] = gpu_device;
62 DeviceProperties gpu_device; local
63 gpu_device.set_type("GPU");
64 devices["/GPU:0"] = gpu_device;
121 DeviceProperties gpu_device; local
122 gpu_device.set_type("GPU");
124 "/replica:0/task:0/device:GPU:0")] = gpu_device;
178 DeviceProperties gpu_device; local
195 DeviceProperties gpu_device; local
290 DeviceProperties gpu_device; local
    [all...]
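
The grappler test hits above (and memory_optimizer_test.cc further down) all build a DeviceProperties proto describing a GPU and register it under a fully qualified device name. A self-contained reconstruction of that setup follows; the generated-header include path and the unit comments reflect my reading of device_properties.proto and are worth verifying.

    #include <map>
    #include <string>

    #include "tensorflow/core/protobuf/device_properties.pb.h"

    // Build the device map the grappler tests hand to their virtual cluster.
    std::map<std::string, tensorflow::DeviceProperties> MakeGpuDeviceMap() {
      tensorflow::DeviceProperties gpu_device;
      gpu_device.set_type("GPU");
      gpu_device.set_num_cores(12);
      gpu_device.set_frequency(1100);               // clock, MHz
      gpu_device.set_bandwidth(180 * 1024 * 1024);  // memory bandwidth, KB/s
      // Compute capability major version, as in the test hits above.
      (*gpu_device.mutable_environment())["architecture"] = "6";

      std::map<std::string, tensorflow::DeviceProperties> devices;
      devices["/job:localhost/replica:0/task:0/device:GPU:0"] = gpu_device;
      return devices;
    }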
  /system/sepolicy/prebuilts/api/26.0/private/
adbd.te 71 allow adbd gpu_device:chr_file rw_file_perms;
ephemeral_app.te 52 # execute gpu_device
53 neverallow ephemeral_app gpu_device:chr_file execute;
isolated_app.te 76 neverallow isolated_app gpu_device:chr_file { rw_file_perms execute };
system_server.te 283 allow system_server gpu_device:chr_file rw_file_perms;
  /system/sepolicy/prebuilts/api/27.0/private/
adbd.te 73 allow adbd gpu_device:chr_file rw_file_perms;
ephemeral_app.te 54 # execute gpu_device
55 neverallow ephemeral_app gpu_device:chr_file execute;
isolated_app.te 91 neverallow isolated_app gpu_device:chr_file { rw_file_perms execute };
  /system/sepolicy/prebuilts/api/28.0/private/
adbd.te 78 allow adbd gpu_device:chr_file rw_file_perms;
ephemeral_app.te 65 # execute gpu_device
66 neverallow ephemeral_app gpu_device:chr_file execute;
isolated_app.te 94 neverallow isolated_app gpu_device:chr_file { rw_file_perms execute };
  /system/sepolicy/private/
adbd.te 78 allow adbd gpu_device:chr_file rw_file_perms;
ephemeral_app.te 65 # execute gpu_device
66 neverallow ephemeral_app gpu_device:chr_file execute;
isolated_app.te 94 neverallow isolated_app gpu_device:chr_file { rw_file_perms execute };
  /external/tensorflow/tensorflow/core/grappler/optimizers/
memory_optimizer_test.cc 206 DeviceProperties gpu_device; local
207 gpu_device.set_type("GPU");
208 gpu_device.set_frequency(1000);
209 gpu_device.set_num_cores(24);
210 gpu_device.set_bandwidth(128);
211 gpu_device.set_memory_size(1024 * 1024);
212 gpu_device.mutable_environment()->insert({"architecture", "6"});
215 devices["/job:localhost/replica:0/task:0/gpu:0"] = gpu_device;
  /external/tensorflow/tensorflow/core/graph/
graph_partition_test.cc 60 const char gpu_device[] = "/job:a/replica:0/task:0/device:GPU:0"; member in namespace:tensorflow::__anon39622
67 return gpu_device;
