    Searched refs:cuda (Results 26 - 50 of 61)


  /external/tensorflow/tensorflow/tools/ci_build/linux/gpu/
run_cc_core.sh 37 bazel test --config=cuda --test_tag_filters=-no_oss,-oss_serial,-no_gpu,-benchmark-test -k \
run_mkl.sh 41 bazel test --config=cuda --test_tag_filters=-no_oss,-oss_serial,-no_gpu,-benchmark-test \
run_py3_core.sh 37 bazel test --config=cuda --test_tag_filters=-no_oss,-oss_serial,-no_gpu,-benchmark-test -k \
  /external/tensorflow/tensorflow/tools/ci_build/linux/ppc64le/gpu/
run_py2.sh 39 bazel test --config=cuda --test_tag_filters=-no_oss,-oss_serial,-no_gpu,-benchmark-test -k \
run_py3.sh 39 bazel test --config=cuda --test_tag_filters=-no_oss,-oss_serial,-no_gpu,-benchmark-test -k \
  /external/tensorflow/tensorflow/tools/ci_build/xla/linux/gpu/
run_py3.sh 38 bazel test --config=cuda --test_tag_filters=-no_gpu,-benchmark-test,-no_oss -k \
  /external/tensorflow/tensorflow/core/common_runtime/gpu/
gpu_cudamalloc_allocator.cc 17 #include "cuda/include/cuda.h"
18 #include "tensorflow/stream_executor/cuda/cuda_activation.h"
42 se::cuda::ScopedActivateExecutorContext scoped_activation{stream_exec_};
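
The gpu_cudamalloc_allocator.cc hits above wrap a CUDA allocation in se::cuda::ScopedActivateExecutorContext, an RAII guard that makes the executor's CUDA context current for the enclosing scope. A minimal stand-in for that idiom, sketched directly against the CUDA driver API (the class and function names here are hypothetical, not TensorFlow's implementation):

    #include <cstddef>
    #include <cuda.h>  // CUDA driver API

    // Push a context on construction, pop it on destruction, so every
    // driver call in the scope runs against the intended device context.
    class ScopedContext {
     public:
      explicit ScopedContext(CUcontext ctx) { cuCtxPushCurrent(ctx); }
      ~ScopedContext() {
        CUcontext popped;
        cuCtxPopCurrent(&popped);
      }
      ScopedContext(const ScopedContext&) = delete;
      ScopedContext& operator=(const ScopedContext&) = delete;
    };

    void AllocateOnDevice(CUcontext ctx, CUdeviceptr* ptr, size_t bytes) {
      ScopedContext activation(ctx);  // context active for this scope only
      cuMemAlloc(ptr, bytes);         // allocation lands in that context
    }                                 // context popped automatically here
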
  /external/tensorflow/tools/
tf_env_collect.sh 108 echo '== cuda libs ==================================================='
111 find /usr/local -type f -name 'libcudart*' 2>/dev/null | grep cuda | grep -v "\\.cache" >> ${OUTPUT_FILE}
112 find /usr/local -type f -name 'libcudnn*' 2>/dev/null | grep cuda | grep -v "\\.cache" >> ${OUTPUT_FILE}
  /external/libcxx/utils/docker/
build_docker_image.sh 24 -s|--source image source dir (i.e. debian8, nvidia-cuda, etc)
  /external/skia/src/compute/hs/cuda/bench/
Makefile 6 ../../../common/cuda/assert_cuda.c \
  /external/skqp/src/compute/hs/cuda/bench/
Makefile 6 ../../../common/cuda/assert_cuda.c \
  /external/tensorflow/tensorflow/tools/build_info/
gen_build_info.py 28 "cuda").
38 if build_config == "cuda":
86 help="Either 'cuda' for GPU builds or 'cpu' for CPU builds.")
  /external/tensorflow/tensorflow/compiler/xla/service/gpu/
gpu_transfer_manager.cc 183 /*id=*/stream_executor::cuda::kCudaPlatformId,
190 stream_executor::cuda::kCudaPlatformId, &CreateNVPTXTransferManager);
nvptx_compiler.cc 107 #include "tensorflow/stream_executor/cuda/cuda_diagnostics.h"
112 /* static */ const char* NVPTXCompiler::kTargetTriple = "nvptx64-nvidia-cuda";
120 // Returns a vector of potential locations of the CUDA root directory.
128 // CUDA location explicitly specified by user via --xla_gpu_cuda_data_dir has
149 "variable XLA_FLAGS=--xla_gpu_cuda_data_dir=/path/to/cuda will work.";
165 "Can't find directory containing CUDA libevice. This may result in "
484 "fail.\n\nYou do not need to update CUDA; cherry-picking the ptxas "
492 "invalid-address errors.\n\nYou do not need to update to CUDA "
507 auto version_or_status = se::cuda::Diagnostician::FindKernelDriverVersion();
509 LOG(WARNING) << "Couldn't read CUDA driver version."
    [all...]
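
The nvptx_compiler.cc hits concern locating a CUDA toolkit root (for libdevice and ptxas): a directory given via --xla_gpu_cuda_data_dir is honored first, with common install paths as fallbacks. A simplified sketch of that candidate-directory idea (hypothetical function name; the actual list in nvptx_compiler.cc differs):

    #include <string>
    #include <vector>

    // Order matters: an explicitly configured directory wins, then the
    // conventional system install location, then a relative fallback.
    std::vector<std::string> CandidateCudaRoots(const std::string& flag_dir) {
      std::vector<std::string> roots;
      if (!flag_dir.empty()) roots.push_back(flag_dir);  // --xla_gpu_cuda_data_dir
      roots.push_back("/usr/local/cuda");                // common default install
      roots.push_back(".");                              // last-resort relative path
      return roots;
    }
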
nccl_all_reduce_thunk.cc 25 #include "tensorflow/stream_executor/cuda/cuda_activation.h"
285 se::cuda::ScopedActivateExecutorContext scoped_context(executor);
338 "NCCL support is not available: this binary was not built with a CUDA "
  /external/tensorflow/tensorflow/stream_executor/cuda/
cuda_fft.cc 16 #include "tensorflow/stream_executor/cuda/cuda_fft.h"
20 #include "tensorflow/stream_executor/cuda/cuda_activation.h"
21 #include "tensorflow/stream_executor/cuda/cuda_gpu_executor.h"
22 #include "tensorflow/stream_executor/cuda/cuda_helpers.h"
23 #include "tensorflow/stream_executor/cuda/cuda_platform_id.h"
24 #include "tensorflow/stream_executor/cuda/cuda_stream.h"
65 cuda::ScopedActivateExecutorContext sac(parent);
85 cuda::ScopedActivateExecutorContext sac(parent);
255 cuda::ScopedActivateExecutorContext sac(parent_);
266 cuda::ScopedActivateExecutorContext sac(parent_);
    [all...]
cuda_gpu_executor.cc 16 #include "tensorflow/stream_executor/cuda/cuda_gpu_executor.h"
29 #include "tensorflow/stream_executor/cuda/cuda_diagnostics.h"
30 #include "tensorflow/stream_executor/cuda/cuda_driver.h"
31 #include "tensorflow/stream_executor/cuda/cuda_event.h"
32 #include "tensorflow/stream_executor/cuda/cuda_platform_id.h"
33 #include "tensorflow/stream_executor/cuda/cuda_stream.h"
34 #include "tensorflow/stream_executor/cuda/cuda_timer.h"
67 "CUDA runtime being included into CUDA GPU executor; should be driver only."
91 // Given a platform-independent timer datatype, returns the internal CUDA
    [all...]
cuda_platform.cc 16 #include "tensorflow/stream_executor/cuda/cuda_platform.h"
18 #include "tensorflow/stream_executor/cuda/cuda_driver.h"
19 #include "tensorflow/stream_executor/cuda/cuda_gpu_executor.h"
20 #include "tensorflow/stream_executor/cuda/cuda_platform_id.h"
67 : name_("CUDA"), min_numa_node_(0), limit_numa_node_(0) {}
132 Platform::Id CudaPlatform::id() const { return cuda::kCudaPlatformId; }
178 "failed initializing StreamExecutor for CUDA device ordinal %d: %s",
187 LOG(FATAL) << "not yet implemented: register CUDA trace listener";
191 LOG(FATAL) << "not yet implemented: unregister CUDA trace listener";
  /external/skia/src/compute/hs/cuda/
hs_cuda.inl 17 #include "common/cuda/assert_cuda.h"
104 // NOTE: CUDA streams are in-order so a dependency isn't required for
502 cuda(EventCreate(&event_before));
504 cuda(EventRecord(event_before,from));
506 cuda(StreamWaitEvent(to,event_before,0));
508 cuda(EventDestroy(event_before));
567 cuda(StreamSynchronize(s)); \
842 cuda(MemsetAsync(state->vout + count_lo,
862 cuda(MemsetAsync(state->vin + count,
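
The hs_cuda.inl hits rely on the fact noted in the file: work within one CUDA stream executes in issue order, so explicit synchronization is only needed across streams, which the EventCreate / EventRecord / StreamWaitEvent / EventDestroy sequence provides (cuda(...) is the file's assert/error-check macro). Spelled out with plain runtime-API calls, the cross-stream dependency pattern looks roughly like this (hypothetical function name):

    #include <cuda_runtime.h>

    // Make everything already enqueued on `from` a prerequisite for any
    // work submitted to `to` after this call returns.
    void MakeStreamDependency(cudaStream_t from, cudaStream_t to) {
      cudaEvent_t event_before;
      cudaEventCreate(&event_before);            // lightweight sync marker
      cudaEventRecord(event_before, from);       // capture current tail of `from`
      cudaStreamWaitEvent(to, event_before, 0);  // `to` waits for that point
      cudaEventDestroy(event_before);            // safe: the wait is already enqueued
    }
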
  /external/skqp/src/compute/hs/cuda/
hs_cuda.inl 17 #include "common/cuda/assert_cuda.h"
104 // NOTE: CUDA streams are in-order so a dependency isn't required for
502 cuda(EventCreate(&event_before));
504 cuda(EventRecord(event_before,from));
506 cuda(StreamWaitEvent(to,event_before,0));
508 cuda(EventDestroy(event_before));
567 cuda(StreamSynchronize(s)); \
842 cuda(MemsetAsync(state->vout + count_lo,
862 cuda(MemsetAsync(state->vin + count,
  /external/eigen/test/
cuda_common.h 5 #include <cuda.h>
88 std::cout << "CUDA device info:\n";
  /external/swiftshader/third_party/llvm-7.0/llvm/utils/docker/
build_docker_image.sh 27 -s|--source image source dir (i.e. debian8, nvidia-cuda, etc)
  /external/tensorflow/tensorflow/compiler/xla/service/
computation_placer.cc 166 stream_executor::cuda::kCudaPlatformId, &CreateComputationPlacer);
platform_util.cc 36 // Minimum supported CUDA compute capability is 3.5.
51 // "gpu" and "cuda" mean the same thing.
53 platform_str = "cuda";
190 if (executor->platform()->id() == se::cuda::kCudaPlatformId) {
191 // CUDA devices must have a minimum compute capability.
197 LOG(INFO) << "StreamExecutor cuda device ("
235 // the device, for example for GPUs cuda context, cudnn handles etc. will
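
The platform_util.cc hits state that the minimum supported CUDA compute capability is 3.5 and that devices below it are skipped. A plain CUDA-runtime sketch of such a gate, outside StreamExecutor (hypothetical function name):

    #include <cuda_runtime.h>

    // Returns true if the device's compute capability is at least 3.5.
    bool MeetsMinimumComputeCapability(int device_ordinal) {
      cudaDeviceProp prop;
      if (cudaGetDeviceProperties(&prop, device_ordinal) != cudaSuccess) {
        return false;  // treat a failed query as unsupported
      }
      return prop.major > 3 || (prop.major == 3 && prop.minor >= 5);
    }
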
  /external/tensorflow/tensorflow/core/kernels/
check_numerics_op.cc 31 #include "tensorflow/core/platform/cuda.h"
152 // Call the Cuda kernels for the numerical checks
181 se::cuda::ScopedActivateExecutorContext scoped_activation{

Completed in 1755 milliseconds
